Skip to content

Commit

Permalink
Merge pull request #213 from lilab-bcb/compat
Browse files Browse the repository at this point in the history
Updated for latest anndata. Updated pre-commit
  • Loading branch information
joshua-gould authored Sep 17, 2024
2 parents 2c00357 + d620c40 commit 11cc154
Show file tree
Hide file tree
Showing 11 changed files with 56 additions and 420 deletions.
10 changes: 5 additions & 5 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.6.0
hooks:
# - id: double-quote-string-fixer # for single quotes: uncomment and add black config "skip-string-normalization"
- id: trailing-whitespace
Expand All @@ -11,19 +11,19 @@ repos:
- id: docformatter
args: ["--in-place", "--wrap-summaries=100", "--wrap-descriptions=100", "--config=./pyproject.toml"]
- repo: https://github.com/psf/black
rev: 23.9.1
rev: 24.8.0
hooks:
- id: black
- repo: https://github.com/pycqa/isort
rev: 5.12.0
rev: 5.13.2
hooks:
- id: isort
- repo: https://github.com/csachs/pyproject-flake8
rev: v6.1.0
rev: v7.0.0
hooks:
- id: pyproject-flake8
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.3
rev: v4.0.0-alpha.8
hooks:
- id: prettier
types_or: [css, javascript]
6 changes: 3 additions & 3 deletions cirrocumulus/abstract_backed_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@
import pandas as pd
import scipy.sparse
from anndata import AnnData
from anndata._core.sparse_dataset import sparse_dataset

from cirrocumulus.abstract_dataset import AbstractDataset
from cirrocumulus.anndata_util import ADATA_LAYERS_UNS_KEY, ADATA_MODULE_UNS_KEY
from cirrocumulus.sparse_dataset import SparseDataset


# string_dtype = h5py.check_string_dtype(dataset.dtype)
Expand Down Expand Up @@ -73,8 +73,8 @@ def get_X(self, var_ids, keys, node):
get_item = var_ids.get_indexer_for(keys)

if self.is_group(node):
sparse_dataset = SparseDataset(node) # sparse
X = sparse_dataset[:, get_item]
ds = sparse_dataset(node) # sparse
X = ds[:, get_item]
else: # dense
X = self.slice_dense_array(node, get_item)
var = pd.DataFrame(index=keys)
Expand Down
8 changes: 5 additions & 3 deletions cirrocumulus/concat.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,9 +148,11 @@ def concat_spatial(paths: list[str], output_path: str, ncols: int = 2):

def create_parser(description=False):
parser = argparse.ArgumentParser(
description="Concatenate datasets in a grid layout. If all the datasets are spatial datasets, then tissue images are concatenated."
if description
else None
description=(
"Concatenate datasets in a grid layout. If all the datasets are spatial datasets, then tissue images are concatenated."
if description
else None
)
)
parser.add_argument(
"dataset",
Expand Down
16 changes: 10 additions & 6 deletions cirrocumulus/diff_exp.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,11 +137,15 @@ def __init__(
scores=scores,
pvals=pvals,
logfoldchanges=foldchanges,
frac_expressed1=frac_expressed_df.loc[group_one].values
if frac_expressed_df is not None
else None,
frac_expressed2=frac_expressed_df.loc[group_two].values
if frac_expressed_df is not None
else None,
frac_expressed1=(
frac_expressed_df.loc[group_one].values
if frac_expressed_df is not None
else None
),
frac_expressed2=(
frac_expressed_df.loc[group_two].values
if frac_expressed_df is not None
else None
),
)
self.pair2results = pair2results
6 changes: 4 additions & 2 deletions cirrocumulus/local_db_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,10 @@ def __init__(self, paths: list[str]):
if url.lower().endswith(".json.gz") or url.lower().endswith(".json"):
import gzip

with gzip.open(fs.open(url)) if url.lower().endswith(".json.gz") else fs.open(
url
with (
gzip.open(fs.open(url))
if url.lower().endswith(".json.gz")
else fs.open(url)
) as f:
d = json.load(f)
if "id" in d:
Expand Down
Loading

0 comments on commit 11cc154

Please sign in to comment.