Merge pull request #105 from Parietal-INRIA/maint/add-ruff
Use ruff for linting and formatting
pbarbarant authored Nov 28, 2024
2 parents 131367e + 406e9bd commit 024884a
Showing 30 changed files with 370 additions and 206 deletions.
15 changes: 0 additions & 15 deletions .flake8

This file was deleted.
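Deleting .flake8 removes the old flake8 configuration; with ruff the equivalent settings normally live in pyproject.toml under a [tool.ruff] table. That file's diff is not shown on this page, so its exact contents are an assumption here. Assuming such a table exists, one way to inspect which settings ruff actually resolves for a given file is:

    ruff check --show-settings fmralign/_utils.py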

5 changes: 2 additions & 3 deletions .github/workflows/testing.yml
@@ -26,9 +26,8 @@ jobs:
run: |
python -m pip install --upgrade pip
pip install -e .[dev]
- name: Flake8
run: |
flake8 --verbose .
- name: Run Ruff
run: ruff check --output-format=github .
- name: Test with pytest
run: |
pytest
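The CI step now invokes ruff directly. The same check can be reproduced locally from the repository root before pushing (a usage sketch, not part of this commit; the --output-format=github flag only changes how violations are annotated, so a plain `ruff check .` is equivalent for local use):

    pip install -e .[dev]
    ruff check --output-format=github .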
29 changes: 18 additions & 11 deletions .pre-commit-config.yaml
@@ -14,19 +14,26 @@ repos:
- id: trailing-whitespace
- id: check-toml

- repo: https://github.com/pycqa/isort
rev: 5.12.0
# Checks for .rst files
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
hooks:
- id: isort
args: ["--profile", "black"]
- id: rst-directive-colons
- id: rst-inline-touching-normal

- repo: https://github.com/psf/black
rev: 23.7.0
# Format TOML files
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
rev: v2.14.0
hooks:
- id: black
- id: pretty-format-toml
args: [--autofix, --indent, '4']

- repo: https://github.com/pyCQA/flake8
rev: 6.1.0
# Lint and format with Ruff
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.0
hooks:
- id: flake8
additional_dependencies: [flake8-docstrings, flake8-use-fstring]
# Run the linter.
- id: ruff
args: ["check", "--select", "I", "--fix", "--show-fixes"]
# Run the formatter.
- id: ruff-format
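With ruff wired into pre-commit, contributors get the same linting (including import sorting via --select I) and formatting on every commit. A minimal local workflow, assuming pre-commit is not yet installed, would be:

    pip install pre-commit
    pre-commit install          # install the git hook once
    pre-commit run --all-files  # lint and format the entire tree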
1 change: 0 additions & 1 deletion doc/conf.py
@@ -12,7 +12,6 @@
import sys
from pathlib import Path

import sphinx

import fmralign

22 changes: 14 additions & 8 deletions doc/sphinxext/github_link.py
@@ -23,12 +23,14 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision):
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='https://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
>>> _linkcode_resolve(
... "py",
... {"module": "tty", "fullname": "setraw"},
... package="tty",
... url_fmt="https://hg.python.org/cpython/file/"
... "{revision}/Lib/{package}/{path}#L{lineno}",
... revision="xxxx",
... )
'https://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""

@@ -59,12 +61,16 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision):
if not fn:
return

fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
fn = os.path.relpath(
fn, start=os.path.dirname(__import__(package).__file__)
)
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
return url_fmt.format(
revision=revision, package=package, path=fn, lineno=lineno
)


def make_linkcode_resolve(package, url_fmt):
4 changes: 3 additions & 1 deletion examples/plot_alignment_methods_benchmark.py
@@ -44,7 +44,9 @@
# Select visual cortex, create a mask and resample it to the right resolution

mask_visual = new_img_like(atlas, atlas.get_fdata() == 1)
resampled_mask_visual = resample_to_img(mask_visual, mask, interpolation="nearest")
resampled_mask_visual = resample_to_img(
mask_visual, mask, interpolation="nearest"
)

# Plot the mask we will use
plotting.plot_roi(
4 changes: 3 additions & 1 deletion examples/plot_alignment_simulated_2D_data.py
@@ -146,7 +146,9 @@ def _plot_distributions_and_alignment(
# We make a target distribution Y as well
Y = np.asarray(
[
_rotate((x[origin_index], y[origin_index]), (x_, y_), math.radians(-10))
_rotate(
(x[origin_index], y[origin_index]), (x_, y_), math.radians(-10)
)
for x_, y_ in zip(x, y)
]
)
5 changes: 4 additions & 1 deletion examples/plot_int_alignment.py
@@ -104,6 +104,7 @@
#

from nilearn.image import index_img

from fmralign.alignment_methods import IndividualizedNeuralTuning
from fmralign.hyperalignment.piecewise_alignment import PiecewiseAlignment
from fmralign.hyperalignment.regions import compute_parcels
@@ -126,7 +127,9 @@
target_test_masked = np.array(masked_imgs)[:, test_index, :]


parcels = compute_parcels(niimg=template_train[0], mask=masker, n_parcels=100, n_jobs=5)
parcels = compute_parcels(
niimg=template_train[0], mask=masker, n_parcels=100, n_jobs=5
)
denoiser = PiecewiseAlignment(n_jobs=5)
denoised_signal = denoiser.fit_transform(X=denoising_data, regions=parcels)
target_denoised_data = denoised_signal[-1]
4 changes: 3 additions & 1 deletion examples/plot_pairwise_roi_alignment.py
@@ -49,7 +49,9 @@
# Select visual cortex, create a mask and resample it to the right resolution

mask_visual = new_img_like(atlas, atlas.get_fdata() == 1)
resampled_mask_visual = resample_to_img(mask_visual, mask, interpolation="nearest")
resampled_mask_visual = resample_to_img(
mask_visual, mask, interpolation="nearest"
)

# Plot the mask we will use
plot_roi(
4 changes: 3 additions & 1 deletion examples/plot_template_alignment.py
@@ -150,7 +150,9 @@
score_voxelwise(target_test, prediction_from_average, masker, loss="corr")
)
template_score = masker.inverse_transform(
score_voxelwise(target_test, prediction_from_template[0], masker, loss="corr")
score_voxelwise(
target_test, prediction_from_template[0], masker, loss="corr"
)
)


24 changes: 17 additions & 7 deletions examples/plot_toy_int_experiment.py
@@ -14,15 +14,16 @@
--matplotlib`` in a terminal, or use ``jupyter-notebook``.
"""

import numpy as np
import matplotlib.pyplot as plt
import numpy as np

from fmralign.alignment_methods import IndividualizedNeuralTuning as INT
from fmralign.fetch_example_data import generate_dummy_signal
from fmralign.hyperalignment.correlation import (
tuning_correlation,
stimulus_correlation,
compute_pearson_corr,
matrix_MDS,
stimulus_correlation,
tuning_correlation,
)

###############################################################################
@@ -143,7 +144,10 @@
# MDS of tunning matrices
corr_tunning = compute_pearson_corr(tuning_pred_run_1, tuning_pred_run_2)
T_first_part_transformed, T_second_part_transformed = matrix_MDS(
tuning_pred_run_1, tuning_pred_run_2, n_components=2, dissimilarity=1 - corr_tunning
tuning_pred_run_1,
tuning_pred_run_2,
n_components=2,
dissimilarity=(1 - corr_tunning),
)

ax[0, 2].scatter(
@@ -171,16 +175,22 @@
ax[1, 0].set_title("Correlation of estimated stimulus vs ground truth (Run 1)")
ax[1, 0].set_xlabel("Latent components, Run 1")
ax[1, 0].set_ylabel("Latent components, ground truth")
fig.colorbar(ax[1, 0].imshow(correlation_stimulus_true_est_first_part), ax=ax[1, 0])
fig.colorbar(
ax[1, 0].imshow(correlation_stimulus_true_est_first_part), ax=ax[1, 0]
)

correlation_stimulus_true_est_second_part = stimulus_correlation(
stimulus_pred_run_2.T, stimulus_run_2.T
)
ax[1, 1].imshow(correlation_stimulus_true_est_second_part)
ax[1, 1].set_title("Correlation of estimated stimulus vs ground truth (Run 2))")
ax[1, 1].set_title(
"Correlation of estimated stimulus vs ground truth (Run 2))"
)
ax[1, 1].set_xlabel("Latent components, Run 2")
ax[1, 1].set_ylabel("Latent components, ground truth")
fig.colorbar(ax[1, 1].imshow(correlation_stimulus_true_est_second_part), ax=ax[1, 1])
fig.colorbar(
ax[1, 1].imshow(correlation_stimulus_true_est_second_part), ax=ax[1, 1]
)


# Reconstruction
28 changes: 22 additions & 6 deletions fmralign/_utils.py
@@ -16,7 +16,9 @@ def _intersect_clustering_mask(clustering, mask):
new_ = np.zeros_like(dat)
new_[dat > 0] = 1
clustering_mask = new_img_like(clustering, new_)
return intersect_masks([clustering_mask, mask], threshold=1, connected=True)
return intersect_masks(
[clustering_mask, mask], threshold=1, connected=True
)


def piecewise_transform(labels, estimators, X):
@@ -42,7 +44,9 @@ def piecewise_transform(labels, estimators, X):

for i in range(len(unique_labels)):
label = unique_labels[i]
X_transform[:, labels == label] = estimators[i].transform(X[:, labels == label])
X_transform[:, labels == label] = estimators[i].transform(
X[:, labels == label]
)
return X_transform


@@ -71,7 +75,13 @@ def _check_labels(labels, threshold=1000):


def _make_parcellation(
imgs, clustering_index, clustering, n_pieces, masker, smoothing_fwhm=5, verbose=0
imgs,
clustering_index,
clustering,
n_pieces,
masker,
smoothing_fwhm=5,
verbose=0,
):
"""
Use nilearn Parcellation class in our pipeline.
@@ -106,13 +116,17 @@
Parcellation of features in clusters
"""
# check if clustering is provided
if isinstance(clustering, nib.nifti1.Nifti1Image) or os.path.isfile(clustering):
if isinstance(clustering, nib.nifti1.Nifti1Image) or os.path.isfile(
clustering
):
check_same_fov(masker.mask_img_, clustering)
labels = apply_mask_fmri(clustering, masker.mask_img_).astype(int)

# otherwise check it's needed, if not return 1 everywhere
elif n_pieces == 1:
labels = np.ones(int(masker.mask_img_.get_fdata().sum()), dtype=np.int8)
labels = np.ones(
int(masker.mask_img_.get_fdata().sum()), dtype=np.int8
)

# otherwise check requested clustering method
elif isinstance(clustering, str) and n_pieces > 1:
@@ -140,7 +154,9 @@
)
err.args += (errmsg,)
raise err
labels = apply_mask_fmri(parcellation.labels_img_, masker.mask_img_).astype(int)
labels = apply_mask_fmri(
parcellation.labels_img_, masker.mask_img_
).astype(int)

if verbose > 0:
unique_labels, counts = np.unique(labels, return_counts=True)
10 changes: 5 additions & 5 deletions fmralign/alignment_methods.py
@@ -1,13 +1,16 @@
# -*- coding: utf-8 -*-
"""Module implementing alignment estimators on ndarrays."""

import warnings

import numpy as np
import torch
import ot
import scipy
import torch
from fugw.mappings import FUGW, FUGWSparse
from fugw.scripts import coarse_to_fine, lmds
from joblib import Parallel, delayed
from scipy import linalg
import ot
from scipy.optimize import linear_sum_assignment
from scipy.sparse import diags
from scipy.spatial.distance import cdist
@@ -19,9 +22,6 @@
from fmralign.hyperalignment.linalg import safe_svd, svd_pca
from fmralign.hyperalignment.piecewise_alignment import PiecewiseAlignment

from fugw.mappings import FUGW, FUGWSparse
from fugw.scripts import coarse_to_fine, lmds


def scaled_procrustes(X, Y, scaling=False, primal=None):
"""
21 changes: 15 additions & 6 deletions fmralign/fetch_example_data.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
import os

import numpy as np
import pandas as pd
from nilearn.datasets._utils import fetch_files, get_dataset_dir
from fastsrm.srm import projection
import numpy as np
from nilearn.datasets._utils import fetch_files, get_dataset_dir


def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
@@ -45,9 +45,13 @@ def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
"""
# The URLs can be retrieved from the nilearn account on OSF
if subjects == "all":
subjects = ["sub-{i:02d}" for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]]
subjects = [
"sub-{i:02d}" for i in [1, 2, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15]
]
dataset_name = "ibc"
data_dir = get_dataset_dir(dataset_name, data_dir=data_dir, verbose=verbose)
data_dir = get_dataset_dir(
dataset_name, data_dir=data_dir, verbose=verbose
)

# download or retrieve metadatas, put it in a dataframe,
# list all condition and specify path to the right directory
@@ -64,7 +68,9 @@ def fetch_ibc_subjects_contrasts(subjects, data_dir=None, verbose=1):
)
metadata_df = pd.read_csv(metadata_path[0])
conditions = metadata_df.condition.unique()
metadata_df["path"] = metadata_df["path"].str.replace("path_to_dir", data_dir)
metadata_df["path"] = metadata_df["path"].str.replace(
"path_to_dir", data_dir
)
# filter the dataframe to return only rows relevant for subjects argument
metadata_df = metadata_df[metadata_df.subject.isin(subjects)]

@@ -202,7 +208,10 @@ def generate_dummy_signal(
data_train, data_test = [], []
Ts = []
for _ in range(n_subjects):
if generative_method == "custom" or generative_method == "multiviewica":
if (
generative_method == "custom"
or generative_method == "multiviewica"
):
W = T_mean + T_std * np.random.randn(latent_dim, n_voxels)
else:
W = projection(rng.randn(latent_dim, n_voxels))
(Diffs for the remaining changed files are not shown on this page.)
