Skip to content

Commit

Permalink
Merge pull request #184 from sot/ruff
Browse files Browse the repository at this point in the history
Ruff
  • Loading branch information
jeanconn authored Dec 5, 2024
2 parents 139c22c + 5ebd0e3 commit 10aaa76
Show file tree
Hide file tree
Showing 30 changed files with 370 additions and 212 deletions.
10 changes: 0 additions & 10 deletions .github/workflows/black.yml

This file was deleted.

19 changes: 0 additions & 19 deletions .github/workflows/flake8.yml

This file was deleted.

10 changes: 10 additions & 0 deletions .github/workflows/python-formatting.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
name: check format using ruff
on: [push]
jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: chartboost/ruff-action@v1
with:
args: format --check
8 changes: 8 additions & 0 deletions .github/workflows/python-linting.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
name: lint code using ruff
on: [push]
jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: chartboost/ruff-action@v1
20 changes: 8 additions & 12 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,13 +1,9 @@
repos:
- repo: https://github.com/psf/black
rev: 24.10.0
hooks:
- id: black
language_version: python3.11

- repo: https://github.com/pycqa/isort
rev: 5.13.2
hooks:
- id: isort
name: isort (python)
language_version: python3.11
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.8.1
hooks:
# Run the linter.
- id: ruff
# Run the formatter.
- id: ruff-format
2 changes: 1 addition & 1 deletion chandra_aca/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import ska_helpers

from .transform import * # noqa
from .transform import *

__version__ = ska_helpers.get_version(__package__)

Expand Down
49 changes: 30 additions & 19 deletions chandra_aca/aca_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@

def _operator_factory(operator, inplace=False):
"""
Generate data model methods.
Generate data model methods like __add__(self, other) and
__iadd__(self, other). These always operate in the coordinate
system of the left and right operands. If both are in ACA
Expand Down Expand Up @@ -89,6 +91,8 @@ def _operator(self, other):

class ACAImage(np.ndarray):
"""
ACA Image class.
ACAImage is an ndarray subclass that supports functionality for the Chandra
ACA. Most importantly it allows image indexing and slicing in absolute
"aca" coordinates, where the image lower left coordinate is specified
Expand Down Expand Up @@ -123,6 +127,8 @@ class ACAImage(np.ndarray):
@property
def aca(self):
"""
Return a light copy of self with _aca_coords on.
Return a light copy (same data) of self but with the _aca_coords
attribute switched on so that indexing is absolute.
"""
Expand Down Expand Up @@ -189,7 +195,7 @@ def _adjust_item(self, item):
"""
This is the money method that does all the work of manipulating
an item and subsequent row0/col0 when accessing and slicing.
"""
""" # noqa: D205
# Allow slicing via an existing ACAImage object
aca_coords = self._aca_coords
if isinstance(item, ACAImage):
Expand Down Expand Up @@ -241,8 +247,8 @@ def _adjust_item(self, item):
rc_off = it.start if it.start >= 0 else shape[i] + it.start
out_rc[i] = rc0 + rc_off
elif it is not ...:
it = np.array(it)
rc_off = np.where(it >= 0, it, shape[i] + it)
it_arr = np.array(it)
rc_off = np.where(it_arr >= 0, it_arr, shape[i] + it_arr)
out_rc[i] = rc0 + rc_off

return item, out_rc[0], out_rc[1]
Expand Down Expand Up @@ -296,8 +302,7 @@ def __setattr__(self, attr, value):

def centroid_fm(self, bgd=None, pix_zero_loc="center", norm_clip=None):
"""
First moment centroid of ``self`` using 6x6 mousebitten image for input
6x6 or 8x8 images.
First moment centroid of ``self`` using 6x6 mousebitten image for input 6x6 or 8x8 images.
Note that the returned ``norm`` is the sum of the background-subtracted 6x6
mousebitten image, not the entire image.
Expand Down Expand Up @@ -345,8 +350,10 @@ def col0(self, value):

@classmethod
def _read_flicker_cdfs(cls):
"""Read flickering pixel model cumulative distribution functions
and associated metadata. Set up class variables accordingly.
"""
Read flickering pixel model cumulative distribution functions and associated metadata.
Set up class variables accordingly.
The flicker_cdf file here was created using:
/proj/sot/ska/www/ASPECT/ipynb/chandra_aca/flickering-pixel-model.ipynb
Expand All @@ -371,9 +378,7 @@ def _read_flicker_cdfs(cls):
)

# CDF bin range (e-/sec) for each row in flicker_cdfs.
cdf_bins = []
for ii in range(hdr["n_bin"]):
cdf_bins.append(hdr[f"cdf_bin{ii}"])
cdf_bins = [hdr[f"cdf_bin{ii}"] for ii in range(hdr["n_bin"])]
cls.flicker_cdf_bins = np.array(cdf_bins)

def flicker_init(self, flicker_mean_time=10000, flicker_scale=1.0, seed=None):
Expand Down Expand Up @@ -450,7 +455,10 @@ def flicker_init(self, flicker_mean_time=10000, flicker_scale=1.0, seed=None):
self.flicker_times = t_flicker * phase

def flicker_update(self, dt, use_numba=True):
"""Propagate the image forward by ``dt`` seconds and update any pixels
"""
Do a flicker update.
Propagate the image forward by ``dt`` seconds and update any pixels
that have flickered during that interval.
This has the option to use one of two implementations. The default is
Expand Down Expand Up @@ -499,7 +507,9 @@ def _flicker_update_vectorized(self, dt):
rand_ampls = np.random.uniform(0.0, 1.0, size=len(idxs))
rand_times = np.random.uniform(0.0, 1.0, size=len(idxs))

for idx, rand_time, rand_ampl in zip(idxs, rand_times, rand_ampls):
for idx, rand_time, rand_ampl in zip(
idxs, rand_times, rand_ampls, strict=False
):
# Determine the new value after flickering and set in array view.
# First get the right CDF from the list of CDFs based on the pixel value.
cdf_idx = self.flicker_cdf_idxs[idx]
Expand Down Expand Up @@ -545,6 +555,8 @@ def _flicker_update_numba(
flicker_mean_time,
):
"""
Do a flicker update.
Propagate the image forward by ``dt`` seconds and update any pixels
that have flickered during that interval.
"""
Expand Down Expand Up @@ -613,8 +625,7 @@ def np_interp(yin, xin, xout):

def _prep_6x6(img, bgd=None):
"""
Subtract background and in case of 8x8 image
cut and return the 6x6 inner section.
Subtract background and in case of 8x8 image cut and return the 6x6 inner section.
"""
if isinstance(bgd, np.ndarray):
bgd = bgd.view(np.ndarray)
Expand Down Expand Up @@ -671,9 +682,8 @@ def centroid_fm(img, bgd=None, pix_zero_loc="center", norm_clip=None):
norm = np.sum(img)
if norm_clip is not None:
norm = norm.clip(norm_clip, None)
else:
if norm <= 0:
raise ValueError("non-positive image norm {}".format(norm))
elif norm <= 0:
raise ValueError("non-positive image norm {}".format(norm))

row = np.sum(rw * img) / norm
col = np.sum(cw * img) / norm
Expand All @@ -691,6 +701,8 @@ def centroid_fm(img, bgd=None, pix_zero_loc="center", norm_clip=None):

class AcaPsfLibrary(object):
"""
AcaPsfLibrary class
Access the ACA PSF library, which is a library of 8x8 images providing the integrated
(pixelated) ACA PSF over a grid of subpixel locations.
Expand Down Expand Up @@ -748,8 +760,7 @@ def get_psf_image(
aca_image=True,
):
"""
Get interpolated ACA PSF image that corresponds to pixel location
``row``, ``col``.
Get interpolated ACA PSF image that corresponds to pixel location ``row``, ``col``.
Parameters
----------
Expand Down
13 changes: 7 additions & 6 deletions chandra_aca/attitude.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Calculate attitude based on star centroid values using a fast linear
least-squares method.
Calculate attitude based on star centroid values using a fast linear least-squares method.
Note this requires Python 3.5+.
Validation:
http://nbviewer.jupyter.org/url/asc.harvard.edu/mta/ASPECT/ipynb/chandra_aca/calc_att_validate.ipynb
"""

from __future__ import division

import numpy as np
Expand Down Expand Up @@ -129,7 +129,7 @@ def calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs, sigma=None):
zag_obs.shape = 1, n_stars

outs = []
for yo, zo in zip(yag_obs, zag_obs):
for yo, zo in zip(yag_obs, zag_obs, strict=False):
out = _calc_roll_pitch_yaw(yag, zag, yo, zo, sigma=sigma)
outs.append(out)

Expand All @@ -144,8 +144,9 @@ def calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs, sigma=None):

def _calc_roll_pitch_yaw(yag, zag, yag_obs, zag_obs, sigma=None, iter=1):
"""
Internal version that does the real work of calc_roll_pitch_yaw and
works on only one sample at a time.
Internal version that does the real work of calc_roll_pitch_yaw.
This works on only one sample at a time.
"""
weights = None if (sigma is None) else 1 / np.array(sigma)
yag_avg = np.average(yag, weights=weights)
Expand Down Expand Up @@ -250,7 +251,7 @@ def calc_att(att, yag, zag, yag_obs, zag_obs, sigma=None):

if isinstance(rolls, np.ndarray) and rolls.ndim >= 1:
out = []
for roll, pitch, yaw in zip(rolls, pitches, yaws):
for roll, pitch, yaw in zip(rolls, pitches, yaws, strict=False):
dq = Quat([yaw, -pitch, roll])
out.append(q_att * dq)
else:
Expand Down
8 changes: 7 additions & 1 deletion chandra_aca/centroid_resid.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,8 @@ def __init__(self, start, stop):

def set_centroids(self, source, slot, alg=8, apply_dt=True):
"""
Assign centroids.
Assign centroids from ``source`` and ``slot`` to the objects centroid attributes
(yag, zag, yag_times, zag_times)
Expand Down Expand Up @@ -275,6 +277,8 @@ def zag_times(self, vals):

def set_offsets(self):
"""
Apply time offsets to centroids.
Apply time offsets to centroids based on type and source of centroid, obsid
(suggesting 8x8 or 6x6 data), telemetry source ('maude' or 'cxc') and aspect solution
source. These time offsets were fit. See fit notebooks at:
Expand All @@ -299,7 +303,7 @@ def set_offsets(self):
)
return
fetch_source = fetch.data_source.sources()[0]
if fetch_source != "cxc" and fetch_source != "maude":
if fetch_source not in ("cxc", "maude"):
warnings.warn(
"Only maude and cxc fetch data sources are supported for offsets. "
"Not applying offsets."
Expand Down Expand Up @@ -340,6 +344,8 @@ def set_offsets(self):

def calc_residuals(self):
"""
Calculate star residuals.
Calculate residuals based on attitude and ra/dec of star. Note that the sampling and times
of yags may be different from zags so these should be done independently.
Expand Down
27 changes: 17 additions & 10 deletions chandra_aca/dark_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,10 @@


def dark_temp_scale(t_ccd, t_ccd_ref=-19.0, scale_4c=None):
"""Return the multiplicative scale factor to convert a CCD dark map
"""
Calculate dark current scale factor.
Return the multiplicative scale factor to convert a CCD dark map
or dark current value from temperature ``t_ccd`` to temperature
``t_ccd_ref``::
Expand Down Expand Up @@ -127,7 +130,9 @@ def get_dark_hist(date, t_ccd):

def smooth_broken_pow(pars, x):
"""
Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
Smoothed broken power-law.
Pars are same as bpl1d (NOT + gaussian sigma):
1: gamma1
2: gamma2
3: x_b (break point)
Expand All @@ -146,7 +151,9 @@ def smooth_broken_pow(pars, x):

def smooth_twice_broken_pow(pars, x):
"""
Smoothed broken power-law. Pars are same as bpl1d (NOT + gaussian sigma):
Smoothed twice-broken power-law.
Pars are same as bpl1d (NOT + gaussian sigma):
1: gamma1
2: gamma2
3: gamma3
Expand Down Expand Up @@ -174,6 +181,8 @@ def smooth_twice_broken_pow(pars, x):

def temp_scalefac(T_ccd):
"""
Calculate scale factor.
Return the multiplicative scale factor to convert a CCD dark map from
the nominal -19C temperature to the temperature T. Based on best global fit for
dark current model in plot_predicted_warmpix.py. Previous value was 0.62 instead
Expand Down Expand Up @@ -253,8 +262,7 @@ def get_sbp_pars(dates):

def get_warm_fracs(warm_threshold, date="2013:001:12:00:00", T_ccd=-19.0):
"""
Calculate fraction of pixels in modeled dark current distribution
above warm threshold(s).
Calculate fraction of pixels in modeled dark current distribution above warm threshold(s).
Parameters
----------
Expand All @@ -274,11 +282,11 @@ def get_warm_fracs(warm_threshold, date="2013:001:12:00:00", T_ccd=-19.0):
warm_thresholds, is_scalar = as_array(warm_threshold)

warmpixes = []
for warm_threshold in warm_thresholds:
for warm_thresh in warm_thresholds:
# First get the full bins to right of warm_threshold
ii = np.searchsorted(xbins, warm_threshold)
ii = np.searchsorted(xbins, warm_thresh)
warmpix = np.sum(y[ii:])
lx = np.log(warm_threshold)
lx = np.log(warm_thresh)
lx0 = np.log(xbins[ii - 1])
lx1 = np.log(xbins[ii])
ly0 = np.log(y[ii - 1])
Expand All @@ -298,8 +306,7 @@ def get_warm_fracs(warm_threshold, date="2013:001:12:00:00", T_ccd=-19.0):

def synthetic_dark_image(date, t_ccd_ref=None):
"""
Generate a synthetic dark current image corresponding to the specified
``date`` and ``t_ccd``.
Generate a synthetic dark current image corresponding to the specified ``date`` and ``t_ccd``.
Parameters
----------
Expand Down
Loading

0 comments on commit 10aaa76

Please sign in to comment.