
Commit

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Jan 18, 2025
1 parent 820dc7b · commit 0d90dbf
Showing 16 changed files with 28 additions and 36 deletions.
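Every hunk below is a behavior-preserving cleanup: most merge implicitly concatenated string literals into a single literal, and the rest are similar cosmetic tweaks (spaces inside f-string expressions, re-wrapping an assert message). A minimal sketch of why the string merges are safe, using illustrative variable names that are not part of the repository (Python concatenates adjacent string literals at compile time):

old_style = "Signal navigation size does not match " "data dimensions."
new_style = "Signal navigation size does not match data dimensions."
assert old_style == new_style  # equal strings at runtime, so the fix is purely cosmetic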
4 changes: 1 addition & 3 deletions rsciio/blockfile/_api.py
@@ -450,9 +450,7 @@ def file_writer(
np.asanyarray(navigator).tofile(f)
# Zero pad until next data block
if f.tell() > int(header["Data_offset_2"][0]):
- raise ValueError(
-     "Signal navigation size does not match " "data dimensions."
- )
+ raise ValueError("Signal navigation size does not match data dimensions.")
zero_pad = int(header["Data_offset_2"][0]) - f.tell()
np.zeros((zero_pad,), np.byte).tofile(f)
file_location = f.tell()
3 changes: 1 addition & 2 deletions rsciio/digitalmicrograph/_api.py
@@ -631,8 +631,7 @@ def size(self):
if self.imdict.ImageData.DataType in (27, 28): # Packed complex
if self.imdict.ImageData.Data.size % 2:
raise IOError(
-     "ImageData.Data.size should be an even integer for "
-     "this datatype."
+     "ImageData.Data.size should be an even integer for this datatype."
)
else:
return int(self.imdict.ImageData.Data.size / 2)
4 changes: 2 additions & 2 deletions rsciio/edax/_api.py
@@ -851,7 +851,7 @@ def spd_reader(
# Read the .ipr header (if possible)
if read_ipr:
with open(ipr_fname, "rb") as f:
- _logger.debug(" From .spd reader - " "reading .ipr {}".format(ipr_fname))
+ _logger.debug(" From .spd reader - reading .ipr {}".format(ipr_fname))
ipr_header = __get_ipr_header(f, endianess)
original_metadata["ipr_header"] = sarray2dict(ipr_header)

@@ -872,7 +872,7 @@ def spd_reader(
# Read the .spc header (if possible)
if read_spc:
with open(spc_fname, "rb") as f:
- _logger.debug(" From .spd reader - " "reading .spc {}".format(spc_fname))
+ _logger.debug(" From .spd reader - reading .spc {}".format(spc_fname))
spc_header = __get_spc_header(f, endianess, load_all_spc)
spc_dict = sarray2dict(spc_header)
original_metadata["spc_header"] = spc_dict
3 changes: 1 addition & 2 deletions rsciio/emd/_emd_ncem.py
@@ -76,8 +76,7 @@ def read_file(self, file, lazy=None, dataset_path=None, stack_group=None):
if isinstance(dataset_path, list):
if stack_group:
_logger.warning(
-     "The argument 'dataset_path' and "
-     "'stack_group' are not compatible."
+     "The argument 'dataset_path' and 'stack_group' are not compatible."
)
stack_group = False
dataset_path = dataset_path.copy()
4 changes: 1 addition & 3 deletions rsciio/empad/_api.py
@@ -76,9 +76,7 @@ def _parse_xml(filename):
}
)
else:
- raise IOError(
-     "Unsupported Empad file: the scan parameters cannot " "be imported."
- )
+ raise IOError("Unsupported Empad file: the scan parameters cannot be imported.")

[Codecov / codecov/patch: added line rsciio/empad/_api.py#L79 was not covered by tests]

return om, info

2 changes: 1 addition & 1 deletion rsciio/hspy/_api.py
@@ -230,7 +230,7 @@ def file_writer(
# will be flushed with using 'w' mode
mode = kwds.get("mode", "w" if write_dataset else "a")
if mode != "a" and not write_dataset:
- raise ValueError("`mode='a'` is required to use " "`write_dataset=False`.")
+ raise ValueError("`mode='a'` is required to use `write_dataset=False`.")
f = h5py.File(filename, mode=mode)

f.attrs["file_format"] = "HyperSpy"
2 changes: 1 addition & 1 deletion rsciio/jeol/_api.py
@@ -367,7 +367,7 @@ def _check_divisor(factor, number, string):
if isinstance(downsample, Iterable):
if len(downsample) > 2:
raise ValueError(
-     "`downsample` can't be an iterable of length " "different from 2."
+     "`downsample` can't be an iterable of length different from 2."
)
downsample_width = downsample[0]
downsample_height = downsample[1]
6 changes: 3 additions & 3 deletions rsciio/netcdf/_api.py
@@ -130,7 +130,7 @@ def nc_hyperspy_reader_0dot1(ncfile, filename):
calibration_dict[attrib[0]] = value
else:
_logger.warning(
-     "Warning: the attribute '%s' is not defined in " "the file '%s'",
+     "Warning: the attribute '%s' is not defined in the file '%s'",
attrib[0],
filename,
)
@@ -143,7 +143,7 @@ def nc_hyperspy_reader_0dot1(ncfile, filename):
acquisition_dict[attrib[0]] = value
else:
_logger.warning(
-     "Warning: the attribute '%s' is not defined in " "the file '%s'",
+     "Warning: the attribute '%s' is not defined in the file '%s'",
attrib[0],
filename,
)
@@ -152,7 +152,7 @@ def nc_hyperspy_reader_0dot1(ncfile, filename):
treatments_dict[attrib[0]] = eval("dc." + attrib[1])
else:
_logger.warning(
-     "Warning: the attribute '%s' is not defined in " "the file '%s'",
+     "Warning: the attribute '%s' is not defined in the file '%s'",
attrib[0],
filename,
)
2 changes: 1 addition & 1 deletion rsciio/nexus/_api.py
@@ -705,7 +705,7 @@ def _check_search_keys(search_keys):
elif search_keys is None:
return search_keys
else:
- raise ValueError("search keys must be None, a string, " "or a list of strings")
+ raise ValueError("search keys must be None, a string, or a list of strings")


def _find_data(group, search_keys=None, hardlinks_only=False, absolute_path=None):
2 changes: 1 addition & 1 deletion rsciio/pantarhei/_api.py
@@ -223,7 +223,7 @@ def _navigation_first(i):
meta_data[key].append(item_in_numpy_order[new_order[i]])
except Exception as e: # pragma: no cover
raise Exception(
-     f"Could not load meta data: {key} " f"in hyperspy file: {e}."
+     f"Could not load meta data: {key} in hyperspy file: {e}."
)
axes = []
for i, (label, calib) in enumerate(zip(data_labels, calibration_ordered)):
6 changes: 2 additions & 4 deletions rsciio/renishaw/_api.py
@@ -553,8 +553,7 @@ def _check_block_exists(self, block_name):
def _check_block_size(name, error_area, expected_size, actual_size):
if expected_size < actual_size:
_logger.warning(
-     f"Unexpected size of {name} Block."
-     f"{error_area} may be read incorrectly."
+     f"Unexpected size of {name} Block.{error_area} may be read incorrectly."
)
elif expected_size > actual_size:
if name == "WXDA_0":
@@ -1062,8 +1061,7 @@ def _set_nav_via_ORGN(self, orgn_data):
else:
reason = "Non-ordered axis is not supported"
_logger.warning(
-     f"{reason}, a default axis with scale 1 "
-     "and offset 0 will be used."
+     f"{reason}, a default axis with scale 1 and offset 0 will be used."
)
del nav_dict[axis]["units"]
else:
10 changes: 5 additions & 5 deletions rsciio/semper/_api.py
@@ -346,7 +346,7 @@ def _check_format(cls, data):
iform = 4 # int32
else:
supported_formats = [np.dtype(i).name for i in cls.IFORM_DICT.values()]
- msg = "The SEMPER file format does not support " "{} data type. ".format(
+ msg = "The SEMPER file format does not support {} data type. ".format(
data.dtype.name
)
msg += "Supported data types are: " + ", ".join(supported_formats)
@@ -375,9 +375,9 @@ def load_from_unf(cls, filename, lazy=False):
rec_length = np.fromfile(f, dtype="<i4", count=1)[0] # length of header
header = np.fromfile(f, dtype=cls.HEADER_DTYPES[: rec_length // 2], count=1)
metadata.update(sarray2dict(header))
- assert (
-     np.frombuffer(f.read(4), dtype=np.int32)[0] == rec_length
- ), "Error while reading the header (length is not correct)!"
+ assert np.frombuffer(f.read(4), dtype=np.int32)[0] == rec_length, (
+     "Error while reading the header (length is not correct)!"
+ )
data_format = cls.IFORM_DICT[metadata["IFORM"]]
iversn, remain = divmod(metadata["IFLAG"], 10000)
ilabel, ntitle = divmod(remain, 1000)
@@ -396,7 +396,7 @@
try:
metadata.update(cls._read_label(f))
except Exception as e:
- warning = "Could not read label, trying to proceed " "without it!"
+ warning = "Could not read label, trying to proceed without it!"

[Codecov / codecov/patch: added line rsciio/semper/_api.py#L399 was not covered by tests]
warning += " (Error message: {})".format(str(e))
warnings.warn(warning)
# Read picture data:
2 changes: 1 addition & 1 deletion rsciio/tests/test_ripple.py
@@ -175,7 +175,7 @@ def test_data(pdict, tmp_path):
"Acquisition_instrument.TEM.Stage.tilt_alpha",
"Acquisition_instrument.TEM.Detector.EDS.azimuth_angle",
"Acquisition_instrument.TEM.Detector.EDS.elevation_angle",
- "Acquisition_instrument.TEM.Detector." "EDS.energy_resolution_MnKa",
+ "Acquisition_instrument.TEM.Detector.EDS.energy_resolution_MnKa",
"Acquisition_instrument.TEM.Detector.EDS.live_time",
)
if metadata:
2 changes: 1 addition & 1 deletion rsciio/tia/_api.py
@@ -213,7 +213,7 @@ def parse_ExperimentalDescription(et, dictree):
value = float(value) if units else value
except ValueError:
_logger.warning(
-     f"Expected decimal value for {label}, " f"but received {value} instead"
+     f"Expected decimal value for {label}, but received {value} instead"
)
dictree[item] = value

8 changes: 4 additions & 4 deletions rsciio/tvips/_api.py
@@ -132,7 +132,7 @@ def _get_main_header_from_signal(signal, version=2, frame_header_extra_bytes=0):
offsety = round((offsety * _UREG(unit)).to(to_unit).magnitude)
else:
warnings.warn(
-     "Image scale units could not be converted, " "saving axes scales as is.",
+     "Image scale units could not be converted, saving axes scales as is.",
UserWarning,
)
metadata = DTBox(signal["metadata"], box_dots=True)
@@ -291,7 +291,7 @@ def file_reader(
f.seek(0)
# read the main header in file 0
header = np.fromfile(f, dtype=TVIPS_RECORDER_GENERAL_HEADER, count=1)
- dtype = np.dtype(f"u{header['bitsperpixel'][0]//8}")
+ dtype = np.dtype(f"u{header['bitsperpixel'][0] // 8}")
dimx = header["dimx"][0]
dimy = header["dimy"][0]
# the size of the frame header varies with version
@@ -377,8 +377,8 @@ def file_reader(
final_frame = scan_start_frame + total_scan_frames
if final_frame > max_frame_index:
raise ValueError(
-     f"Shape {scan_shape} requires image index {final_frame-1} "
-     f"which is out of bounds. Final frame index: {max_frame_index-1}."
+     f"Shape {scan_shape} requires image index {final_frame - 1} "
+     f"which is out of bounds. Final frame index: {max_frame_index - 1}."
)
indices = np.arange(scan_start_frame, final_frame).reshape(scan_shape)

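The f-string changes in this file are likewise cosmetic: the spaces added inside the replacement fields do not change the rendered string. A quick illustrative check, with a made-up value standing in for the TVIPS header field:

bitsperpixel = 16  # hypothetical stand-in for header["bitsperpixel"][0]
assert f"u{bitsperpixel//8}" == f"u{bitsperpixel // 8}" == "u2"  # both render the same dtype string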
4 changes: 2 additions & 2 deletions rsciio/usid/_api.py
@@ -87,7 +87,7 @@ def _get_dim_dict(labels, units, val_func, ignore_non_uniform_dims=True):
except ValueError:
# non-uniform dimension! - see notes above
if ignore_non_uniform_dims:
- warn("Ignoring non-uniformity of dimension: " "{}".format(dim_name))
+ warn("Ignoring non-uniformity of dimension: {}".format(dim_name))
step_size = 1
dim_vals[0] = 0
else:
@@ -267,7 +267,7 @@ def _usidataset_to_signal_dict(h5_main, ignore_non_uniform_dims=True, lazy=False
num_spec_dims = len(spec_dict)
num_pos_dims = len(pos_dict)
_logger.info(
-     "Dimensions: Positions: {}, Spectroscopic: {}" ".".format(
+     "Dimensions: Positions: {}, Spectroscopic: {}.".format(
num_pos_dims, num_spec_dims
)
)
