diff --git a/api/assign_chirps/index.html b/api/assign_chirps/index.html index 44dfd86..d64bdea 100644 --- a/api/assign_chirps/index.html +++ b/api/assign_chirps/index.html @@ -55,7 +55,18 @@ - + @@ -1629,8 +1640,7 @@

Parameters

Source code in chirpdetector/assign_chirps.py -
485
-486
+            
486
 487
 488
 489
@@ -1690,68 +1700,69 @@ 

Parameters

543 544 545 -546
def assign_chirps(
-    assign_data: Dict[str, np.ndarray],
-    chirp_df: pd.DataFrame,
-    data: Dataset,
-) -> None:
-    """Assign chirps to wavetracker tracks.
-
-    This function uses the extracted envelope troughs to assign chirps to
-    tracks. It computes a cost function that is high when the trough prominence
-    is high and the distance to the chirp center is low. For each chirp, the
-    track with the highest cost function value is chosen.
-
-    Parameters
-    ----------
-    - `assign_data`: `dict`
-        Dictionary containing the data needed for assignment
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-    - `data`: `gridtools.datasets.Dataset`
-        Dataset object containing the data
-    """
-    # extract data from assign_data
-    peak_prominences = assign_data["proms"]
-    peak_distances = assign_data["peaks"]
-    peak_times = assign_data["ptimes"]
-    chirp_indices = assign_data["cindices"]
-    track_ids = assign_data["track_ids"]
-
-    # compute cost function.
-    # this function is high when the trough prominence is high
-    # (-> chirp with high contrast)
-    # and when the trough is close to the chirp center as detected by the
-    # r-cnn (-> detected chirp is close to the actual chirp)
-    cost = peak_prominences / peak_distances**2
-
-    # set cost to zero for cases where no peak was found
-    cost[np.isnan(cost)] = 0
-
-    # for each chirp, choose the track where the cost is highest
-    # TODO: to avoid confusion make a cost function where high is good and low
-    # is bad. this is more like a "gain function"
-    chosen_tracks = []
-    chosen_track_times = []
-    for idx in np.unique(chirp_indices):
-        candidate_tracks = track_ids[chirp_indices == idx]
-        candidate_costs = cost[chirp_indices == idx]
-        candidate_times = peak_times[chirp_indices == idx]
-        chosen_tracks.append(candidate_tracks[np.argmax(candidate_costs)])
-        chosen_track_times.append(candidate_times[np.argmax(candidate_costs)])
-
-    # store chosen tracks in chirp_df
-    chirp_df["assigned_track"] = chosen_tracks
-
-    # store chirp time estimated from envelope trough in chirp_df
-    chirp_df["envelope_trough_time"] = chosen_track_times
-
-    # save chirp_df
-    chirp_df.to_csv(data.path / "chirpdetector_bboxes.csv", index=False)
-
-    # save old format:
-    np.save(data.path / "chirp_ids_rcnn.npy", chosen_tracks)
-    np.save(data.path / "chirp_times_rcnn.npy", chosen_track_times)
+546
+547
def assign_chirps(
+    assign_data: Dict[str, np.ndarray],
+    chirp_df: pd.DataFrame,
+    data: Dataset,
+) -> None:
+    """Assign chirps to wavetracker tracks.
+
+    This function uses the extracted envelope troughs to assign chirps to
+    tracks. It computes a cost function that is high when the trough prominence
+    is high and the distance to the chirp center is low. For each chirp, the
+    track with the highest cost function value is chosen.
+
+    Parameters
+    ----------
+    - `assign_data`: `dict`
+        Dictionary containing the data needed for assignment
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+    - `data`: `gridtools.datasets.Dataset`
+        Dataset object containing the data
+    """
+    # extract data from assign_data
+    peak_prominences = assign_data["proms"]
+    peak_distances = assign_data["peaks"]
+    peak_times = assign_data["ptimes"]
+    chirp_indices = assign_data["cindices"]
+    track_ids = assign_data["track_ids"]
+
+    # compute cost function.
+    # this function is high when the trough prominence is high
+    # (-> chirp with high contrast)
+    # and when the trough is close to the chirp center as detected by the
+    # r-cnn (-> detected chirp is close to the actual chirp)
+    cost = peak_prominences / peak_distances**2
+
+    # set cost to zero for cases where no peak was found
+    cost[np.isnan(cost)] = 0
+
+    # for each chirp, choose the track where the cost is highest
+    # TODO: to avoid confusion make a cost function where high is good and low
+    # is bad. this is more like a "gain function"
+    chosen_tracks = []
+    chosen_track_times = []
+    for idx in np.unique(chirp_indices):
+        candidate_tracks = track_ids[chirp_indices == idx]
+        candidate_costs = cost[chirp_indices == idx]
+        candidate_times = peak_times[chirp_indices == idx]
+        chosen_tracks.append(candidate_tracks[np.argmax(candidate_costs)])
+        chosen_track_times.append(candidate_times[np.argmax(candidate_costs)])
+
+    # store chosen tracks in chirp_df
+    chirp_df["assigned_track"] = chosen_tracks
+
+    # store chirp time estimated from envelope trough in chirp_df
+    chirp_df["envelope_trough_time"] = chosen_track_times
+
+    # save chirp_df
+    chirp_df.to_csv(data.path / "chirpdetector_bboxes.csv", index=False)
+
+    # save old format:
+    np.save(data.path / "chirp_ids_rcnn.npy", chosen_tracks)
+    np.save(data.path / "chirp_times_rcnn.npy", chosen_track_times)
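Note that the "cost" is really a gain (higher wins): prominence rewards high-contrast troughs, while the squared distance strongly penalizes troughs far from the detected chirp center. A minimal sketch with invented numbers, not taken from a real recording, shows how the distance term can dominate:

import numpy as np

# toy data: two chirps, each with candidate troughs on two tracks
track_ids = np.array([1, 2, 1, 2])
chirp_indices = np.array([0, 0, 1, 1])
peak_prominences = np.array([0.8, 0.1, np.nan, 0.5])
peak_distances = np.array([3.0, 1.0, np.nan, 2.0])  # already offset by +1

cost = peak_prominences / peak_distances**2
cost[np.isnan(cost)] = 0  # tracks without a trough can never win

for idx in np.unique(chirp_indices):
    winner = track_ids[chirp_indices == idx][np.argmax(cost[chirp_indices == idx])]
    print(idx, winner)
# chirp 0 -> track 2 (0.1/1 beats 0.8/9), chirp 1 -> track 2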
 
@@ -1782,8 +1793,7 @@

Parameters

Source code in chirpdetector/assign_chirps.py -
549
-550
+            
550
 551
 552
 553
@@ -1824,49 +1834,50 @@ 

Parameters

588 589 590 -591
def assign_cli(path: pathlib.Path) -> None:
-    """Assign chirps to wavetracker tracks.
-
-    This is the command line interface for the assign_chirps function.
-
-    Parameters
-    ----------
-    - `path`: `pathlib.Path`
-        Path to the directory containing the chirpdetector.toml file
-    """
-    if not path.is_dir():
-        msg = f"{path} is not a directory"
-        raise ValueError(msg)
-
-    if not (path / "chirpdetector.toml").is_file():
-        msg = f"{path} does not contain a chirpdetector.toml file"
-        raise ValueError(msg)
-
-    logger = make_logger(__name__, path / "chirpdetector.log")
-    # config = load_config(path / "chirpdetector.toml")
-    recs = list(path.iterdir())
-    recs = [r for r in recs if r.is_dir()]
-    # recs = [path / "subset_2020-03-18-10_34_t0_9320.0_t1_9920.0"]
-
-    msg = f"found {len(recs)} recordings in {path}, starting assignment"
-    prog.console.log(msg)
-    logger.info(msg)
-
-    prog.console.rule("starting assignment")
-    with prog:
-        task = prog.add_task("assigning chirps", total=len(recs))
-        for rec in recs:
-            msg = f"assigning chirps in {rec}"
-            logger.info(msg)
-            prog.console.log(msg)
-
-            data = load(rec)
-            chirp_df = pd.read_csv(rec / "chirpdetector_bboxes.csv")
-            assign_data, chirp_df, data = extract_assignment_data(
-                data, chirp_df
-            )
-            assign_chirps(assign_data, chirp_df, data)
-            prog.update(task, advance=1)
+591
+592
def assign_cli(path: pathlib.Path) -> None:
+    """Assign chirps to wavetracker tracks.
+
+    This is the command line interface for the assign_chirps function.
+
+    Parameters
+    ----------
+    - `path`: `pathlib.Path`
+        Path to the directory containing the chirpdetector.toml file
+    """
+    if not path.is_dir():
+        msg = f"{path} is not a directory"
+        raise ValueError(msg)
+
+    if not (path / "chirpdetector.toml").is_file():
+        msg = f"{path} does not contain a chirpdetector.toml file"
+        raise ValueError(msg)
+
+    logger = make_logger(__name__, path / "chirpdetector.log")
+    # config = load_config(path / "chirpdetector.toml")
+    recs = list(path.iterdir())
+    recs = [r for r in recs if r.is_dir()]
+    # recs = [path / "subset_2020-03-18-10_34_t0_9320.0_t1_9920.0"]
+
+    msg = f"found {len(recs)} recordings in {path}, starting assignment"
+    prog.console.log(msg)
+    logger.info(msg)
+
+    prog.console.rule("starting assignment")
+    with prog:
+        task = prog.add_task("assigning chirps", total=len(recs))
+        for rec in recs:
+            msg = f"assigning chirps in {rec}"
+            logger.info(msg)
+            prog.console.log(msg)
+
+            data = load(rec)
+            chirp_df = pd.read_csv(rec / "chirpdetector_bboxes.csv")
+            assign_data, chirp_df, data = extract_assignment_data(
+                data, chirp_df
+            )
+            assign_chirps(assign_data, chirp_df, data)
+            prog.update(task, advance=1)
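Since the source lives in chirpdetector/assign_chirps.py, a call from Python might look like the following; the data directory is hypothetical and must contain one subdirectory per recording plus a chirpdetector.toml:

import pathlib
from chirpdetector.assign_chirps import assign_cli

assign_cli(pathlib.Path("/data/gridrecordings"))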
 
@@ -1901,7 +1912,9 @@

Returns

Source code in chirpdetector/assign_chirps.py -
208
+            
206
+207
+208
 209
 210
 211
@@ -1914,24 +1927,22 @@ 

Returns

218 219 220 -221 -222 -223
def bbox_to_chirptimes(chirp_df: pd.DataFrame) -> pd.DataFrame:
-    """Convert chirp bboxes to chirp times.
-
-    Parameters
-    ----------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-
-    Returns
-    -------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes with chirp times.
-    """
-    chirp_df["chirp_times"] = np.mean(chirp_df[["t1", "t2"]], axis=1)
-
-    return chirp_df
+221
def bbox_to_chirptimes(chirp_df: pd.DataFrame) -> pd.DataFrame:
+    """Convert chirp bboxes to chirp times.
+
+    Parameters
+    ----------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+
+    Returns
+    -------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes with chirp times.
+    """
+    chirp_df["chirp_times"] = np.mean(chirp_df[["t1", "t2"]], axis=1)
+
+    return chirp_df
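A quick check with a hypothetical two-row dataframe confirms that the chirp time is simply the bbox center on the time axis:

import pandas as pd
from chirpdetector.assign_chirps import bbox_to_chirptimes

chirp_df = pd.DataFrame({"t1": [1.0, 4.2], "t2": [1.4, 4.8]})
print(bbox_to_chirptimes(chirp_df)["chirp_times"].to_list())  # [1.2, 4.5]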
 
@@ -1970,7 +1981,9 @@

Returns

Source code in chirpdetector/assign_chirps.py -
150
+            
148
+149
+150
 151
 152
 153
@@ -1997,38 +2010,36 @@ 

Returns

174 175 176 -177 -178 -179
def clean_bboxes(data: Dataset, chirp_df: pd.DataFrame) -> pd.DataFrame:
-    """Clean the chirp bboxes.
-
-    This is a collection of filters that remove bboxes that
-    either overlap, are out of range or otherwise do not make sense.
-
-    Parameters
-    ----------
-    - `data`: `gridtools.datasets.Dataset`
-        Dataset object containing the data
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-
-    Returns
-    -------
-    - `chirp_df_tf`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes that overlap with the range
-    """
-    # non-max suppression: remove all chirp bboxes that overlap with
-    # another more than threshold
-    pick_indices = non_max_suppression_fast(chirp_df, 0.5)
-    chirp_df_nms = chirp_df.loc[pick_indices, :]
-
-    # track filter: remove all chirp bboxes that do not overlap with
-    # the range spanned by the min and max of the wavetracker frequency tracks
-    minf = np.min(data.track.freqs).astype(float)
-    maxf = np.max(data.track.freqs).astype(float)
-    # maybe add some more cleaning here, such
-    # as removing chirps that are too short or too long
-    return track_filter(chirp_df_nms, minf, maxf)
+177
def clean_bboxes(data: Dataset, chirp_df: pd.DataFrame) -> pd.DataFrame:
+    """Clean the chirp bboxes.
+
+    This is a collection of filters that remove bboxes that
+    either overlap, are out of range or otherwise do not make sense.
+
+    Parameters
+    ----------
+    - `data`: `gridtools.datasets.Dataset`
+        Dataset object containing the data
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+
+    Returns
+    -------
+    - `chirp_df_tf`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes that overlap with the range
+    """
+    # non-max suppression: remove all chirp bboxes that overlap with
+    # another more than threshold
+    pick_indices = non_max_suppression_fast(chirp_df, 0.5)
+    chirp_df_nms = chirp_df.loc[pick_indices, :]
+
+    # track filter: remove all chirp bboxes that do not overlap with
+    # the range spanned by the min and max of the wavetracker frequency tracks
+    minf = np.min(data.track.freqs).astype(float)
+    maxf = np.max(data.track.freqs).astype(float)
+    # maybe add some more cleaning here, such
+    # as removing chirps that are too short or too long
+    return track_filter(chirp_df_nms, minf, maxf)
 
@@ -2067,7 +2078,9 @@

Returns

Source code in chirpdetector/assign_chirps.py -
182
+            
180
+181
+182
 183
 184
 185
@@ -2088,32 +2101,30 @@ 

Returns

200 201 202 -203 -204 -205
def cleanup(chirp_df: pd.DataFrame, data: Dataset) -> pd.DataFrame:
-    """Clean the chirp bboxes.
-
-    This is a collection of filters that remove bboxes that
-    either overlap, are out of range or otherwise do not make sense.
-
-    Parameters
-    ----------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-    - `data`: `gridtools.datasets.Dataset`
-        Dataset object containing the data
-
-    Returns
-    -------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes that overlap with the range
-    """
-    # first clean the bboxes
-    chirp_df = clean_bboxes(data, chirp_df)
-    # sort chirps in df by time, i.e. t1
-    chirp_df = chirp_df.sort_values(by="t1", ascending=True)
-    # compute chirp times, i.e. center of the bbox x axis
-    return bbox_to_chirptimes(chirp_df)
+203
def cleanup(chirp_df: pd.DataFrame, data: Dataset) -> pd.DataFrame:
+    """Clean the chirp bboxes.
+
+    This is a collection of filters that remove bboxes that
+    either overlap, are out of range or otherwise do not make sense.
+
+    Parameters
+    ----------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+    - `data`: `gridtools.datasets.Dataset`
+        Dataset object containing the data
+
+    Returns
+    -------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes that overlap with the range
+    """
+    # first clean the bboxes
+    chirp_df = clean_bboxes(data, chirp_df)
+    # sort chirps in df by time, i.e. t1
+    chirp_df = chirp_df.sort_values(by="t1", ascending=True)
+    # compute chirp times, i.e. center of the bbox x axis
+    return bbox_to_chirptimes(chirp_df)
 
@@ -2155,7 +2166,9 @@

Parameters
Source code in chirpdetector/assign_chirps.py -
365
+            
363
+364
+365
 366
 367
 368
@@ -2272,124 +2285,128 @@ 

Parameters 479 480 481 -482

def extract_assignment_data(
-    data: Dataset, chirp_df: pd.DataFrame
-) -> Tuple[Dict[str, np.ndarray], pd.DataFrame, Dataset]:
-    """Get envelope troughs to determine chirp assignment.
-
-    This algorithm assigns chirps to wavetracker tracks by a series of steps:
-    1. clean the chirp bboxes
-    2. for each fish track, filter the signal on the best electrode
-    3. find troughs in the envelope of the filtered signal
-    4. compute the prominence of the trough and the distance to the chirp
-    center
-    5. compute a cost function that is high when the trough prominence is high
-    and the distance to the chirp center is low
-    6. compare the value of the cost function for each track and choose the
-    track with the highest cost function value
-
-    Parameters
-    ----------
-    - `data`: `gridtools.datasets.Dataset`
-        Dataset object containing the data
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-    """
-    # clean the chirp bboxes
-    chirp_df = cleanup(chirp_df, data)
-
-    # now loop over all tracks and assign chirps to tracks
-    chirp_indices = []  # index of chirp in chirp_df
-    track_ids = []  # id of track / fish
-    peak_prominences = []  # prominence of trough in envelope
-    peak_distances = []  # distance of trough to chirp center
-    peak_times = []  # time of trough in envelope, should be close to chirp
-
-    for fish_id in data.track.ids:
-        # get chirps, times and freqs and powers for this track
-        chirps = np.array(chirp_df.chirp_times.values)
-        time = data.track.times[
-            data.track.indices[data.track.idents == fish_id]
-        ]
-        freq = data.track.freqs[data.track.idents == fish_id]
-        powers = data.track.powers[data.track.idents == fish_id, :]
-
-        for idx, chirp in enumerate(chirps):
-            # find the closest time, freq and power to the chirp time
-            closest_idx = np.argmin(np.abs(time - chirp))
-            best_electrode = np.argmax(powers[closest_idx, :]).astype(int)
-            second_best_electrode = np.argsort(powers[closest_idx, :])[-2]
-            best_freq = freq[closest_idx]
-
-            # check if chirp overlaps with track
-            f1 = chirp_df.f1.to_numpy()[idx]
-            f2 = chirp_df.f2.to_numpy()[idx]
-            f2 = f1 + (f2 - f1) * 0.5  # range is the lower half of the bbox
-            if (f1 > best_freq) or (f2 < best_freq):
-                peak_distances.append(np.nan)
-                peak_prominences.append(np.nan)
-                peak_times.append(np.nan)
-                chirp_indices.append(idx)
-                track_ids.append(fish_id)
-                continue
-
-            # determine start and stop index of time window on raw data
-            # using bounding box start and stop times of chirp detection
-            start_idx, stop_idx, center_idx = make_indices(
-                chirp_df, data, idx, chirp
-            )
-
-            indices = (start_idx, stop_idx, center_idx)
-            peaks, proms = extract_envelope_trough(
-                data,
-                best_electrode,
-                second_best_electrode,
-                best_freq,
-                indices,
-            )
-
-            # if no peaks are found, skip this chirp
-            if len(peaks) == 0:
-                peak_distances.append(np.nan)
-                peak_prominences.append(np.nan)
-                peak_times.append(np.nan)
-                chirp_indices.append(idx)
-                track_ids.append(fish_id)
-                continue
-
-            # compute index to closest peak to chirp center
-            distances = np.abs(peaks - (center_idx - start_idx))
-            closest_peak_idx = np.argmin(distances)
-
-            # store peak prominence and distance to chirp center
-            peak_distances.append(distances[closest_peak_idx])
-            peak_prominences.append(proms[closest_peak_idx])
-            peak_times.append(
-                (start_idx + peaks[closest_peak_idx]) / data.grid.samplerate,
-            )
-            chirp_indices.append(idx)
-            track_ids.append(fish_id)
-
-    peak_prominences = np.array(peak_prominences)
-    peak_distances = (
-        np.array(peak_distances) + 1
-    )  # add 1 to avoid division by zero
-    peak_times = np.array(peak_times)
-    chirp_indices = np.array(chirp_indices)
-    track_ids = np.array(track_ids)
-
-    assignment_data = {
-        "proms": peak_prominences,
-        "peaks": peak_distances,
-        "ptimes": peak_times,
-        "cindices": chirp_indices,
-        "track_ids": track_ids,
-    }
-    return (
-        assignment_data,
-        chirp_df,
-        data,
-    )
+482
+483
def extract_assignment_data(
+    data: Dataset, chirp_df: pd.DataFrame
+) -> Tuple[Dict[str, np.ndarray], pd.DataFrame, Dataset]:
+    """Get envelope troughs to determine chirp assignment.
+
+    This algorithm assigns chirps to wavetracker tracks by a series of steps:
+    1. clean the chirp bboxes
+    2. for each fish track, filter the signal on the best electrode
+    3. find troughs in the envelope of the filtered signal
+    4. compute the prominence of the trough and the distance to the chirp
+    center
+    5. compute a cost function that is high when the trough prominence is high
+    and the distance to the chirp center is low
+    6. compare the value of the cost function for each track and choose the
+    track with the highest cost function value
+
+    Parameters
+    ----------
+    - `data`: `gridtools.datasets.Dataset`
+        Dataset object containing the data
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+    """
+    # clean the chirp bboxes
+    chirp_df = cleanup(chirp_df, data)
+
+    # now loop over all tracks and assign chirps to tracks
+    chirp_indices = []  # index of chirp in chirp_df
+    track_ids = []  # id of track / fish
+    peak_prominences = []  # prominence of trough in envelope
+    peak_distances = []  # distance of trough to chirp center
+    peak_times = []  # time of trough in envelope, should be close to chirp
+
+    for fish_id in data.track.ids:
+        # get chirps, times and freqs and powers for this track
+        chirps = np.array(chirp_df.chirp_times.values)
+        time = data.track.times[
+            data.track.indices[data.track.idents == fish_id]
+        ]
+        freq = data.track.freqs[data.track.idents == fish_id]
+        powers = data.track.powers[data.track.idents == fish_id, :]
+
+        if len(time) == 0:
+            continue  # skip if no track is found
+
+        for idx, chirp in enumerate(chirps):
+            # find the closest time, freq and power to the chirp time
+            closest_idx = np.argmin(np.abs(time - chirp))
+            best_electrode = np.argmax(powers[closest_idx, :]).astype(int)
+            second_best_electrode = np.argsort(powers[closest_idx, :])[-2]
+            best_freq = freq[closest_idx]
+
+            # check if chirp overlaps with track
+            f1 = chirp_df.f1.to_numpy()[idx]
+            f2 = chirp_df.f2.to_numpy()[idx]
+            f2 = f1 + (f2 - f1) * 0.5  # range is the lower half of the bbox
+            if (f1 > best_freq) or (f2 < best_freq):
+                peak_distances.append(np.nan)
+                peak_prominences.append(np.nan)
+                peak_times.append(np.nan)
+                chirp_indices.append(idx)
+                track_ids.append(fish_id)
+                continue
+
+            # determine start and stop index of time window on raw data
+            # using bounding box start and stop times of chirp detection
+            start_idx, stop_idx, center_idx = make_indices(
+                chirp_df, data, idx, chirp
+            )
+
+            indices = (start_idx, stop_idx, center_idx)
+            peaks, proms = extract_envelope_trough(
+                data,
+                best_electrode,
+                second_best_electrode,
+                best_freq,
+                indices,
+            )
+
+            # if no peaks are found, skip this chirp
+            if len(peaks) == 0:
+                peak_distances.append(np.nan)
+                peak_prominences.append(np.nan)
+                peak_times.append(np.nan)
+                chirp_indices.append(idx)
+                track_ids.append(fish_id)
+                continue
+
+            # compute index to closest peak to chirp center
+            distances = np.abs(peaks - (center_idx - start_idx))
+            closest_peak_idx = np.argmin(distances)
+
+            # store peak prominence and distance to chirp center
+            peak_distances.append(distances[closest_peak_idx])
+            peak_prominences.append(proms[closest_peak_idx])
+            peak_times.append(
+                (start_idx + peaks[closest_peak_idx]) / data.grid.samplerate,
+            )
+            chirp_indices.append(idx)
+            track_ids.append(fish_id)
+
+    peak_prominences = np.array(peak_prominences)
+    peak_distances = (
+        np.array(peak_distances) + 1
+    )  # add 1 to avoid division by zero
+    peak_times = np.array(peak_times)
+    chirp_indices = np.array(chirp_indices)
+    track_ids = np.array(track_ids)
+
+    assignment_data = {
+        "proms": peak_prominences,
+        "peaks": peak_distances,
+        "ptimes": peak_times,
+        "cindices": chirp_indices,
+        "track_ids": track_ids,
+    }
+    return (
+        assignment_data,
+        chirp_df,
+        data,
+    )
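Note the halved upper bound in the overlap check: only the lower half of the bbox counts, presumably because the baseline frequency of the emitting fish sits below the upward frequency excursion of the chirp. With hypothetical numbers:

f1, f2, best_freq = 600.0, 700.0, 680.0   # bbox 600-700 Hz, track at 680 Hz
f2 = f1 + (f2 - f1) * 0.5                 # effective range is now 600-650 Hz
print((f1 > best_freq) or (f2 < best_freq))  # True -> this track is skipped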
 
@@ -2437,7 +2454,9 @@

Returns Source code in chirpdetector/assign_chirps.py -
298
+            
296
+297
+298
 299
 300
 301
@@ -2499,73 +2518,71 @@ 

Returns 357 358 359 -360 -361 -362

def extract_envelope_trough(
-    data: Dataset,
-    best_electrode: int,
-    second_best_electrode: int,
-    best_freq: float,
-    indices: Tuple[int, int, int],
-) -> Tuple[np.ndarray, np.ndarray]:
-    """Extract envelope troughs.
-
-    Extracts a snippet from the raw data around the chirp time and computes
-    the envelope of the bandpass filtered signal. Then finds the troughs in
-    the envelope and computes their prominences.
-
-    Parameters
-    ----------
-    - `data`: `gridtools.datasets.Dataset`
-        Dataset object containing the data
-    - `best_electrode`: `int`
-        Index of the best electrode
-    - `second_best_electrode`: `int`
-        Index of the second best electrode
-    - `best_freq`: `float`
-        Frequency of the chirp
-    - `indices`: `Tuple[int, int, int]`
-        Tuple containing the start, stop, and center indices of the chirp
-
-    Returns
-    -------
-    - `peaks`: `np.ndarray`
-        Indices of the envelope troughs
-    - `proms`: `np.ndarray`
-        Prominences of the envelope troughs
-    """
-    start_idx, stop_idx, _ = indices
-
-    # determine bandpass cutoffs above and below baseline frequency
-    lower_f = best_freq - 15
-    upper_f = best_freq + 15
-
-    # get the raw signal on the 2 best electrodes and make differential
-    raw1 = data.grid.rec[start_idx:stop_idx, best_electrode]
-    raw2 = data.grid.rec[start_idx:stop_idx, second_best_electrode]
-    raw = raw1 - raw2
-
-    # bandpass filter the raw signal
-    raw_filtered = bandpass_filter(
-        raw,
-        data.grid.samplerate,
-        lower_f,
-        upper_f,
-    )
-
-    # compute the envelope of the filtered signal
-    env = envelope(
-        signal=raw_filtered,
-        samplerate=data.grid.samplerate,
-        cutoff_frequency=50,
-    )
-    peaks, proms = get_env_trough(env, raw_filtered)
-    # mpl.use("TkAgg")
-    # plt.plot(env)
-    # plt.plot(raw_filtered)
-    # plt.plot(peaks, env[peaks], "x")
-    # plt.show()
-    return peaks, proms
+360
def extract_envelope_trough(
+    data: Dataset,
+    best_electrode: int,
+    second_best_electrode: int,
+    best_freq: float,
+    indices: Tuple[int, int, int],
+) -> Tuple[np.ndarray, np.ndarray]:
+    """Extract envelope troughs.
+
+    Extracts a snippet from the raw data around the chirp time and computes
+    the envelope of the bandpass filtered signal. Then finds the troughs in
+    the envelope and computes their prominences.
+
+    Parameters
+    ----------
+    - `data`: `gridtools.datasets.Dataset`
+        Dataset object containing the data
+    - `best_electrode`: `int`
+        Index of the best electrode
+    - `second_best_electrode`: `int`
+        Index of the second best electrode
+    - `best_freq`: `float`
+        Frequency of the chirp
+    - `indices`: `Tuple[int, int, int]`
+        Tuple containing the start, stop, and center indices of the chirp
+
+    Returns
+    -------
+    - `peaks`: `np.ndarray`
+        Indices of the envelope troughs
+    - `proms`: `np.ndarray`
+        Prominences of the envelope troughs
+    """
+    start_idx, stop_idx, _ = indices
+
+    # determine bandpass cutoffs above and below baseline frequency
+    lower_f = best_freq - 15
+    upper_f = best_freq + 15
+
+    # get the raw signal on the 2 best electrodes and make differential
+    raw1 = data.grid.rec[start_idx:stop_idx, best_electrode]
+    raw2 = data.grid.rec[start_idx:stop_idx, second_best_electrode]
+    raw = raw1 - raw2
+
+    # bandpass filter the raw signal
+    raw_filtered = bandpass_filter(
+        raw,
+        data.grid.samplerate,
+        lower_f,
+        upper_f,
+    )
+
+    # compute the envelope of the filtered signal
+    env = envelope(
+        signal=raw_filtered,
+        samplerate=data.grid.samplerate,
+        cutoff_frequency=50,
+    )
+    peaks, proms = get_env_trough(env, raw_filtered)
+    # mpl.use("TkAgg")
+    # plt.plot(env)
+    # plt.plot(raw_filtered)
+    # plt.plot(peaks, env[peaks], "x")
+    # plt.show()
+    return peaks, proms
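The bandpass_filter and envelope helpers are imported from elsewhere and not defined in this module, so the following is only a rough, self-contained sketch of the same pipeline using scipy; the filter order, the synthetic signal, and the Hilbert-based envelope are assumptions, not the package's exact implementation:

import numpy as np
from scipy.signal import butter, sosfiltfilt, hilbert

fs = 20_000.0
t = np.arange(0, 1.0, 1 / fs)
# synthetic 600 Hz carrier with an amplitude dip in the middle,
# mimicking the envelope trough a chirp leaves on the baseline signal
amp = 1.0 - 0.6 * np.exp(-((t - 0.5) ** 2) / (2 * 0.01**2))
sig = amp * np.sin(2 * np.pi * 600 * t)

# zero-phase Butterworth bandpass +-15 Hz around the baseline frequency
sos = butter(4, [585, 615], btype="bandpass", fs=fs, output="sos")
filtered = sosfiltfilt(sos, sig)
env = np.abs(hilbert(filtered))  # simple envelope substitute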
 
@@ -2604,7 +2621,9 @@

Returns

Source code in chirpdetector/assign_chirps.py -
264
+            
262
+263
+264
 265
 266
 267
@@ -2633,40 +2652,38 @@ 

Returns

290 291 292 -293 -294 -295
def get_env_trough(
-    env: np.ndarray,
-    raw: np.ndarray,
-) -> Tuple[np.ndarray, np.ndarray]:
-    """Get the envelope troughs and their prominences.
-
-    Parameters
-    ----------
-    - `env`: `np.ndarray`
-        Envelope of the filtered signal
-    - `raw`: `np.ndarray`
-        Raw signal
-
-    Returns
-    -------
-    - `peaks`: `np.ndarray`
-        Indices of the envelope troughs
-    - `proms`: `np.ndarray`
-        Prominences of the envelope troughs
-    """
-    # normalize the envelope using the amplitude of the raw signal
-    # to preserve the amplitude of the envelope
-    env = env / np.max(np.abs(raw))
-
-    # cut off the first and last 25% of the envelope
-    env[: int(0.25 * len(env))] = np.nan
-    env[int(0.75 * len(env)) :] = np.nan
-
-    # find troughs in the envelope and compute trough prominences
-    peaks, params = find_peaks(-env, prominence=1e-3)
-    proms = params["prominences"]
-    return peaks, proms
+293
def get_env_trough(
+    env: np.ndarray,
+    raw: np.ndarray,
+) -> Tuple[np.ndarray, np.ndarray]:
+    """Get the envelope troughs and their prominences.
+
+    Parameters
+    ----------
+    - `env`: `np.ndarray`
+        Envelope of the filtered signal
+    - `raw`: `np.ndarray`
+        Raw signal
+
+    Returns
+    -------
+    - `peaks`: `np.ndarray`
+        Indices of the envelope troughs
+    - `proms`: `np.ndarray`
+        Prominences of the envelope troughs
+    """
+    # normalize the envelope using the amplitude of the raw signal
+    # to preserve the amplitude of the envelope
+    env = env / np.max(np.abs(raw))
+
+    # cut off the first and last 25% of the envelope
+    env[: int(0.25 * len(env))] = np.nan
+    env[int(0.75 * len(env)) :] = np.nan
+
+    # find troughs in the envelope and compute trough prominences
+    peaks, params = find_peaks(-env, prominence=1e-3)
+    proms = params["prominences"]
+    return peaks, proms
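Troughs are found by negating the envelope and running a standard peak finder; a small synthetic example, with the same NaN edge masking as above:

import numpy as np
from scipy.signal import find_peaks

env = np.ones(1000)
env[480:520] -= 0.5 * np.hanning(40)  # one clear dip around index 500

env[:250] = np.nan   # NaN-mask the outer quarters so edge artifacts
env[750:] = np.nan   # cannot produce spurious troughs

peaks, params = find_peaks(-env, prominence=1e-3)
print(peaks, params["prominences"])  # one trough near index 500, prominence ~0.5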
 
@@ -2711,7 +2728,9 @@

Returns

Source code in chirpdetector/assign_chirps.py -
226
+            
224
+225
+226
 227
 228
 229
@@ -2744,44 +2763,42 @@ 

Returns

256 257 258 -259 -260 -261
def make_indices(
-    chirp_df: pd.DataFrame, data: Dataset, idx: int, chirp: float
-) -> Tuple[int, int, int]:
-    """Make indices for the chirp window.
-
-    Parameters
-    ----------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-    - `data`: `gridtools.datasets.Dataset`
-        Dataset object containing the data
-    - `idx`: `int`
-        Index of the chirp in the chirp_df
-    - `chirp`: `float`
-        Chirp time
-
-    Returns
-    -------
-    - `start_idx`: `int`
-        Start index of the chirp window
-    - `stop_idx`: `int`
-        Stop index of the chirp window
-    - `center_idx`: `int`
-        Center index of the chirp window
-    """
-    # determine start and stop index of time window on raw data
-    # using bounding box start and stop times of chirp detection
-    diffr = chirp_df.t2.to_numpy()[idx] - chirp_df.t1.to_numpy()[idx]
-    t1 = chirp_df.t1.to_numpy()[idx] - 0.5 * diffr
-    t2 = chirp_df.t2.to_numpy()[idx] + 0.5 * diffr
-
-    start_idx = int(np.round(t1 * data.grid.samplerate))
-    stop_idx = int(np.round(t2 * data.grid.samplerate))
-    center_idx = int(np.round(chirp * data.grid.samplerate))
-
-    return start_idx, stop_idx, center_idx
+259
def make_indices(
+    chirp_df: pd.DataFrame, data: Dataset, idx: int, chirp: float
+) -> Tuple[int, int, int]:
+    """Make indices for the chirp window.
+
+    Parameters
+    ----------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+    - `data`: `gridtools.datasets.Dataset`
+        Dataset object containing the data
+    - `idx`: `int`
+        Index of the chirp in the chirp_df
+    - `chirp`: `float`
+        Chirp time
+
+    Returns
+    -------
+    - `start_idx`: `int`
+        Start index of the chirp window
+    - `stop_idx`: `int`
+        Stop index of the chirp window
+    - `center_idx`: `int`
+        Center index of the chirp window
+    """
+    # determine start and stop index of time window on raw data
+    # using bounding box start and stop times of chirp detection
+    diffr = chirp_df.t2.to_numpy()[idx] - chirp_df.t1.to_numpy()[idx]
+    t1 = chirp_df.t1.to_numpy()[idx] - 0.5 * diffr
+    t2 = chirp_df.t2.to_numpy()[idx] + 0.5 * diffr
+
+    start_idx = int(np.round(t1 * data.grid.samplerate))
+    stop_idx = int(np.round(t2 * data.grid.samplerate))
+    center_idx = int(np.round(chirp * data.grid.samplerate))
+
+    return start_idx, stop_idx, center_idx
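Concretely, the window is the bbox padded by half its width on each side; a hypothetical bbox at a 20 kHz samplerate:

# bbox from t1 = 10.0 s to t2 = 10.2 s
diffr = 10.2 - 10.0       # 0.2 s
t1 = 10.0 - 0.5 * diffr   # 9.9 s  -> start_idx = 198_000
t2 = 10.2 + 0.5 * diffr   # 10.3 s -> stop_idx  = 206_000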
 
@@ -2819,7 +2836,9 @@

Returns Source code in chirpdetector/assign_chirps.py -
 30
+            
 28
+ 29
+ 30
  31
  32
  33
@@ -2891,83 +2910,81 @@ 

Returns 99 100 101 -102 -103 -104

def non_max_suppression_fast(
-    chirp_df: pd.DataFrame,
-    overlapthresh: float,
-) -> list:
-    """Raster implementation of non-maximum suppression.
+102
def non_max_suppression_fast(
+    chirp_df: pd.DataFrame,
+    overlapthresh: float,
+) -> list:
+    """Raster implementation of non-maximum suppression.
+
+    To remove overlapping bounding boxes.
 
-    To remove overlapping bounding boxes.
-
-    Parameters
-    ----------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-    - `overlapthresh`: `float`
-        Threshold for overlap between bboxes
-
-    Returns
-    -------
-    - `pick`: `list`
-        List of indices of bboxes to keep
-    """
-    # slightly modified version of
-    # https://pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
-
-    # convert the relevant bbox columns to a numpy array
-    boxes = chirp_df[["t1", "f1", "t2", "f2"]].to_numpy()
-
-    # if there are no boxes, return an empty list
-    if len(boxes) == 0:
-        return []
-
-    # initialize the list of picked indexes
-    pick = []
-
-    # grab the coordinates of the bounding boxes
-    x1 = boxes[:, 0]
-    y1 = boxes[:, 1]
-    x2 = boxes[:, 2]
-    y2 = boxes[:, 3]
-
-    # compute the area of the bounding boxes and sort the bounding
-    # boxes by the bottom-right y-coordinate of the bounding box
-    area = (x2 - x1) * (y2 - y1)
-    idxs = np.argsort(y2)
-
-    # keep looping while some indexes still remain in the indexes
-    # list
-    while len(idxs) > 0:
-        # grab the last index in the indexes list and add the
-        # index value to the list of picked indexes
-        last = len(idxs) - 1
-        i = idxs[last]
-        pick.append(i)
-
-        # find the largest (x, y) coordinates for the start of
-        # the bounding box and the smallest (x, y) coordinates
-        # for the end of the bounding box
-        xx1 = np.maximum(x1[i], x1[idxs[:last]])
-        yy1 = np.maximum(y1[i], y1[idxs[:last]])
-        xx2 = np.minimum(x2[i], x2[idxs[:last]])
-        yy2 = np.minimum(y2[i], y2[idxs[:last]])
-
-        # compute the width and height of the bounding box
-        w = np.maximum(0, xx2 - xx1)
-        h = np.maximum(0, yy2 - yy1)
-
-        # compute the ratio of overlap (intersection area divided by
-        # the area of the candidate box)
-        overlap = (w * h) / area[idxs[:last]]
-
-        # delete all indexes from the index list that have an overlap
-        # greater than the provided threshold
-        idxs = np.delete(
-            idxs,
-            np.concatenate(([last], np.where(overlap > overlapthresh)[0])),
-        )
-
-    # return the indices of the picked boxes
-    return pick
+    Parameters
+    ----------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+    - `overlapthresh`: `float`
+        Threshold for overlap between bboxes
+
+    Returns
+    -------
+    - `pick`: `list`
+        List of indices of bboxes to keep
+    """
+    # slightly modified version of
+    # https://pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
+
+    # convert the relevant bbox columns to a numpy array
+    boxes = chirp_df[["t1", "f1", "t2", "f2"]].to_numpy()
+
+    # if there are no boxes, return an empty list
+    if len(boxes) == 0:
+        return []
+
+    # initialize the list of picked indexes
+    pick = []
+
+    # grab the coordinates of the bounding boxes
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+
+    # compute the area of the bounding boxes and sort the bounding
+    # boxes by the bottom-right y-coordinate of the bounding box
+    area = (x2 - x1) * (y2 - y1)
+    idxs = np.argsort(y2)
+
+    # keep looping while some indexes still remain in the indexes
+    # list
+    while len(idxs) > 0:
+        # grab the last index in the indexes list and add the
+        # index value to the list of picked indexes
+        last = len(idxs) - 1
+        i = idxs[last]
+        pick.append(i)
+
+        # find the largest (x, y) coordinates for the start of
+        # the bounding box and the smallest (x, y) coordinates
+        # for the end of the bounding box
+        xx1 = np.maximum(x1[i], x1[idxs[:last]])
+        yy1 = np.maximum(y1[i], y1[idxs[:last]])
+        xx2 = np.minimum(x2[i], x2[idxs[:last]])
+        yy2 = np.minimum(y2[i], y2[idxs[:last]])
+
+        # compute the width and height of the bounding box
+        w = np.maximum(0, xx2 - xx1)
+        h = np.maximum(0, yy2 - yy1)
+
+        # compute the ratio of overlap (intersection area divided by
+        # the area of the candidate box)
+        overlap = (w * h) / area[idxs[:last]]
+
+        # delete all indexes from the index list that have an overlap
+        # greater than the provided threshold
+        idxs = np.delete(
+            idxs,
+            np.concatenate(([last], np.where(overlap > overlapthresh)[0])),
+        )
+
+    # return the indices of the picked boxes
+    return pick
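A toy run with two heavily overlapping boxes and one separate box; note that the overlap ratio is intersection over the candidate box's own area rather than a true IoU, so similar-sized boxes are suppressed more readily:

import pandas as pd
from chirpdetector.assign_chirps import non_max_suppression_fast

chirp_df = pd.DataFrame(
    {
        "t1": [0.0, 0.1, 5.0],
        "f1": [500.0, 510.0, 600.0],
        "t2": [1.0, 1.1, 6.0],
        "f2": [700.0, 710.0, 800.0],
    }
)
print(non_max_suppression_fast(chirp_df, 0.5))  # [2, 1]: box 0 is suppressed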
 
@@ -3006,7 +3023,9 @@

Returns

Source code in chirpdetector/assign_chirps.py -
107
+            
105
+106
+107
 108
 109
 110
@@ -3044,49 +3063,47 @@ 

Returns

142 143 144 -145 -146 -147
def track_filter(
-    chirp_df: pd.DataFrame,
-    minf: float,
-    maxf: float,
-) -> pd.DataFrame:
-    """Remove chirp bboxes that do not overlap with tracks.
-
-    Parameters
-    ----------
-    - `chirp_df`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes
-    - `minf`: `float`
-        Minimum frequency of the range
-    - `maxf`: `float`
-        Maximum frequency of the range
-
-    Returns
-    -------
-    - `chirp_df_tf`: `pd.DataFrame`
-        Dataframe containing the chirp bboxes that overlap with the range
-    """
-    # remove all chirp bboxes that have no overlap with the range spanned by
-    # minf and maxf
-
-    # first build a box that spans the entire range
-    range_box = np.array([0, minf, np.max(chirp_df.t2), maxf])
-
-    # now compute the intersection between the range box and each chirp bbox
-    # and keep only those that have an intersection area > 0
-    chirp_df_tf = chirp_df.copy()
-    intersection = chirp_df_tf.apply(
-        lambda row: (
-            max(0, min(row["t2"], range_box[2]) - max(row["t1"], range_box[0]))
-            * max(
-                0,
-                min(row["f2"], range_box[3]) - max(row["f1"], range_box[1]),
-            )
-        ),
-        axis=1,
-    )
-    return chirp_df_tf.loc[intersection > 0, :]
+145
def track_filter(
+    chirp_df: pd.DataFrame,
+    minf: float,
+    maxf: float,
+) -> pd.DataFrame:
+    """Remove chirp bboxes that do not overlap with tracks.
+
+    Parameters
+    ----------
+    - `chirp_df`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes
+    - `minf`: `float`
+        Minimum frequency of the range
+    - `maxf`: `float`
+        Maximum frequency of the range
+
+    Returns
+    -------
+    - `chirp_df_tf`: `pd.DataFrame`
+        Dataframe containing the chirp bboxes that overlap with the range
+    """
+    # remove all chirp bboxes that have no overlap with the range spanned by
+    # minf and maxf
+
+    # first build a box that spans the entire range
+    range_box = np.array([0, minf, np.max(chirp_df.t2), maxf])
+
+    # now compute the intersection between the range box and each chirp bbox
+    # and keep only those that have an intersection area > 0
+    chirp_df_tf = chirp_df.copy()
+    intersection = chirp_df_tf.apply(
+        lambda row: (
+            max(0, min(row["t2"], range_box[2]) - max(row["t1"], range_box[0]))
+            * max(
+                0,
+                min(row["f2"], range_box[3]) - max(row["f1"], range_box[1]),
+            )
+        ),
+        axis=1,
+    )
+    return chirp_df_tf.loc[intersection > 0, :]
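A minimal check with a hypothetical 500-900 Hz track range: the first box straddles it, the second sits entirely above it and is dropped:

import pandas as pd
from chirpdetector.assign_chirps import track_filter

chirp_df = pd.DataFrame(
    {
        "t1": [1.0, 2.0],
        "f1": [600.0, 950.0],
        "t2": [1.5, 2.5],
        "f2": [800.0, 1100.0],
    }
)
print(track_filter(chirp_df, minf=500.0, maxf=900.0).index.to_list())  # [0]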
 
@@ -3187,5 +3204,5 @@

Returns

- + \ No newline at end of file diff --git a/api/convert_data/index.html b/api/convert_data/index.html index 606937f..01969cb 100644 --- a/api/convert_data/index.html +++ b/api/convert_data/index.html @@ -55,7 +55,18 @@ - + @@ -2815,5 +2826,5 @@

Returns

- + \ No newline at end of file diff --git a/api/dataset_utils/index.html b/api/dataset_utils/index.html index 25cdee9..fc3a943 100644 --- a/api/dataset_utils/index.html +++ b/api/dataset_utils/index.html @@ -53,7 +53,18 @@ - + @@ -1920,5 +1931,5 @@

Returns

- + \ No newline at end of file diff --git a/api/detect_chirps/index.html b/api/detect_chirps/index.html index 8addc91..30de3f3 100644 --- a/api/detect_chirps/index.html +++ b/api/detect_chirps/index.html @@ -55,7 +55,18 @@ - + @@ -1819,7 +1830,7 @@

Returns

if idx2 > data.grid.rec.shape[0]: idx2 = data.grid.rec.shape[0] - 1 - # This bit should alleviate the edge effects of the tracks + # This bit should alleviate the edge effects of the tracks # by limiting the start and stop times of the spectrogram # to the start and stop times of the track. start_t = idx1 / data.grid.samplerate @@ -2733,5 +2744,5 @@

Returns

- + \ No newline at end of file diff --git a/api/index.html b/api/index.html deleted file mode 100644 index cb99cba..0000000 --- a/api/index.html +++ /dev/null @@ -1,1010 +0,0 @@ - - - - - - - - - - - - - - - - - - - Api - chirpdetector - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - - - \ No newline at end of file diff --git a/api/plot_detections/index.html b/api/plot_detections/index.html index 34d28fe..6cd54dc 100644 --- a/api/plot_detections/index.html +++ b/api/plot_detections/index.html @@ -55,7 +55,18 @@ - + @@ -1267,11 +1278,7 @@

Parameters
Source code in chirpdetector/plot_detections.py -
316
-317
-318
-319
-320
+            
320
 321
 322
 323
@@ -1281,21 +1288,25 @@ 

Parameters 327 328 329 -330

def clean_all_plots_cli(path: pathlib.Path) -> None:
-    """Remove all plots from the chirpdetections folder.
-
-    Parameters
-    ----------
-    path : pathlib.Path
-        Path to the config file.
-    """
-    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]
-    with prog:
-        task = prog.add_task("Cleaning plots...", total=len(dirs))
-        for dataset in dirs:
-            prog.console.log(f"Cleaning plots for {dataset.name}")
-            clean_plots_cli(dataset)
-            prog.advance(task)
+330
+331
+332
+333
+334
def clean_all_plots_cli(path: pathlib.Path) -> None:
+    """Remove all plots from the chirpdetections folder.
+
+    Parameters
+    ----------
+    path : pathlib.Path
+        Path to the config file.
+    """
+    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]
+    with prog:
+        task = prog.add_task("Cleaning plots...", total=len(dirs))
+        for dataset in dirs:
+            prog.console.log(f"Cleaning plots for {dataset.name}")
+            clean_plots_cli(dataset)
+            prog.advance(task)
 
@@ -1323,27 +1334,27 @@

Parameters Source code in chirpdetector/plot_detections.py -
271
-272
-273
-274
-275
+            
def clean_plots_cli(path: pathlib.Path) -> None:
-    """Remove all plots from the chirpdetections folder.
-
-    Parameters
-    ----------
-    path : pathlib.Path
-        Path to the config file.
-    """
-    savepath = path / "chirpdetections"
-    for f in savepath.iterdir():
-        f.unlink()
+281
+282
+283
+284
+285
def clean_plots_cli(path: pathlib.Path) -> None:
+    """Remove all plots from the chirpdetections folder.
+
+    Parameters
+    ----------
+    path : pathlib.Path
+        Path to the config file.
+    """
+    savepath = path / "chirpdetections"
+    for f in savepath.iterdir():
+        f.unlink()
 
@@ -1371,11 +1382,7 @@

Param
Source code in chirpdetector/plot_detections.py -
296
-297
-298
-299
-300
+            
300
 301
 302
 303
@@ -1389,25 +1396,29 @@ 

Parameters 311 312 313 -314

def plot_all_detections_cli(path: pathlib.Path) -> None:
-    """Plot detections on images.
-
-    Parameters
-    ----------
-    path : pathlib.Path
-        Path to the config file.
-    """
-    conf = load_config(path / "chirpdetector.toml")
-
-    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]
-    with prog:
-        task = prog.add_task("Plotting detections...", total=len(dirs))
-        for dataset in dirs:
-            prog.console.log(f"Plotting detections for {dataset.name}")
-            data = load(dataset)
-            chirp_df = pd.read_csv(dataset / "chirpdetector_bboxes.csv")
-            plot_detections(data, chirp_df, conf)
-            prog.advance(task)
+314
+315
+316
+317
+318
def plot_all_detections_cli(path: pathlib.Path) -> None:
+    """Plot detections on images.
+
+    Parameters
+    ----------
+    path : pathlib.Path
+        Path to the config file.
+    """
+    conf = load_config(path / "chirpdetector.toml")
+
+    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]
+    with prog:
+        task = prog.add_task("Plotting detections...", total=len(dirs))
+        for dataset in dirs:
+            prog.console.log(f"Plotting detections for {dataset.name}")
+            data = load(dataset)
+            chirp_df = pd.read_csv(dataset / "chirpdetector_bboxes.csv")
+            plot_detections(data, chirp_df, conf)
+            prog.advance(task)
 
@@ -1439,11 +1450,7 @@

Parameters Source code in chirpdetector/plot_detections.py -
 39
- 40
- 41
- 42
- 43
+            
 43
  44
  45
  46
@@ -1669,237 +1676,241 @@ 

Parameters 266 267 268 -269

def plot_detections(
-    data: Dataset,
-    chirp_df: pd.DataFrame,
-    conf: Config,
-) -> None:
-    """Plot detections on spectrograms.
-
-    Parameters
-    ----------
-    data : Dataset
-        The dataset.
-    chirp_df : pd.DataFrame
-        The dataframe containing the chirp detections.
-    conf : Config
-        The config file.
-    """
-    time_window = 15
-    n_electrodes = data.grid.rec.shape[1]
-
-    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples
-    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples
-    chunksize = time_window * data.grid.samplerate  # samples
-    nchunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)
-    window_overlap_samples = int(conf.spec.spec_overlap * data.grid.samplerate)
-
-    for chunk_no in range(nchunks):
-        # get start and stop indices for the current chunk
-        # including some overlap to compensate for edge effects
-        # this differs for the first and last chunk
-
-        if chunk_no == 0:
-            idx1 = int(chunk_no * chunksize)
-            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)
-        elif chunk_no == nchunks - 1:
-            idx1 = int(chunk_no * chunksize - window_overlap_samples)
-            idx2 = int((chunk_no + 1) * chunksize)
-        else:
-            idx1 = int(chunk_no * chunksize - window_overlap_samples)
-            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)
-
-        # idx1 and idx2 now determine the window I cut out of the raw signal
-        # to compute the spectrogram of.
-
-        # compute the time and frequency axes of the spectrogram now that we
-        # include the start and stop indices of the current chunk and thus the
-        # right start and stop time. The `spectrogram` function does not know
-        # about this and would start every time axis at 0.
-        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate
-        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft
-
-        # create a subset from the grid dataset
-        if idx2 > data.grid.rec.shape[0]:
-            idx2 = data.grid.rec.shape[0] - 1
-        chunk = subset(data, idx1, idx2, mode="index")
-
-        # dont plot chunks without chirps
-        if len(chunk.com.chirp.times) == 0:
-            continue
+269
+270
+271
+272
+273
def plot_detections(
+    data: Dataset,
+    chirp_df: pd.DataFrame,
+    conf: Config,
+) -> None:
+    """Plot detections on spectrograms.
+
+    Parameters
+    ----------
+    data : Dataset
+        The dataset.
+    chirp_df : pd.DataFrame
+        The dataframe containing the chirp detections.
+    conf : Config
+        The config file.
+    """
+    time_window = 15
+    n_electrodes = data.grid.rec.shape[1]
+
+    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples
+    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples
+    chunksize = time_window * data.grid.samplerate  # samples
+    nchunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)
+    window_overlap_samples = int(conf.spec.spec_overlap * data.grid.samplerate)
+
+    for chunk_no in range(nchunks):
+        # get start and stop indices for the current chunk
+        # including some overlap to compensate for edge effects
+        # this differs for the first and last chunk
+
+        if chunk_no == 0:
+            idx1 = int(chunk_no * chunksize)
+            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)
+        elif chunk_no == nchunks - 1:
+            idx1 = int(chunk_no * chunksize - window_overlap_samples)
+            idx2 = int((chunk_no + 1) * chunksize)
+        else:
+            idx1 = int(chunk_no * chunksize - window_overlap_samples)
+            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)
+
+        # idx1 and idx2 now determine the window I cut out of the raw signal
+        # to compute the spectrogram of.
+
+        # compute the time and frequency axes of the spectrogram now that we
+        # include the start and stop indices of the current chunk and thus the
+        # right start and stop time. The `spectrogram` function does not know
+        # about this and would start every time axis at 0.
+        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate
+        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft
+
+        # create a subset from the grid dataset
+        if idx2 > data.grid.rec.shape[0]:
+            idx2 = data.grid.rec.shape[0] - 1
+        chunk = subset(data, idx1, idx2, mode="index")
 
-        # compute the spectrogram for each electrode of the current chunk
-        spec = torch.zeros((len(spec_freqs), len(spec_times)))
-        for el in range(n_electrodes):
-            # get the signal for the current electrode
-            sig = chunk.grid.rec[:, el]
-
-            # compute the spectrogram for the current electrode
-            chunk_spec, _, _ = spectrogram(
-                data=sig.copy(),
-                samplingrate=data.grid.samplerate,
-                nfft=nfft,
-                hop_length=hop_len,
-            )
-
-            # sum spectrogram over all electrodes
-            if el == 0:
-                spec = chunk_spec
-            else:
-                spec += chunk_spec
-
-        # normalize spectrogram by the number of electrodes
-        spec /= n_electrodes
-
-        # convert the spectrogram to dB
-        spec = decibel(spec)
-        spec = spec.detach().cpu().numpy()
+        # don't plot chunks without chirps
+        if len(chunk.com.chirp.times) == 0:
+            continue
+
+        # compute the spectrogram for each electrode of the current chunk
+        spec = torch.zeros((len(spec_freqs), len(spec_times)))
+        for el in range(n_electrodes):
+            # get the signal for the current electrode
+            sig = chunk.grid.rec[:, el]
+
+            # compute the spectrogram for the current electrode
+            chunk_spec, _, _ = spectrogram(
+                data=sig.copy(),
+                samplingrate=data.grid.samplerate,
+                nfft=nfft,
+                hop_length=hop_len,
+            )
+
+            # sum spectrogram over all electrodes
+            if el == 0:
+                spec = chunk_spec
+            else:
+                spec += chunk_spec
+
+        # normalize spectrogram by the number of electrodes
+        spec /= n_electrodes
 
-        # Set y limits
-        flims = (
-            np.min(data.track.freqs) - 200,
-            np.max(data.track.freqs) + 700,
-        )
-        spec = spec[(spec_freqs >= flims[0]) & (spec_freqs <= flims[1]), :]
-        spec_freqs = spec_freqs[
-            (spec_freqs >= flims[0]) & (spec_freqs <= flims[1])
-        ]
-
-        # Extract the bounding boxes for the current chunk
-        chunk_t1 = idx1 / data.grid.samplerate
-        chunk_t2 = idx2 / data.grid.samplerate
-        chunk_df = chirp_df[
-            (chirp_df["t1"] >= chunk_t1) & (chirp_df["t2"] <= chunk_t2)
-        ]
-
-        # get t1, t2, f1, f2 from chunk_df
-        bboxes = chunk_df[["score", "t1", "f1", "t2", "f2"]].to_numpy()
-
-        # get chirp times and chirp ids
-        chirp_times = chunk_df["envelope_trough_time"]
-        chirp_ids = chunk_df["assigned_track"]
+        # convert the spectrogram to dB
+        spec = decibel(spec)
+        spec = spec.detach().cpu().numpy()
+
+        # Set y limits
+        flims = (
+            np.min(data.track.freqs) - 200,
+            np.max(data.track.freqs) + 700,
+        )
+        spec = spec[(spec_freqs >= flims[0]) & (spec_freqs <= flims[1]), :]
+        spec_freqs = spec_freqs[
+            (spec_freqs >= flims[0]) & (spec_freqs <= flims[1])
+        ]
+
+        # Extract the bounding boxes for the current chunk
+        chunk_t1 = idx1 / data.grid.samplerate
+        chunk_t2 = idx2 / data.grid.samplerate
+        chunk_df = chirp_df[
+            (chirp_df["t1"] >= chunk_t1) & (chirp_df["t2"] <= chunk_t2)
+        ]
+
+        # get t1, t2, f1, f2 from chunk_df
+        bboxes = chunk_df[["score", "t1", "f1", "t2", "f2"]].to_numpy()
 
-        _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)
-
-        # plot bounding boxes
-        ax.imshow(
-            spec,
-            aspect="auto",
-            origin="lower",
-            interpolation="gaussian",
-            extent=[
-                spec_times[0],
-                spec_times[-1],
-                spec_freqs[0],
-                spec_freqs[-1],
-            ],
-            cmap="magma",
-            vmin=-80,
-            vmax=-45,
-        )
-        for bbox in bboxes:
-            ax.add_patch(
-                Rectangle(
-                    (bbox[1], bbox[2]),
-                    bbox[3] - bbox[1],
-                    bbox[4] - bbox[2],
-                    fill=False,
-                    color="gray",
-                    linewidth=1,
-                    label="faster-R-CNN predictions",
-                ),
-            )
-            ax.text(
-                bbox[1],
-                bbox[4] + 15,
-                f"{bbox[0]:.2f}",
-                color="gray",
-                fontsize=10,
-                verticalalignment="bottom",
-                horizontalalignment="left",
-                rotation=90,
-            )
-
-        # plot chirp times and frequency traces
-        for track_id in np.unique(data.track.idents):
-            ctimes = chirp_times[chirp_ids == track_id]
+        # get chirp times and chirp ids
+        chirp_times = chunk_df["envelope_trough_time"]
+        chirp_ids = chunk_df["assigned_track"]
+
+        _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)
+
+        # plot bounding boxes
+        ax.imshow(
+            spec,
+            aspect="auto",
+            origin="lower",
+            interpolation="gaussian",
+            extent=[
+                spec_times[0],
+                spec_times[-1],
+                spec_freqs[0],
+                spec_freqs[-1],
+            ],
+            cmap="magma",
+            vmin=-80,
+            vmax=-45,
+        )
+        for bbox in bboxes:
+            ax.add_patch(
+                Rectangle(
+                    (bbox[1], bbox[2]),
+                    bbox[3] - bbox[1],
+                    bbox[4] - bbox[2],
+                    fill=False,
+                    color="gray",
+                    linewidth=1,
+                    label="Faster R-CNN predictions",
+                ),
+            )
+            ax.text(
+                bbox[1],
+                bbox[4] + 15,
+                f"{bbox[0]:.2f}",
+                color="gray",
+                fontsize=10,
+                verticalalignment="bottom",
+                horizontalalignment="left",
+                rotation=90,
+            )
 
-            freqs = data.track.freqs[data.track.idents == track_id]
-            times = data.track.times[
-                data.track.indices[data.track.idents == track_id]
-            ]
-            freqs = freqs[
-                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)
-            ]
-            times = times[
-                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)
-            ]
-
-            # get freqs where times are closest to ctimes
-            cfreqs = np.zeros_like(ctimes)
-            for i, ctime in enumerate(ctimes):
-                try:
-                    indx = np.argmin(np.abs(times - ctime))
-                    cfreqs[i] = freqs[indx]
-                except ValueError:
-                    msg = (
-                        "Failed to find track time closest to chirp time "
-                        f"in chunk {chunk_no}, check the plots."
-                    )
-                    prog.console.log(msg)
-
-            if len(times) != 0:
-                ax.plot(
-                    times,
-                    freqs,
-                    lw=2,
-                    color="black",
-                    label="Frequency traces",
-                )
-
-            ax.scatter(
-                ctimes,
-                cfreqs,
-                marker="o",
-                lw=1,
-                facecolor="white",
-                edgecolor="black",
-                s=25,
-                zorder=10,
-                label="Chirp assignments",
-            )
-
-        ax.set_ylim(flims[0] + 5, flims[1] - 5)
-        ax.set_xlim([spec_times[0], spec_times[-1]])
-        ax.set_xlabel("Time [s]", fontsize=12)
-        ax.set_ylabel("Frequency [Hz]", fontsize=12)
-
-        handles, labels = plt.gca().get_legend_handles_labels()
-        by_label = dict(zip(labels, handles))
-        plt.legend(
-            by_label.values(),
-            by_label.keys(),
-            bbox_to_anchor=(0.5, 1.02),
-            loc="lower center",
-            mode="None",
-            borderaxespad=0,
-            ncol=3,
-            fancybox=False,
-            framealpha=0,
-        )
-
-        savepath = data.path / "chirpdetections"
-        savepath.mkdir(exist_ok=True)
-        plt.savefig(
-            savepath / f"cpd_{chunk_no}.png",
-            dpi=300,
-            bbox_inches="tight",
-        )
-
-        plt.close()
-        plt.clf()
-        plt.cla()
-        plt.close("all")
+        # plot chirp times and frequency traces
+        for track_id in np.unique(data.track.idents):
+            ctimes = chirp_times[chirp_ids == track_id]
+
+            freqs = data.track.freqs[data.track.idents == track_id]
+            times = data.track.times[
+                data.track.indices[data.track.idents == track_id]
+            ]
+            freqs = freqs[
+                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)
+            ]
+            times = times[
+                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)
+            ]
+
+            # get freqs where times are closest to ctimes
+            cfreqs = np.zeros_like(ctimes)
+            for i, ctime in enumerate(ctimes):
+                try:
+                    indx = np.argmin(np.abs(times - ctime))
+                    cfreqs[i] = freqs[indx]
+                except ValueError:
+                    msg = (
+                        "Failed to find track time closest to chirp time "
+                        f"in chunk {chunk_no}, check the plots."
+                    )
+                    prog.console.log(msg)
+
+            if len(times) != 0:
+                ax.plot(
+                    times,
+                    freqs,
+                    lw=2,
+                    color="black",
+                    label="Frequency traces",
+                )
+
+            ax.scatter(
+                ctimes,
+                cfreqs,
+                marker="o",
+                lw=1,
+                facecolor="white",
+                edgecolor="black",
+                s=25,
+                zorder=10,
+                label="Chirp assignments",
+            )
+
+        ax.set_ylim(flims[0] + 5, flims[1] - 5)
+        ax.set_xlim([spec_times[0], spec_times[-1]])
+        ax.set_xlabel("Time [s]", fontsize=12)
+        ax.set_ylabel("Frequency [Hz]", fontsize=12)
+
+        handles, labels = plt.gca().get_legend_handles_labels()
+        by_label = dict(zip(labels, handles))
+        plt.legend(
+            by_label.values(),
+            by_label.keys(),
+            bbox_to_anchor=(0.5, 1.02),
+            loc="lower center",
+            mode="None",
+            borderaxespad=0,
+            ncol=3,
+            fancybox=False,
+            framealpha=0,
+        )
+
+        savepath = data.path / "chirpdetections"
+        savepath.mkdir(exist_ok=True)
+        plt.savefig(
+            savepath / f"cpd_{chunk_no}.png",
+            dpi=300,
+            bbox_inches="tight",
+        )
+
+        # close all open figures to free memory between chunks
+        plt.close("all")
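
The new `+` lines above pass the summed spectrogram through `decibel()` before detaching it to NumPy. The helper itself is not part of this diff; a minimal sketch of such a conversion, assuming a power spectrogram in a torch tensor and a floor that keeps log10 finite for empty bins, could look like this:

import torch

def decibel(spec: torch.Tensor, ref: float = 1.0, floor: float = 1e-20) -> torch.Tensor:
    # 10 * log10(P / P_ref); clamping avoids -inf for all-zero bins
    return 10.0 * torch.log10(spec.clamp(min=floor) / ref)

The fixed color limits passed to imshow (vmin=-80, vmax=-45) only make sense on such a dB scale.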
 
@@ -1927,29 +1938,29 @@

Parameters
Source code in chirpdetector/plot_detections.py
def plot_detections_cli(path: pathlib.Path) -> None:
-    """Plot detections on images.
-
-    Parameters
-    ----------
-    path : pathlib.Path
-        Path to the config file.
-    """
-    conf = load_config(path.parent / "chirpdetector.toml")
-    data = load(path)
-    chirp_df = pd.read_csv(path / "chirpdetector_bboxes.csv")
-    plot_detections(data, chirp_df, conf)
def plot_detections_cli(path: pathlib.Path) -> None:
+    """Plot detections on images.
+
+    Parameters
+    ----------
+    path : pathlib.Path
+        Path to the dataset; the config is read from its parent directory.
+    """
+    conf = load_config(path.parent / "chirpdetector.toml")
+    data = load(path)
+    chirp_df = pd.read_csv(path / "chirpdetector_bboxes.csv")
+    plot_detections(data, chirp_df, conf)
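
A hypothetical invocation of this entry point (the recording path is illustrative only; the function expects a `chirpdetector.toml` in the parent directory and a `chirpdetector_bboxes.csv` inside the dataset folder):

import pathlib

from chirpdetector.plot_detections import plot_detections_cli

plot_detections_cli(pathlib.Path("/data/recordings/2023-03-16-10_00"))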
 
@@ -2050,5 +2061,5 @@

Parameters

- +
\ No newline at end of file
diff --git a/api/train_model/index.html b/api/train_model/index.html
index 7bce8a8..d4da4eb 100644
--- a/api/train_model/index.html
+++ b/api/train_model/index.html
@@ -55,7 +55,18 @@
- +
@@ -1474,8 +1485,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -1537,70 +1547,71 @@ 

Returns

def plot_epochs(
-    epoch_train_loss: list,
-    epoch_val_loss: list,
-    epoch_avg_train_loss: list,
-    epoch_avg_val_loss: list,
-    path: pathlib.Path,
-) -> None:
-    """Plot the loss for each epoch.
-
-    Parameters
-    ----------
-    - `epoch_train_loss`: `list`
-        The training loss for each epoch.
-    - `epoch_val_loss`: `list`
-        The validation loss for each epoch.
-    - `epoch_avg_train_loss`: `list`
-        The average training loss for each epoch.
-    - `epoch_avg_val_loss`: `list`
-        The average validation loss for each epoch.
-    - `path`: `pathlib.Path`
-        The path to save the plot to.
-
-    Returns
-    -------
-    - `None`
-    """
-    _, ax = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)
-
-    x_train = np.arange(len(epoch_train_loss[0])) + 1
-    x_val = np.arange(len(epoch_val_loss[0])) + len(epoch_train_loss[0]) + 1
-
-    for train_loss, val_loss in zip(epoch_train_loss, epoch_val_loss):
-        ax[0].plot(x_train, train_loss, c="tab:blue", label="_")
-        ax[0].plot(x_val, val_loss, c="tab:orange", label="_")
-        x_train = np.arange(len(epoch_train_loss[0])) + x_val[-1]
-        x_val = np.arange(len(epoch_val_loss[0])) + x_train[-1]
-
-    x_avg = np.arange(len(epoch_avg_train_loss)) + 1
-    ax[1].plot(
-        x_avg,
-        epoch_avg_train_loss,
-        label="Training Loss",
-        c="tab:blue",
-    )
-    ax[1].plot(
-        x_avg,
-        epoch_avg_val_loss,
-        label="Validation Loss",
-        c="tab:orange",
-    )
-
-    ax[0].set_ylabel("Loss")
-    ax[0].set_xlabel("Batch")
-    ax[0].set_ylim(bottom=0)
-    ax[0].set_title("Loss per batch")
-
-    ax[1].set_ylabel("Loss")
-    ax[1].set_xlabel("Epoch")
-    ax[1].legend()
-    ax[1].set_ylim(bottom=0)
-    ax[1].set_title("Avg loss per epoch")
-
-    plt.savefig(path)
-    plt.close()
def plot_epochs(
+    epoch_train_loss: list,
+    epoch_val_loss: list,
+    epoch_avg_train_loss: list,
+    epoch_avg_val_loss: list,
+    path: pathlib.Path,
+) -> None:
+    """Plot the loss for each epoch.
+
+    Parameters
+    ----------
+    - `epoch_train_loss`: `list`
+        The training loss for each epoch.
+    - `epoch_val_loss`: `list`
+        The validation loss for each epoch.
+    - `epoch_avg_train_loss`: `list`
+        The average training loss for each epoch.
+    - `epoch_avg_val_loss`: `list`
+        The average validation loss for each epoch.
+    - `path`: `pathlib.Path`
+        The path to save the plot to.
+
+    Returns
+    -------
+    - `None`
+    """
+    _, ax = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)
+
+    x_train = np.arange(len(epoch_train_loss[0])) + 1
+    x_val = np.arange(len(epoch_val_loss[0])) + len(epoch_train_loss[0]) + 1
+
+    for train_loss, val_loss in zip(epoch_train_loss, epoch_val_loss):
+        ax[0].plot(x_train, train_loss, c="tab:blue", label="_")
+        ax[0].plot(x_val, val_loss, c="tab:orange", label="_")
+        x_train = np.arange(len(epoch_train_loss[0])) + x_val[-1]
+        x_val = np.arange(len(epoch_val_loss[0])) + x_train[-1]
+
+    x_avg = np.arange(len(epoch_avg_train_loss)) + 1
+    ax[1].plot(
+        x_avg,
+        epoch_avg_train_loss,
+        label="Training Loss",
+        c="tab:blue",
+    )
+    ax[1].plot(
+        x_avg,
+        epoch_avg_val_loss,
+        label="Validation Loss",
+        c="tab:orange",
+    )
+
+    ax[0].set_ylabel("Loss")
+    ax[0].set_xlabel("Batch")
+    ax[0].set_ylim(bottom=0)
+    ax[0].set_title("Loss per batch")
+
+    ax[1].set_ylabel("Loss")
+    ax[1].set_xlabel("Epoch")
+    ax[1].legend()
+    ax[1].set_ylim(bottom=0)
+    ax[1].set_title("Avg loss per epoch")
+
+    plt.savefig(path)
+    plt.close()
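
For reference, `plot_epochs` assumes that every epoch produced the same number of batch losses (the x axes are built from the length of the first epoch's lists). A synthetic smoke test with random placeholder values:

import pathlib

import numpy as np

from chirpdetector.train_model import plot_epochs

rng = np.random.default_rng(seed=1)
train = [list(rng.uniform(0.4, 1.0, 20)) for _ in range(3)]  # 3 epochs x 20 batches
val = [list(rng.uniform(0.5, 1.1, 5)) for _ in range(3)]  # 3 epochs x 5 batches

plot_epochs(
    epoch_train_loss=train,
    epoch_val_loss=val,
    epoch_avg_train_loss=[float(np.median(t)) for t in train],
    epoch_avg_val_loss=[float(np.median(v)) for v in val],
    path=pathlib.Path("losses_demo.png"),
)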
 
@@ -1638,8 +1649,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -1687,56 +1697,57 @@ 

Returns

def plot_folds(
-    fold_avg_train_loss: list,
-    fold_avg_val_loss: list,
-    path: pathlib.Path,
-) -> None:
-    """Plot the loss for each fold.
-
-    Parameters
-    ----------
-    - `fold_avg_train_loss`: `list`
-        The average training loss for each fold.
-    - `fold_avg_val_loss`: `list`
-        The average validation loss for each fold.
-    - `path`: `pathlib.Path`
-        The path to save the plot to.
-
-    Returns
-    -------
-    - `None`
-    """
-    _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)
-
-    for train_loss, val_loss in zip(fold_avg_train_loss, fold_avg_val_loss):
-        x = np.arange(len(train_loss)) + 1
-        ax.plot(x, train_loss, c="tab:blue", alpha=0.3, label="_")
-        ax.plot(x, val_loss, c="tab:orange", alpha=0.3, label="_")
-
-    avg_train = np.mean(fold_avg_train_loss, axis=0)
-    avg_val = np.mean(fold_avg_val_loss, axis=0)
-    x = np.arange(len(avg_train)) + 1
-    ax.plot(
-        x,
-        avg_train,
-        label="Training Loss",
-        c="tab:blue",
-    )
-    ax.plot(
-        x,
-        avg_val,
-        label="Validation Loss",
-        c="tab:orange",
-    )
-
-    ax.set_ylabel("Loss")
-    ax.set_xlabel("Epoch")
-    ax.legend()
-    ax.set_ylim(bottom=0)
-
-    plt.savefig(path)
-    plt.close()
def plot_folds(
+    fold_avg_train_loss: list,
+    fold_avg_val_loss: list,
+    path: pathlib.Path,
+) -> None:
+    """Plot the loss for each fold.
+
+    Parameters
+    ----------
+    - `fold_avg_train_loss`: `list`
+        The average training loss for each fold.
+    - `fold_avg_val_loss`: `list`
+        The average validation loss for each fold.
+    - `path`: `pathlib.Path`
+        The path to save the plot to.
+
+    Returns
+    -------
+    - `None`
+    """
+    _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)
+
+    for train_loss, val_loss in zip(fold_avg_train_loss, fold_avg_val_loss):
+        x = np.arange(len(train_loss)) + 1
+        ax.plot(x, train_loss, c="tab:blue", alpha=0.3, label="_")
+        ax.plot(x, val_loss, c="tab:orange", alpha=0.3, label="_")
+
+    avg_train = np.mean(fold_avg_train_loss, axis=0)
+    avg_val = np.mean(fold_avg_val_loss, axis=0)
+    x = np.arange(len(avg_train)) + 1
+    ax.plot(
+        x,
+        avg_train,
+        label="Training Loss",
+        c="tab:blue",
+    )
+    ax.plot(
+        x,
+        avg_val,
+        label="Validation Loss",
+        c="tab:orange",
+    )
+
+    ax.set_ylabel("Loss")
+    ax.set_xlabel("Epoch")
+    ax.legend()
+    ax.set_ylim(bottom=0)
+
+    plt.savefig(path)
+    plt.close()
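
Note that `np.mean(fold_avg_train_loss, axis=0)` requires a rectangular list of lists, i.e. every fold must have run for the same number of epochs. A minimal sketch with synthetic, linearly decaying curves:

import pathlib

import numpy as np

from chirpdetector.train_model import plot_folds

folds_train = [list(np.linspace(1.0, 0.3, 10)) for _ in range(5)]  # 5 folds x 10 epochs
folds_val = [list(np.linspace(1.1, 0.4, 10)) for _ in range(5)]

plot_folds(folds_train, folds_val, pathlib.Path("fold_losses_demo.png"))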
 
@@ -1776,8 +1787,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -1808,39 +1818,40 @@ 

Returns

def save_model(
-    epoch: int,
-    model: torch.nn.Module,
-    optimizer: torch.optim.Optimizer,
-    path: str,
-) -> None:
-    """Save the model state dict.
-
-    Parameters
-    ----------
-    - `epoch`: `int`
-        The current epoch.
-    - `model`: `torch.nn.Module`
-        The model to save.
-    - `optimizer`: `torch.optim.Optimizer`
-        The optimizer to save.
-    - `path`: `str`
-        The path to save the model to.
-
-    Returns
-    -------
-    - `None`
-    """
-    path = pathlib.Path(path)
-    path.mkdir(parents=True, exist_ok=True)
-    torch.save(
-        {
-            "epoch": epoch,
-            "model_state_dict": model.state_dict(),
-            "optimizer_state_dict": optimizer.state_dict(),
-        },
-        path / "model.pt",
-    )
def save_model(
+    epoch: int,
+    model: torch.nn.Module,
+    optimizer: torch.optim.Optimizer,
+    path: str,
+) -> None:
+    """Save the model state dict.
+
+    Parameters
+    ----------
+    - `epoch`: `int`
+        The current epoch.
+    - `model`: `torch.nn.Module`
+        The model to save.
+    - `optimizer`: `torch.optim.Optimizer`
+        The optimizer to save.
+    - `path`: `str`
+        The path to save the model to.
+
+    Returns
+    -------
+    - `None`
+    """
+    path = pathlib.Path(path)
+    path.mkdir(parents=True, exist_ok=True)
+    torch.save(
+        {
+            "epoch": epoch,
+            "model_state_dict": model.state_dict(),
+            "optimizer_state_dict": optimizer.state_dict(),
+        },
+        path / "model.pt",
+    )
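
The counterpart to `save_model` is restoring the checkpoint it writes, as the finetune branch of `train` below does for the model weights. A sketch that also restores the optimizer state (the Linear module is a stand-in; the real code builds the network via `load_fasterrcnn` first, and the dictionary keys are the ones saved above):

import pathlib

import torch

model = torch.nn.Linear(4, 2)  # stand-in for the Faster R-CNN
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

checkpoint = torch.load(pathlib.Path("models") / "model.pt", map_location="cpu")
model.load_state_dict(checkpoint["model_state_dict"])
optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
start_epoch = checkpoint["epoch"] + 1  # resume counting after the saved epoch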
 
@@ -1876,8 +1887,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -2101,232 +2111,233 @@ 

Returns

def train(config: Config, mode: str = "pretrain") -> None:
-    """Train the model.
-
-    Parameters
-    ----------
-    - `config`: `Config`
-        The config file.
-    - `mode`: `str`
-        The mode to train in. Either `pretrain` or `finetune`.
-
-    Returns
-    -------
-    - `None`
-    """
-    # Load a pretrained model from pytorch if in pretrain mode,
-    # otherwise open an already trained model from the
-    # model state dict.
-    assert mode in ["pretrain", "finetune"]
-    if mode == "pretrain":
-        assert config.train.datapath is not None
-        datapath = config.train.datapath
-    elif mode == "finetune":
-        assert config.finetune.datapath is not None
-        datapath = config.finetune.datapath
-
-    # Check if the path to the data actually exists
-    if not pathlib.Path(datapath).exists():
-        raise FileNotFoundError(f"Path {datapath} does not exist.")
-
-    # Initialize the logger and progress bar, make the logger global
-    global logger
-    logger = make_logger(
-        __name__,
-        pathlib.Path(config.path).parent / "chirpdetector.log",
-    )
-
-    # Get the device (e.g. GPU or CPU)
-    device = get_device()
-
-    # Print information about starting training
-    progress.console.rule("Starting training")
-    msg = (
-        f"Device: {device}, Config: {config.path},"
-        f" Mode: {mode}, Data: {datapath}"
-    )
-    progress.console.log(msg)
-    logger.info(msg)
-
-    # initialize the dataset
-    data = CustomDataset(
-        path=datapath,
-        classes=config.hyper.classes,
-    )
-
-    # initialize the k-fold cross-validation
-    splits = KFold(n_splits=config.hyper.kfolds, shuffle=True, random_state=42)
-
-    # initialize the best validation loss to a large number
-    best_val_loss = float("inf")
-
-    # iterate over the folds for k-fold cross-validation
-    with progress:
-        # save loss across all epochs and folds
-        fold_train_loss = []
-        fold_val_loss = []
-        fold_avg_train_loss = []
-        fold_avg_val_loss = []
-
-        # Add kfolds progress bar that runs alongside the epochs progress bar
-        task_folds = progress.add_task(
-            f"[blue]{config.hyper.kfolds}-Fold Crossvalidation",
-            total=config.hyper.kfolds,
-        )
-
-        # iterate over the folds
-        for fold, (train_idx, val_idx) in enumerate(
-            splits.split(np.arange(len(data))),
-        ):
-            # initialize the model and optimizer
-            model = load_fasterrcnn(num_classes=len(config.hyper.classes)).to(
-                device,
-            )
-
-            # If the mode is finetune, load the model state dict from
-            # previous training
-            if mode == "finetune":
-                modelpath = pathlib.Path(config.hyper.modelpath) / "model.pt"
-                checkpoint = torch.load(modelpath, map_location=device)
-                model.load_state_dict(checkpoint["model_state_dict"])
-
-            # Initialize stochastic gradient descent optimizer
-            params = [p for p in model.parameters() if p.requires_grad]
-            optimizer = torch.optim.SGD(
-                params,
-                lr=config.hyper.learning_rate,
-                momentum=config.hyper.momentum,
-                weight_decay=config.hyper.weight_decay,
-            )
-
-            # make train and validation dataloaders for the current fold
-            train_data = torch.utils.data.Subset(data, train_idx)
-            val_data = torch.utils.data.Subset(data, val_idx)
-
-            # this is for training
-            train_loader = DataLoader(
-                train_data,
-                batch_size=config.hyper.batch_size,
-                shuffle=True,
-                num_workers=config.hyper.num_workers,
-                collate_fn=collate_fn,
-            )
-
-            # this is only for validation
-            val_loader = DataLoader(
-                val_data,
-                batch_size=config.hyper.batch_size,
-                shuffle=True,
-                num_workers=config.hyper.num_workers,
-                collate_fn=collate_fn,
-            )
-
-            # save loss across all epochs
-            epoch_avg_train_loss = []
-            epoch_avg_val_loss = []
-            epoch_train_loss = []
-            epoch_val_loss = []
-
-            # train the model for the specified number of epochs
-            task_epochs = progress.add_task(
-                f"{config.hyper.num_epochs} Epochs for fold k={fold + 1}",
-                total=config.hyper.num_epochs,
-            )
-
-            # iterate across n epochs
-            for epoch in range(config.hyper.num_epochs):
-                # print information about the current epoch
-                msg = (
-                    f"Training epoch {epoch + 1} of {config.hyper.num_epochs} "
-                    f"for fold {fold + 1} of {config.hyper.kfolds}"
-                )
-                progress.console.log(msg)
-                logger.info(msg)
-
-                # train the epoch
-                train_loss = train_epoch(
-                    dataloader=train_loader,
-                    device=device,
-                    model=model,
-                    optimizer=optimizer,
-                )
-
-                # validate the epoch
-                _, val_loss = val_epoch(
-                    dataloader=val_loader,
-                    device=device,
-                    model=model,
-                )
-
-                # save losses for this epoch
-                epoch_train_loss.append(train_loss)
-                epoch_val_loss.append(val_loss)
-
-                # save the average loss for this epoch
-                epoch_avg_train_loss.append(np.median(train_loss))
-                epoch_avg_val_loss.append(np.median(val_loss))
-
-                # save the model if it is the best so far
-                if np.mean(val_loss) < best_val_loss:
-                    best_val_loss = sum(val_loss) / len(val_loss)
-
-                    msg = (
-                        f"New best validation loss: {best_val_loss:.4f}, "
-                        "saving model..."
-                    )
-                    progress.console.log(msg)
-                    logger.info(msg)
-
-                    save_model(
-                        epoch=epoch,
-                        model=model,
-                        optimizer=optimizer,
-                        path=config.hyper.modelpath,
-                    )
-
-                # plot the losses for this epoch
-                plot_epochs(
-                    epoch_train_loss=epoch_train_loss,
-                    epoch_val_loss=epoch_val_loss,
-                    epoch_avg_train_loss=epoch_avg_train_loss,
-                    epoch_avg_val_loss=epoch_avg_val_loss,
-                    path=pathlib.Path(config.hyper.modelpath)
-                    / f"fold{fold + 1}.png",
-                )
-
-                # update the progress bar for the epochs
-                progress.update(task_epochs, advance=1)
-
-            # update the progress bar for the epochs and hide it if done
-            progress.update(task_epochs, visible=False)
-
-            # save the losses for this fold
-            fold_train_loss.append(epoch_train_loss)
-            fold_val_loss.append(epoch_val_loss)
-            fold_avg_train_loss.append(epoch_avg_train_loss)
-            fold_avg_val_loss.append(epoch_avg_val_loss)
-
-            plot_folds(
-                fold_avg_train_loss=fold_avg_train_loss,
-                fold_avg_val_loss=fold_avg_val_loss,
-                path=pathlib.Path(config.hyper.modelpath) / "losses.png",
-            )
-
-            # update the progress bar for the folds
-            progress.update(task_folds, advance=1)
-
-        # update the progress bar for the folds and hide it if done
-        progress.update(task_folds, visible=False)
-
-        # print information about the training
-        msg = (
-            "Average validation loss of last epoch across folds: "
-            f"{np.mean(fold_val_loss):.4f}"
-        )
-        progress.console.log(msg)
-        logger.info(msg)
-        progress.console.rule("[bold blue]Finished training")
def train(config: Config, mode: str = "pretrain") -> None:
+    """Train the model.
+
+    Parameters
+    ----------
+    - `config`: `Config`
+        The config file.
+    - `mode`: `str`
+        The mode to train in. Either `pretrain` or `finetune`.
+
+    Returns
+    -------
+    - `None`
+    """
+    # Load a pretrained model from PyTorch if in pretrain mode,
+    # otherwise open an already trained model from the
+    # model state dict.
+    assert mode in ["pretrain", "finetune"]
+    if mode == "pretrain":
+        assert config.train.datapath is not None
+        datapath = config.train.datapath
+    elif mode == "finetune":
+        assert config.finetune.datapath is not None
+        datapath = config.finetune.datapath
+
+    # Check if the path to the data actually exists
+    if not pathlib.Path(datapath).exists():
+        raise FileNotFoundError(f"Path {datapath} does not exist.")
+
+    # Initialize the logger and progress bar, make the logger global
+    global logger
+    logger = make_logger(
+        __name__,
+        pathlib.Path(config.path).parent / "chirpdetector.log",
+    )
+
+    # Get the device (e.g. GPU or CPU)
+    device = get_device()
+
+    # Print information about starting training
+    progress.console.rule("Starting training")
+    msg = (
+        f"Device: {device}, Config: {config.path},"
+        f" Mode: {mode}, Data: {datapath}"
+    )
+    progress.console.log(msg)
+    logger.info(msg)
+
+    # initialize the dataset
+    data = CustomDataset(
+        path=datapath,
+        classes=config.hyper.classes,
+    )
+
+    # initialize the k-fold cross-validation
+    splits = KFold(n_splits=config.hyper.kfolds, shuffle=True, random_state=42)
+
+    # initialize the best validation loss to a large number
+    best_val_loss = float("inf")
+
+    # iterate over the folds for k-fold cross-validation
+    with progress:
+        # save loss across all epochs and folds
+        fold_train_loss = []
+        fold_val_loss = []
+        fold_avg_train_loss = []
+        fold_avg_val_loss = []
+
+        # Add kfolds progress bar that runs alongside the epochs progress bar
+        task_folds = progress.add_task(
+            f"[blue]{config.hyper.kfolds}-Fold Crossvalidation",
+            total=config.hyper.kfolds,
+        )
+
+        # iterate over the folds
+        for fold, (train_idx, val_idx) in enumerate(
+            splits.split(np.arange(len(data))),
+        ):
+            # initialize the model and optimizer
+            model = load_fasterrcnn(num_classes=len(config.hyper.classes)).to(
+                device,
+            )
+
+            # If the mode is finetune, load the model state dict from
+            # previous training
+            if mode == "finetune":
+                modelpath = pathlib.Path(config.hyper.modelpath) / "model.pt"
+                checkpoint = torch.load(modelpath, map_location=device)
+                model.load_state_dict(checkpoint["model_state_dict"])
+
+            # Initialize stochastic gradient descent optimizer
+            params = [p for p in model.parameters() if p.requires_grad]
+            optimizer = torch.optim.SGD(
+                params,
+                lr=config.hyper.learning_rate,
+                momentum=config.hyper.momentum,
+                weight_decay=config.hyper.weight_decay,
+            )
+
+            # make train and validation dataloaders for the current fold
+            train_data = torch.utils.data.Subset(data, train_idx)
+            val_data = torch.utils.data.Subset(data, val_idx)
+
+            # this is for training
+            train_loader = DataLoader(
+                train_data,
+                batch_size=config.hyper.batch_size,
+                shuffle=True,
+                num_workers=config.hyper.num_workers,
+                collate_fn=collate_fn,
+            )
+
+            # this is only for validation
+            val_loader = DataLoader(
+                val_data,
+                batch_size=config.hyper.batch_size,
+                shuffle=True,
+                num_workers=config.hyper.num_workers,
+                collate_fn=collate_fn,
+            )
+
+            # save loss across all epochs
+            epoch_avg_train_loss = []
+            epoch_avg_val_loss = []
+            epoch_train_loss = []
+            epoch_val_loss = []
+
+            # train the model for the specified number of epochs
+            task_epochs = progress.add_task(
+                f"{config.hyper.num_epochs} Epochs for fold k={fold + 1}",
+                total=config.hyper.num_epochs,
+            )
+
+            # iterate across n epochs
+            for epoch in range(config.hyper.num_epochs):
+                # print information about the current epoch
+                msg = (
+                    f"Training epoch {epoch + 1} of {config.hyper.num_epochs} "
+                    f"for fold {fold + 1} of {config.hyper.kfolds}"
+                )
+                progress.console.log(msg)
+                logger.info(msg)
+
+                # train the epoch
+                train_loss = train_epoch(
+                    dataloader=train_loader,
+                    device=device,
+                    model=model,
+                    optimizer=optimizer,
+                )
+
+                # validate the epoch
+                _, val_loss = val_epoch(
+                    dataloader=val_loader,
+                    device=device,
+                    model=model,
+                )
+
+                # save losses for this epoch
+                epoch_train_loss.append(train_loss)
+                epoch_val_loss.append(val_loss)
+
+                # save the median loss for this epoch (a robust "average")
+                epoch_avg_train_loss.append(np.median(train_loss))
+                epoch_avg_val_loss.append(np.median(val_loss))
+
+                # save the model if it is the best so far
+                if np.mean(val_loss) < best_val_loss:
+                    best_val_loss = np.mean(val_loss)
+
+                    msg = (
+                        f"New best validation loss: {best_val_loss:.4f}, "
+                        "saving model..."
+                    )
+                    progress.console.log(msg)
+                    logger.info(msg)
+
+                    save_model(
+                        epoch=epoch,
+                        model=model,
+                        optimizer=optimizer,
+                        path=config.hyper.modelpath,
+                    )
+
+                # plot the losses for this epoch
+                plot_epochs(
+                    epoch_train_loss=epoch_train_loss,
+                    epoch_val_loss=epoch_val_loss,
+                    epoch_avg_train_loss=epoch_avg_train_loss,
+                    epoch_avg_val_loss=epoch_avg_val_loss,
+                    path=pathlib.Path(config.hyper.modelpath)
+                    / f"fold{fold + 1}.png",
+                )
+
+                # update the progress bar for the epochs
+                progress.update(task_epochs, advance=1)
+
+            # update the progress bar for the epochs and hide it if done
+            progress.update(task_epochs, visible=False)
+
+            # save the losses for this fold
+            fold_train_loss.append(epoch_train_loss)
+            fold_val_loss.append(epoch_val_loss)
+            fold_avg_train_loss.append(epoch_avg_train_loss)
+            fold_avg_val_loss.append(epoch_avg_val_loss)
+
+            plot_folds(
+                fold_avg_train_loss=fold_avg_train_loss,
+                fold_avg_val_loss=fold_avg_val_loss,
+                path=pathlib.Path(config.hyper.modelpath) / "losses.png",
+            )
+
+            # update the progress bar for the folds
+            progress.update(task_folds, advance=1)
+
+        # update the progress bar for the folds and hide it if done
+        progress.update(task_folds, visible=False)
+
+        # print information about the training
+        msg = (
+            "Average validation loss of last epoch across folds: "
+            f"{np.mean(fold_val_loss):.4f}"
+        )
+        progress.console.log(msg)
+        logger.info(msg)
+        progress.console.rule("[bold blue]Finished training")
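
Because `KFold` is seeded with `random_state=42`, the exact train/validation split of every fold can be recomputed outside of training, for example to evaluate the saved model on the correct held-out indices. Fold count and dataset size below are placeholders for `config.hyper.kfolds` and `len(data)`:

import numpy as np

from sklearn.model_selection import KFold

splits = KFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_idx, val_idx) in enumerate(splits.split(np.arange(1000))):
    print(f"fold {fold}: {len(train_idx)} train / {len(val_idx)} val samples")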
 
@@ -2362,8 +2373,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -2377,22 +2387,23 @@ 

Returns

def train_cli(config_path: pathlib.Path, mode: str) -> None:
-    """Train the model from the command line.
-
-    Parameters
-    ----------
-    - `config_path`: `pathlib.Path`
-        The path to the config file.
-    - `mode`: `str`
-        The mode to train in. Either `pretrain` or `finetune`.
-
-    Returns
-    -------
-    - `None`
-    """
-    config = load_config(config_path)
-    train(config, mode=mode)
def train_cli(config_path: pathlib.Path, mode: str) -> None:
+    """Train the model from the command line.
+
+    Parameters
+    ----------
+    - `config_path`: `pathlib.Path`
+        The path to the config file.
+    - `mode`: `str`
+        The mode to train in. Either `pretrain` or `finetune`.
+
+    Returns
+    -------
+    - `None`
+    """
+    config = load_config(config_path)
+    train(config, mode=mode)
 
@@ -2433,8 +2444,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -2474,48 +2484,49 @@ 

Returns

def train_epoch(
-    dataloader: DataLoader,
-    device: torch.device,
-    model: torch.nn.Module,
-    optimizer: torch.optim.Optimizer,
-) -> List:
-    """Train the model for one epoch.
-
-    Parameters
-    ----------
-    - `dataloader`: `DataLoader`
-        The dataloader for the training data.
-    - `device`: `torch.device`
-        The device to train on.
-    - `model`: `torch.nn.Module`
-        The model to train.
-    - `optimizer`: `torch.optim.Optimizer`
-        The optimizer to use.
-
-    Returns
-    -------
-    - `train_loss`: `List`
-        The training loss for each batch.
-    """
-    train_loss = []
-
-    for samples, targets in dataloader:
-        images = list(sample.to(device) for sample in samples)
-        targets = [
-            {k: v.to(device) for k, v in t.items() if k != "image_name"}
-            for t in targets
-        ]
-
-        loss_dict = model(images, targets)
-        losses = sum(loss for loss in loss_dict.values())
-        train_loss.append(losses.item())
-
-        optimizer.zero_grad()
-        losses.backward()
-        optimizer.step()
-
-    return train_loss
def train_epoch(
+    dataloader: DataLoader,
+    device: torch.device,
+    model: torch.nn.Module,
+    optimizer: torch.optim.Optimizer,
+) -> List:
+    """Train the model for one epoch.
+
+    Parameters
+    ----------
+    - `dataloader`: `DataLoader`
+        The dataloader for the training data.
+    - `device`: `torch.device`
+        The device to train on.
+    - `model`: `torch.nn.Module`
+        The model to train.
+    - `optimizer`: `torch.optim.Optimizer`
+        The optimizer to use.
+
+    Returns
+    -------
+    - `train_loss`: `List`
+        The training loss for each batch.
+    """
+    train_loss = []
+
+    for samples, targets in dataloader:
+        images = [sample.to(device) for sample in samples]
+        targets = [
+            {k: v.to(device) for k, v in t.items() if k != "image_name"}
+            for t in targets
+        ]
+
+        loss_dict = model(images, targets)
+        losses = sum(loss for loss in loss_dict.values())
+        train_loss.append(losses.item())
+
+        optimizer.zero_grad()
+        losses.backward()
+        optimizer.step()
+
+    return train_loss
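
In training mode, torchvision's detection models return a dictionary of partial losses instead of predictions, which is why `train_epoch` sums `loss_dict.values()` into one scalar before calling `backward()`. For a Faster R-CNN the dictionary holds four entries (dummy values for illustration):

import torch

loss_dict = {
    "loss_classifier": torch.tensor(0.7),  # ROI-head classification loss
    "loss_box_reg": torch.tensor(0.3),  # ROI-head box regression loss
    "loss_objectness": torch.tensor(0.1),  # RPN objectness loss
    "loss_rpn_box_reg": torch.tensor(0.05),  # RPN box regression loss
}
losses = sum(loss for loss in loss_dict.values())  # single scalar tensor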
 
@@ -2554,8 +2565,7 @@

Returns

Source code in chirpdetector/train_model.py
@@ -2589,42 +2599,43 @@ 

Returns

def val_epoch(
-    dataloader: DataLoader,
-    device: torch.device,
-    model: torch.nn.Module,
-) -> List:
-    """Validate the model for one epoch.
-
-    Parameters
-    ----------
-    - `dataloader`: `DataLoader`
-        The dataloader for the validation data.
-    - `device`: `torch.device`
-        The device to train on.
-    - `model`: `torch.nn.Module`
-        The model to train.
-
-    Returns
-    -------
-    - `loss_dict`: `dict`
-        The loss dictionary.
-    """
-    val_loss = []
-    for samples, targets in dataloader:
-        images = list(sample.to(device) for sample in samples)
-        targets = [
-            {k: v.to(device) for k, v in t.items() if k != "image_name"}
-            for t in targets
-        ]
-
-        with torch.inference_mode():
-            loss_dict = model(images, targets)
-
-        losses = sum(loss for loss in loss_dict.values())
-        val_loss.append(losses.item())
-
-    return loss_dict, val_loss
def val_epoch(
+    dataloader: DataLoader,
+    device: torch.device,
+    model: torch.nn.Module,
+) -> List:
+    """Validate the model for one epoch.
+
+    Parameters
+    ----------
+    - `dataloader`: `DataLoader`
+        The dataloader for the validation data.
+    - `device`: `torch.device`
+        The device to train on.
+    - `model`: `torch.nn.Module`
+        The model to train.
+
+    Returns
+    -------
+    - `loss_dict`: `dict`
+        The loss dictionary of the last validation batch.
+    - `val_loss`: `List`
+        The validation loss for each batch.
+    """
+    val_loss = []
+    for samples, targets in dataloader:
+        images = [sample.to(device) for sample in samples]
+        targets = [
+            {k: v.to(device) for k, v in t.items() if k != "image_name"}
+            for t in targets
+        ]
+
+        with torch.inference_mode():
+            loss_dict = model(images, targets)
+
+        losses = sum(loss for loss in loss_dict.values())
+        val_loss.append(losses.item())
+
+    return loss_dict, val_loss
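
Despite the `-> List` annotation, `val_epoch` returns a tuple: `loss_dict` holds only the losses of the final validation batch, while `val_loss` collects the summed loss of every batch. `train` above accordingly discards the first element:

_, val_loss = val_epoch(dataloader=val_loader, device=device, model=model)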
 
@@ -2725,5 +2736,5 @@

Returns

- +
\ No newline at end of file
diff --git a/assets/javascripts/glightbox.min.js b/assets/javascripts/glightbox.min.js
new file mode 100644
index 0000000..614fb18
--- /dev/null
+++ b/assets/javascripts/glightbox.min.js
@@ -0,0 +1 @@
+[vendored GLightbox lightbox bundle: a single minified line of JavaScript, omitted here]
e.width=O(e,"width")&&""!==e.width?this.checkSize(e.width):n,e.height=O(e,"height")&&""!==e.height?this.checkSize(e.height):s,i&&"image"==e.type&&(e._hasCustomWidth=!!i.dataset.width,e._hasCustomHeight=!!i.dataset.height),e}},{key:"checkSize",value:function(e){return M(e)?"".concat(e,"px"):e}},{key:"sanitizeValue",value:function(e){return"true"!==e&&"false"!==e?e:"true"===e}}]),e}(),U=function(){function e(i,n,s){t(this,e),this.element=i,this.instance=n,this.index=s}return n(e,[{key:"setContent",value:function(){var e=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:null,i=arguments.length>1&&void 0!==arguments[1]&&arguments[1];if(c(t,"loaded"))return!1;var n=this.instance.settings,s=this.slideConfig,l=w();T(n.beforeSlideLoad)&&n.beforeSlideLoad({index:this.index,slide:t,player:!1});var o=s.type,r=s.descPosition,a=t.querySelector(".gslide-media"),d=t.querySelector(".gslide-title"),u=t.querySelector(".gslide-desc"),g=t.querySelector(".gdesc-inner"),v=i,f="gSlideTitle_"+this.index,p="gSlideDesc_"+this.index;if(T(n.afterSlideLoad)&&(v=function(){T(i)&&i(),n.afterSlideLoad({index:e.index,slide:t,player:e.instance.getSlidePlayerInstance(e.index)})}),""==s.title&&""==s.description?g&&g.parentNode.parentNode.removeChild(g.parentNode):(d&&""!==s.title?(d.id=f,d.innerHTML=s.title):d.parentNode.removeChild(d),u&&""!==s.description?(u.id=p,l&&n.moreLength>0?(s.smallDescription=this.slideShortDesc(s.description,n.moreLength,n.moreText),u.innerHTML=s.smallDescription,this.descriptionEvents(u,s)):u.innerHTML=s.description):u.parentNode.removeChild(u),h(a.parentNode,"desc-".concat(r)),h(g.parentNode,"description-".concat(r))),h(a,"gslide-".concat(o)),h(t,"loaded"),"video"!==o){if("external"!==o)return"inline"===o?(G.apply(this.instance,[t,s,this.index,v]),void(s.draggable&&new V({dragEl:t.querySelector(".gslide-inline"),toleranceX:n.dragToleranceX,toleranceY:n.dragToleranceY,slide:t,instance:this.instance}))):void("image"!==o?T(v)&&v():j(t,s,this.index,(function(){var i=t.querySelector("img");s.draggable&&new V({dragEl:i,toleranceX:n.dragToleranceX,toleranceY:n.dragToleranceY,slide:t,instance:e.instance}),s.zoomable&&i.naturalWidth>i.offsetWidth&&(h(i,"zoomable"),new H(i,t,(function(){e.instance.resize()}))),T(v)&&v()})));Z.apply(this,[t,s,this.index,v])}else F.apply(this.instance,[t,s,this.index,v])}},{key:"slideShortDesc",value:function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:50,i=arguments.length>2&&void 0!==arguments[2]&&arguments[2],n=document.createElement("div");n.innerHTML=e;var s=n.innerText,l=i;if((e=s.trim()).length<=t)return e;var o=e.substr(0,t-1);return l?(n=null,o+'... 
'+i+""):o}},{key:"descriptionEvents",value:function(e,t){var i=this,n=e.querySelector(".desc-more");if(!n)return!1;a("click",{onElement:n,withCallback:function(e,n){e.preventDefault();var s=document.body,l=u(n,".gslide-desc");if(!l)return!1;l.innerHTML=t.description,h(s,"gdesc-open");var o=a("click",{onElement:[s,u(l,".gslide-description")],withCallback:function(e,n){"a"!==e.target.nodeName.toLowerCase()&&(d(s,"gdesc-open"),h(s,"gdesc-closed"),l.innerHTML=t.smallDescription,i.descriptionEvents(l,t),setTimeout((function(){d(s,"gdesc-closed")}),400),o.destroy())}})}})}},{key:"create",value:function(){return m(this.instance.settings.slideHTML)}},{key:"getConfig",value:function(){k(this.element)||this.element.hasOwnProperty("draggable")||(this.element.draggable=this.instance.settings.draggable);var e=new $(this.instance.settings.slideExtraAttributes);return this.slideConfig=e.parseConfig(this.element,this.instance.settings),this.slideConfig}}]),e}(),J=w(),K=null!==w()||void 0!==document.createTouch||"ontouchstart"in window||"onmsgesturechange"in window||navigator.msMaxTouchPoints,Q=document.getElementsByTagName("html")[0],ee={selector:".glightbox",elements:null,skin:"clean",theme:"clean",closeButton:!0,startAt:null,autoplayVideos:!0,autofocusVideos:!0,descPosition:"bottom",width:"900px",height:"506px",videosWidth:"960px",beforeSlideChange:null,afterSlideChange:null,beforeSlideLoad:null,afterSlideLoad:null,slideInserted:null,slideRemoved:null,slideExtraAttributes:null,onOpen:null,onClose:null,loop:!1,zoomable:!0,draggable:!0,dragAutoSnap:!1,dragToleranceX:40,dragToleranceY:65,preload:!0,oneSlidePerOpen:!1,touchNavigation:!0,touchFollowAxis:!0,keyboardNavigation:!0,closeOnOutsideClick:!0,plugins:!1,plyr:{css:"https://cdn.plyr.io/3.6.8/plyr.css",js:"https://cdn.plyr.io/3.6.8/plyr.js",config:{ratio:"16:9",fullscreen:{enabled:!0,iosNative:!0},youtube:{noCookie:!0,rel:0,showinfo:0,iv_load_policy:3},vimeo:{byline:!1,portrait:!1,title:!1,transparent:!1}}},openEffect:"zoom",closeEffect:"zoom",slideEffect:"slide",moreText:"See more",moreLength:60,cssEfects:{fade:{in:"fadeIn",out:"fadeOut"},zoom:{in:"zoomIn",out:"zoomOut"},slide:{in:"slideInRight",out:"slideOutLeft"},slideBack:{in:"slideInLeft",out:"slideOutRight"},none:{in:"none",out:"none"}},svg:{close:'',next:' ',prev:''},slideHTML:'
\n
\n
\n
\n
\n
\n
\n

\n
\n
\n
\n
\n
\n
',lightboxHTML:''},te=function(){function e(){var i=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};t(this,e),this.customOptions=i,this.settings=l(ee,i),this.effectsClasses=this.getAnimationClasses(),this.videoPlayers={},this.apiEvents=[],this.fullElementsList=!1}return n(e,[{key:"init",value:function(){var e=this,t=this.getSelector();t&&(this.baseEvents=a("click",{onElement:t,withCallback:function(t,i){t.preventDefault(),e.open(i)}})),this.elements=this.getElements()}},{key:"open",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:null,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;if(0==this.elements.length)return!1;this.activeSlide=null,this.prevActiveSlideIndex=null,this.prevActiveSlide=null;var i=M(t)?t:this.settings.startAt;if(k(e)){var n=e.getAttribute("data-gallery");n&&(this.fullElementsList=this.elements,this.elements=this.getGalleryElements(this.elements,n)),I(i)&&(i=this.getElementIndex(e))<0&&(i=0)}M(i)||(i=0),this.build(),g(this.overlay,"none"==this.settings.openEffect?"none":this.settings.cssEfects.fade.in);var s=document.body,l=window.innerWidth-document.documentElement.clientWidth;if(l>0){var o=document.createElement("style");o.type="text/css",o.className="gcss-styles",o.innerText=".gscrollbar-fixer {margin-right: ".concat(l,"px}"),document.head.appendChild(o),h(s,"gscrollbar-fixer")}h(s,"glightbox-open"),h(Q,"glightbox-open"),J&&(h(document.body,"glightbox-mobile"),this.settings.slideEffect="slide"),this.showSlide(i,!0),1==this.elements.length?(h(this.prevButton,"glightbox-button-hidden"),h(this.nextButton,"glightbox-button-hidden")):(d(this.prevButton,"glightbox-button-hidden"),d(this.nextButton,"glightbox-button-hidden")),this.lightboxOpen=!0,this.trigger("open"),T(this.settings.onOpen)&&this.settings.onOpen(),K&&this.settings.touchNavigation&&B(this),this.settings.keyboardNavigation&&X(this)}},{key:"openAt",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0;this.open(null,e)}},{key:"showSlide",value:function(){var e=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:0,i=arguments.length>1&&void 0!==arguments[1]&&arguments[1];f(this.loader),this.index=parseInt(t);var n=this.slidesContainer.querySelector(".current");n&&d(n,"current"),this.slideAnimateOut();var s=this.slidesContainer.querySelectorAll(".gslide")[t];if(c(s,"loaded"))this.slideAnimateIn(s,i),p(this.loader);else{f(this.loader);var l=this.elements[t],o={index:this.index,slide:s,slideNode:s,slideConfig:l.slideConfig,slideIndex:this.index,trigger:l.node,player:null};this.trigger("slide_before_load",o),l.instance.setContent(s,(function(){p(e.loader),e.resize(),e.slideAnimateIn(s,i),e.trigger("slide_after_load",o)}))}this.slideDescription=s.querySelector(".gslide-description"),this.slideDescriptionContained=this.slideDescription&&c(this.slideDescription.parentNode,"gslide-media"),this.settings.preload&&(this.preloadSlide(t+1),this.preloadSlide(t-1)),this.updateNavigationClasses(),this.activeSlide=s}},{key:"preloadSlide",value:function(e){var t=this;if(e<0||e>this.elements.length-1)return!1;if(I(this.elements[e]))return!1;var i=this.slidesContainer.querySelectorAll(".gslide")[e];if(c(i,"loaded"))return!1;var 
n=this.elements[e],s=n.type,l={index:e,slide:i,slideNode:i,slideConfig:n.slideConfig,slideIndex:e,trigger:n.node,player:null};this.trigger("slide_before_load",l),"video"==s||"external"==s?setTimeout((function(){n.instance.setContent(i,(function(){t.trigger("slide_after_load",l)}))}),200):n.instance.setContent(i,(function(){t.trigger("slide_after_load",l)}))}},{key:"prevSlide",value:function(){this.goToSlide(this.index-1)}},{key:"nextSlide",value:function(){this.goToSlide(this.index+1)}},{key:"goToSlide",value:function(){var e=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(this.prevActiveSlide=this.activeSlide,this.prevActiveSlideIndex=this.index,!this.loop()&&(e<0||e>this.elements.length-1))return!1;e<0?e=this.elements.length-1:e>=this.elements.length&&(e=0),this.showSlide(e)}},{key:"insertSlide",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:-1;t<0&&(t=this.elements.length);var i=new U(e,this,t),n=i.getConfig(),s=l({},n),o=i.create(),r=this.elements.length-1;s.index=t,s.node=!1,s.instance=i,s.slideConfig=n,this.elements.splice(t,0,s);var a=null,h=null;if(this.slidesContainer){if(t>r)this.slidesContainer.appendChild(o);else{var d=this.slidesContainer.querySelectorAll(".gslide")[t];this.slidesContainer.insertBefore(o,d)}(this.settings.preload&&0==this.index&&0==t||this.index-1==t||this.index+1==t)&&this.preloadSlide(t),0==this.index&&0==t&&(this.index=1),this.updateNavigationClasses(),a=this.slidesContainer.querySelectorAll(".gslide")[t],h=this.getSlidePlayerInstance(t),s.slideNode=a}this.trigger("slide_inserted",{index:t,slide:a,slideNode:a,slideConfig:n,slideIndex:t,trigger:null,player:h}),T(this.settings.slideInserted)&&this.settings.slideInserted({index:t,slide:a,player:h})}},{key:"removeSlide",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:-1;if(e<0||e>this.elements.length-1)return!1;var t=this.slidesContainer&&this.slidesContainer.querySelectorAll(".gslide")[e];t&&(this.getActiveSlideIndex()==e&&(e==this.elements.length-1?this.prevSlide():this.nextSlide()),t.parentNode.removeChild(t)),this.elements.splice(e,1),this.trigger("slide_removed",e),T(this.settings.slideRemoved)&&this.settings.slideRemoved(e)}},{key:"slideAnimateIn",value:function(e,t){var i=this,n=e.querySelector(".gslide-media"),s=e.querySelector(".gslide-description"),l={index:this.prevActiveSlideIndex,slide:this.prevActiveSlide,slideNode:this.prevActiveSlide,slideIndex:this.prevActiveSlide,slideConfig:I(this.prevActiveSlideIndex)?null:this.elements[this.prevActiveSlideIndex].slideConfig,trigger:I(this.prevActiveSlideIndex)?null:this.elements[this.prevActiveSlideIndex].node,player:this.getSlidePlayerInstance(this.prevActiveSlideIndex)},o={index:this.index,slide:this.activeSlide,slideNode:this.activeSlide,slideConfig:this.elements[this.index].slideConfig,slideIndex:this.index,trigger:this.elements[this.index].node,player:this.getSlidePlayerInstance(this.index)};if(n.offsetWidth>0&&s&&(p(s),s.style.display=""),d(e,this.effectsClasses),t)g(e,this.settings.cssEfects[this.settings.openEffect].in,(function(){i.settings.autoplayVideos&&i.slidePlayerPlay(e),i.trigger("slide_changed",{prev:l,current:o}),T(i.settings.afterSlideChange)&&i.settings.afterSlideChange.apply(i,[l,o])}));else{var 
r=this.settings.slideEffect,a="none"!==r?this.settings.cssEfects[r].in:r;this.prevActiveSlideIndex>this.index&&"slide"==this.settings.slideEffect&&(a=this.settings.cssEfects.slideBack.in),g(e,a,(function(){i.settings.autoplayVideos&&i.slidePlayerPlay(e),i.trigger("slide_changed",{prev:l,current:o}),T(i.settings.afterSlideChange)&&i.settings.afterSlideChange.apply(i,[l,o])}))}setTimeout((function(){i.resize(e)}),100),h(e,"current")}},{key:"slideAnimateOut",value:function(){if(!this.prevActiveSlide)return!1;var e=this.prevActiveSlide;d(e,this.effectsClasses),h(e,"prev");var t=this.settings.slideEffect,i="none"!==t?this.settings.cssEfects[t].out:t;this.slidePlayerPause(e),this.trigger("slide_before_change",{prev:{index:this.prevActiveSlideIndex,slide:this.prevActiveSlide,slideNode:this.prevActiveSlide,slideIndex:this.prevActiveSlideIndex,slideConfig:I(this.prevActiveSlideIndex)?null:this.elements[this.prevActiveSlideIndex].slideConfig,trigger:I(this.prevActiveSlideIndex)?null:this.elements[this.prevActiveSlideIndex].node,player:this.getSlidePlayerInstance(this.prevActiveSlideIndex)},current:{index:this.index,slide:this.activeSlide,slideNode:this.activeSlide,slideIndex:this.index,slideConfig:this.elements[this.index].slideConfig,trigger:this.elements[this.index].node,player:this.getSlidePlayerInstance(this.index)}}),T(this.settings.beforeSlideChange)&&this.settings.beforeSlideChange.apply(this,[{index:this.prevActiveSlideIndex,slide:this.prevActiveSlide,player:this.getSlidePlayerInstance(this.prevActiveSlideIndex)},{index:this.index,slide:this.activeSlide,player:this.getSlidePlayerInstance(this.index)}]),this.prevActiveSlideIndex>this.index&&"slide"==this.settings.slideEffect&&(i=this.settings.cssEfects.slideBack.out),g(e,i,(function(){var t=e.querySelector(".ginner-container"),i=e.querySelector(".gslide-media"),n=e.querySelector(".gslide-description");t.style.transform="",i.style.transform="",d(i,"greset"),i.style.opacity="",n&&(n.style.opacity=""),d(e,"prev")}))}},{key:"getAllPlayers",value:function(){return this.videoPlayers}},{key:"getSlidePlayerInstance",value:function(e){var t="gvideo"+e,i=this.getAllPlayers();return!(!O(i,t)||!i[t])&&i[t]}},{key:"stopSlideVideo",value:function(e){if(k(e)){var t=e.querySelector(".gvideo-wrapper");t&&(e=t.getAttribute("data-index"))}console.log("stopSlideVideo is deprecated, use slidePlayerPause");var i=this.getSlidePlayerInstance(e);i&&i.playing&&i.pause()}},{key:"slidePlayerPause",value:function(e){if(k(e)){var t=e.querySelector(".gvideo-wrapper");t&&(e=t.getAttribute("data-index"))}var i=this.getSlidePlayerInstance(e);i&&i.playing&&i.pause()}},{key:"playSlideVideo",value:function(e){if(k(e)){var t=e.querySelector(".gvideo-wrapper");t&&(e=t.getAttribute("data-index"))}console.log("playSlideVideo is deprecated, use slidePlayerPlay");var i=this.getSlidePlayerInstance(e);i&&!i.playing&&i.play()}},{key:"slidePlayerPlay",value:function(e){if(k(e)){var t=e.querySelector(".gvideo-wrapper");t&&(e=t.getAttribute("data-index"))}var i=this.getSlidePlayerInstance(e);i&&!i.playing&&(i.play(),this.settings.autofocusVideos&&i.elements.container.focus())}},{key:"setElements",value:function(e){var t=this;this.settings.elements=!1;var i=[];e&&e.length&&o(e,(function(e,n){var s=new U(e,t,n),o=s.getConfig(),r=l({},o);r.slideConfig=o,r.instance=s,r.index=n,i.push(r)})),this.elements=i,this.lightboxOpen&&(this.slidesContainer.innerHTML="",this.elements.length&&(o(this.elements,(function(){var 
e=m(t.settings.slideHTML);t.slidesContainer.appendChild(e)})),this.showSlide(0,!0)))}},{key:"getElementIndex",value:function(e){var t=!1;return o(this.elements,(function(i,n){if(O(i,"node")&&i.node==e)return t=n,!0})),t}},{key:"getElements",value:function(){var e=this,t=[];this.elements=this.elements?this.elements:[],!I(this.settings.elements)&&E(this.settings.elements)&&this.settings.elements.length&&o(this.settings.elements,(function(i,n){var s=new U(i,e,n),o=s.getConfig(),r=l({},o);r.node=!1,r.index=n,r.instance=s,r.slideConfig=o,t.push(r)}));var i=!1;return this.getSelector()&&(i=document.querySelectorAll(this.getSelector())),i?(o(i,(function(i,n){var s=new U(i,e,n),o=s.getConfig(),r=l({},o);r.node=i,r.index=n,r.instance=s,r.slideConfig=o,r.gallery=i.getAttribute("data-gallery"),t.push(r)})),t):t}},{key:"getGalleryElements",value:function(e,t){return e.filter((function(e){return e.gallery==t}))}},{key:"getSelector",value:function(){return!this.settings.elements&&(this.settings.selector&&"data-"==this.settings.selector.substring(0,5)?"*[".concat(this.settings.selector,"]"):this.settings.selector)}},{key:"getActiveSlide",value:function(){return this.slidesContainer.querySelectorAll(".gslide")[this.index]}},{key:"getActiveSlideIndex",value:function(){return this.index}},{key:"getAnimationClasses",value:function(){var e=[];for(var t in this.settings.cssEfects)if(this.settings.cssEfects.hasOwnProperty(t)){var i=this.settings.cssEfects[t];e.push("g".concat(i.in)),e.push("g".concat(i.out))}return e.join(" ")}},{key:"build",value:function(){var e=this;if(this.built)return!1;var t=document.body.childNodes,i=[];o(t,(function(e){e.parentNode==document.body&&"#"!==e.nodeName.charAt(0)&&e.hasAttribute&&!e.hasAttribute("aria-hidden")&&(i.push(e),e.setAttribute("aria-hidden","true"))}));var n=O(this.settings.svg,"next")?this.settings.svg.next:"",s=O(this.settings.svg,"prev")?this.settings.svg.prev:"",l=O(this.settings.svg,"close")?this.settings.svg.close:"",r=this.settings.lightboxHTML;r=m(r=(r=(r=r.replace(/{nextSVG}/g,n)).replace(/{prevSVG}/g,s)).replace(/{closeSVG}/g,l)),document.body.appendChild(r);var d=document.getElementById("glightbox-body");this.modal=d;var 
g=d.querySelector(".gclose");this.prevButton=d.querySelector(".gprev"),this.nextButton=d.querySelector(".gnext"),this.overlay=d.querySelector(".goverlay"),this.loader=d.querySelector(".gloader"),this.slidesContainer=document.getElementById("glightbox-slider"),this.bodyHiddenChildElms=i,this.events={},h(this.modal,"glightbox-"+this.settings.skin),this.settings.closeButton&&g&&(this.events.close=a("click",{onElement:g,withCallback:function(t,i){t.preventDefault(),e.close()}})),g&&!this.settings.closeButton&&g.parentNode.removeChild(g),this.nextButton&&(this.events.next=a("click",{onElement:this.nextButton,withCallback:function(t,i){t.preventDefault(),e.nextSlide()}})),this.prevButton&&(this.events.prev=a("click",{onElement:this.prevButton,withCallback:function(t,i){t.preventDefault(),e.prevSlide()}})),this.settings.closeOnOutsideClick&&(this.events.outClose=a("click",{onElement:d,withCallback:function(t,i){e.preventOutsideClick||c(document.body,"glightbox-mobile")||u(t.target,".ginner-container")||u(t.target,".gbtn")||c(t.target,"gnext")||c(t.target,"gprev")||e.close()}})),o(this.elements,(function(t,i){e.slidesContainer.appendChild(t.instance.create()),t.slideNode=e.slidesContainer.querySelectorAll(".gslide")[i]})),K&&h(document.body,"glightbox-touch"),this.events.resize=a("resize",{onElement:window,withCallback:function(){e.resize()}}),this.built=!0}},{key:"resize",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:null;if((e=e||this.activeSlide)&&!c(e,"zoomed")){var t=y(),i=e.querySelector(".gvideo-wrapper"),n=e.querySelector(".gslide-image"),s=this.slideDescription,l=t.width,o=t.height;if(l<=768?h(document.body,"glightbox-mobile"):d(document.body,"glightbox-mobile"),i||n){var r=!1;if(s&&(c(s,"description-bottom")||c(s,"description-top"))&&!c(s,"gabsolute")&&(r=!0),n)if(l<=768)n.querySelector("img");else if(r){var a=s.offsetHeight,u=n.querySelector("img");u.setAttribute("style","max-height: calc(100vh - ".concat(a,"px)")),s.setAttribute("style","max-width: ".concat(u.offsetWidth,"px;"))}if(i){var g=O(this.settings.plyr.config,"ratio")?this.settings.plyr.config.ratio:"";if(!g){var v=i.clientWidth,f=i.clientHeight,p=v/f;g="".concat(v/p,":").concat(f/p)}var m=g.split(":"),x=this.settings.videosWidth,b=this.settings.videosWidth,S=(b=M(x)||-1!==x.indexOf("px")?parseInt(x):-1!==x.indexOf("vw")?l*parseInt(x)/100:-1!==x.indexOf("vh")?o*parseInt(x)/100:-1!==x.indexOf("%")?l*parseInt(x)/100:parseInt(i.clientWidth))/(parseInt(m[0])/parseInt(m[1]));if(S=Math.floor(S),r&&(o-=s.offsetHeight),b>l||S>o||ob){var w=i.offsetWidth,T=i.offsetHeight,C=o/T,k={width:w*C,height:T*C};i.parentNode.setAttribute("style","max-width: ".concat(k.width,"px")),r&&s.setAttribute("style","max-width: ".concat(k.width,"px;"))}else i.parentNode.style.maxWidth="".concat(x),r&&s.setAttribute("style","max-width: ".concat(x,";"))}}}}},{key:"reload",value:function(){this.init()}},{key:"updateNavigationClasses",value:function(){var e=this.loop();d(this.nextButton,"disabled"),d(this.prevButton,"disabled"),0==this.index&&this.elements.length-1==0?(h(this.prevButton,"disabled"),h(this.nextButton,"disabled")):0!==this.index||e?this.index!==this.elements.length-1||e||h(this.nextButton,"disabled"):h(this.prevButton,"disabled")}},{key:"loop",value:function(){var e=O(this.settings,"loopAtEnd")?this.settings.loopAtEnd:null;return e=O(this.settings,"loop")?this.settings.loop:e,e}},{key:"close",value:function(){var e=this;if(!this.lightboxOpen){if(this.events){for(var t in 
this.events)this.events.hasOwnProperty(t)&&this.events[t].destroy();this.events=null}return!1}if(this.closing)return!1;this.closing=!0,this.slidePlayerPause(this.activeSlide),this.fullElementsList&&(this.elements=this.fullElementsList),this.bodyHiddenChildElms.length&&o(this.bodyHiddenChildElms,(function(e){e.removeAttribute("aria-hidden")})),h(this.modal,"glightbox-closing"),g(this.overlay,"none"==this.settings.openEffect?"none":this.settings.cssEfects.fade.out),g(this.activeSlide,this.settings.cssEfects[this.settings.closeEffect].out,(function(){if(e.activeSlide=null,e.prevActiveSlideIndex=null,e.prevActiveSlide=null,e.built=!1,e.events){for(var t in e.events)e.events.hasOwnProperty(t)&&e.events[t].destroy();e.events=null}var i=document.body;d(Q,"glightbox-open"),d(i,"glightbox-open touching gdesc-open glightbox-touch glightbox-mobile gscrollbar-fixer"),e.modal.parentNode.removeChild(e.modal),e.trigger("close"),T(e.settings.onClose)&&e.settings.onClose();var n=document.querySelector(".gcss-styles");n&&n.parentNode.removeChild(n),e.lightboxOpen=!1,e.closing=null}))}},{key:"destroy",value:function(){this.close(),this.clearAllEvents(),this.baseEvents&&this.baseEvents.destroy()}},{key:"on",value:function(e,t){var i=arguments.length>2&&void 0!==arguments[2]&&arguments[2];if(!e||!T(t))throw new TypeError("Event name and callback must be defined");this.apiEvents.push({evt:e,once:i,callback:t})}},{key:"once",value:function(e,t){this.on(e,t,!0)}},{key:"trigger",value:function(e){var t=this,i=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null,n=[];o(this.apiEvents,(function(t,s){var l=t.evt,o=t.once,r=t.callback;l==e&&(r(i),o&&n.push(s))})),n.length&&o(n,(function(e){return t.apiEvents.splice(e,1)}))}},{key:"clearAllEvents",value:function(){this.apiEvents.splice(0,this.apiEvents.length)}},{key:"version",value:function(){return"3.1.1"}}]),e}();return function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=new te(e);return t.init(),t}})); diff --git a/assets/plot.png b/assets/plot.png new file mode 100644 index 0000000..1661f7e Binary files /dev/null and b/assets/plot.png differ diff --git a/assets/stylesheets/glightbox.min.css b/assets/stylesheets/glightbox.min.css new file mode 100644 index 0000000..3c9ff87 --- /dev/null +++ b/assets/stylesheets/glightbox.min.css @@ -0,0 +1 @@ +.glightbox-container{width:100%;height:100%;position:fixed;top:0;left:0;z-index:999999!important;overflow:hidden;-ms-touch-action:none;touch-action:none;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;-ms-text-size-adjust:100%;text-size-adjust:100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;outline:0}.glightbox-container.inactive{display:none}.glightbox-container .gcontainer{position:relative;width:100%;height:100%;z-index:9999;overflow:hidden}.glightbox-container .gslider{-webkit-transition:-webkit-transform .4s ease;transition:-webkit-transform .4s ease;transition:transform .4s ease;transition:transform .4s ease,-webkit-transform .4s ease;height:100%;left:0;top:0;width:100%;position:relative;overflow:hidden;display:-webkit-box!important;display:-ms-flexbox!important;display:flex!important;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}.glightbox-container 
.gslide{width:100%;position:absolute;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;opacity:0}.glightbox-container .gslide.current{opacity:1;z-index:99999;position:relative}.glightbox-container .gslide.prev{opacity:1;z-index:9999}.glightbox-container .gslide-inner-content{width:100%}.glightbox-container .ginner-container{position:relative;width:100%;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;max-width:100%;margin:auto;height:100vh}.glightbox-container .ginner-container.gvideo-container{width:100%}.glightbox-container .ginner-container.desc-bottom,.glightbox-container .ginner-container.desc-top{-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.glightbox-container .ginner-container.desc-left,.glightbox-container .ginner-container.desc-right{max-width:100%!important}.gslide iframe,.gslide video{outline:0!important;border:none;min-height:165px;-webkit-overflow-scrolling:touch;-ms-touch-action:auto;touch-action:auto}.gslide:not(.current){pointer-events:none}.gslide-image{-webkit-box-align:center;-ms-flex-align:center;align-items:center}.gslide-image img{max-height:100vh;display:block;padding:0;float:none;outline:0;border:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;max-width:100vw;width:auto;height:auto;-o-object-fit:cover;object-fit:cover;-ms-touch-action:none;touch-action:none;margin:auto;min-width:200px}.desc-bottom .gslide-image img,.desc-top .gslide-image img{width:auto}.desc-left .gslide-image img,.desc-right .gslide-image img{width:auto;max-width:100%}.gslide-image img.zoomable{position:relative}.gslide-image img.dragging{cursor:-webkit-grabbing!important;cursor:grabbing!important;-webkit-transition:none;transition:none}.gslide-video{position:relative;max-width:100vh;width:100%!important}.gslide-video .plyr__poster-enabled.plyr--loading .plyr__poster{display:none}.gslide-video .gvideo-wrapper{width:100%;margin:auto}.gslide-video::before{content:'';position:absolute;width:100%;height:100%;background:rgba(255,0,0,.34);display:none}.gslide-video.playing::before{display:none}.gslide-video.fullscreen{max-width:100%!important;min-width:100%;height:75vh}.gslide-video.fullscreen video{max-width:100%!important;width:100%!important}.gslide-inline{background:#fff;text-align:left;max-height:calc(100vh - 40px);overflow:auto;max-width:100%;margin:auto}.gslide-inline .ginlined-content{padding:20px;width:100%}.gslide-inline .dragging{cursor:-webkit-grabbing!important;cursor:grabbing!important;-webkit-transition:none;transition:none}.ginlined-content{overflow:auto;display:block!important;opacity:1}.gslide-external{display:-webkit-box;display:-ms-flexbox;display:flex;width:100%;min-width:100%;background:#fff;padding:0;overflow:auto;max-height:75vh;height:100%}.gslide-media{display:-webkit-box;display:-ms-flexbox;display:flex;width:auto}.zoomed .gslide-media{-webkit-box-shadow:none!important;box-shadow:none!important}.desc-bottom .gslide-media,.desc-top .gslide-media{margin:0 
auto;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.gslide-description{position:relative;-webkit-box-flex:1;-ms-flex:1 0 100%;flex:1 0 100%}.gslide-description.description-left,.gslide-description.description-right{max-width:100%}.gslide-description.description-bottom,.gslide-description.description-top{margin:0 auto;width:100%}.gslide-description p{margin-bottom:12px}.gslide-description p:last-child{margin-bottom:0}.zoomed .gslide-description{display:none}.glightbox-button-hidden{display:none}.glightbox-mobile .glightbox-container .gslide-description{height:auto!important;width:100%;position:absolute;bottom:0;padding:19px 11px;max-width:100vw!important;-webkit-box-ordinal-group:3!important;-ms-flex-order:2!important;order:2!important;max-height:78vh;overflow:auto!important;background:-webkit-gradient(linear,left top,left bottom,from(rgba(0,0,0,0)),to(rgba(0,0,0,.75)));background:linear-gradient(to bottom,rgba(0,0,0,0) 0,rgba(0,0,0,.75) 100%);-webkit-transition:opacity .3s linear;transition:opacity .3s linear;padding-bottom:50px}.glightbox-mobile .glightbox-container .gslide-title{color:#fff;font-size:1em}.glightbox-mobile .glightbox-container .gslide-desc{color:#a1a1a1}.glightbox-mobile .glightbox-container .gslide-desc a{color:#fff;font-weight:700}.glightbox-mobile .glightbox-container .gslide-desc *{color:inherit}.glightbox-mobile .glightbox-container .gslide-desc .desc-more{color:#fff;opacity:.4}.gdesc-open .gslide-media{-webkit-transition:opacity .5s ease;transition:opacity .5s ease;opacity:.4}.gdesc-open .gdesc-inner{padding-bottom:30px}.gdesc-closed .gslide-media{-webkit-transition:opacity .5s ease;transition:opacity .5s ease;opacity:1}.greset{-webkit-transition:all .3s ease;transition:all .3s ease}.gabsolute{position:absolute}.grelative{position:relative}.glightbox-desc{display:none!important}.glightbox-open{overflow:hidden}.gloader{height:25px;width:25px;-webkit-animation:lightboxLoader .8s infinite linear;animation:lightboxLoader .8s infinite linear;border:2px solid #fff;border-right-color:transparent;border-radius:50%;position:absolute;display:block;z-index:9999;left:0;right:0;margin:0 auto;top:47%}.goverlay{width:100%;height:calc(100vh + 1px);position:fixed;top:-1px;left:0;background:#000;will-change:opacity}.glightbox-mobile .goverlay{background:#000}.gclose,.gnext,.gprev{z-index:99999;cursor:pointer;width:26px;height:44px;border:none;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.gclose svg,.gnext svg,.gprev svg{display:block;width:25px;height:auto;margin:0;padding:0}.gclose.disabled,.gnext.disabled,.gprev.disabled{opacity:.1}.gclose .garrow,.gnext .garrow,.gprev .garrow{stroke:#fff}.gbtn.focused{outline:2px solid #0f3d81}iframe.wait-autoplay{opacity:0}.glightbox-closing .gclose,.glightbox-closing .gnext,.glightbox-closing .gprev{opacity:0!important}.glightbox-clean .gslide-description{background:#fff}.glightbox-clean .gdesc-inner{padding:22px 20px}.glightbox-clean .gslide-title{font-size:1em;font-weight:400;font-family:arial;color:#000;margin-bottom:19px;line-height:1.4em}.glightbox-clean .gslide-desc{font-size:.86em;margin-bottom:0;font-family:arial;line-height:1.4em}.glightbox-clean .gslide-video{background:#000}.glightbox-clean .gclose,.glightbox-clean 
.gnext,.glightbox-clean .gprev{background-color:rgba(0,0,0,.75);border-radius:4px}.glightbox-clean .gclose path,.glightbox-clean .gnext path,.glightbox-clean .gprev path{fill:#fff}.glightbox-clean .gprev{position:absolute;top:-100%;left:30px;width:40px;height:50px}.glightbox-clean .gnext{position:absolute;top:-100%;right:30px;width:40px;height:50px}.glightbox-clean .gclose{width:35px;height:35px;top:15px;right:10px;position:absolute}.glightbox-clean .gclose svg{width:18px;height:auto}.glightbox-clean .gclose:hover{opacity:1}.gfadeIn{-webkit-animation:gfadeIn .5s ease;animation:gfadeIn .5s ease}.gfadeOut{-webkit-animation:gfadeOut .5s ease;animation:gfadeOut .5s ease}.gslideOutLeft{-webkit-animation:gslideOutLeft .3s ease;animation:gslideOutLeft .3s ease}.gslideInLeft{-webkit-animation:gslideInLeft .3s ease;animation:gslideInLeft .3s ease}.gslideOutRight{-webkit-animation:gslideOutRight .3s ease;animation:gslideOutRight .3s ease}.gslideInRight{-webkit-animation:gslideInRight .3s ease;animation:gslideInRight .3s ease}.gzoomIn{-webkit-animation:gzoomIn .5s ease;animation:gzoomIn .5s ease}.gzoomOut{-webkit-animation:gzoomOut .5s ease;animation:gzoomOut .5s ease}@-webkit-keyframes lightboxLoader{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@keyframes lightboxLoader{0%{-webkit-transform:rotate(0);transform:rotate(0)}100%{-webkit-transform:rotate(360deg);transform:rotate(360deg)}}@-webkit-keyframes gfadeIn{from{opacity:0}to{opacity:1}}@keyframes gfadeIn{from{opacity:0}to{opacity:1}}@-webkit-keyframes gfadeOut{from{opacity:1}to{opacity:0}}@keyframes gfadeOut{from{opacity:1}to{opacity:0}}@-webkit-keyframes gslideInLeft{from{opacity:0;-webkit-transform:translate3d(-60%,0,0);transform:translate3d(-60%,0,0)}to{visibility:visible;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);opacity:1}}@keyframes gslideInLeft{from{opacity:0;-webkit-transform:translate3d(-60%,0,0);transform:translate3d(-60%,0,0)}to{visibility:visible;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);opacity:1}}@-webkit-keyframes gslideOutLeft{from{opacity:1;visibility:visible;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}to{-webkit-transform:translate3d(-60%,0,0);transform:translate3d(-60%,0,0);opacity:0;visibility:hidden}}@keyframes gslideOutLeft{from{opacity:1;visibility:visible;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}to{-webkit-transform:translate3d(-60%,0,0);transform:translate3d(-60%,0,0);opacity:0;visibility:hidden}}@-webkit-keyframes gslideInRight{from{opacity:0;visibility:visible;-webkit-transform:translate3d(60%,0,0);transform:translate3d(60%,0,0)}to{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);opacity:1}}@keyframes gslideInRight{from{opacity:0;visibility:visible;-webkit-transform:translate3d(60%,0,0);transform:translate3d(60%,0,0)}to{-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);opacity:1}}@-webkit-keyframes gslideOutRight{from{opacity:1;visibility:visible;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}to{-webkit-transform:translate3d(60%,0,0);transform:translate3d(60%,0,0);opacity:0}}@keyframes gslideOutRight{from{opacity:1;visibility:visible;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}to{-webkit-transform:translate3d(60%,0,0);transform:translate3d(60%,0,0);opacity:0}}@-webkit-keyframes gzoomIn{from{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}to{opacity:1}}@keyframes 
gzoomIn{from{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}to{opacity:1}}@-webkit-keyframes gzoomOut{from{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}to{opacity:0}}@keyframes gzoomOut{from{opacity:1}50%{opacity:0;-webkit-transform:scale3d(.3,.3,.3);transform:scale3d(.3,.3,.3)}to{opacity:0}}@media (min-width:769px){.glightbox-container .ginner-container{width:auto;height:auto;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.glightbox-container .ginner-container.desc-top .gslide-description{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.glightbox-container .ginner-container.desc-top .gslide-image,.glightbox-container .ginner-container.desc-top .gslide-image img{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.glightbox-container .ginner-container.desc-left .gslide-description{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.glightbox-container .ginner-container.desc-left .gslide-image{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.gslide-image img{max-height:97vh;max-width:100%}.gslide-image img.zoomable{cursor:-webkit-zoom-in;cursor:zoom-in}.zoomed .gslide-image img.zoomable{cursor:-webkit-grab;cursor:grab}.gslide-inline{max-height:95vh}.gslide-external{max-height:100vh}.gslide-description.description-left,.gslide-description.description-right{max-width:275px}.glightbox-open{height:auto}.goverlay{background:rgba(0,0,0,.92)}.glightbox-clean .gslide-media{-webkit-box-shadow:1px 2px 9px 0 rgba(0,0,0,.65);box-shadow:1px 2px 9px 0 rgba(0,0,0,.65)}.glightbox-clean .description-left .gdesc-inner,.glightbox-clean .description-right .gdesc-inner{position:absolute;height:100%;overflow-y:auto}.glightbox-clean .gclose,.glightbox-clean .gnext,.glightbox-clean .gprev{background-color:rgba(0,0,0,.32)}.glightbox-clean .gclose:hover,.glightbox-clean .gnext:hover,.glightbox-clean .gprev:hover{background-color:rgba(0,0,0,.7)}.glightbox-clean .gprev{top:45%}.glightbox-clean .gnext{top:45%}}@media (min-width:992px){.glightbox-clean .gclose{opacity:.7;right:20px}}@media screen and (max-height:420px){.goverlay{background:#000}} \ No newline at end of file diff --git a/assingment/index.html b/assingment/index.html index c48d311..5a73139 100644 --- a/assingment/index.html +++ b/assingment/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Assignment

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Assignment

- + \ No newline at end of file diff --git a/contributing/index.html b/contributing/index.html index c782f96..8dc1369 100644 --- a/contributing/index.html +++ b/contributing/index.html @@ -55,7 +55,18 @@ - + @@ -76,6 +87,11 @@
@@ -751,6 +767,19 @@ + + + + @@ -762,6 +791,39 @@ + + + + @@ -951,6 +1013,30 @@ + + + + +
@@ -968,9 +1054,52 @@ -

Contributing

- -

This module is currently work in progress. Please come back later.

+

Contributing

+

We are thrilled to have you join in making this project even better. Please feel free to browse through the resources and guidelines provided here, and let us know if there is anything specific you would like to contribute or discuss.

+

If you would like to help develop this package, you can skim through the to-do list below as well as the contribution guidelines. Just fork the project, add your code, and send a pull request. We are always happy to get some help 👍!

+

If you encounter an issue using the chirpdetector, feel free to open an issue here.

+

Contributor guidelines

+

I try my best to adhere to good coding practices and to keep up with writing tests for this package. As I am currently the only one working on it, here is some documentation of the development packages I use:

+
    +
  • pre-commit for pre-commit hooks
  • +
  • pytest and pytest-coverage for unit tests
  • +
  • ruff for linting and formatting
  • +
  • pyright for static type checking
  • +
+

Before every commit, a pre-commit hook runs all these packages on the code base and rejects the commit if errors are raised. If you want to contribute, please make sure that your code is properly formatted and run the tests before issuing a pull request. The formatting guidelines should be picked up automatically by your ruff installation from the pyproject.toml file.
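If you want to run the same checks locally before committing, invocations like the following should work once the development dependencies are installed (the exact flags are my assumptions based on the tools listed above, not taken from the project's own tooling):

pre-commit run --all-files
ruff check . && ruff format .
pytest --cov
pyright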

+

To Do

+

After the first release, this section will be removed and tasks will be organized as GitHub issues. Until then, if you fixed something, please check it off on this list before opening a pull request.

+
    +
• Refactor train, detect, and convert into much smaller functions. Move accessory functions to utils
  • +
  • Move hardcoded params from assignment algo into config.toml
  • +
• Resolve all pylint, mypy, and ruff errors and warnings
  • +
• Fix make test; it fails after a ruff run
  • +
  • Build github actions CI/CD pipeline for codecov etc.
  • +
  • Move the dataconverter from gridtools to chirpdetector
  • +
  • Extend the dataconverter to just output the spectrograms so that hand-labelling can be done in a separate step
  • +
• Add a main script so that the CLI is chirpdetector <task> --<flag> <args>
  • +
  • Improve simulation of chirps to include more realistic noise, undershoot and maybe even phasic-tonic evolution of the frequency of the big chirps
  • +
  • make the copyconfig script more
  • +
  • start writing the chirp assignment algorithm
  • +
  • Move all the pprinting and logging constructors to a separate module and build a unified console object so that saving logs to file is easier, also log to file as well
  • +
  • Split the messy training loop into functions
  • +
  • Add label-studio
  • +
  • Supply scripts to convert completely unannotated or partially annotated data to the label-studio format to make manual labeling easier
  • +
• Make it possible to output detections as a YOLO dataset
  • +
• Look up how to convert a YOLO dataset to a label-studio input so we can label pre-annotated data, facilitating a full human-in-the-loop approach
  • +
• Add augmentation transforms to the dataset class and add augmentations to the simulation in gridtools. Note to this: unnecessary, we are using real data.
  • +
• Change bbox to the actual YOLO format, not the weird one I made up (x1, y1, x2, y2 instead of x1, y1, w, h). This is why the label-studio export is not working.
  • +
• Port the CLI to click; it works better
  • +
• Try clustering the detected chirp windows on a spectrogram; could be interesting
  • +
@@ -1058,5 +1187,5 @@

Contributing

- + \ No newline at end of file diff --git a/dataset/index.html b/dataset/index.html index 2a2fd8f..9bf05cc 100644 --- a/dataset/index.html +++ b/dataset/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Creating a dataset

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Creating a dataset

- + \ No newline at end of file diff --git a/demo/index.html b/demo/index.html index 9140ddc..01ca597 100644 --- a/demo/index.html +++ b/demo/index.html @@ -55,7 +55,18 @@ - + @@ -76,6 +87,11 @@
@@ -456,6 +472,8 @@ + + @@ -951,6 +969,8 @@ + +
@@ -968,9 +988,28 @@ -

Demo

- - +

Detecting chirps with a few terminal commands

+

Once everything is set up correctly, detecting chirps is a breeze. The terminal utility can be invoked as chirpdetector or simply cpd.

+

Simply run +

cpd detect --path "/path/to/dataset"
+
+And the bounding boxes will be computed and saved to a .csv file. Then run +
cpd assign --path "/path/to/dataset"
+
+to assign each detected chirp to the fundamental frequency of a fish. The results will be added to the .csv file in the dataset. To check whether this went well, you can run
cpd plot --path "/path/to/dataset"
+
+And the spectrograms with bounding boxes and assigned chirps for all detections will be plotted and saved as .png images into a subfolder of your dataset.

+

The result will look something like this:

+
+

Image title +

15 seconds of a recording containing two chirping fish, with bounding boxes around chirps and dots indicating the frequency to which each chirp is assigned.
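By the way, nothing stops you from scripting these three steps over many recordings. Here is a minimal Python sketch; only the three cpd commands above come from this page, while the folder layout (one dataset per subfolder) and the use of subprocess are my assumptions:

import subprocess
from pathlib import Path

# Hypothetical layout: every recording lives in its own subfolder.
for dataset in sorted(Path("/path/to/recordings").iterdir()):
    if not dataset.is_dir():
        continue
    for task in ("detect", "assign", "plot"):
        # Runs the documented commands: cpd detect/assign/plot --path <dataset>
        subprocess.run(["cpd", task, "--path", str(dataset)], check=True)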

+
@@ -1058,5 +1097,5 @@

Demo

- + \ No newline at end of file diff --git a/detection/index.html b/detection/index.html index 624bd17..f3dffa3 100644 --- a/detection/index.html +++ b/detection/index.html @@ -55,7 +55,18 @@ - + @@ -76,11 +87,6 @@
@@ -561,19 +567,6 @@ - - - - @@ -585,32 +578,6 @@ - - - - @@ -995,23 +962,6 @@ - - - - -
@@ -1029,11 +979,9 @@ -

This module is work in progress. Please come back later.

-

This line is just to test if the github actions bot does what he is supposed to.

-

detect_chirps

-

float_index_interpolation

- +

Detection

+ +

Wow, such empty 😮

@@ -1121,5 +1069,5 @@

float_index_interpolation

- + \ No newline at end of file diff --git a/how_it_works/index.html b/how_it_works/index.html index c79b6ff..d787f9c 100644 --- a/how_it_works/index.html +++ b/how_it_works/index.html @@ -55,7 +55,18 @@ - + @@ -968,9 +979,13 @@ -

How it works

- +

How? 🤔

+

Chirps manifest as brief excursions in the frequency of the electric organ discharge. To discern individual chirps in a recording of multiple fish that differ only in frequency, we move into the frequency domain: we compute spectrograms with enough temporal resolution to tell chirps apart and enough frequency resolution to tell the fish apart. The outcome is a series of images.
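To make that trade-off concrete, here is a minimal sketch using scipy; the sampling rate, window length, and overlap below are illustrative assumptions, not the values chirpdetector actually uses:

import numpy as np
from scipy.signal import spectrogram

fs = 20_000  # sampling rate in Hz, assumed for illustration
t = np.arange(0, 15, 1 / fs)
x = np.sin(2 * np.pi * 600 * t)  # stand-in for one fish's field at 600 Hz

# Long windows buy frequency resolution (to separate the fish),
# large overlap buys temporal resolution (to resolve brief chirps).
f, tt, sxx = spectrogram(x, fs=fs, nperseg=4096, noverlap=4096 - 128)
print(sxx.shape)  # (frequency bins, time bins): one image-like array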

+

This framing lets us apply powerful computer vision algorithms, such as a Faster R-CNN, to detect objects like chirps within these 'images'. Each chirp detection yields a bounding box, a motif echoed in the package's logo.
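As a sketch of that detection pattern only, here it is with a generic, pretrained torchvision Faster R-CNN; chirpdetector's actual model configuration and trained weights are not what is shown here:

import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn

# Generic COCO-pretrained detector, not the chirp-trained model.
model = fasterrcnn_resnet50_fpn(weights="DEFAULT").eval()
image = torch.rand(3, 512, 512)  # stand-in for a normalized spectrogram image
with torch.no_grad():
    (prediction,) = model([image])
# Each detection comes back as a bounding box with a label and a score.
print(prediction["boxes"].shape, prediction["scores"][:5])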

+

Post-processing steps refine the results, assigning chirp times to the fundamental frequencies of each fish captured in the recording.

+


+Still not sold? Check out the demo »

@@ -1058,5 +1073,5 @@

How it works

- + \ No newline at end of file diff --git a/index.html b/index.html index 1850aa8..780edda 100644 --- a/index.html +++ b/index.html @@ -53,7 +53,18 @@ - + @@ -74,11 +85,6 @@
@@ -413,17 +419,6 @@ - - @@ -434,64 +429,6 @@ - - - - @@ -1024,55 +961,6 @@ - - -
@@ -1089,80 +977,22 @@ - -
- - Logo - +

+ +

Chirpdetector

+

+

+ Detect communication signals of electric fish using deep neural networks 🐟⚡🧠
+ This project is still a work in progress and will be released around spring of 2024. +

- - + -

- Detect brief communication signals of wave-type weakly electric fish using deep neural networks. -
- Explore the docs » -
-
- View Demo - | - Report Bug - | - Request Feature -

-
+
+

Why? 🤨

-

Codecov

-

This project is still work in progress and will approximately be released in spring of 2024.

-

The Problem

-

Chirps are by far the most thoroughly studied communication signal of weakly electric, if not all fish. But as soon as the electric fields of more than one fish is recorded on the same electrode, detecting chirps becomes so hard, that most of the research to date analyzes this signal in isolated individuals. This is not particularly advantageous if the focus lies on the communication aspect of this signal.

-

The Solution

-

To tackle this isse, this package provides an interface to detect chirps of multiple fish on spectrogram images. This enables the quantitative analysis of chirping between freely interacting fish for the first time.

-

Development

-

I try my best to adhere to good coding practices and catch up on writing tests -for this package. As I am currently the only one working on it, here is some -documentation of the development packages I use:

-
    -
  • pre-commit for pre-commit hooks
  • -
  • pytest and pytest-coverage for unit tests
  • -
  • ruff for linting and formatting
  • -
  • black for linting and formatting
  • -
  • isort to sort imports
  • -
  • pyright for static type checking
  • -
-

Before every commit, a pre-commit hook runs all these packages on the code base -and refuses a push if errors are raised. If you want to contribute, please -make sure that your code is proberly formatted and run the tests before issuing -a pull request.

-

TODO

-

Urgent!!!

-
    -
  • Refactor train, detect, convert. All into much smaller functions. Move accesory functions to utils
  • -
  • Move hardcoded params from assignment algo into config.toml
  • -
  • Resolve all pylint and mypy errors and warnings.. and ruff warnings ... etc
  • -
  • Fix make test, fails after ruff run
  • -
  • -

    Build github actions CI/CD pipeline for codecov etc.

    -
  • -
  • -

    Move the dataconverter from gridtools to chirpdetector

    -
  • -
  • Extend the dataconverter to just output the spectrograms so that hand-labelling can be done in a separate step
  • -
  • Add a main script so that the cli is chirpdetector <task> --<flag> <args>
  • -
  • Improve simulation of chirps to include more realistic noise, undershoot and maybe even phasic-tonic evolution of the frequency of the big chirps
  • -
  • make the copyconfig script more
  • -
  • start writing the chirp assignment algorithm
  • -
  • Move all the pprinting and logging constructors to a separate module and build a unified console object so that saving logs to file is easier, also log to file as well
  • -
  • Split the messy training loop into functions
  • -
  • Add label-studio
  • -
  • Supply scripts to convert completely unannotated or partially annotated data to the label-studio format to make manual labeling easier
  • -
  • Make possible to output detections as a yolo dataset
  • -
  • Look up how to convert a yolo dataset to a label-studio input so we can label pre-annotated data, facilitating a full human-in-the-loop approach
  • -
  • Add augmentation transforms to the dataset class and add augmentations to the simulation in gridtools. Note to this: Unnessecary, using real data.
  • -
  • Change bbox to actual yolo format, not the weird one I made up (which is x1, y1, x2, y2 instead of x1, y1, w, h). This is why the label-studio export is not working.
  • -
  • Port cli to click, works better
  • -
  • Try clustering the detected chirp windows on a spectrogram, could be interesting
  • -
+

Chirps are by far the most thoroughly researched communication signal of electric fish, perhaps of all fish. But detecting chirps becomes hard as soon as more than one fish is recorded. As a result, most of the research to date analyzes this signal in isolated individuals, which is a poor fit for studying its communicative function.

+

To tackle this issue, this package provides a simple toolbox to detect the chirps of multiple fish on spectrograms. This enables true quantitative analyses of chirping between freely behaving fish for the first time.

@@ -1234,5 +1064,5 @@

Urgent!!!

- + \ No newline at end of file diff --git a/installation/index.html b/installation/index.html index 9c4a05f..62b45b4 100644 --- a/installation/index.html +++ b/installation/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Installation

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Installation

- + \ No newline at end of file diff --git a/labeling/index.html b/labeling/index.html index f9c41fa..fc5dd54 100644 --- a/labeling/index.html +++ b/labeling/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Labeling a dataset

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Labeling a dataset

diff --git a/search/search_index.json b/search/search_index.json
index 5c0f549..daaab21 100644
--- a/search/search_index.json
+++ b/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

Detect brief communication signals of wave-type weakly electric fish using deep neural networks. Explore the docs » View Demo | Report Bug | Request Feature

This project is still a work in progress and is expected to be released around spring 2024.

"},{"location":"#the-problem","title":"The Problem","text":"

Chirps are by far the most thoroughly studied communication signal of weakly electric fish, if not all fish. But as soon as the electric fields of more than one fish are recorded on the same electrode, detecting chirps becomes so hard that most of the research to date analyzes this signal in isolated individuals. This is not particularly advantageous if the focus lies on the communication aspect of this signal.

"},{"location":"#the-solution","title":"The Solution","text":"

To tackle this issue, this package provides an interface to detect chirps of multiple fish on spectrogram images. This enables the quantitative analysis of chirping between freely interacting fish for the first time.

"},{"location":"#development","title":"Development","text":"

I try my best to adhere to good coding practices and to catch up on writing tests for this package. As I am currently the only one working on it, here is some documentation of the development packages I use:

  • pre-commit for pre-commit hooks
  • pytest and pytest-coverage for unit tests
  • ruff for linting and formatting
  • black for formatting
  • isort to sort imports
  • pyright for static type checking

Before every commit, a pre-commit hook runs all these packages on the code base and rejects the commit if errors are raised. If you want to contribute, please make sure that your code is properly formatted and run the tests before issuing a pull request.

"},{"location":"#todo","title":"TODO","text":""},{"location":"#urgent","title":"Urgent!!!","text":"
  • Refactor train, detect, convert. All into much smaller functions. Move accessory functions to utils
  • Move hardcoded params from assignment algo into config.toml
  • Resolve all pylint, mypy, and ruff errors and warnings
  • Fix make test, which fails after a ruff run
  • Build github actions CI/CD pipeline for codecov etc.

  • Move the dataconverter from gridtools to chirpdetector

  • Extend the dataconverter to just output the spectrograms so that hand-labelling can be done in a separate step
  • Add a main script so that the cli is chirpdetector <task> --<flag> <args>
  • Improve simulation of chirps to include more realistic noise, undershoot and maybe even phasic-tonic evolution of the frequency of the big chirps
  • make the copyconfig script more
  • start writing the chirp assignment algorithm
  • Move all the pprinting and logging constructors to a separate module and build a unified console object so that saving logs to file is easier, also log to file as well
  • Split the messy training loop into functions
  • Add label-studio
  • Supply scripts to convert completely unannotated or partially annotated data to the label-studio format to make manual labeling easier
  • Make possible to output detections as a yolo dataset
  • Look up how to convert a yolo dataset to a label-studio input so we can label pre-annotated data, facilitating a full human-in-the-loop approach
  • Add augmentation transforms to the dataset class and add augmentations to the simulation in gridtools. Note to this: Unnecessary, using real data.
  • Change bbox to actual yolo format, not the weird one I made up (which is x1, y1, x2, y2 instead of x1, y1, w, h). This is why the label-studio export is not working.
  • Port the cli to click, works better
  • Try clustering the detected chirp windows on a spectrogram, could be interesting
"},{"location":"contributing/","title":"Contributing","text":"

This module is currently a work in progress. Please come back later.

"},{"location":"detection/","title":"Detection","text":"

This module is a work in progress. Please come back later.

This line is just to test if the github actions bot does what it is supposed to.

"},{"location":"detection/#detect_chirps","title":"detect_chirps","text":""},{"location":"detection/#float_index_interpolation","title":"float_index_interpolation","text":""},{"location":"api/assign_chirps/","title":"assign_chirps","text":"

Assign chirps detected on a spectrogram to wavetracker tracks.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_chirps","title":"assign_chirps(assign_data, chirp_df, data)","text":"

Assign chirps to wavetracker tracks.

This function uses the extracted envelope troughs to assign chirps to tracks. It computes a cost function that is high when the trough prominence is high and the distance to the chirp center is low. For each chirp, the track with the highest cost function value is chosen.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_chirps--parameters","title":"Parameters","text":"
  • assign_data: dict Dictionary containing the data needed for assignment
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • data: gridtools.datasets.Dataset Dataset object containing the data
Source code in chirpdetector/assign_chirps.py
def assign_chirps(\n    assign_data: Dict[str, np.ndarray],\n    chirp_df: pd.DataFrame,\n    data: Dataset,\n) -> None:\n    \"\"\"Assign chirps to wavetracker tracks.\n\n    This function uses the extracted envelope troughs to assign chirps to\n    tracks. It computes a cost function that is high when the trough prominence\n    is high and the distance to the chirp center is low. For each chirp, the\n    track with the highest cost function value is chosen.\n\n    Parameters\n    ----------\n    - `assign_data`: `dict`\n        Dictionary containing the data needed for assignment\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    \"\"\"\n    # extract data from assign_data\n    peak_prominences = assign_data[\"proms\"]\n    peak_distances = assign_data[\"peaks\"]\n    peak_times = assign_data[\"ptimes\"]\n    chirp_indices = assign_data[\"cindices\"]\n    track_ids = assign_data[\"track_ids\"]\n\n    # compute cost function.\n    # this function is high when the trough prominence is high\n    # (-> chirp with high contrast)\n    # and when the trough is close to the chirp center as detected by the\n    # r-cnn (-> detected chirp is close to the actual chirp)\n    cost = peak_prominences / peak_distances**2\n\n    # set cost to zero for cases where no peak was found\n    cost[np.isnan(cost)] = 0\n\n    # for each chirp, choose the track where the cost is highest\n    # TODO: to avoid confusion make a cost function where high is good and low\n    # is bad. this is more like a \"gain function\"\n    chosen_tracks = []\n    chosen_track_times = []\n    for idx in np.unique(chirp_indices):\n        candidate_tracks = track_ids[chirp_indices == idx]\n        candidate_costs = cost[chirp_indices == idx]\n        candidate_times = peak_times[chirp_indices == idx]\n        chosen_tracks.append(candidate_tracks[np.argmax(candidate_costs)])\n        chosen_track_times.append(candidate_times[np.argmax(candidate_costs)])\n\n    # store chosen tracks in chirp_df\n    chirp_df[\"assigned_track\"] = chosen_tracks\n\n    # store chirp time estimated from envelope trough in chirp_df\n    chirp_df[\"envelope_trough_time\"] = chosen_track_times\n\n    # save chirp_df\n    chirp_df.to_csv(data.path / \"chirpdetector_bboxes.csv\", index=False)\n\n    # save old format:\n    np.save(data.path / \"chirp_ids_rcnn.npy\", chosen_tracks)\n    np.save(data.path / \"chirp_times_rcnn.npy\", chosen_track_times)\n
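For orientation, here is a tiny toy version of the cost ("gain") computation used above, with invented numbers for one chirp and three candidate tracks:

    import numpy as np

    # hypothetical values: trough prominence and distance (in samples)
    # from the chirp center for three candidate tracks
    peak_prominences = np.array([0.02, 0.08, np.nan])  # nan: no trough found
    peak_distances = np.array([120.0, 40.0, np.nan])
    cost = peak_prominences / peak_distances**2
    cost[np.isnan(cost)] = 0       # tracks without a trough get zero
    chosen = int(np.argmax(cost))  # -> 1: prominent trough close to the chirp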
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_cli","title":"assign_cli(path)","text":"

Assign chirps to wavetracker tracks.

This is the command line interface for the assign_chirps function.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_cli--parameters","title":"Parameters","text":"
  • path: pathlib.path path to the directory containing the chirpdetector.toml file
Source code in chirpdetector/assign_chirps.py
def assign_cli(path: pathlib.Path) -> None:\n    \"\"\"Assign chirps to wavetracker tracks.\n\n    this is the command line interface for the assign_chirps function.\n\n    Parameters\n    ----------\n    - `path`: `pathlib.path`\n        path to the directory containing the chirpdetector.toml file\n    \"\"\"\n    if not path.is_dir():\n        msg = f\"{path} is not a directory\"\n        raise ValueError(msg)\n\n    if not (path / \"chirpdetector.toml\").is_file():\n        msg = f\"{path} does not contain a chirpdetector.toml file\"\n        raise ValueError(msg)\n\n    logger = make_logger(__name__, path / \"chirpdetector.log\")\n    # config = load_config(path / \"chirpdetector.toml\")\n    recs = list(path.iterdir())\n    recs = [r for r in recs if r.is_dir()]\n    # recs = [path / \"subset_2020-03-18-10_34_t0_9320.0_t1_9920.0\"]\n\n    msg = f\"found {len(recs)} recordings in {path}, starting assignment\"\n    prog.console.log(msg)\n    logger.info(msg)\n\n    prog.console.rule(\"starting assignment\")\n    with prog:\n        task = prog.add_task(\"assigning chirps\", total=len(recs))\n        for rec in recs:\n            msg = f\"assigning chirps in {rec}\"\n            logger.info(msg)\n            prog.console.log(msg)\n\n            data = load(rec)\n            chirp_df = pd.read_csv(rec / \"chirpdetector_bboxes.csv\")\n            assign_data, chirp_df, data = extract_assignment_data(\n                data, chirp_df\n            )\n            assign_chirps(assign_data, chirp_df, data)\n            prog.update(task, advance=1)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.bbox_to_chirptimes","title":"bbox_to_chirptimes(chirp_df)","text":"

Convert chirp bboxes to chirp times.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.bbox_to_chirptimes--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe dataframe containing the chirp bboxes
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.bbox_to_chirptimes--returns","title":"Returns","text":"
  • chirp_df: pd.dataframe dataframe containing the chirp bboxes with chirp times.
Source code in chirpdetector/assign_chirps.py
def bbox_to_chirptimes(chirp_df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Convert chirp bboxes to chirp times.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        dataframe containing the chirp bboxes\n\n    Returns\n    -------\n    - `chirp_df`: `pd.dataframe`\n        dataframe containing the chirp bboxes with chirp times.\n    \"\"\"\n    chirp_df[\"chirp_times\"] = np.mean(chirp_df[[\"t1\", \"t2\"]], axis=1)\n\n    return chirp_df\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.clean_bboxes","title":"clean_bboxes(data, chirp_df)","text":"

Clean the chirp bboxes.

This is a collection of filters that remove bboxes that either overlap, are out of range or otherwise do not make sense.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.clean_bboxes--parameters","title":"Parameters","text":"
  • data: gridtools.datasets.Dataset Dataset object containing the data
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.clean_bboxes--returns","title":"Returns","text":"
  • chirp_df_tf: pd.dataframe Dataframe containing the chirp bboxes that overlap with the range
Source code in chirpdetector/assign_chirps.py
def clean_bboxes(data: Dataset, chirp_df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Clean the chirp bboxes.\n\n    This is a collection of filters that remove bboxes that\n    either overlap, are out of range or otherwise do not make sense.\n\n    Parameters\n    ----------\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n\n    Returns\n    -------\n    - `chirp_df_tf`: `pd.dataframe`\n        Dataframe containing the chirp bboxes that overlap with the range\n    \"\"\"\n    # non-max suppression: remove all chirp bboxes that overlap with\n    # another more than threshold\n    pick_indices = non_max_suppression_fast(chirp_df, 0.5)\n    chirp_df_nms = chirp_df.loc[pick_indices, :]\n\n    # track filter: remove all chirp bboxes that do not overlap with\n    # the range spanned by the min and max of the wavetracker frequency tracks\n    minf = np.min(data.track.freqs).astype(float)\n    maxf = np.max(data.track.freqs).astype(float)\n    # maybe add some more cleaning here, such\n    # as removing chirps that are too short or too long\n    return track_filter(chirp_df_nms, minf, maxf)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.cleanup","title":"cleanup(chirp_df, data)","text":"

Clean the chirp bboxes.

This is a collection of filters that remove bboxes that either overlap, are out of range or otherwise do not make sense.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.cleanup--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • data: gridtools.datasets.Dataset Dataset object containing the data
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.cleanup--returns","title":"Returns","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes that overlap with the range
Source code in chirpdetector/assign_chirps.py
def cleanup(chirp_df: pd.DataFrame, data: Dataset) -> pd.DataFrame:\n    \"\"\"Clean the chirp bboxes.\n\n    This is a collection of filters that remove bboxes that\n    either overlap, are out of range or otherwise do not make sense.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n\n    Returns\n    -------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes that overlap with the range\n    \"\"\"\n    # first clean the bboxes\n    chirp_df = clean_bboxes(data, chirp_df)\n    # sort chirps in df by time, i.e. t1\n    chirp_df = chirp_df.sort_values(by=\"t1\", ascending=True)\n    # compute chirp times, i.e. center of the bbox x axis\n    return bbox_to_chirptimes(chirp_df)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_assignment_data","title":"extract_assignment_data(data, chirp_df)","text":"

Get envelope troughs to determine chirp assignment.

This algorithm assigns chirps to wavetracker tracks in a series of steps:

  1. clean the chirp bboxes
  2. for each fish track, filter the signal on the best electrode
  3. find troughs in the envelope of the filtered signal
  4. compute the prominence of the trough and the distance to the chirp center
  5. compute a cost function that is high when the trough prominence is high and the distance to the chirp center is low
  6. compare the value of the cost function for each track and choose the track with the highest cost function value

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_assignment_data--parameters","title":"Parameters","text":"
  • data: dataset Dataset object containing the data
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
Source code in chirpdetector/assign_chirps.py
def extract_assignment_data(\n    data: Dataset, chirp_df: pd.DataFrame\n) -> Tuple[Dict[str, np.ndarray], pd.DataFrame, Dataset]:\n    \"\"\"Get envelope troughs to determine chirp assignment.\n\n    This algorigthm assigns chirps to wavetracker tracks by a series of steps:\n    1. clean the chirp bboxes\n    2. for each fish track, filter the signal on the best electrode\n    3. find troughs in the envelope of the filtered signal\n    4. compute the prominence of the trough and the distance to the chirp\n    center\n    5. compute a cost function that is high when the trough prominence is high\n    and the distance to the chirp center is low\n    6. compare the value of the cost function for each track and choose the\n    track with the highest cost function value\n\n    Parameters\n    ----------\n    - `data`: `dataset`\n        Dataset object containing the data\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    \"\"\"\n    # clean the chirp bboxes\n    chirp_df = cleanup(chirp_df, data)\n\n    # now loop over all tracks and assign chirps to tracks\n    chirp_indices = []  # index of chirp in chirp_df\n    track_ids = []  # id of track / fish\n    peak_prominences = []  # prominence of trough in envelope\n    peak_distances = []  # distance of trough to chirp center\n    peak_times = []  # time of trough in envelope, should be close to chirp\n\n    for fish_id in data.track.ids:\n        # get chirps, times and freqs and powers for this track\n        chirps = np.array(chirp_df.chirp_times.values)\n        time = data.track.times[\n            data.track.indices[data.track.idents == fish_id]\n        ]\n        freq = data.track.freqs[data.track.idents == fish_id]\n        powers = data.track.powers[data.track.idents == fish_id, :]\n\n        for idx, chirp in enumerate(chirps):\n            # find the closest time, freq and power to the chirp time\n            closest_idx = np.argmin(np.abs(time - chirp))\n            best_electrode = np.argmax(powers[closest_idx, :]).astype(int)\n            second_best_electrode = np.argsort(powers[closest_idx, :])[-2]\n            best_freq = freq[closest_idx]\n\n            # check if chirp overlaps with track\n            f1 = chirp_df.f1.to_numpy()[idx]\n            f2 = chirp_df.f2.to_numpy()[idx]\n            f2 = f1 + (f2 - f1) * 0.5 # range is the lower half of the bbox\n            if (f1 > best_freq) or (f2 < best_freq):\n                peak_distances.append(np.nan)\n                peak_prominences.append(np.nan)\n                peak_times.append(np.nan)\n                chirp_indices.append(idx)\n                track_ids.append(fish_id)\n                continue\n\n            # determine start and stop index of time window on raw data\n            # using bounding box start and stop times of chirp detection\n            start_idx, stop_idx, center_idx = make_indices(\n                chirp_df, data, idx, chirp\n            )\n\n            indices = (start_idx, stop_idx, center_idx)\n            peaks, proms = extract_envelope_trough(\n                data,\n                best_electrode,\n                second_best_electrode,\n                best_freq,\n                indices,\n            )\n\n            # if no peaks are found, skip this chirp\n            if len(peaks) == 0:\n                peak_distances.append(np.nan)\n                peak_prominences.append(np.nan)\n                peak_times.append(np.nan)\n                chirp_indices.append(idx)\n                
track_ids.append(fish_id)\n                continue\n\n            # compute index to closest peak to chirp center\n            distances = np.abs(peaks - (center_idx - start_idx))\n            closest_peak_idx = np.argmin(distances)\n\n            # store peak prominence and distance to chirp center\n            peak_distances.append(distances[closest_peak_idx])\n            peak_prominences.append(proms[closest_peak_idx])\n            peak_times.append(\n                (start_idx + peaks[closest_peak_idx]) / data.grid.samplerate,\n            )\n            chirp_indices.append(idx)\n            track_ids.append(fish_id)\n\n    peak_prominences = np.array(peak_prominences)\n    peak_distances = (\n        np.array(peak_distances) + 1\n    )  # add 1 to avoid division by zero\n    peak_times = np.array(peak_times)\n    chirp_indices = np.array(chirp_indices)\n    track_ids = np.array(track_ids)\n\n    assignment_data = {\n        \"proms\": peak_prominences,\n        \"peaks\": peak_distances,\n        \"ptimes\": peak_times,\n        \"cindices\": chirp_indices,\n        \"track_ids\": track_ids,\n    }\n    return (\n        assignment_data,\n        chirp_df,\n        data,\n    )\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_envelope_trough","title":"extract_envelope_trough(data, best_electrode, second_best_electrode, best_freq, indices)","text":"

Extract envelope troughs.

Extracts a snippet from the raw data around the chirp time and computes the envelope of the bandpass filtered signal. Then finds the troughs in the envelope and computes their prominences.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_envelope_trough--parameters","title":"Parameters","text":"
  • data: gridtools.datasets.Dataset Dataset object containing the data
  • best_electrode: int Index of the best electrode
  • second_best_electrode: int Index of the second best electrode
  • best_freq: float Frequency of the chirp
  • indices: Tuple[int, int, int] Tuple containing the start, stop, center indices of the chirp
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_envelope_trough--returns","title":"Returns","text":"
  • peaks: np.ndarray Indices of the envelope troughs
  • proms: np.ndarray Prominences of the envelope troughs
Source code in chirpdetector/assign_chirps.py
def extract_envelope_trough(\n    data: Dataset,\n    best_electrode: int,\n    second_best_electrode: int,\n    best_freq: float,\n    indices: Tuple[int, int, int],\n) -> Tuple[np.ndarray, np.ndarray]:\n    \"\"\"Extract envelope troughs.\n\n    Extracts a snippet from the raw data around the chirp time and computes\n    the envelope of the bandpass filtered signal. Then finds the troughs in\n    the envelope and computes their prominences.\n\n    Parameters\n    ----------\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    - `best_electrode`: `int`\n        Index of the best electrode\n    - `second_best_electrode`: `int`\n        Index of the second best electrode\n    - `best_freq`: `float`\n        Frequency of the chirp\n    - `indices`: `Tuple[int, int, int]`\n        Tuple containing the start, center, stop indices of the chirp\n\n    Returns\n    -------\n    - `peaks`: `np.ndarray`\n        Indices of the envelope troughs\n    - `proms`: `np.ndarray`\n        Prominences of the envelope troughs\n    \"\"\"\n    start_idx, stop_idx, _= indices\n\n    # determine bandpass cutoffs above and below baseline frequency\n    lower_f = best_freq - 15\n    upper_f = best_freq + 15\n\n    # get the raw signal on the 2 best electrodes and make differential\n    raw1 = data.grid.rec[start_idx:stop_idx, best_electrode]\n    raw2 = data.grid.rec[start_idx:stop_idx, second_best_electrode]\n    raw = raw1 - raw2\n\n    # bandpass filter the raw signal\n    raw_filtered = bandpass_filter(\n        raw,\n        data.grid.samplerate,\n        lower_f,\n        upper_f,\n    )\n\n    # compute the envelope of the filtered signal\n    env = envelope(\n        signal=raw_filtered,\n        samplerate=data.grid.samplerate,\n        cutoff_frequency=50,\n    )\n    peaks, proms = get_env_trough(env, raw_filtered)\n    # mpl.use(\"TkAgg\")\n    # plt.plot(env)\n    # plt.plot(raw_filtered)\n    # plt.plot(peaks, env[peaks], \"x\")\n    # plt.show()\n    return peaks, proms\n
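The same idea as a standalone sketch using plain scipy instead of the package's bandpass_filter and envelope helpers; the cutoffs mirror the source above, but this is an illustration, not the package's actual implementation:

    import numpy as np
    from scipy.signal import butter, find_peaks, sosfiltfilt

    def envelope_troughs(raw: np.ndarray, samplerate: float, best_freq: float):
        # bandpass around the baseline frequency (+-15 Hz, as in the source)
        sos = butter(4, [best_freq - 15, best_freq + 15], "bandpass",
                     fs=samplerate, output="sos")
        filtered = sosfiltfilt(sos, raw)
        # crude amplitude envelope: rectify, then lowpass at 50 Hz
        sos_env = butter(4, 50, "lowpass", fs=samplerate, output="sos")
        env = sosfiltfilt(sos_env, np.abs(filtered))
        # troughs are peaks of the inverted envelope
        peaks, params = find_peaks(-env, prominence=1e-3)
        return peaks, params["prominences"]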
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.get_env_trough","title":"get_env_trough(env, raw)","text":"

Get the envelope troughs and their prominences.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.get_env_trough--parameters","title":"Parameters","text":"
  • env: np.ndarray Envelope of the filtered signal
  • raw: np.ndarray Raw signal
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.get_env_trough--returns","title":"Returns","text":"
  • peaks: np.ndarray Indices of the envelope troughs
  • proms: np.ndarray Prominences of the envelope troughs
Source code in chirpdetector/assign_chirps.py
def get_env_trough(\n    env: np.ndarray,\n    raw: np.ndarray,\n) -> Tuple[np.ndarray, np.ndarray]:\n    \"\"\"Get the envelope troughs and their prominences.\n\n    Parameters\n    ----------\n    - `env`: `np.ndarray`\n        Envelope of the filtered signal\n    - `raw`: `np.ndarray`\n        Raw signal\n\n    Returns\n    -------\n    - `peaks`: `np.ndarray`\n        Indices of the envelope troughs\n    - `proms`: `np.ndarray`\n        Prominences of the envelope troughs\n    \"\"\"\n    # normalize the envelope using the amplitude of the raw signal\n    # to preserve the amplitude of the envelope\n    env = env / np.max(np.abs(raw))\n\n    # cut off the first and last 25% of the envelope\n    env[: int(0.25 * len(env))] = np.nan\n    env[int(0.75 * len(env)) :] = np.nan\n\n    # find troughs in the envelope and compute trough prominences\n    peaks, params = find_peaks(-env, prominence=1e-3)\n    proms = params[\"prominences\"]\n    return peaks, proms\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.make_indices","title":"make_indices(chirp_df, data, idx, chirp)","text":"

Make indices for the chirp window.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.make_indices--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • data: gridtools.datasets.Dataset Dataset object containing the data
  • idx: int Index of the chirp in the chirp_df
  • chirp: float Chirp time
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.make_indices--returns","title":"Returns","text":"
  • start_idx: int Start index of the chirp window
  • stop_idx: int Stop index of the chirp window
  • center_idx: int Center index of the chirp window
Source code in chirpdetector/assign_chirps.py
def make_indices(\n    chirp_df: pd.DataFrame, data: Dataset, idx: int, chirp: float\n) -> Tuple[int, int, int]:\n    \"\"\"Make indices for the chirp window.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    - `idx`: `int`\n        Index of the chirp in the chirp_df\n    - `chirp`: `float`\n        Chirp time\n\n    Returns\n    -------\n    - `start_idx`: `int`\n        Start index of the chirp window\n    - `stop_idx`: `int`\n        Stop index of the chirp window\n    - `center_idx`: `int`\n        Center index of the chirp window\n    \"\"\"\n    # determine start and stop index of time window on raw data\n    # using bounding box start and stop times of chirp detection\n    diffr = chirp_df.t2.to_numpy()[idx] - chirp_df.t1.to_numpy()[idx]\n    t1 = chirp_df.t1.to_numpy()[idx] - 0.5 * diffr\n    t2 = chirp_df.t2.to_numpy()[idx] + 0.5 * diffr\n\n    start_idx = int(np.round(t1 * data.grid.samplerate))\n    stop_idx = int(np.round(t2 * data.grid.samplerate))\n    center_idx = int(np.round(chirp * data.grid.samplerate))\n\n    return start_idx, stop_idx, center_idx\n
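For intuition, the window arithmetic with invented numbers: a bbox spanning t1 = 10.0 s to t2 = 10.2 s at a 20 kHz samplerate is padded by half its duration on each side:

    samplerate = 20_000  # made-up samplerate
    t1, t2 = 10.0, 10.2  # made-up bbox start and stop times
    diffr = t2 - t1      # 0.2 s bbox duration
    start_idx = round((t1 - 0.5 * diffr) * samplerate)  # 9.9 s  -> 198000
    stop_idx = round((t2 + 0.5 * diffr) * samplerate)   # 10.3 s -> 206000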
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.non_max_suppression_fast","title":"non_max_suppression_fast(chirp_df, overlapthresh)","text":"

Raster implementation of non-maximum suppression.

To remove overlapping bounding boxes.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.non_max_suppression_fast--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • overlapthresh: float Threshold for overlap between bboxes
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.non_max_suppression_fast--returns","title":"Returns","text":"
  • pick: list List of indices of bboxes to keep
Source code in chirpdetector/assign_chirps.py
def non_max_suppression_fast(\n    chirp_df: pd.DataFrame,\n    overlapthresh: float,\n) -> list:\n    \"\"\"Raster implementation of non-maximum suppression.\n\n    To remove overlapping bounding boxes.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `overlapthresh`: `float`\n        Threshold for overlap between bboxes\n\n    Returns\n    -------\n    - `pick`: `list`\n        List of indices of bboxes to keep\n    \"\"\"\n    # slightly modified version of\n    # https://pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n\n    # convert boxes to list of tuples and then to numpy array\n    boxes = chirp_df[[\"t1\", \"f1\", \"t2\", \"f2\"]].to_numpy()\n\n    # if there are no boxes, return an empty list\n    if len(boxes) == 0:\n        return []\n\n    # initialize the list of picked indexes\n    pick = []\n\n    # grab the coordinates of the bounding boxes\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n\n    # compute the area of the bounding boxes and sort the bounding\n    # boxes by the bottom-right y-coordinate of the bounding box\n    area = (x2 - x1) * (y2 - y1)\n    idxs = np.argsort(y2)\n\n    # keep looping while some indexes still remain in the indexes\n    # list\n    while len(idxs) > 0:\n        # grab the last index in the indexes list and add the\n        # index value to the list of picked indexes\n        last = len(idxs) - 1\n        i = idxs[last]\n        pick.append(i)\n\n        # find the largest (x, y) coordinates for the start of\n        # the bounding box and the smallest (x, y) coordinates\n        # for the end of the bounding box\n        xx1 = np.maximum(x1[i], x1[idxs[:last]])\n        yy1 = np.maximum(y1[i], y1[idxs[:last]])\n        xx2 = np.minimum(x2[i], x2[idxs[:last]])\n        yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n        # compute the width and height of the bounding box\n        w = np.maximum(0, xx2 - xx1)\n        h = np.maximum(0, yy2 - yy1)\n\n        # compute the ratio of overlap (intersection over union)\n        overlap = (w * h) / area[idxs[:last]]\n\n        # delete all indexes from the index list that have\n        idxs = np.delete(\n            idxs,\n            np.concatenate(([last], np.where(overlap > overlapthresh)[0])),\n        )\n        # return the indicies of the picked boxes\n    return pick\n
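A quick toy check of the suppression behaviour (invented coordinates), assuming the function is importable from chirpdetector.assign_chirps: two boxes that overlap by 90% collapse to one at the 0.5 threshold used in clean_bboxes, while a distant box survives:

    import pandas as pd
    from chirpdetector.assign_chirps import non_max_suppression_fast

    df = pd.DataFrame({
        "t1": [0.0, 0.1, 5.0], "f1": [600.0, 600.0, 700.0],
        "t2": [1.0, 1.1, 6.0], "f2": [650.0, 650.0, 750.0],
    })
    keep = non_max_suppression_fast(df, overlapthresh=0.5)
    # one of the two overlapping boxes is dropped; the distant third survives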
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.track_filter","title":"track_filter(chirp_df, minf, maxf)","text":"

Remove chirp bboxes that do not overlap with tracks.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.track_filter--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • minf: float Minimum frequency of the range
  • maxf: float Maximum frequency of the range
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.track_filter--returns","title":"Returns","text":"
  • chirp_df_tf: pd.dataframe Dataframe containing the chirp bboxes that overlap with the range
Source code in chirpdetector/assign_chirps.py
def track_filter(\n    chirp_df: pd.DataFrame,\n    minf: float,\n    maxf: float,\n) -> pd.DataFrame:\n    \"\"\"Remove chirp bboxes that do not overlap with tracks.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `minf`: `float`\n        Minimum frequency of the range\n    - `maxf`: `float`\n        Maximum frequency of the range\n\n    Returns\n    -------\n    - `chirp_df_tf`: `pd.dataframe`\n        Dataframe containing the chirp bboxes that overlap with the range\n    \"\"\"\n    # remove all chirp bboxes that have no overlap with the range spanned by\n    # minf and maxf\n\n    # first build a box that spans the entire range\n    range_box = np.array([0, minf, np.max(chirp_df.t2), maxf])\n\n    # now compute the intersection between the range box and each chirp bboxes\n    # and keep only those that have an intersection area > 0\n    chirp_df_tf = chirp_df.copy()\n    intersection = chirp_df_tf.apply(\n        lambda row: (\n            max(0, min(row[\"t2\"], range_box[2]) - max(row[\"t1\"], range_box[0]))\n            * max(\n                0,\n                min(row[\"f2\"], range_box[3]) - max(row[\"f1\"], range_box[1]),\n            )\n        ),\n        axis=1,\n    )\n    return chirp_df_tf.loc[intersection > 0, :]\n
"},{"location":"api/convert_data/","title":"convert_data","text":"

Functions and classes for converting data.

"},{"location":"api/convert_data/#chirpdetector.convert_data.chirp_bounding_boxes","title":"chirp_bounding_boxes(data, nfft)","text":"

Make bounding boxes of simulated chirps using the chirp parameters.

"},{"location":"api/convert_data/#chirpdetector.convert_data.chirp_bounding_boxes--parameters","title":"Parameters","text":"
  • data : Dataset The dataset to make bounding boxes for.
  • nfft : int The number of samples in the FFT.
"},{"location":"api/convert_data/#chirpdetector.convert_data.chirp_bounding_boxes--returns","title":"Returns","text":"

pandas.DataFrame A dataframe with the bounding boxes.

Source code in chirpdetector/convert_data.py
def chirp_bounding_boxes(data: Dataset, nfft: int) -> pd.DataFrame:\n    \"\"\"Make bounding boxes of simulated chirps using the chirp parameters.\n\n    Parameters\n    ----------\n    - `data` : `Dataset`\n        The dataset to make bounding boxes for.\n    - `nfft` : int\n        The number of samples in the FFT.\n\n    Returns\n    -------\n    `pandas.DataFrame`\n        A dataframe with the bounding boxes.\n    \"\"\"\n    assert hasattr(\n        data.com.chirp,\n        \"params\",\n    ), \"Dataset must have a chirp attribute with a params attribute\"\n\n    # Time padding is one NFFT window\n    pad_time = nfft / data.grid.samplerate\n\n    # Freq padding is fixed by the frequency resolution\n    freq_res = data.grid.samplerate / nfft\n    pad_freq = freq_res * 50\n\n    boxes = []\n    ids = []\n    for fish_id in data.track.ids:\n        freqs = data.track.freqs[data.track.idents == fish_id]\n        times = data.track.times[\n            data.track.indices[data.track.idents == fish_id]\n        ]\n        chirps = data.com.chirp.times[data.com.chirp.idents == fish_id]\n        params = data.com.chirp.params[data.com.chirp.idents == fish_id]\n\n        for chirp, param in zip(chirps, params):\n            # take the two closest frequency points\n            f_closest = freqs[np.argsort(np.abs(times - chirp))[:2]]\n\n            # take the two closest time points\n            t_closest = times[np.argsort(np.abs(times - chirp))[:2]]\n\n            # compute the weighted average of the two closest frequency points\n            # using the dt between chirp time and sampled time as weights\n            f_closest = np.average(\n                f_closest,\n                weights=np.abs(t_closest - chirp),\n            )\n\n            # we now have baseline eodf and time point of the chirp. Now\n            # we get some parameters from the params to build the bounding box\n            # for the chirp\n            height = param[1]\n            width = param[2]\n\n            # now define bounding box as center coordinates, width and height\n            t_center = chirp\n            f_center = f_closest + height / 2\n\n            bbox_height = height + pad_freq\n            bbox_width = width + pad_time\n\n            boxes.append((t_center, f_center, bbox_width, bbox_height))\n            ids.append(fish_id)\n\n    dataframe = pd.DataFrame(\n        boxes,\n        columns=[\"t_center\", \"f_center\", \"width\", \"height\"],\n    )\n    dataframe[\"fish_id\"] = ids\n    return dataframe\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert","title":"convert(data, conf, output, label_mode)","text":"

Convert a gridtools dataset to a YOLO dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.convert--parameters","title":"Parameters","text":"
  • data : Dataset The dataset to convert.
  • conf : Config The configuration.
  • output : pathlib.Path The output directory.
  • label_mode : str The label mode. Can be one of 'none', 'synthetic' or 'detected'.
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert--returns","title":"Returns","text":"
  • None
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert--notes","title":"Notes","text":"

This function iterates through a raw recording in chunks and computes the sum spectrogram of each chunk. The chunk size needs to be chosen such that the images can be nicely fed to a detector. The function also computes the bounding boxes of chirps in that chunk and saves them to a dataframe and a txt file into a labels directory.

Source code in chirpdetector/convert_data.py
def convert(\n    data: Dataset,\n    conf: Config,\n    output: pathlib.Path,\n    label_mode: str,\n) -> None:\n    \"\"\"Convert a gridtools dataset to a YOLO dataset.\n\n    Parameters\n    ----------\n    - `data` : `Dataset`\n        The dataset to convert.\n    - `conf` : `Config`\n        The configuration.\n    - `output` : `pathlib.Path`\n        The output directory.\n    - `label_mode` : `str`\n        The label mode. Can be one of 'none', 'synthetic' or 'detected'.\n\n    Returns\n    -------\n    - `None`\n\n    Notes\n    -----\n    This function iterates through a raw recording in chunks and computes the\n    sum spectrogram of each chunk. The chunk size needs to be chosen such that\n    the images can be nicely fed to a detector. The function also computes\n    the bounding boxes of chirps in that chunk and saves them to a dataframe\n    and a txt file into a labels directory.\n    \"\"\"\n    assert hasattr(data, \"grid\"), \"Dataset must have a grid attribute\"\n    assert label_mode in [\n        \"none\",\n        \"synthetic\",\n        \"detected\",\n    ], \"label_mode must be one of 'none', 'synthetic' or 'detected'\"\n\n    dataroot = output\n\n    n_electrodes = data.grid.rec.shape[1]\n\n    # How much time to put into each spectrogram\n    time_window = conf.spec.time_window  # seconds\n    window_overlap = conf.spec.spec_overlap  # seconds\n    freq_pad = conf.spec.freq_pad  # Hz\n    window_overlap_samples = window_overlap * data.grid.samplerate  # samples\n\n    # Spectrogram computation parameters\n    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples\n    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples\n    chunksize = time_window * data.grid.samplerate  # samples\n    n_chunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)\n\n    rprint(\n        \"Dividing recording of duration\"\n        f\"{data.grid.rec.shape[0] / data.grid.samplerate} into {n_chunks}\"\n        f\"chunks of {time_window} seconds each.\",\n    )\n\n    bbox_dfs = []\n\n    # shift the time of the tracks to start at 0\n    # because a subset starts at the orignal time\n    # TODO: Remove this when gridtools is fixed\n    data.track.times -= data.track.times[0]\n\n    for chunk_no in range(n_chunks):\n        # get start and stop indices for the current chunk\n        # including some overlap to compensate for edge effects\n        # this diffrers for the first and last chunk\n\n        if chunk_no == 0:\n            idx1 = sint(chunk_no * chunksize)\n            idx2 = sint((chunk_no + 1) * chunksize + window_overlap_samples)\n        elif chunk_no == n_chunks - 1:\n            idx1 = sint(chunk_no * chunksize - window_overlap_samples)\n            idx2 = sint((chunk_no + 1) * chunksize)\n        else:\n            idx1 = sint(chunk_no * chunksize - window_overlap_samples)\n            idx2 = sint((chunk_no + 1) * chunksize + window_overlap_samples)\n\n        # idx1 and idx2 now determine the window I cut out of the raw signal\n        # to compute the spectrogram of.\n\n        # compute the time and frequency axes of the spectrogram now that we\n        # include the start and stop indices of the current chunk and thus the\n        # right start and stop time. 
The `spectrogram` function does not know\n        # about this and would start every time axis at 0.\n        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate\n        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft\n\n        # create a subset from the grid dataset\n        if idx2 > data.grid.rec.shape[0]:\n            idx2 = data.grid.rec.shape[0] - 1\n\n        chunk = subset(data, idx1, idx2, mode=\"index\")\n\n        # compute the spectrogram for each electrode of the current chunk\n        spec = None\n        for el in range(n_electrodes):\n            # get the signal for the current electrode\n            sig = chunk.grid.rec[:, el]\n\n            # compute the spectrogram for the current electrode\n            chunk_spec, _, _ = spectrogram(\n                data=sig.copy(),\n                samplingrate=data.grid.samplerate,\n                nfft=nfft,\n                hop_length=hop_len,\n            )\n\n            # sum spectrogram over all electrodes\n            # the spec is a tensor\n            if el == 0:\n                spec = chunk_spec\n            else:\n                spec += chunk_spec\n\n        if spec is None:\n            msg = \"Failed to compute spectrogram.\"\n            raise ValueError(msg)\n\n        # normalize spectrogram by the number of electrodes\n        # the spec is still a tensor\n        spec /= n_electrodes\n\n        # convert the spectrogram to dB\n        # .. still a tensor\n        spec = decibel(spec)\n\n        # cut off everything outside the upper frequency limit\n        # the spec is still a tensor\n\n        spectrogram_freq_limits = (\n            np.min(chunk.track.freqs) - freq_pad,\n            np.max(chunk.track.freqs) + freq_pad,\n        )\n\n        spec = spec[\n            (spec_freqs >= spectrogram_freq_limits[0])\n            & (spec_freqs <= spectrogram_freq_limits[1]),\n            :,\n        ]\n        spec_freqs = spec_freqs[\n            (spec_freqs >= spectrogram_freq_limits[0])\n            & (spec_freqs <= spectrogram_freq_limits[1])\n        ]\n\n        # normalize the spectrogram to zero mean and unit variance\n        # the spec is still a tensor\n        spec = (spec - spec.mean()) / spec.std()\n\n        # convert the spectrogram to a PIL image\n        spec = spec.detach().cpu().numpy()\n        img = numpy_to_pil(spec)\n\n        imgname = f\"{chunk.path.name}.png\"\n        if label_mode == \"synthetic\":\n            bbox_df, img = synthetic_labels(\n                dataroot,\n                chunk,\n                nfft,\n                spec,\n                spec_times,\n                spec_freqs,\n                imgname,\n                chunk_no,\n                img,\n            )\n            if bbox_df is None:\n                continue\n            bbox_dfs.append(bbox_df)\n        elif label_mode == \"detected\":\n            detected_labels(dataroot, chunk, imgname, spec, spec_times)\n\n        # save image\n        img.save(dataroot / \"images\" / f\"{imgname}\")\n\n    if label_mode == \"synthetic\":\n        bbox_df = pd.concat(bbox_dfs, ignore_index=True)\n        bbox_df.to_csv(dataroot / f\"{data.path.name}_bboxes.csv\", index=False)\n\n    # save the classes.txt file\n    classes = [\"__background__\", \"chirp\"]\n    with pathlib.Path.open(dataroot / \"classes.txt\", \"w\") as f:\n        f.write(\"\\n\".join(classes))\n
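The chunk index arithmetic above is easy to get wrong, so here is a minimal sketch of the window bounds for an interior chunk, with invented numbers:

    samplerate = 20_000                      # made-up samplerate
    chunksize = 20 * samplerate              # 20 s of samples per chunk
    window_overlap_samples = 1 * samplerate  # 1 s overlap on each side
    chunk_no = 3                             # some interior chunk
    idx1 = int(chunk_no * chunksize - window_overlap_samples)
    idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)
    # samples idx1:idx2 overlap the neighbouring chunks by 1 s on each side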
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert_cli","title":"convert_cli(path, output, label_mode)","text":"

Parse all datasets in a directory and convert them to a YOLO dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.convert_cli--parameters","title":"Parameters","text":"
  • path : pathlib.Path The root directory of the datasets.
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert_cli--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/convert_data.py
def convert_cli(\n    path: pathlib.Path,\n    output: pathlib.Path,\n    label_mode: str,\n) -> None:\n    \"\"\"Parse all datasets in a directory and convert them to a YOLO dataset.\n\n    Parameters\n    ----------\n    - `path` : `pathlib.Path`\n        The root directory of the datasets.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    make_file_tree(output)\n    config = load_config(str(path / \"chirpdetector.toml\"))\n\n    for p in track(list(path.iterdir()), description=\"Building datasets\"):\n        if p.is_file():\n            continue\n        data = load(p)\n        convert(data, config, output, label_mode)\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.detected_labels","title":"detected_labels(output, chunk, imgname, spec, spec_times)","text":"

Use the output of detect_chirps to make a YOLO dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.detected_labels--parameters","title":"Parameters","text":"
  • output : pathlib.Path The output directory.
  • chunk : Dataset The dataset to make bounding boxes for.
  • imgname : str The name of the image.
  • spec : np.ndarray The spectrogram.
  • spec_times : np.ndarray The time axis of the spectrogram.
"},{"location":"api/convert_data/#chirpdetector.convert_data.detected_labels--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/convert_data.py
def detected_labels(\n    output: pathlib.Path,\n    chunk: Dataset,\n    imgname: str,\n    spec: np.ndarray,\n    spec_times: np.ndarray,\n) -> None:\n    \"\"\"Use the detect_chirps to make a YOLO dataset.\n\n    Parameters\n    ----------\n    - `output` : `pathlib.Path`\n        The output directory.\n    - `chunk` : `Dataset`\n        The dataset to make bounding boxes for.\n    - `imgname` : `str`\n        The name of the image.\n    - `spec` : `np.ndarray`\n        The spectrogram.\n    - `spec_times` : `np.ndarray`\n        The time axis of the spectrogram.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # load the detected bboxes csv\n    # TODO: This is a workaround. Instead improve the subset naming convention\n    # in gridtools\n    source_dataset = chunk.path.name.split(\"_\")[1:-4]\n    source_dataset = \"_\".join(source_dataset)\n    source_dataset = chunk.path.parent / source_dataset\n\n    dataframe = pd.read_csv(source_dataset / \"chirpdetector_bboxes.csv\")\n\n    # get chunk start and stop time\n    start, stop = spec_times[0], spec_times[-1]\n\n    # get the bboxes for this chunk\n    bboxes = dataframe[(dataframe.t1 >= start) & (dataframe.t2 <= stop)]\n\n    # get the x and y coordinates of the bboxes in pixels as dataframe\n    bboxes_xy = bboxes[[\"x1\", \"y1\", \"x2\", \"y2\"]]\n\n    # convert from x1, y1, x2, y2 to centerx, centery, width, height\n    centerx = np.array((bboxes_xy[\"x1\"] + bboxes_xy[\"x2\"]) / 2)\n    centery = np.array((bboxes_xy[\"y1\"] + bboxes_xy[\"y2\"]) / 2)\n    width = np.array(bboxes_xy[\"x2\"] - bboxes_xy[\"x1\"])\n    height = np.array(bboxes_xy[\"y2\"] - bboxes_xy[\"y1\"])\n\n    # flip centery because origin is top left\n    centery = spec.shape[0] - centery\n\n    # make relative to image size\n    centerx = centerx / spec.shape[1]\n    centery = centery / spec.shape[0]\n    width = width / spec.shape[1]\n    height = height / spec.shape[0]\n    labels = np.ones_like(centerx, dtype=int)\n\n    # make a new dataframe with the relative coordinates\n    new_bboxes = pd.DataFrame(\n        {\"l\": labels, \"x\": centerx, \"y\": centery, \"w\": width, \"h\": height},\n    )\n\n    # save dataframe for every spec without headers as txt\n    new_bboxes.to_csv(\n        output / \"labels\" / f\"{imgname[:-4]}.txt\",\n        header=False,\n        index=False,\n        sep=\" \",\n    )\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.make_file_tree","title":"make_file_tree(path)","text":"

Build a file tree for the training dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.make_file_tree--parameters","title":"Parameters","text":"

path : pathlib.Path The root directory of the dataset.

Source code in chirpdetector/convert_data.py
def make_file_tree(path: pathlib.Path) -> None:\n    \"\"\"Build a file tree for the training dataset.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The root directory of the dataset.\n    \"\"\"\n    if path.parent.exists() and path.parent.is_file():\n        msg = (\n            f\"Parent directory of {path} is a file. \"\n            \"Please specify a directory.\"\n        )\n        raise ValueError(msg)\n\n    if path.exists():\n        shutil.rmtree(path)\n\n    path.mkdir(exist_ok=True, parents=True)\n\n    train_imgs = path / \"images\"\n    train_labels = path / \"labels\"\n    train_imgs.mkdir(exist_ok=True, parents=True)\n    train_labels.mkdir(exist_ok=True, parents=True)\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.numpy_to_pil","title":"numpy_to_pil(img)","text":"

Convert a 2D numpy array to a PIL image.

"},{"location":"api/convert_data/#chirpdetector.convert_data.numpy_to_pil--parameters","title":"Parameters","text":"

img : np.ndarray The input image.

"},{"location":"api/convert_data/#chirpdetector.convert_data.numpy_to_pil--returns","title":"Returns","text":"

PIL.Image The converted image.

Source code in chirpdetector/convert_data.py
def numpy_to_pil(img: np.ndarray) -> Image.Image:\n    \"\"\"Convert a 2D numpy array to a PIL image.\n\n    Parameters\n    ----------\n    img : np.ndarray\n        The input image.\n\n    Returns\n    -------\n    PIL.Image\n        The converted image.\n    \"\"\"\n    img_dimens = 2\n    if len(img.shape) != img_dimens:\n        msg = f\"Image must be {img_dimens}D\"\n        raise ValueError(msg)\n\n    if img.max() == img.min():\n        msg = \"Image must have more than one value\"\n        raise ValueError(msg)\n\n    img = np.flipud(img)\n    intimg = np.uint8((img - img.min()) / (img.max() - img.min()) * 255)\n    return Image.fromarray(intimg)\n
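A minimal usage sketch, assuming the function is importable from chirpdetector.convert_data; the random array stands in for a real spectrogram and the output filename is hypothetical:

    import numpy as np
    from chirpdetector.convert_data import numpy_to_pil

    spec = np.random.rand(128, 256)  # fake 2D spectrogram, values in [0, 1)
    img = numpy_to_pil(spec)         # flipped vertically and scaled to uint8
    img.save("example_spec.png")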
"},{"location":"api/convert_data/#chirpdetector.convert_data.synthetic_labels","title":"synthetic_labels(output, chunk, nfft, spec, spec_times, spec_freqs, imgname, chunk_no, img)","text":"

Generate labels of a simulated dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.synthetic_labels--parameters","title":"Parameters","text":"
  • output : pathlib.Path The output directory.
  • chunk : Dataset The dataset to make bounding boxes for.
  • nfft : int The number of samples in the FFT.
  • spec : np.ndarray The spectrogram.
  • spec_times : np.ndarray The time axis of the spectrogram.
  • spec_freqs : np.ndarray The frequency axis of the spectrogram.
  • imgname : str The name of the image.
  • chunk_no : int The chunk number.
  • img : Image The image.
"},{"location":"api/convert_data/#chirpdetector.convert_data.synthetic_labels--returns","title":"Returns","text":"
  • pandas.DataFrame A dataframe with the bounding boxes.
Source code in chirpdetector/convert_data.py
def synthetic_labels(\n    output: pathlib.Path,\n    chunk: Dataset,\n    nfft: int,\n    spec: np.ndarray,\n    spec_times: np.ndarray,\n    spec_freqs: np.ndarray,\n    imgname: str,\n    chunk_no: int,\n    img: Image.Image,\n) -> Union[Tuple[pd.DataFrame, Image.Image], Tuple[None, None]]:\n    \"\"\"Generate labels of a simulated dataset.\n\n    Parameters\n    ----------\n    - `output` : `pathlib.Path`\n        The output directory.\n    - `chunk` : `Dataset`\n        The dataset to make bounding boxes for.\n    - `nfft` : `int`\n        The number of samples in the FFT.\n    - `spec` : `np.ndarray`\n        The spectrogram.\n    - `spec_times` : `np.ndarray`\n        The time axis of the spectrogram.\n    - `spec_freqs` : `np.ndarray`\n        The frequency axis of the spectrogram.\n    - `imgname` : `str`\n        The name of the image.\n    - `chunk_no` : `int`\n        The chunk number.\n    - `img` : `Image`\n        The image.\n\n    Returns\n    -------\n    - `pandas.DataFrame`\n        A dataframe with the bounding boxes.\n    \"\"\"\n    # compute the bounding boxes for this chunk\n    bboxes = chirp_bounding_boxes(chunk, nfft)\n\n    if len(bboxes) == 0:\n        return None, None\n\n    # convert bounding box center coordinates to spectrogram coordinates\n    # find the indices on the spec_times corresponding to the center times\n    x = np.searchsorted(spec_times, bboxes.t_center)\n    y = np.searchsorted(spec_freqs, bboxes.f_center)\n    widths = np.searchsorted(spec_times - spec_times[0], bboxes.width)\n    heights = np.searchsorted(spec_freqs - spec_freqs[0], bboxes.height)\n\n    # now we have center coordinates, widths and heights in indices. But PIL\n    # expects coordinates in pixels in the format\n    # (Upper left x coordinate, upper left y coordinate,\n    # lower right x coordinate, lower right y coordinate)\n    # In addiotion, an image starts in the top left corner so the bboxes\n    # need to be mirrored horizontally.\n\n    y = spec.shape[0] - y  # flip the y values to fit y=0 at the top\n    lxs, lys = x - widths / 2, y - heights / 2\n    rxs, rys = x + widths / 2, y + heights / 2\n\n    # add them to the bboxes dataframe\n    bboxes[\"upperleft_img_x\"] = lxs\n    bboxes[\"upperleft_img_y\"] = lys\n    bboxes[\"lowerright_img_x\"] = rxs\n    bboxes[\"lowerright_img_y\"] = rys\n\n    # yolo format is centerx, centery, width, height\n    # convert xmin, ymin, xmax, ymax to centerx, centery, width, height\n    centerx = (lxs + rxs) / 2\n    centery = (lys + rys) / 2\n    width = rxs - lxs\n    height = rys - lys\n\n    # most deep learning frameworks expect bounding box coordinates\n    # as relative to the image size. 
So we normalize the coordinates\n    # to the image size\n    centerx_norm = centerx / spec.shape[1]\n    centery_norm = centery / spec.shape[0]\n    width_norm = width / spec.shape[1]\n    height_norm = height / spec.shape[0]\n\n    # add them to the bboxes dataframe\n    bboxes[\"centerx_norm\"] = centerx_norm\n    bboxes[\"centery_norm\"] = centery_norm\n    bboxes[\"width_norm\"] = width_norm\n    bboxes[\"height_norm\"] = height_norm\n\n    # add chunk ID to the bboxes dataframe\n    bboxes[\"chunk_id\"] = chunk_no\n\n    # put them into a dataframe to save for eahc spectrogram\n    dataframe = pd.DataFrame(\n        {\n            \"cx\": centerx_norm,\n            \"cy\": centery_norm,\n            \"w\": width_norm,\n            \"h\": height_norm,\n        },\n    )\n\n    # add as first colum instance id\n    dataframe.insert(0, \"instance_id\", np.ones_like(lxs, dtype=int))\n\n    # stash the bboxes dataframe for this chunk\n    bboxes[\"image\"] = imgname\n\n    # save dataframe for every spec without headers as txt\n    dataframe.to_csv(\n        output / \"labels\" / f\"{chunk.path.name}.txt\",\n        header=False,\n        index=False,\n        sep=\" \",\n    )\n    return bboxes, img\n
"},{"location":"api/dataset_utils/","title":"dataset_utils","text":"

Utility functions for training datasets in the YOLO format.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.clean_yolo_dataset","title":"clean_yolo_dataset(path, img_ext)","text":"

Remove images and labels when the label file is empty.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.clean_yolo_dataset--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the dataset. img_ext : str

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.clean_yolo_dataset--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def clean_yolo_dataset(path: pathlib.Path, img_ext: str) -> None:\n    \"\"\"Remove images and labels when the label file is empty.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the dataset.\n    img_ext : str\n\n    Returns\n    -------\n    None\n    \"\"\"\n    img_path = path / \"images\"\n    lbl_path = path / \"labels\"\n\n    images = list(img_path.glob(f\"*{img_ext}\"))\n\n    for image in images:\n        lbl = lbl_path / f\"{image.stem}.txt\"\n        if lbl.stat().st_size == 0:\n            image.unlink()\n            lbl.unlink()\n
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.load_img","title":"load_img(path)","text":"

Load an image from a path as a numpy array.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.load_img--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the image.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.load_img--returns","title":"Returns","text":"

img : np.ndarray The image as a numpy array.

Source code in chirpdetector/dataset_utils.py
def load_img(path: pathlib.Path) -> np.ndarray:\n    \"\"\"Load an image from a path as a numpy array.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the image.\n\n    Returns\n    -------\n    img : np.ndarray\n        The image as a numpy array.\n    \"\"\"\n    img = Image.open(path)\n    return np.asarray(img)\n
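For example (the image path is hypothetical):

import pathlib\n\nimg = load_img(pathlib.Path(\"my_dataset/images/chunk00001.png\"))\nprint(img.shape)  # e.g. (height, width, channels)\n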
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.merge_yolo_datasets","title":"merge_yolo_datasets(dataset1, dataset2, output)","text":"

Merge two yolo-style datasets into one.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.merge_yolo_datasets--parameters","title":"Parameters","text":"

dataset1 : pathlib.Path The path to the first dataset. dataset2 : pathlib.Path The path to the second dataset. output : pathlib.Path The path to the output dataset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.merge_yolo_datasets--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def merge_yolo_datasets(\n    dataset1: pathlib.Path,\n    dataset2: pathlib.Path,\n    output: pathlib.Path,\n) -> None:\n    \"\"\"Merge two yolo-style datasets into one.\n\n    Parameters\n    ----------\n    dataset1 : pathlib.Path\n        The path to the first dataset.\n    dataset2 : pathlib.Path\n        The path to the second dataset.\n    output : pathlib.Path\n        The path to the output dataset.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    dataset1 = pathlib.Path(dataset1)\n    dataset2 = pathlib.Path(dataset2)\n    output = pathlib.Path(output)\n\n    if not dataset1.exists():\n        msg = f\"{dataset1} does not exist.\"\n        raise FileNotFoundError(msg)\n    if not dataset2.exists():\n        msg = f\"{dataset2} does not exist.\"\n        raise FileNotFoundError(msg)\n    if output.exists():\n        msg = f\"{output} already exists.\"\n        raise FileExistsError(msg)\n\n    output_images = output / \"images\"\n    output_images.mkdir(parents=True, exist_ok=False)\n    output_labels = output / \"labels\"\n    output_labels.mkdir(parents=True, exist_ok=False)\n\n    # sort the file lists so that images and labels pair up by index\n    imgs1 = sorted((dataset1 / \"images\").iterdir())\n    labels1 = sorted((dataset1 / \"labels\").iterdir())\n    imgs2 = sorted((dataset2 / \"images\").iterdir())\n    labels2 = sorted((dataset2 / \"labels\").iterdir())\n\n    print(f\"Found {len(imgs1)} images in {dataset1}.\")\n    print(f\"Found {len(imgs2)} images in {dataset2}.\")\n\n    print(f\"Copying images and labels to {output}...\")\n    for idx, _ in enumerate(imgs1):\n        shutil.copy(imgs1[idx], output_images / imgs1[idx].name)\n        shutil.copy(labels1[idx], output_labels / labels1[idx].name)\n\n    for idx, _ in enumerate(imgs2):\n        shutil.copy(imgs2[idx], output_images / imgs2[idx].name)\n        shutil.copy(labels2[idx], output_labels / labels2[idx].name)\n\n    classes = dataset1 / \"classes.txt\"\n    shutil.copy(classes, output / classes.name)\n\n    print(f\"Done. Merged {len(imgs1) + len(imgs2)} images.\")\n
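A minimal usage sketch (all paths are hypothetical); note that the output directory must not exist yet, otherwise a FileExistsError is raised:

import pathlib\n\nmerge_yolo_datasets(\n    dataset1=pathlib.Path(\"simulated_dataset\"),\n    dataset2=pathlib.Path(\"labeled_dataset\"),\n    output=pathlib.Path(\"merged_dataset\"),\n)\n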
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.plot_yolo_dataset","title":"plot_yolo_dataset(path, n)","text":"

Plot n random images from a YOLO-style dataset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.plot_yolo_dataset--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the dataset. n : int The number of images to plot.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.plot_yolo_dataset--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def plot_yolo_dataset(path: pathlib.Path, n: int) -> None:\n    \"\"\"Plot n random images from a YOLO-style dataset.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the dataset.\n    n : int\n        The number of images to plot.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    mpl.use(\"TkAgg\")\n    labelpath = path / \"labels\"\n    imgpath = path / \"images\"\n\n    label_paths = np.array(list(labelpath.glob(\"*.txt\")))\n    label_paths = np.random.choice(label_paths, n)\n\n    for lp in label_paths:\n        imgp = imgpath / (lp.stem + \".png\")\n        img = load_img(imgp)\n        labs = np.loadtxt(lp, dtype=np.float32).reshape(-1, 5)\n\n        coords = labs[:, 1:]\n\n        # make the normalized coords absolute (pixels)\n        coords[:, 0] *= img.shape[1]\n        coords[:, 1] *= img.shape[0]\n        coords[:, 2] *= img.shape[1]\n        coords[:, 3] *= img.shape[0]\n\n        # turn centerx, centery, width, height into xmin, ymin, xmax, ymax\n        xmin = coords[:, 0] - coords[:, 2] / 2\n        ymin = coords[:, 1] - coords[:, 3] / 2\n        xmax = coords[:, 0] + coords[:, 2] / 2\n        ymax = coords[:, 1] + coords[:, 3] / 2\n\n        # plot the image\n        _, ax = plt.subplots(figsize=(15, 5), constrained_layout=True)\n        ax.imshow(img, cmap=\"magma\")\n        for i in range(len(xmin)):\n            ax.add_patch(\n                Rectangle(\n                    (xmin[i], ymin[i]),\n                    xmax[i] - xmin[i],\n                    ymax[i] - ymin[i],\n                    fill=False,\n                    color=\"white\",\n                ),\n            )\n        ax.set_title(imgp.stem)\n        plt.axis(\"off\")\n        plt.show()\n
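For example, to spot-check five random images with their bounding boxes overlaid (the path is hypothetical):

import pathlib\n\nplot_yolo_dataset(pathlib.Path(\"my_dataset\"), n=5)\n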
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.subset_yolo_dataset","title":"subset_yolo_dataset(path, img_ext, n)","text":"

Subset a YOLO dataset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.subset_yolo_dataset--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the dataset root. img_ext : str The image extension, e.g. .png or .jpg n : int The size of the subset

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.subset_yolo_dataset--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def subset_yolo_dataset(path: pathlib.Path, img_ext: str, n: int) -> None:\n    \"\"\"Subset a YOLO dataset.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the dataset root.\n    img_ext : str\n        The image extension, e.g. .png or .jpg.\n    n : int\n        The size of the subset.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    img_path = path / \"images\"\n    lbl_path = path / \"labels\"\n\n    # materialize the glob generator so numpy builds a 1-D array of paths\n    images = np.array(list(img_path.glob(f\"*{img_ext}\")))\n    np.random.shuffle(images)\n\n    images = images[:n]\n\n    subset_dir = path.parent / f\"{path.name}_subset\"\n    subset_dir.mkdir(exist_ok=True)\n\n    subset_img_path = subset_dir / \"images\"\n    subset_img_path.mkdir(exist_ok=True)\n    subset_lbl_path = subset_dir / \"labels\"\n    subset_lbl_path.mkdir(exist_ok=True)\n\n    shutil.copy(path / \"classes.txt\", subset_dir)\n\n    for image in images:\n        shutil.copy(image, subset_img_path)\n        shutil.copy(lbl_path / f\"{image.stem}.txt\", subset_lbl_path)\n
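A minimal usage sketch (the path is hypothetical); the subset is written next to the original dataset as my_dataset_subset:

import pathlib\n\n# copy 100 random image/label pairs into my_dataset_subset\nsubset_yolo_dataset(pathlib.Path(\"my_dataset\"), img_ext=\".png\", n=100)\n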
"},{"location":"api/detect_chirps/","title":"detect_chirps","text":"

Detect chirps on a spectrogram.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.coords_to_mpl_rectangle","title":"coords_to_mpl_rectangle(boxes)","text":"

Convert normal bounding box to matplotlib.patches.Rectangle format.

Convert box defined by corner coordinates (x1, y1, x2, y2) to box defined by lower left, width and height (x1, y1, w, h).

The corner coordinates are the model output, but the lower-left corner together with width and height is needed by the matplotlib.patches.Rectangle object for plotting.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.coords_to_mpl_rectangle--parameters","title":"Parameters","text":"
  • boxes : numpy.ndarray The boxes to be converted.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.coords_to_mpl_rectangle--returns","title":"Returns","text":"
  • numpy.ndarray The converted boxes.
Source code in chirpdetector/detect_chirps.py
def coords_to_mpl_rectangle(boxes: np.ndarray) -> np.ndarray:\n    \"\"\"Convert normal bounding box to matplotlib.patches.Rectangle format.\n\n    Convert box defined by corner coordinates (x1, y1, x2, y2)\n    to box defined by lower left, width and height (x1, y1, w, h).\n\n    The corner coordinates are the model output, but the lower-left\n    corner, width and height are needed by the\n    `matplotlib.patches.Rectangle` object for plotting.\n\n    Parameters\n    ----------\n    - `boxes` : `numpy.ndarray`\n        The boxes to be converted.\n\n    Returns\n    -------\n    - `numpy.ndarray`\n        The converted boxes.\n    \"\"\"\n    boxes_dims = 2\n    if len(boxes.shape) != boxes_dims:\n        msg = (\n            \"The boxes array must be 2-dimensional.\\n\"\n            f\"Shape of boxes: {boxes.shape}\"\n        )\n        raise ValueError(msg)\n    boxes_cols = 4\n    if boxes.shape[1] != boxes_cols:\n        msg = (\n            \"The boxes array must have 4 columns.\\n\"\n            f\"Shape of boxes: {boxes.shape}\"\n        )\n        raise ValueError(msg)\n\n    new_boxes = np.zeros_like(boxes)\n    new_boxes[:, 0] = boxes[:, 0]\n    new_boxes[:, 1] = boxes[:, 1]\n    new_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]\n    new_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]\n\n    return new_boxes\n
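A small example of the conversion; the box (10, 20, 50, 80) in corner format becomes lower-left corner (10, 20) with width 40 and height 60:

import numpy as np\n\nboxes = np.array([[10.0, 20.0, 50.0, 80.0]])  # (x1, y1, x2, y2)\nprint(coords_to_mpl_rectangle(boxes))  # [[10. 20. 40. 60.]]\n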
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_chirps","title":"detect_chirps(conf, data)","text":"

Detect chirps on a spectrogram.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_chirps--parameters","title":"Parameters","text":"
  • conf : Config The configuration object.
  • data : Dataset The gridtools dataset to detect chirps on.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_chirps--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/detect_chirps.py
def detect_chirps(conf: Config, data: Dataset) -> None:\n    \"\"\"Detect chirps on a spectrogram.\n\n    Parameters\n    ----------\n    - `conf` : `Config`\n        The configuration object.\n    - `data` : `Dataset`\n        The gridtools dataset to detect chirps on.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # get the number of electrodes\n    n_electrodes = data.grid.rec.shape[1]\n\n    # load the model and the checkpoint, and set it to evaluation mode\n    device = get_device()\n    model = load_fasterrcnn(num_classes=len(conf.hyper.classes))\n    checkpoint = torch.load(\n        f\"{conf.hyper.modelpath}/model.pt\",\n        map_location=device,\n    )\n    model.load_state_dict(checkpoint[\"model_state_dict\"])\n    model.to(device).eval()\n\n    # make spec config\n    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples\n    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples\n    chunksize = conf.spec.time_window * data.grid.samplerate  # samples\n    nchunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)\n    window_overlap_samples = int(conf.spec.spec_overlap * data.grid.samplerate)\n\n    bbox_dfs = []\n\n    # iterate over the chunks\n    overwritten = False\n    for chunk_no in range(nchunks):\n        # get start and stop indices for the current chunk\n        # including some overlap to compensate for edge effects\n        # this differs for the first and last chunk\n\n        if chunk_no == 0:\n            idx1 = int(chunk_no * chunksize)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n        elif chunk_no == nchunks - 1:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize)\n        else:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n\n        # idx1 and idx2 now determine the window I cut out of the raw signal\n        # to compute the spectrogram of.\n\n        # compute the time and frequency axes of the spectrogram now that we\n        # include the start and stop indices of the current chunk and thus the\n        # right start and stop time. The `spectrogram` function does not know\n        # about this and would start every time axis at 0.\n        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate\n        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft\n\n        # create a subset from the grid dataset\n        if idx2 > data.grid.rec.shape[0]:\n            idx2 = data.grid.rec.shape[0] - 1\n\n        # This bit should alleviate the edge effects of the tracks\n        # by limiting the start and stop times of the spectrogram\n        # to the start and stop times of the track.\n        start_t = idx1 / data.grid.samplerate\n        stop_t = idx2 / data.grid.samplerate\n        if data.track.times[-1] < stop_t:\n            stop_t = data.track.times[-1]\n            idx2 = int(stop_t * data.grid.samplerate)\n        if data.track.times[0] > start_t:\n            start_t = data.track.times[0]\n            idx1 = int(start_t * data.grid.samplerate)\n        if start_t > data.track.times[-1] or stop_t < data.track.times[0]:\n            continue\n\n        chunk = subset(data, idx1, idx2, mode=\"index\")\n        if len(chunk.track.indices) == 0:\n            continue\n\n        # compute the spectrogram for each electrode of the current chunk\n        spec = torch.zeros((len(spec_freqs), len(spec_times)))\n        for el in range(n_electrodes):\n            # get the signal for the current electrode\n            sig = chunk.grid.rec[:, el]\n\n            # compute the spectrogram for the current electrode\n            chunk_spec, _, _ = spectrogram(\n                data=sig.copy(),\n                samplingrate=data.grid.rec.samplerate,\n                nfft=nfft,\n                hop_length=hop_len,\n            )\n\n            # sum spectrogram over all electrodes\n            # the spec is a tensor\n            if el == 0:\n                spec = chunk_spec\n            else:\n                spec += chunk_spec\n\n        # normalize spectrogram by the number of electrodes\n        # the spec is still a tensor\n        spec /= n_electrodes\n\n        # convert the spectrogram to dB\n        # .. still a tensor\n        spec = decibel(spec)\n\n        # cut off everything outside the upper frequency limit\n        # the spec is still a tensor\n        # TODO: THIS IS SKETCHY AS HELL! As a result, only time and frequency\n        # bounding boxes can be used later! The spectrogram limits change\n        # for every window!\n        flims = (\n            np.min(chunk.track.freqs) - conf.spec.freq_pad,\n            np.max(chunk.track.freqs) + conf.spec.freq_pad,\n        )\n        spec = spec[(spec_freqs >= flims[0]) & (spec_freqs <= flims[1]), :]\n        spec_freqs = spec_freqs[\n            (spec_freqs >= flims[0]) & (spec_freqs <= flims[1])\n        ]\n\n        # make a path to save the spectrogram\n        path = data.path / \"chirpdetections\"\n        if path.exists() and overwritten is False:\n            shutil.rmtree(path)\n            overwritten = True\n        path.mkdir(exist_ok=True)\n        path /= f\"chunk{chunk_no:05d}.png\"\n\n        # add the 3 channels, normalize to 0-1, etc\n        img = spec_to_image(spec)\n\n        # perform the detection\n        with torch.inference_mode():\n            outputs = model([img])\n\n        # put the boxes, scores and labels into the dataset\n        bboxes = outputs[0][\"boxes\"].detach().cpu().numpy()\n        scores = outputs[0][\"scores\"].detach().cpu().numpy()\n        labels = outputs[0][\"labels\"].detach().cpu().numpy()\n\n        # remove all boxes with a score below the threshold\n        bboxes = bboxes[scores > conf.det.threshold]\n        labels = labels[scores > conf.det.threshold]\n        scores = scores[scores > conf.det.threshold]\n\n        # if np.any(scores > conf.det.threshold):\n        #     plot_detections(img, outputs[0], conf.det.threshold, path, conf)\n\n        # save the bboxes to a dataframe\n        bbox_df = pd.DataFrame(\n            data=bboxes,\n            columns=[\"x1\", \"y1\", \"x2\", \"y2\"],\n        )\n        bbox_df[\"score\"] = scores\n        bbox_df[\"label\"] = labels\n\n        # convert x values to time on spec_times\n        spec_times_index = np.arange(0, len(spec_times))\n        bbox_df[\"t1\"] = float_index_interpolation(\n            bbox_df[\"x1\"].to_numpy(),\n            spec_times_index,\n            spec_times,\n        )\n        bbox_df[\"t2\"] = float_index_interpolation(\n            bbox_df[\"x2\"].to_numpy(),\n            spec_times_index,\n            spec_times,\n        )\n\n        # convert y values to frequency on spec_freqs\n        spec_freqs_index = np.arange(len(spec_freqs))\n        bbox_df[\"f1\"] = float_index_interpolation(\n            bbox_df[\"y1\"].to_numpy(),\n            spec_freqs_index,\n            spec_freqs,\n        )\n        bbox_df[\"f2\"] = float_index_interpolation(\n            bbox_df[\"y2\"].to_numpy(),\n            spec_freqs_index,\n            spec_freqs,\n        )\n\n        # save df to list\n        bbox_dfs.append(bbox_df)\n\n    # concatenate all dataframes\n    bbox_df = pd.concat(bbox_dfs)\n    bbox_reset = bbox_df.reset_index(drop=True)\n\n    # sort the dataframe by t1\n    bbox_sorted = bbox_reset.sort_values(by=\"t1\")\n\n    # sort the columns\n    bbox_sorted = bbox_sorted[\n        [\"label\", \"score\", \"x1\", \"y1\", \"x2\", \"y2\", \"t1\", \"f1\", \"t2\", \"f2\"]\n    ]\n\n    # save the dataframe\n    bbox_sorted.to_csv(data.path / \"chirpdetector_bboxes.csv\", index=False)\n
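A minimal usage sketch for a single dataset, mirroring what detect_cli below does; the paths are hypothetical, and load_config and load are the same helpers used in detect_cli:

import pathlib\n\n# assumes a chirpdetector.toml in the folder containing the datasets\nconfig = load_config(\"recordings/chirpdetector.toml\")\ndata = load(pathlib.Path(\"recordings/dataset1\"))\ndetect_chirps(config, data)\n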
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_cli","title":"detect_cli(input_path)","text":"

Terminal interface for the detection function.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_cli--parameters","title":"Parameters","text":"
  • input_path : pathlib.Path The path to the folder containing the datasets.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_cli--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/detect_chirps.py
def detect_cli(input_path: pathlib.Path) -> None:\n    \"\"\"Terminal interface for the detection function.\n\n    Parameters\n    ----------\n    - `input_path` : `pathlib.Path`\n        The path to the folder containing the datasets.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # make the global logger object\n    # global logger  # pylint: disable=global-statement\n    path = pathlib.Path(input_path)\n    logger = make_logger(__name__, path / \"chirpdetector.log\")\n    datasets = [folder for folder in path.iterdir() if folder.is_dir()]\n    confpath = path / \"chirpdetector.toml\"\n\n    # load the config file and print a warning if it does not exist\n    if confpath.exists():\n        config = load_config(str(confpath))\n    else:\n        msg = (\n            \"The configuration file could not be found in the specified path.\"\n            \"Please run `chirpdetector copyconfig` and change the \"\n            \"configuration file to your needs.\"\n        )\n        raise FileNotFoundError(msg)\n\n    # detect chirps in all datasets in the specified path\n    # and show a progress bar\n    prog.console.rule(\"Starting detection\")\n    with prog:\n        task = prog.add_task(\"Detecting chirps...\", total=len(datasets))\n        for dataset in datasets:\n            msg = f\"Detecting chirps in {dataset.name}...\"\n            prog.console.log(msg)\n            logger.info(msg)\n\n            data = load(dataset)\n            detect_chirps(config, data)\n            prog.update(task, advance=1)\n        prog.update(task, completed=len(datasets))\n
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation","title":"float_index_interpolation(values, index_arr, data_arr)","text":"

Convert float indices to values by linear interpolation.

Interpolates a set of float indices within the given index array to obtain corresponding values from the data array using linear interpolation.

Given a set of float indices (values), this function determines the corresponding values in the data_arr by linearly interpolating between adjacent indices in the index_arr. Linear interpolation involves calculating weighted averages based on the fractional parts of the float indices.

This function is useful to transform float coordinates on a spectrogram matrix to the corresponding time and frequency values. The reason for this is, that the model outputs bounding boxes in float coordinates, i.e. it does not care about the exact pixel location of the bounding box.
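In other words, with lo = floor(v), hi = ceil(v), and weight w = v - lo, the result for a float index v is (1 - w) * data[lo] + w * data[hi]. A scalar sketch of this core step (interp_at is a hypothetical helper, not part of the module):

import numpy as np\n\ndef interp_at(v: float, data: np.ndarray) -> float:\n    # weighted average of the two neighboring samples\n    lo, hi = int(np.floor(v)), int(np.ceil(v))\n    w = v - lo\n    return (1 - w) * data[lo] + w * data[hi]\n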

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--parameters","title":"Parameters","text":"
  • values : np.ndarray The float indices that should be interpolated.
  • index_arr : numpy.ndarray The array of indices on the data array.
  • data_arr : numpy.ndarray The array of data.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--returns","title":"Returns","text":"
  • numpy.ndarray The interpolated values.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--raises","title":"Raises","text":"
  • ValueError If any of the input float indices (values) are outside the range of the provided index_arr.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--examples","title":"Examples","text":"

>>> values = np.array([2.5, 3.2, 4.8])
>>> index_arr = np.array([2, 3, 4, 5])
>>> data_arr = np.array([10, 15, 20, 25])
>>> result = float_index_interpolation(values, index_arr, data_arr)
>>> print(result)
array([12.5, 16. , 22.5])

Source code in chirpdetector/detect_chirps.py
def float_index_interpolation(\n    values: np.ndarray,\n    index_arr: np.ndarray,\n    data_arr: np.ndarray,\n) -> np.ndarray:\n    \"\"\"Convert float indices to values by linear interpolation.\n\n    Interpolates a set of float indices within the given index\n    array to obtain corresponding values from the data\n    array using linear interpolation.\n\n    Given a set of float indices (`values`), this function determines\n    the corresponding values in the `data_arr` by linearly interpolating\n    between adjacent indices in the `index_arr`. Linear interpolation\n    involves calculating weighted averages based on the fractional\n    parts of the float indices.\n\n    This function is useful to transform float coordinates on a spectrogram\n    matrix to the corresponding time and frequency values. The reason for\n    this is, that the model outputs bounding boxes in float coordinates,\n    i.e. it does not care about the exact pixel location of the bounding\n    box.\n\n    Parameters\n    ----------\n    - `values` : `np.ndarray`\n        The index value as a float that should be interpolated.\n    - `index_arr` : `numpy.ndarray`\n        The array of indices on the data array.\n    - `data_arr` : `numpy.ndarray`\n        The array of data.\n\n    Returns\n    -------\n    - `numpy.ndarray`\n        The interpolated value.\n\n    Raises\n    ------\n    - `ValueError`\n        If any of the input float indices (`values`) are outside\n        the range of the provided `index_arr`.\n\n    Examples\n    --------\n    >>> values = np.array([2.5, 3.2, 4.8])\n    >>> index_arr = np.array([2, 3, 4, 5])\n    >>> data_arr = np.array([10, 15, 20, 25])\n    >>> result = float_index_interpolation(values, index_arr, data_arr)\n    >>> print(result)\n    array([12.5, 16. , 22.5])\n    \"\"\"\n    # Check if the values are within the range of the index array\n    if np.any(values < (np.min(index_arr) - 1)) or np.any(\n        values > (np.max(index_arr) + 1),\n    ):\n        msg = (\n            \"Values outside the range of index array\\n\"\n            f\"Target values: {values}\\n\"\n            f\"Index array: {index_arr}\\n\"\n            f\"Data array: {data_arr}\"\n        )\n        raise ValueError(msg)\n\n    # Find the indices corresponding to the values\n    lower_indices = np.floor(values).astype(int)\n    upper_indices = np.ceil(values).astype(int)\n\n    # Ensure upper indices are within the array bounds\n    upper_indices = np.minimum(upper_indices, len(index_arr) - 1)\n    lower_indices = np.minimum(lower_indices, len(index_arr) - 1)\n\n    # Calculate the interpolation weights\n    weights = values - lower_indices\n\n    # Linear interpolation\n    return (1 - weights) * data_arr[lower_indices] + weights * data_arr[\n        upper_indices\n    ]\n
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.plot_detections","title":"plot_detections(img_tensor, output, threshold, save_path, conf)","text":"

Plot the detections on the spectrogram.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.plot_detections--parameters","title":"Parameters","text":"
  • img_tensor : torch.Tensor The spectrogram.
  • output : torch.Tensor The output of the model.
  • threshold : float The threshold for the detections.
  • save_path : pathlib.Path The path to save the plot to.
  • conf : Config The configuration object.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.plot_detections--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/detect_chirps.py
def plot_detections(\n    img_tensor: torch.Tensor,\n    output: torch.Tensor,\n    threshold: float,\n    save_path: pathlib.Path,\n    conf: Config,\n) -> None:\n    \"\"\"Plot the detections on the spectrogram.\n\n    Parameters\n    ----------\n    - `img_tensor` : `torch.Tensor`\n        The spectrogram.\n    - `output` : `torch.Tensor`\n        The output of the model.\n    - `threshold` : `float`\n        The threshold for the detections.\n    - `save_path` : `pathlib.Path`\n        The path to save the plot to.\n    - `conf` : `Config`\n        The configuration object.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # retrieve all the data from the output and convert\n    # spectrogram to numpy array\n    img = img_tensor.detach().cpu().numpy().transpose(1, 2, 0)[..., 0]\n    boxes = output[\"boxes\"].detach().cpu().numpy()\n    boxes = coords_to_mpl_rectangle(boxes)\n    scores = output[\"scores\"].detach().cpu().numpy()\n    labels = output[\"labels\"].detach().cpu().numpy()\n    labels = [conf.hyper.classes[i] for i in labels]\n\n    _, ax = plt.subplots(figsize=(20, 10))\n\n    ax.pcolormesh(img, cmap=\"magma\")\n\n    for i, box in enumerate(boxes):\n        if scores[i] > threshold:\n            ax.scatter(\n                box[0],\n                box[1],\n            )\n            ax.add_patch(\n                Rectangle(\n                    box[:2],\n                    box[2],\n                    box[3],\n                    fill=False,\n                    color=\"white\",\n                    linewidth=1,\n                ),\n            )\n            ax.text(\n                box[0],\n                box[1],\n                f\"{scores[i]:.2f}\",\n                color=\"black\",\n                fontsize=8,\n                bbox={\"facecolor\":\"white\", \"alpha\":1},\n            )\n    plt.axis(\"off\")\n    plt.savefig(save_path, dpi=300, bbox_inches=\"tight\", pad_inches=0)\n    plt.close()\n
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.spec_to_image","title":"spec_to_image(spec)","text":"

Convert a spectrogram to an image.

Add 3 color channels, normalize to 0-1, etc.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.spec_to_image--parameters","title":"Parameters","text":"
  • spec : torch.Tensor The spectrogram as a 2-D tensor.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.spec_to_image--returns","title":"Returns","text":"
  • torch.Tensor The spectrogram as a 3-channel image tensor, normalized to the range 0-1.
Source code in chirpdetector/detect_chirps.py
def spec_to_image(spec: torch.Tensor) -> torch.Tensor:\n    \"\"\"Convert a spectrogram to an image.\n\n    Add 3 color channels, normalize to 0-1, etc.\n\n    Parameters\n    ----------\n    - `spec` : `torch.Tensor`\n\n    Returns\n    -------\n    - `torch.Tensor`\n    \"\"\"\n    # make sure the spectrogram is a tensor\n    if not isinstance(spec, torch.Tensor):\n        msg = (\n            \"The spectrogram must be a torch.Tensor.\\n\"\n            f\"Type of spectrogram: {type(spec)}\"\n        )\n        raise TypeError(msg)\n\n    # make sure the spectrogram is 2-dimensional\n    spec_dims = 2\n    if len(spec.size()) != spec_dims:\n        msg = (\n            \"The spectrogram must be a 2-dimensional matrix.\\n\"\n            f\"Shape of spectrogram: {spec.size()}\"\n        )\n        raise ValueError(msg)\n\n    # make sure the spectrogram contains some data\n    if (\n        np.max(spec.detach().cpu().numpy())\n        - np.min(spec.detach().cpu().numpy())\n        == 0\n    ):\n        msg = (\n            \"The spectrogram must contain some data.\\n\"\n            f\"Max value: {np.max(spec.detach().cpu().numpy())}\\n\"\n            f\"Min value: {np.min(spec.detach().cpu().numpy())}\"\n        )\n        raise ValueError(msg)\n\n    # Get the dimensions of the original matrix\n    original_shape = spec.size()\n\n    # Calculate the number of rows and columns in the matrix\n    num_rows, num_cols = original_shape\n\n    # duplicate the matrix 3 times\n    spec = spec.repeat(3, 1, 1)\n\n    # Reshape the matrix to the desired shape (3, num_rows, num_cols)\n    desired_shape = (3, num_rows, num_cols)\n    reshaped_tensor = spec.view(desired_shape)\n\n    # normalize the spectrogram to be between 0 and 1\n    normalized_tensor = (reshaped_tensor - reshaped_tensor.min()) / (\n        reshaped_tensor.max() - reshaped_tensor.min()\n    )\n\n    # make sure image is float32\n    return normalized_tensor.float()\n
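For example, with a random 2-D spectrogram tensor:

import torch\n\nspec = torch.rand(256, 512)  # (frequency bins, time bins)\nimg = spec_to_image(spec)\nprint(img.shape)  # torch.Size([3, 256, 512])\nprint(img.min().item(), img.max().item())  # 0.0 1.0\n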
"},{"location":"api/plot_detections/","title":"plot_detections","text":"

Functions to visualize detections on images.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_all_plots_cli","title":"clean_all_plots_cli(path)","text":"

Remove all plots from the chirpdetections folders of all datasets.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_all_plots_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the folder containing the datasets.

Source code in chirpdetector/plot_detections.py
def clean_all_plots_cli(path: pathlib.Path) -> None:\n    \"\"\"Remove all plots from the chirpdetections folders of all datasets.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the folder containing the datasets.\n    \"\"\"\n    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]\n    with prog:\n        task = prog.add_task(\"Cleaning plots...\", total=len(dirs))\n        for dataset in dirs:\n            prog.console.log(f\"Cleaning plots for {dataset.name}\")\n            clean_plots_cli(dataset)\n            prog.advance(task)\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_plots_cli","title":"clean_plots_cli(path)","text":"

Remove all plots from the chirpdetections folder.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_plots_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the dataset folder.

Source code in chirpdetector/plot_detections.py
def clean_plots_cli(path: pathlib.Path) -> None:\n    \"\"\"Remove all plots from the chirpdetections folder.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the dataset folder.\n    \"\"\"\n    savepath = path / \"chirpdetections\"\n    for f in savepath.iterdir():\n        f.unlink()\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_all_detections_cli","title":"plot_all_detections_cli(path)","text":"

Plot detections on images.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_all_detections_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the folder containing the datasets and the chirpdetector.toml config file.

Source code in chirpdetector/plot_detections.py
def plot_all_detections_cli(path: pathlib.Path) -> None:\n    \"\"\"Plot detections on images.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the folder containing the datasets and the\n        chirpdetector.toml config file.\n    \"\"\"\n    conf = load_config(path / \"chirpdetector.toml\")\n\n    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]\n    with prog:\n        task = prog.add_task(\"Plotting detections...\", total=len(dirs))\n        for dataset in dirs:\n            prog.console.log(f\"Plotting detections for {dataset.name}\")\n            data = load(dataset)\n            chirp_df = pd.read_csv(dataset / \"chirpdetector_bboxes.csv\")\n            plot_detections(data, chirp_df, conf)\n            prog.advance(task)\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections","title":"plot_detections(data, chirp_df, conf)","text":"

Plot detections on spectrograms.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections--parameters","title":"Parameters","text":"

data : Dataset The dataset. chirp_df : pd.DataFrame The dataframe containing the chirp detections. conf : Config The config file.

Source code in chirpdetector/plot_detections.py
def plot_detections(\n    data: Dataset,\n    chirp_df: pd.DataFrame,\n    conf: Config,\n) -> None:\n    \"\"\"Plot detections on spectrograms.\n\n    Parameters\n    ----------\n    data : Dataset\n        The dataset.\n    chirp_df : pd.DataFrame\n        The dataframe containing the chirp detections.\n    conf : Config\n        The config file.\n    \"\"\"\n    time_window = 15\n    n_electrodes = data.grid.rec.shape[1]\n\n    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples\n    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples\n    chunksize = time_window * data.grid.samplerate  # samples\n    nchunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)\n    window_overlap_samples = int(conf.spec.spec_overlap * data.grid.samplerate)\n\n    for chunk_no in range(nchunks):\n        # get start and stop indices for the current chunk\n        # including some overlap to compensate for edge effects\n        # this differs for the first and last chunk\n\n        if chunk_no == 0:\n            idx1 = int(chunk_no * chunksize)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n        elif chunk_no == nchunks - 1:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize)\n        else:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n\n        # idx1 and idx2 now determine the window I cut out of the raw signal\n        # to compute the spectrogram of.\n\n        # compute the time and frequency axes of the spectrogram now that we\n        # include the start and stop indices of the current chunk and thus the\n        # right start and stop time. The `spectrogram` function does not know\n        # about this and would start every time axis at 0.\n        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate\n        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft\n\n        # create a subset from the grid dataset\n        if idx2 > data.grid.rec.shape[0]:\n            idx2 = data.grid.rec.shape[0] - 1\n        chunk = subset(data, idx1, idx2, mode=\"index\")\n\n        # dont plot chunks without chirps\n        if len(chunk.com.chirp.times) == 0:\n            continue\n\n        # compute the spectrogram for each electrode of the current chunk\n        spec = torch.zeros((len(spec_freqs), len(spec_times)))\n        for el in range(n_electrodes):\n            # get the signal for the current electrode\n            sig = chunk.grid.rec[:, el]\n\n            # compute the spectrogram for the current electrode\n            chunk_spec, _, _ = spectrogram(\n                data=sig.copy(),\n                samplingrate=data.grid.samplerate,\n                nfft=nfft,\n                hop_length=hop_len,\n            )\n\n            # sum spectrogram over all electrodes\n            if el == 0:\n                spec = chunk_spec\n            else:\n                spec += chunk_spec\n\n        # normalize spectrogram by the number of electrodes\n        spec /= n_electrodes\n\n        # convert the spectrogram to dB\n        spec = decibel(spec)\n        spec = spec.detach().cpu().numpy()\n\n        # Set y limits\n        flims = (\n            np.min(data.track.freqs) - 200,\n            np.max(data.track.freqs) + 700,\n        )\n        spec = spec[(spec_freqs >= flims[0]) & (spec_freqs <= flims[1]), :]\n        spec_freqs = spec_freqs[\n            (spec_freqs >= flims[0]) & (spec_freqs <= flims[1])\n        ]\n\n        # Extract the bounding boxes for the current chunk\n        chunk_t1 = idx1 / data.grid.samplerate\n        chunk_t2 = idx2 / data.grid.samplerate\n        chunk_df = chirp_df[\n            (chirp_df[\"t1\"] >= chunk_t1) & (chirp_df[\"t2\"] <= chunk_t2)\n        ]\n\n        # get t1, t2, f1, f2 from chunk_df\n        bboxes = chunk_df[[\"score\", \"t1\", \"f1\", \"t2\", \"f2\"]].to_numpy()\n\n        # get chirp times and chirp ids\n        chirp_times = chunk_df[\"envelope_trough_time\"]\n        chirp_ids = chunk_df[\"assigned_track\"]\n\n        _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)\n\n        # plot bounding boxes\n        ax.imshow(\n            spec,\n            aspect=\"auto\",\n            origin=\"lower\",\n            interpolation=\"gaussian\",\n            extent=[\n                spec_times[0],\n                spec_times[-1],\n                spec_freqs[0],\n                spec_freqs[-1],\n            ],\n            cmap=\"magma\",\n            vmin=-80,\n            vmax=-45,\n        )\n        for bbox in bboxes:\n            ax.add_patch(\n                Rectangle(\n                    (bbox[1], bbox[2]),\n                    bbox[3] - bbox[1],\n                    bbox[4] - bbox[2],\n                    fill=False,\n                    color=\"gray\",\n                    linewidth=1,\n                    label=\"faster-R-CNN predictions\",\n                ),\n            )\n            ax.text(\n                bbox[1],\n                bbox[4] + 15,\n                f\"{bbox[0]:.2f}\",\n                color=\"gray\",\n                fontsize=10,\n                verticalalignment=\"bottom\",\n                horizontalalignment=\"left\",\n                rotation=90,\n            )\n\n        # plot chirp times and frequency traces\n        for track_id in np.unique(data.track.idents):\n            ctimes = chirp_times[chirp_ids == track_id]\n\n            freqs = data.track.freqs[data.track.idents == track_id]\n            times = data.track.times[\n                data.track.indices[data.track.idents == track_id]\n            ]\n            freqs = freqs[\n                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)\n            ]\n            times = times[\n                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)\n            ]\n\n            # get freqs where times are closest to ctimes\n            cfreqs = np.zeros_like(ctimes)\n            for i, ctime in enumerate(ctimes):\n                try:\n                    indx = np.argmin(np.abs(times - ctime))\n                    cfreqs[i] = freqs[indx]\n                except ValueError:\n                    msg = (\n                        \"Failed to find track time closest to chirp time \"\n                        f\"in chunk {chunk_no}, check the plots.\"\n                    )\n                    prog.console.log(msg)\n\n            if len(times) != 0:\n                ax.plot(\n                    times,\n                    freqs,\n                    lw=2,\n                    color=\"black\",\n                    label=\"Frequency traces\",\n                )\n\n            ax.scatter(\n                ctimes,\n                cfreqs,\n                marker=\"o\",\n                lw=1,\n                facecolor=\"white\",\n                edgecolor=\"black\",\n                s=25,\n                zorder=10,\n                label=\"Chirp assignments\",\n            )\n\n        ax.set_ylim(flims[0] + 5, flims[1] - 5)\n        ax.set_xlim([spec_times[0], spec_times[-1]])\n        ax.set_xlabel(\"Time [s]\", fontsize=12)\n        ax.set_ylabel(\"Frequency [Hz]\", fontsize=12)\n\n        handles, labels = plt.gca().get_legend_handles_labels()\n        by_label = dict(zip(labels, handles))\n        plt.legend(\n            by_label.values(),\n            by_label.keys(),\n            bbox_to_anchor=(0.5, 1.02),\n            loc=\"lower center\",\n            mode=\"None\",\n            borderaxespad=0,\n            ncol=3,\n            fancybox=False,\n            framealpha=0,\n        )\n\n        savepath = data.path / \"chirpdetections\"\n        savepath.mkdir(exist_ok=True)\n        plt.savefig(\n            savepath / f\"cpd_{chunk_no}.png\",\n            dpi=300,\n            bbox_inches=\"tight\",\n        )\n\n        plt.close()\n        plt.clf()\n        plt.cla()\n        plt.close(\"all\")\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections_cli","title":"plot_detections_cli(path)","text":"

Plot detections on images.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the dataset folder.

Source code in chirpdetector/plot_detections.py
def plot_detections_cli(path: pathlib.Path) -> None:\n    \"\"\"Plot detections on images.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the dataset folder.\n    \"\"\"\n    conf = load_config(path.parent / \"chirpdetector.toml\")\n    data = load(path)\n    chirp_df = pd.read_csv(path / \"chirpdetector_bboxes.csv\")\n    plot_detections(data, chirp_df, conf)\n
"},{"location":"api/train_model/","title":"train_model","text":""},{"location":"api/train_model/#chirpdetector.train_model--train-the-faster-r-cnn-model","title":"Train the faster-R-CNN model.","text":"

Train and test the neural network specified in the config file.

"},{"location":"api/train_model/#chirpdetector.train_model.plot_epochs","title":"plot_epochs(epoch_train_loss, epoch_val_loss, epoch_avg_train_loss, epoch_avg_val_loss, path)","text":"

Plot the loss for each epoch.

"},{"location":"api/train_model/#chirpdetector.train_model.plot_epochs--parameters","title":"Parameters","text":"
  • epoch_train_loss: list The training loss for each epoch.
  • epoch_val_loss: list The validation loss for each epoch.
  • epoch_avg_train_loss: list The average training loss for each epoch.
  • epoch_avg_val_loss: list The average validation loss for each epoch.
  • path: pathlib.Path The path to save the plot to.
"},{"location":"api/train_model/#chirpdetector.train_model.plot_epochs--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def plot_epochs(\n    epoch_train_loss: list,\n    epoch_val_loss: list,\n    epoch_avg_train_loss: list,\n    epoch_avg_val_loss: list,\n    path: pathlib.Path,\n) -> None:\n    \"\"\"Plot the loss for each epoch.\n\n    Parameters\n    ----------\n    - `epoch_train_loss`: `list`\n        The training loss for each epoch.\n    - `epoch_val_loss`: `list`\n        The validation loss for each epoch.\n    - `epoch_avg_train_loss`: `list`\n        The average training loss for each epoch.\n    - `epoch_avg_val_loss`: `list`\n        The average validation loss for each epoch.\n    - `path`: `pathlib.Path`\n        The path to save the plot to.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    _, ax = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)\n\n    x_train = np.arange(len(epoch_train_loss[0])) + 1\n    x_val = np.arange(len(epoch_val_loss[0])) + len(epoch_train_loss[0]) + 1\n\n    for train_loss, val_loss in zip(epoch_train_loss, epoch_val_loss):\n        ax[0].plot(x_train, train_loss, c=\"tab:blue\", label=\"_\")\n        ax[0].plot(x_val, val_loss, c=\"tab:orange\", label=\"_\")\n        x_train = np.arange(len(epoch_train_loss[0])) + x_val[-1]\n        x_val = np.arange(len(epoch_val_loss[0])) + x_train[-1]\n\n    x_avg = np.arange(len(epoch_avg_train_loss)) + 1\n    ax[1].plot(\n        x_avg,\n        epoch_avg_train_loss,\n        label=\"Training Loss\",\n        c=\"tab:blue\",\n    )\n    ax[1].plot(\n        x_avg,\n        epoch_avg_val_loss,\n        label=\"Validation Loss\",\n        c=\"tab:orange\",\n    )\n\n    ax[0].set_ylabel(\"Loss\")\n    ax[0].set_xlabel(\"Batch\")\n    ax[0].set_ylim(bottom=0)\n    ax[0].set_title(\"Loss per batch\")\n\n    ax[1].set_ylabel(\"Loss\")\n    ax[1].set_xlabel(\"Epoch\")\n    ax[1].legend()\n    ax[1].set_ylim(bottom=0)\n    ax[1].set_title(\"Avg loss per epoch\")\n\n    plt.savefig(path)\n    plt.close()\n
"},{"location":"api/train_model/#chirpdetector.train_model.plot_folds","title":"plot_folds(fold_avg_train_loss, fold_avg_val_loss, path)","text":"

Plot the loss for each fold.

"},{"location":"api/train_model/#chirpdetector.train_model.plot_folds--parameters","title":"Parameters","text":"
  • fold_avg_train_loss: list The average training loss for each fold.
  • fold_avg_val_loss: list The average validation loss for each fold.
  • path: pathlib.Path The path to save the plot to.
"},{"location":"api/train_model/#chirpdetector.train_model.plot_folds--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def plot_folds(\n    fold_avg_train_loss: list,\n    fold_avg_val_loss: list,\n    path: pathlib.Path,\n) -> None:\n    \"\"\"Plot the loss for each fold.\n\n    Parameters\n    ----------\n    - `fold_avg_train_loss`: `list`\n        The average training loss for each fold.\n    - `fold_avg_val_loss`: `list`\n        The average validation loss for each fold.\n    - `path`: `pathlib.Path`\n        The path to save the plot to.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)\n\n    for train_loss, val_loss in zip(fold_avg_train_loss, fold_avg_val_loss):\n        x = np.arange(len(train_loss)) + 1\n        ax.plot(x, train_loss, c=\"tab:blue\", alpha=0.3, label=\"_\")\n        ax.plot(x, val_loss, c=\"tab:orange\", alpha=0.3, label=\"_\")\n\n    avg_train = np.mean(fold_avg_train_loss, axis=0)\n    avg_val = np.mean(fold_avg_val_loss, axis=0)\n    x = np.arange(len(avg_train)) + 1\n    ax.plot(\n        x,\n        avg_train,\n        label=\"Training Loss\",\n        c=\"tab:blue\",\n    )\n    ax.plot(\n        x,\n        avg_val,\n        label=\"Validation Loss\",\n        c=\"tab:orange\",\n    )\n\n    ax.set_ylabel(\"Loss\")\n    ax.set_xlabel(\"Epoch\")\n    ax.legend()\n    ax.set_ylim(bottom=0)\n\n    plt.savefig(path)\n    plt.close()\n
"},{"location":"api/train_model/#chirpdetector.train_model.save_model","title":"save_model(epoch, model, optimizer, path)","text":"

Save the model state dict.

"},{"location":"api/train_model/#chirpdetector.train_model.save_model--parameters","title":"Parameters","text":"
  • epoch: int The current epoch.
  • model: torch.nn.Module The model to save.
  • optimizer: torch.optim.Optimizer The optimizer to save.
  • path: str The path to save the model to.
"},{"location":"api/train_model/#chirpdetector.train_model.save_model--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def save_model(\n    epoch: int,\n    model: torch.nn.Module,\n    optimizer: torch.optim.Optimizer,\n    path: str,\n) -> None:\n    \"\"\"Save the model state dict.\n\n    Parameters\n    ----------\n    - `epoch`: `int`\n        The current epoch.\n    - `model`: `torch.nn.Module`\n        The model to save.\n    - `optimizer`: `torch.optim.Optimizer`\n        The optimizer to save.\n    - `path`: `str`\n        The path to save the model to.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    path = pathlib.Path(path)\n    path.mkdir(parents=True, exist_ok=True)\n    torch.save(\n        {\n            \"epoch\": epoch,\n            \"model_state_dict\": model.state_dict(),\n            \"optimizer_state_dict\": optimizer.state_dict(),\n        },\n        path / \"model.pt\",\n    )\n
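The checkpoint written here is what detect_chirps later loads for inference; a minimal sketch of restoring it, assuming model and optimizer are already constructed and the path models/model.pt is hypothetical:

import torch\n\n# restore a checkpoint written by save_model\ncheckpoint = torch.load(\"models/model.pt\", map_location=\"cpu\")\nmodel.load_state_dict(checkpoint[\"model_state_dict\"])\noptimizer.load_state_dict(checkpoint[\"optimizer_state_dict\"])\nstart_epoch = checkpoint[\"epoch\"] + 1  # resume after the saved epoch\n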
"},{"location":"api/train_model/#chirpdetector.train_model.train","title":"train(config, mode='pretrain')","text":"

Train the model.

"},{"location":"api/train_model/#chirpdetector.train_model.train--parameters","title":"Parameters","text":"
  • config: Config The config file.
  • mode: str The mode to train in. Either pretrain or finetune.
"},{"location":"api/train_model/#chirpdetector.train_model.train--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def train(config: Config, mode: str = \"pretrain\") -> None:\n    \"\"\"Train the model.\n\n    Parameters\n    ----------\n    - `config`: `Config`\n        The config file.\n    - `mode`: `str`\n        The mode to train in. Either `pretrain` or `finetune`.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # Load a pretrained model from pytorch if in pretrain mode,\n    # otherwise open an already trained model from the\n    # model state dict.\n    assert mode in [\"pretrain\", \"finetune\"]\n    if mode == \"pretrain\":\n        assert config.train.datapath is not None\n        datapath = config.train.datapath\n    elif mode == \"finetune\":\n        assert config.finetune.datapath is not None\n        datapath = config.finetune.datapath\n\n    # Check if the path to the data actually exists\n    if not pathlib.Path(datapath).exists():\n        raise FileNotFoundError(f\"Path {datapath} does not exist.\")\n\n    # Initialize the logger and progress bar, make the logger global\n    global logger\n    logger = make_logger(\n        __name__,\n        pathlib.Path(config.path).parent / \"chirpdetector.log\",\n    )\n\n    # Get the device (e.g. GPU or CPU)\n    device = get_device()\n\n    # Print information about starting training\n    progress.console.rule(\"Starting training\")\n    msg = (\n        f\"Device: {device}, Config: {config.path},\"\n        f\" Mode: {mode}, Data: {datapath}\"\n    )\n    progress.console.log(msg)\n    logger.info(msg)\n\n    # initialize the dataset\n    data = CustomDataset(\n        path=datapath,\n        classes=config.hyper.classes,\n    )\n\n    # initialize the k-fold cross-validation\n    splits = KFold(n_splits=config.hyper.kfolds, shuffle=True, random_state=42)\n\n    # initialize the best validation loss to a large number\n    best_val_loss = float(\"inf\")\n\n    # iterate over the folds for k-fold cross-validation\n    with progress:\n        # save loss across all epochs and folds\n        fold_train_loss = []\n        fold_val_loss = []\n        fold_avg_train_loss = []\n        fold_avg_val_loss = []\n\n        # Add kfolds progress bar that runs alongside the epochs progress bar\n        task_folds = progress.add_task(\n            f\"[blue]{config.hyper.kfolds}-Fold Crossvalidation\",\n            total=config.hyper.kfolds,\n        )\n\n        # iterate over the folds\n        for fold, (train_idx, val_idx) in enumerate(\n            splits.split(np.arange(len(data))),\n        ):\n            # initialize the model and optimizer\n            model = load_fasterrcnn(num_classes=len(config.hyper.classes)).to(\n                device,\n            )\n\n            # If the mode is finetune, load the model state dict from\n            # previous training\n            if mode == \"finetune\":\n                modelpath = pathlib.Path(config.hyper.modelpath) / \"model.pt\"\n                checkpoint = torch.load(modelpath, map_location=device)\n                model.load_state_dict(checkpoint[\"model_state_dict\"])\n\n            # Initialize stochastic gradient descent optimizer\n            params = [p for p in model.parameters() if p.requires_grad]\n            optimizer = torch.optim.SGD(\n                params,\n                lr=config.hyper.learning_rate,\n                momentum=config.hyper.momentum,\n                weight_decay=config.hyper.weight_decay,\n            )\n\n            # make train and validation dataloaders for the current fold\n            train_data = torch.utils.data.Subset(data, train_idx)\n            val_data = torch.utils.data.Subset(data, val_idx)\n\n            # this is for training\n            train_loader = DataLoader(\n                train_data,\n                batch_size=config.hyper.batch_size,\n                shuffle=True,\n                num_workers=config.hyper.num_workers,\n                collate_fn=collate_fn,\n            )\n\n            # this is only for validation\n            val_loader = DataLoader(\n                val_data,\n                batch_size=config.hyper.batch_size,\n                shuffle=True,\n                num_workers=config.hyper.num_workers,\n                collate_fn=collate_fn,\n            )\n\n            # save loss across all epochs\n            epoch_avg_train_loss = []\n            epoch_avg_val_loss = []\n            epoch_train_loss = []\n            epoch_val_loss = []\n\n            # train the model for the specified number of epochs\n            task_epochs = progress.add_task(\n                f\"{config.hyper.num_epochs} Epochs for fold k={fold + 1}\",\n                total=config.hyper.num_epochs,\n            )\n\n            # iterate across n epochs\n            for epoch in range(config.hyper.num_epochs):\n                # print information about the current epoch\n                msg = (\n                    f\"Training epoch {epoch + 1} of {config.hyper.num_epochs} \"\n                    f\"for fold {fold + 1} of {config.hyper.kfolds}\"\n                )\n                progress.console.log(msg)\n                logger.info(msg)\n\n                # train the epoch\n                train_loss = train_epoch(\n                    dataloader=train_loader,\n                    device=device,\n                    model=model,\n                    optimizer=optimizer,\n                )\n\n                # validate the epoch\n                _, val_loss = val_epoch(\n                    dataloader=val_loader,\n                    device=device,\n                    model=model,\n                )\n\n                # save losses for this epoch\n                epoch_train_loss.append(train_loss)\n                epoch_val_loss.append(val_loss)\n\n                # save the median loss for this epoch\n                epoch_avg_train_loss.append(np.median(train_loss))\n                epoch_avg_val_loss.append(np.median(val_loss))\n\n                # save the model if it is the best so far\n                if np.mean(val_loss) < best_val_loss:\n                    best_val_loss = sum(val_loss) / len(val_loss)\n\n                    msg = (\n                        f\"New best validation loss: {best_val_loss:.4f}, \"\n                        \"saving model...\"\n                    )\n                    progress.console.log(msg)\n                    logger.info(msg)\n\n                    save_model(\n                        epoch=epoch,\n                        model=model,\n                        optimizer=optimizer,\n                        path=config.hyper.modelpath,\n                    )\n\n                # plot the losses for this epoch\n                plot_epochs(\n                    epoch_train_loss=epoch_train_loss,\n                    epoch_val_loss=epoch_val_loss,\n                    epoch_avg_train_loss=epoch_avg_train_loss,\n                    epoch_avg_val_loss=epoch_avg_val_loss,\n                    path=pathlib.Path(config.hyper.modelpath)\n                    / f\"fold{fold + 1}.png\",\n                )\n\n                # update the progress bar for the epochs\n                progress.update(task_epochs, advance=1)\n\n            # update the progress bar for the epochs and hide it if done\n            progress.update(task_epochs, visible=False)\n\n            # save the losses for this fold\n            fold_train_loss.append(epoch_train_loss)\n            fold_val_loss.append(epoch_val_loss)\n            fold_avg_train_loss.append(epoch_avg_train_loss)\n            fold_avg_val_loss.append(epoch_avg_val_loss)\n\n            plot_folds(\n                fold_avg_train_loss=fold_avg_train_loss,\n                fold_avg_val_loss=fold_avg_val_loss,\n                path=pathlib.Path(config.hyper.modelpath) / \"losses.png\",\n            )\n\n            # update the progress bar for the folds\n            progress.update(task_folds, advance=1)\n\n        # update the progress bar for the folds and hide it if done\n        progress.update(task_folds, visible=False)\n\n        # print information about the training\n        msg = (\n            \"Average validation loss of last epoch across folds: \"\n            f\"{np.mean(fold_val_loss):.4f}\"\n        )\n        progress.console.log(msg)\n        logger.info(msg)\n        progress.console.rule(\"[bold blue]Finished training\")\n
"},{"location":"api/train_model/#chirpdetector.train_model.train_cli","title":"train_cli(config_path, mode)","text":"

Train the model from the command line.

"},{"location":"api/train_model/#chirpdetector.train_model.train_cli--parameters","title":"Parameters","text":"
  • config_path: pathlib.Path The path to the config file.
  • mode: str The mode to train in. Either pretrain or finetune.
"},{"location":"api/train_model/#chirpdetector.train_model.train_cli--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def train_cli(config_path: pathlib.Path, mode: str) -> None:\n    \"\"\"Train the model from the command line.\n\n    Parameters\n    ----------\n    - `config_path`: `pathlib.Path`\n        The path to the config file.\n    - `mode`: `str`\n        The mode to train in. Either `pretrain` or `finetune`.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    config = load_config(config_path)\n    train(config, mode=mode)\n
"},{"location":"api/train_model/#chirpdetector.train_model.train_epoch","title":"train_epoch(dataloader, device, model, optimizer)","text":"

Train the model for one epoch, i.e. one full pass over the training dataloader.
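
A minimal usage sketch (make_model, train_data, and collate_fn are assumed stand-ins for this package's actual model factory, dataset, and collate function):

# Minimal sketch: wire up a model, optimizer and dataloader, then run one epoch.
# make_model, train_data and collate_fn are assumed placeholders.
import numpy as np
import torch
from torch.utils.data import DataLoader

from chirpdetector.train_model import train_epoch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = make_model().to(device)  # assumed model factory
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
train_loader = DataLoader(train_data, batch_size=8, shuffle=True, collate_fn=collate_fn)

model.train()
batch_losses = train_epoch(
    dataloader=train_loader,
    device=device,
    model=model,
    optimizer=optimizer,
)
print(f"median batch loss: {np.median(batch_losses):.4f}")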

"},{"location":"api/train_model/#chirpdetector.train_model.train_epoch--parameters","title":"Parameters","text":"
  • dataloader: DataLoader The dataloader for the training data.
  • device: torch.device The device to train on.
  • model: torch.nn.Module The model to train.
  • optimizer: torch.optim.Optimizer The optimizer to use.
"},{"location":"api/train_model/#chirpdetector.train_model.train_epoch--returns","title":"Returns","text":"
  • train_loss: List The training loss for each batch.
Source code in chirpdetector/train_model.py
def train_epoch(\n    dataloader: DataLoader,\n    device: torch.device,\n    model: torch.nn.Module,\n    optimizer: torch.optim.Optimizer,\n) -> List:\n    \"\"\"Train the model for one epoch.\n\n    Parameters\n    ----------\n    - `dataloader`: `DataLoader`\n        The dataloader for the training data.\n    - `device`: `torch.device`\n        The device to train on.\n    - `model`: `torch.nn.Module`\n        The model to train.\n    - `optimizer`: `torch.optim.Optimizer`\n        The optimizer to use.\n\n    Returns\n    -------\n    - `train_loss`: `List`\n        The training loss for each batch.\n    \"\"\"\n    train_loss = []\n\n    for samples, targets in dataloader:\n        images = list(sample.to(device) for sample in samples)\n        targets = [\n            {k: v.to(device) for k, v in t.items() if k != \"image_name\"}\n            for t in targets\n        ]\n\n        loss_dict = model(images, targets)\n        losses = sum(loss for loss in loss_dict.values())\n        train_loss.append(losses.item())\n\n        optimizer.zero_grad()\n        losses.backward()\n        optimizer.step()\n\n    return train_loss\n
"},{"location":"api/train_model/#chirpdetector.train_model.val_epoch","title":"val_epoch(dataloader, device, model)","text":"

Validate the model for one epoch.

"},{"location":"api/train_model/#chirpdetector.train_model.val_epoch--parameters","title":"Parameters","text":"
  • dataloader: DataLoader The dataloader for the validation data.
  • device: torch.device The device to run validation on.
  • model: torch.nn.Module The model to validate.
"},{"location":"api/train_model/#chirpdetector.train_model.val_epoch--returns","title":"Returns","text":"
  • loss_dict: dict The loss dictionary of the last batch.
  • val_loss: List The validation loss for each batch.
Source code in chirpdetector/train_model.py
def val_epoch(\n    dataloader: DataLoader,\n    device: torch.device,\n    model: torch.nn.Module,\n) -> Tuple[Dict, List]:\n    \"\"\"Validate the model for one epoch.\n\n    Parameters\n    ----------\n    - \`dataloader\`: \`DataLoader\`\n        The dataloader for the validation data.\n    - \`device\`: \`torch.device\`\n        The device to validate on.\n    - \`model\`: \`torch.nn.Module\`\n        The model to validate.\n\n    Returns\n    -------\n    - \`loss_dict\`: \`dict\`\n        The loss dictionary of the last batch.\n    - \`val_loss\`: \`List\`\n        The validation loss for each batch.\n    \"\"\"\n    val_loss = []\n    for samples, targets in dataloader:\n        images = list(sample.to(device) for sample in samples)\n        targets = [\n            {k: v.to(device) for k, v in t.items() if k != \"image_name\"}\n            for t in targets\n        ]\n\n        with torch.inference_mode():\n            loss_dict = model(images, targets)\n\n        losses = sum(loss for loss in loss_dict.values())\n        val_loss.append(losses.item())\n\n    return loss_dict, val_loss\n
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"

Chirpdetector

Detect communication signals of electric fish using deep neural networks \ud83d\udc1f\u26a1\ud83e\udde0 This project is still a work in progress and is expected to be released in spring 2024.

Why? \ud83e\udd28

Chirps are by far the most thoroughly researched communication signal of electric fish, and probably of all fish. But detecting chirps becomes hard as soon as more than one fish is recorded. As a result, most of the research to date analyzes this signal in isolated individuals, which severely limits what we can learn about chirping as a natural communication behavior.

To tackle this issue, this package provides a simple toolbox to detect the chirps of multiple fish on spectrograms. This enables, for the first time, truly quantitative analyses of chirping between freely behaving fish.

"},{"location":"assingment/","title":"Assingment","text":"

Wow, such empty

"},{"location":"contributing/","title":"Contributing","text":"

We are thrilled to have you join in making this project even better. Please feel free to browse through the resources and guidelines provided here, and let us know if there is anything specific you would like to contribute or discuss.

If you would like to help develop this package, you can skim through the to-do list below as well as the contribution guidelines. Just fork the project, add your code, and send a pull request. We are always happy to get some help!

If you encounter an issue using the chirpdetector, feel free to open an issue here.

"},{"location":"contributing/#contributors-guidelines","title":"Contributors guidelines","text":"

I try my best to adhere to good coding practices and to catch up on writing tests for this package. As I am currently the only one working on it, here is some documentation of the development packages I use:

  • pre-commit for pre-commit hooks
  • pytest and pytest-coverage for unit tests
  • ruff for linting and formatting
  • pyright for static type checking

Before every commit, a pre-commit hook runs all these packages on the code base and rejects the commit if errors are raised. If you want to contribute, please make sure that your code is properly formatted and run the tests before issuing a pull request. The formatting guidelines should be automatically picked up by your ruff installation from the pyproject.toml file.

"},{"location":"contributing/#to-do","title":"To Do","text":"

After the first release, this section will be removed and tasks will be organized as GitHub issues. Until then, if you fixed something, please check it off on this list before opening a pull request.

  • Refactor train, detect, and convert into much smaller functions. Move accessory functions to utils
  • Move hardcoded params from assignment algo into config.toml
  • Resolve all pylint, mypy, and ruff errors and warnings
  • Fix make test; it currently fails after a ruff run
  • Build a GitHub Actions CI/CD pipeline for codecov etc.
  • Move the dataconverter from gridtools to chirpdetector
  • Extend the dataconverter to just output the spectrograms so that hand-labelling can be done in a separate step
  • Add a main script so that the CLI is chirpdetector <task> --<flag> <args>
  • Improve simulation of chirps to include more realistic noise, undershoot and maybe even phasic-tonic evolution of the frequency of the big chirps
  • make the copyconfig script more
  • start writing the chirp assignment algorithm
  • Move all the pretty-printing and logging constructors to a separate module and build a unified console object so that saving logs to file is easier, and log to file as well
  • Split the messy training loop into functions
  • Add label-studio
  • Supply scripts to convert completely unannotated or partially annotated data to the label-studio format to make manual labeling easier
  • Make possible to output detections as a yolo dataset
  • Look up how to convert a yolo dataset to a label-studio input so we can label pre-annotated data, facilitating a full human-in-the-loop approach
  • Add augmentation transforms to the dataset class and add augmentations to the simulation in gridtools. Note on this: unnecessary, as we are now using real data.
  • Change bbox to actual yolo format, not the weird one I made up (which is x1, y1, x2, y2 instead of centerx, centery, w, h). This is why the label-studio export is not working.
  • Port the CLI to click; it works better
  • Try clustering the detected chirp windows on a spectrogram; could be interesting
"},{"location":"dataset/","title":"Creating a dataset","text":"

Wow, such empty

"},{"location":"demo/","title":"Detecting chirps with a few terminal commands","text":"

Once everything is set up correctly, detecting chirps is a breeze. The terminal utility can be invoked as chirpdetector or simply cpd.

Simply run

cpd detect --path \"/path/to/dataset\"\n
And the bounding boxes will be computed and saved to a .csv file. Then run
cpd assign --path \"/path/to/dataset\"\n
to assign each detected chirp to the fundamental frequency of a fish. The results will be added to the .csv file in the dataset. To check if this went well, you can run
cpd plot --path \"/path/to/dataset\"\n
And the spectrograms, bounding boxes, and track assignments of all detected chirps will be plotted and saved as .png images into a subfolder of your dataset.

The result will look something like this:

15 seconds of a recording containing two chirping fish, with bounding boxes around chirps and dots indicating the frequency to which they are assigned.

"},{"location":"detection/","title":"Detection","text":"

Wow, such empty

"},{"location":"how_it_works/","title":"How it works","text":"

How? \ud83e\udd14

Chirps manifest as excursions in the electric organ discharge frequency. To discern the individual chirps in a recording featuring multiple fish separated solely by frequency, we delve into the frequency domain. This involves the computation of spectrograms, ensuring ample temporal resolution for chirp distinction and sufficient frequency resolution for fish differentiation. The outcome is a series of images.
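
For orientation, here is a minimal sketch of that trade-off using plain scipy; the parameter values are illustrative, not the package's defaults, and chirpdetector computes its spectrograms with its own helpers:

import numpy as np
from scipy.signal import spectrogram  # illustrative; the package uses its own helpers

samplerate = 20_000   # Hz, illustrative
freq_res = 5.0        # Hz, fine enough to separate fish by frequency
nfft = int(samplerate / freq_res)   # frequency resolution = samplerate / nfft
hop = int(nfft * (1 - 0.9))         # 90 % window overlap -> fine temporal resolution

freqs, times, sxx = spectrogram(
    np.random.randn(10 * samplerate),   # stand-in for one electrode's signal
    fs=samplerate,
    nperseg=nfft,
    noverlap=nfft - hop,
)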

This framework facilitates the application of potent computer vision algorithms, such as Faster R-CNN, for the detection of objects like chirps within these 'images.' Each chirp detection yields a bounding box, a motif echoed in the package's logo.
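
For context, a generic detector of this kind can be instantiated in a few lines with torchvision; this is a sketch assuming torchvision >= 0.13, not the exact configuration used by this package:

import torch
import torchvision

# two classes: background and chirp (weights API assumes torchvision >= 0.13)
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    weights=None, num_classes=2
)
model.eval()

# a batch with one spectrogram "image" (3 channels, illustrative size)
images = [torch.rand(3, 512, 512)]
with torch.inference_mode():
    detections = model(images)

# each detection carries bounding boxes, class labels and confidence scores
print(detections[0]["boxes"].shape)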

Post-processing steps refine the results, assigning chirp times to the fundamental frequencies of each fish captured in the recording.

Still not sold? Check out the demo \u00bb

"},{"location":"installation/","title":"Installation","text":"

Wow, such empty

"},{"location":"labeling/","title":"Labeling a dataset","text":"

Wow, such empty

"},{"location":"setup/","title":"Setup","text":"

Wow, such empty

"},{"location":"training/","title":"Training","text":"

Wow, such empty

"},{"location":"visualization/","title":"Visualization","text":"

Wow, such empty

"},{"location":"yolo-helpers/","title":"Helper commands","text":"

Wow, such empty

"},{"location":"api/assign_chirps/","title":"assign_chirps","text":"

Assign chirps detected on a spectrogram to wavetracker tracks.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_chirps","title":"assign_chirps(assign_data, chirp_df, data)","text":"

Assign chirps to wavetracker tracks.

This function uses the extracted envelope troughs to assign chirps to tracks. It computes a cost function that is high when the trough prominence is high and the distance to the chirp center is low. For each chirp, the track with the highest cost function value is chosen.
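
A tiny worked example of this selection with illustrative numbers:

import numpy as np

# candidate troughs for a single chirp on two tracks (illustrative values)
prominences = np.array([0.02, 0.05])   # trough prominence per track
distances = np.array([40.0, 120.0])    # samples from trough to chirp center

cost = prominences / distances**2
# -> [1.25e-05, 3.47e-06]: the first track wins despite its lower prominence,
# because its trough sits much closer to the detected chirp center
chosen = np.argmax(cost)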

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_chirps--parameters","title":"Parameters","text":"
  • assign_data: dict Dictionary containing the data needed for assignment
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • data: gridtools.datasets.Dataset Dataset object containing the data
Source code in chirpdetector/assign_chirps.py
def assign_chirps(\n    assign_data: Dict[str, np.ndarray],\n    chirp_df: pd.DataFrame,\n    data: Dataset,\n) -> None:\n    \"\"\"Assign chirps to wavetracker tracks.\n\n    This function uses the extracted envelope troughs to assign chirps to\n    tracks. It computes a cost function that is high when the trough prominence\n    is high and the distance to the chirp center is low. For each chirp, the\n    track with the highest cost function value is chosen.\n\n    Parameters\n    ----------\n    - `assign_data`: `dict`\n        Dictionary containing the data needed for assignment\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    \"\"\"\n    # extract data from assign_data\n    peak_prominences = assign_data[\"proms\"]\n    peak_distances = assign_data[\"peaks\"]\n    peak_times = assign_data[\"ptimes\"]\n    chirp_indices = assign_data[\"cindices\"]\n    track_ids = assign_data[\"track_ids\"]\n\n    # compute cost function.\n    # this function is high when the trough prominence is high\n    # (-> chirp with high contrast)\n    # and when the trough is close to the chirp center as detected by the\n    # r-cnn (-> detected chirp is close to the actual chirp)\n    cost = peak_prominences / peak_distances**2\n\n    # set cost to zero for cases where no peak was found\n    cost[np.isnan(cost)] = 0\n\n    # for each chirp, choose the track where the cost is highest\n    # TODO: to avoid confusion make a cost function where high is good and low\n    # is bad. this is more like a \"gain function\"\n    chosen_tracks = []\n    chosen_track_times = []\n    for idx in np.unique(chirp_indices):\n        candidate_tracks = track_ids[chirp_indices == idx]\n        candidate_costs = cost[chirp_indices == idx]\n        candidate_times = peak_times[chirp_indices == idx]\n        chosen_tracks.append(candidate_tracks[np.argmax(candidate_costs)])\n        chosen_track_times.append(candidate_times[np.argmax(candidate_costs)])\n\n    # store chosen tracks in chirp_df\n    chirp_df[\"assigned_track\"] = chosen_tracks\n\n    # store chirp time estimated from envelope trough in chirp_df\n    chirp_df[\"envelope_trough_time\"] = chosen_track_times\n\n    # save chirp_df\n    chirp_df.to_csv(data.path / \"chirpdetector_bboxes.csv\", index=False)\n\n    # save old format:\n    np.save(data.path / \"chirp_ids_rcnn.npy\", chosen_tracks)\n    np.save(data.path / \"chirp_times_rcnn.npy\", chosen_track_times)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_cli","title":"assign_cli(path)","text":"

Assign chirps to wavetracker tracks.

This is the command line interface for the assign_chirps function.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.assign_cli--parameters","title":"Parameters","text":"
  • path: pathlib.Path Path to the directory containing the chirpdetector.toml file
Source code in chirpdetector/assign_chirps.py
def assign_cli(path: pathlib.Path) -> None:\n    \"\"\"Assign chirps to wavetracker tracks.\n\n    this is the command line interface for the assign_chirps function.\n\n    Parameters\n    ----------\n    - `path`: `pathlib.path`\n        path to the directory containing the chirpdetector.toml file\n    \"\"\"\n    if not path.is_dir():\n        msg = f\"{path} is not a directory\"\n        raise ValueError(msg)\n\n    if not (path / \"chirpdetector.toml\").is_file():\n        msg = f\"{path} does not contain a chirpdetector.toml file\"\n        raise ValueError(msg)\n\n    logger = make_logger(__name__, path / \"chirpdetector.log\")\n    # config = load_config(path / \"chirpdetector.toml\")\n    recs = list(path.iterdir())\n    recs = [r for r in recs if r.is_dir()]\n    # recs = [path / \"subset_2020-03-18-10_34_t0_9320.0_t1_9920.0\"]\n\n    msg = f\"found {len(recs)} recordings in {path}, starting assignment\"\n    prog.console.log(msg)\n    logger.info(msg)\n\n    prog.console.rule(\"starting assignment\")\n    with prog:\n        task = prog.add_task(\"assigning chirps\", total=len(recs))\n        for rec in recs:\n            msg = f\"assigning chirps in {rec}\"\n            logger.info(msg)\n            prog.console.log(msg)\n\n            data = load(rec)\n            chirp_df = pd.read_csv(rec / \"chirpdetector_bboxes.csv\")\n            assign_data, chirp_df, data = extract_assignment_data(\n                data, chirp_df\n            )\n            assign_chirps(assign_data, chirp_df, data)\n            prog.update(task, advance=1)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.bbox_to_chirptimes","title":"bbox_to_chirptimes(chirp_df)","text":"

Convert chirp bboxes to chirp times.
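
The chirp time is simply the center of the bounding box on the time axis, i.e. the mean of t1 and t2:

import numpy as np
import pandas as pd

chirp_df = pd.DataFrame({"t1": [10.0, 42.0], "t2": [10.2, 42.4]})
chirp_df["chirp_times"] = np.mean(chirp_df[["t1", "t2"]], axis=1)
# -> chirp times 10.1 and 42.2 seconds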

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.bbox_to_chirptimes--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe dataframe containing the chirp bboxes
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.bbox_to_chirptimes--returns","title":"Returns","text":"
  • chirp_df: pd.dataframe dataframe containing the chirp bboxes with chirp times.
Source code in chirpdetector/assign_chirps.py
def bbox_to_chirptimes(chirp_df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Convert chirp bboxes to chirp times.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        dataframe containing the chirp bboxes\n\n    Returns\n    -------\n    - `chirp_df`: `pd.dataframe`\n        dataframe containing the chirp bboxes with chirp times.\n    \"\"\"\n    chirp_df[\"chirp_times\"] = np.mean(chirp_df[[\"t1\", \"t2\"]], axis=1)\n\n    return chirp_df\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.clean_bboxes","title":"clean_bboxes(data, chirp_df)","text":"

Clean the chirp bboxes.

This is a collection of filters that remove bboxes that either overlap, are out of range or otherwise do not make sense.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.clean_bboxes--parameters","title":"Parameters","text":"
  • data: gridtools.datasets.Dataset Dataset object containing the data
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.clean_bboxes--returns","title":"Returns","text":"
  • chirp_df_tf: pd.dataframe Dataframe containing the chirp bboxes that overlap with the range
Source code in chirpdetector/assign_chirps.py
def clean_bboxes(data: Dataset, chirp_df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Clean the chirp bboxes.\n\n    This is a collection of filters that remove bboxes that\n    either overlap, are out of range or otherwise do not make sense.\n\n    Parameters\n    ----------\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n\n    Returns\n    -------\n    - `chirp_df_tf`: `pd.dataframe`\n        Dataframe containing the chirp bboxes that overlap with the range\n    \"\"\"\n    # non-max suppression: remove all chirp bboxes that overlap with\n    # another more than threshold\n    pick_indices = non_max_suppression_fast(chirp_df, 0.5)\n    chirp_df_nms = chirp_df.loc[pick_indices, :]\n\n    # track filter: remove all chirp bboxes that do not overlap with\n    # the range spanned by the min and max of the wavetracker frequency tracks\n    minf = np.min(data.track.freqs).astype(float)\n    maxf = np.max(data.track.freqs).astype(float)\n    # maybe add some more cleaning here, such\n    # as removing chirps that are too short or too long\n    return track_filter(chirp_df_nms, minf, maxf)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.cleanup","title":"cleanup(chirp_df, data)","text":"

Clean the chirp bboxes.

This is a collection of filters that remove bboxes that either overlap, are out of range or otherwise do not make sense.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.cleanup--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • data: gridtools.datasets.Dataset Dataset object containing the data
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.cleanup--returns","title":"Returns","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes that overlap with the range
Source code in chirpdetector/assign_chirps.py
def cleanup(chirp_df: pd.DataFrame, data: Dataset) -> pd.DataFrame:\n    \"\"\"Clean the chirp bboxes.\n\n    This is a collection of filters that remove bboxes that\n    either overlap, are out of range or otherwise do not make sense.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n\n    Returns\n    -------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes that overlap with the range\n    \"\"\"\n    # first clean the bboxes\n    chirp_df = clean_bboxes(data, chirp_df)\n    # sort chirps in df by time, i.e. t1\n    chirp_df = chirp_df.sort_values(by=\"t1\", ascending=True)\n    # compute chirp times, i.e. center of the bbox x axis\n    return bbox_to_chirptimes(chirp_df)\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_assignment_data","title":"extract_assignment_data(data, chirp_df)","text":"

Get envelope troughs to determine chirp assignment.

This algorithm assigns chirps to wavetracker tracks in a series of steps:
  1. Clean the chirp bboxes.
  2. For each fish track, filter the signal on the best electrode.
  3. Find troughs in the envelope of the filtered signal.
  4. Compute the prominence of the trough and the distance to the chirp center.
  5. Compute a cost function that is high when the trough prominence is high and the distance to the chirp center is low.
  6. Compare the value of the cost function for each track and choose the track with the highest cost function value.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_assignment_data--parameters","title":"Parameters","text":"
  • data: dataset Dataset object containing the data
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
Source code in chirpdetector/assign_chirps.py
def extract_assignment_data(\n    data: Dataset, chirp_df: pd.DataFrame\n) -> Tuple[Dict[str, np.ndarray], pd.DataFrame, Dataset]:\n    \"\"\"Get envelope troughs to determine chirp assignment.\n\n    This algorigthm assigns chirps to wavetracker tracks by a series of steps:\n    1. clean the chirp bboxes\n    2. for each fish track, filter the signal on the best electrode\n    3. find troughs in the envelope of the filtered signal\n    4. compute the prominence of the trough and the distance to the chirp\n    center\n    5. compute a cost function that is high when the trough prominence is high\n    and the distance to the chirp center is low\n    6. compare the value of the cost function for each track and choose the\n    track with the highest cost function value\n\n    Parameters\n    ----------\n    - `data`: `dataset`\n        Dataset object containing the data\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    \"\"\"\n    # clean the chirp bboxes\n    chirp_df = cleanup(chirp_df, data)\n\n    # now loop over all tracks and assign chirps to tracks\n    chirp_indices = []  # index of chirp in chirp_df\n    track_ids = []  # id of track / fish\n    peak_prominences = []  # prominence of trough in envelope\n    peak_distances = []  # distance of trough to chirp center\n    peak_times = []  # time of trough in envelope, should be close to chirp\n\n    for fish_id in data.track.ids:\n        # get chirps, times and freqs and powers for this track\n        chirps = np.array(chirp_df.chirp_times.values)\n        time = data.track.times[\n            data.track.indices[data.track.idents == fish_id]\n        ]\n        freq = data.track.freqs[data.track.idents == fish_id]\n        powers = data.track.powers[data.track.idents == fish_id, :]\n\n        if len(time) == 0:\n            continue # skip if no track is found\n\n        for idx, chirp in enumerate(chirps):\n            # find the closest time, freq and power to the chirp time\n            closest_idx = np.argmin(np.abs(time - chirp))\n            best_electrode = np.argmax(powers[closest_idx, :]).astype(int)\n            second_best_electrode = np.argsort(powers[closest_idx, :])[-2]\n            best_freq = freq[closest_idx]\n\n            # check if chirp overlaps with track\n            f1 = chirp_df.f1.to_numpy()[idx]\n            f2 = chirp_df.f2.to_numpy()[idx]\n            f2 = f1 + (f2 - f1) * 0.5 # range is the lower half of the bbox\n            if (f1 > best_freq) or (f2 < best_freq):\n                peak_distances.append(np.nan)\n                peak_prominences.append(np.nan)\n                peak_times.append(np.nan)\n                chirp_indices.append(idx)\n                track_ids.append(fish_id)\n                continue\n\n            # determine start and stop index of time window on raw data\n            # using bounding box start and stop times of chirp detection\n            start_idx, stop_idx, center_idx = make_indices(\n                chirp_df, data, idx, chirp\n            )\n\n            indices = (start_idx, stop_idx, center_idx)\n            peaks, proms = extract_envelope_trough(\n                data,\n                best_electrode,\n                second_best_electrode,\n                best_freq,\n                indices,\n            )\n\n            # if no peaks are found, skip this chirp\n            if len(peaks) == 0:\n                peak_distances.append(np.nan)\n                peak_prominences.append(np.nan)\n                
peak_times.append(np.nan)\n                chirp_indices.append(idx)\n                track_ids.append(fish_id)\n                continue\n\n            # compute index to closest peak to chirp center\n            distances = np.abs(peaks - (center_idx - start_idx))\n            closest_peak_idx = np.argmin(distances)\n\n            # store peak prominence and distance to chirp center\n            peak_distances.append(distances[closest_peak_idx])\n            peak_prominences.append(proms[closest_peak_idx])\n            peak_times.append(\n                (start_idx + peaks[closest_peak_idx]) / data.grid.samplerate,\n            )\n            chirp_indices.append(idx)\n            track_ids.append(fish_id)\n\n    peak_prominences = np.array(peak_prominences)\n    peak_distances = (\n        np.array(peak_distances) + 1\n    )  # add 1 to avoid division by zero\n    peak_times = np.array(peak_times)\n    chirp_indices = np.array(chirp_indices)\n    track_ids = np.array(track_ids)\n\n    assignment_data = {\n        \"proms\": peak_prominences,\n        \"peaks\": peak_distances,\n        \"ptimes\": peak_times,\n        \"cindices\": chirp_indices,\n        \"track_ids\": track_ids,\n    }\n    return (\n        assignment_data,\n        chirp_df,\n        data,\n    )\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_envelope_trough","title":"extract_envelope_trough(data, best_electrode, second_best_electrode, best_freq, indices)","text":"

Extract envelope troughs.

Extracts a snippet from the raw data around the chirp time and computes the envelope of the bandpass filtered signal. Then finds the troughs in the envelope and computes their prominences.
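
For illustration, a self-contained sketch of the same idea using plain scipy; the package itself relies on its bandpass_filter and envelope helpers, and the cutoffs below are illustrative:

import numpy as np
from scipy.signal import butter, sosfiltfilt, find_peaks

def trough_sketch(raw: np.ndarray, samplerate: float, best_freq: float):
    # bandpass +-15 Hz around the fish's baseline frequency
    sos = butter(4, [best_freq - 15, best_freq + 15], "bandpass",
                 fs=samplerate, output="sos")
    filtered = sosfiltfilt(sos, raw)

    # crude amplitude envelope: lowpass-filtered magnitude of the signal
    sos_env = butter(4, 50, "lowpass", fs=samplerate, output="sos")
    env = sosfiltfilt(sos_env, np.abs(filtered))

    # troughs of the envelope are peaks of its negative
    peaks, params = find_peaks(-env, prominence=1e-3)
    return peaks, params["prominences"]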

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_envelope_trough--parameters","title":"Parameters","text":"
  • data: gridtools.datasets.Dataset Dataset object containing the data
  • best_electrode: int Index of the best electrode
  • second_best_electrode: int Index of the second best electrode
  • best_freq: float Frequency of the chirp
  • indices: Tuple[int, int, int] Tuple containing the start, center, stop indices of the chirp
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.extract_envelope_trough--returns","title":"Returns","text":"
  • peaks: np.ndarray Indices of the envelope troughs
  • proms: np.ndarray Prominences of the envelope troughs
Source code in chirpdetector/assign_chirps.py
def extract_envelope_trough(\n    data: Dataset,\n    best_electrode: int,\n    second_best_electrode: int,\n    best_freq: float,\n    indices: Tuple[int, int, int],\n) -> Tuple[np.ndarray, np.ndarray]:\n    \"\"\"Extract envelope troughs.\n\n    Extracts a snippet from the raw data around the chirp time and computes\n    the envelope of the bandpass filtered signal. Then finds the troughs in\n    the envelope and computes their prominences.\n\n    Parameters\n    ----------\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    - `best_electrode`: `int`\n        Index of the best electrode\n    - `second_best_electrode`: `int`\n        Index of the second best electrode\n    - `best_freq`: `float`\n        Frequency of the chirp\n    - `indices`: `Tuple[int, int, int]`\n        Tuple containing the start, center, stop indices of the chirp\n\n    Returns\n    -------\n    - `peaks`: `np.ndarray`\n        Indices of the envelope troughs\n    - `proms`: `np.ndarray`\n        Prominences of the envelope troughs\n    \"\"\"\n    start_idx, stop_idx, _= indices\n\n    # determine bandpass cutoffs above and below baseline frequency\n    lower_f = best_freq - 15\n    upper_f = best_freq + 15\n\n    # get the raw signal on the 2 best electrodes and make differential\n    raw1 = data.grid.rec[start_idx:stop_idx, best_electrode]\n    raw2 = data.grid.rec[start_idx:stop_idx, second_best_electrode]\n    raw = raw1 - raw2\n\n    # bandpass filter the raw signal\n    raw_filtered = bandpass_filter(\n        raw,\n        data.grid.samplerate,\n        lower_f,\n        upper_f,\n    )\n\n    # compute the envelope of the filtered signal\n    env = envelope(\n        signal=raw_filtered,\n        samplerate=data.grid.samplerate,\n        cutoff_frequency=50,\n    )\n    peaks, proms = get_env_trough(env, raw_filtered)\n    # mpl.use(\"TkAgg\")\n    # plt.plot(env)\n    # plt.plot(raw_filtered)\n    # plt.plot(peaks, env[peaks], \"x\")\n    # plt.show()\n    return peaks, proms\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.get_env_trough","title":"get_env_trough(env, raw)","text":"

Get the envelope troughs and their prominences.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.get_env_trough--parameters","title":"Parameters","text":"
  • env: np.ndarray Envelope of the filtered signal
  • raw: np.ndarray Raw signal
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.get_env_trough--returns","title":"Returns","text":"
  • peaks: np.ndarray Indices of the envelope troughs
  • proms: np.ndarray Prominences of the envelope troughs
Source code in chirpdetector/assign_chirps.py
def get_env_trough(\n    env: np.ndarray,\n    raw: np.ndarray,\n) -> Tuple[np.ndarray, np.ndarray]:\n    \"\"\"Get the envelope troughs and their prominences.\n\n    Parameters\n    ----------\n    - \`env\`: \`np.ndarray\`\n        Envelope of the filtered signal\n    - \`raw\`: \`np.ndarray\`\n        Raw signal\n\n    Returns\n    -------\n    - \`peaks\`: \`np.ndarray\`\n        Indices of the envelope troughs\n    - \`proms\`: \`np.ndarray\`\n        Prominences of the envelope troughs\n    \"\"\"\n    # normalize the envelope using the amplitude of the raw signal\n    # to preserve the amplitude of the envelope\n    env = env / np.max(np.abs(raw))\n\n    # cut off the first and last 25% of the envelope\n    env[: int(0.25 * len(env))] = np.nan\n    env[int(0.75 * len(env)) :] = np.nan\n\n    # find troughs in the envelope and compute trough prominences\n    peaks, params = find_peaks(-env, prominence=1e-3)\n    proms = params[\"prominences\"]\n    return peaks, proms\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.make_indices","title":"make_indices(chirp_df, data, idx, chirp)","text":"

Make indices for the chirp window.
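
A worked example with illustrative numbers: the window is the bounding box extended by half its duration on each side.

samplerate = 20_000                 # Hz, illustrative
t1, t2, chirp = 10.0, 10.2, 10.1    # bbox start/stop and chirp time in seconds

diffr = t2 - t1                     # 0.2 s bbox duration
start = t1 - 0.5 * diffr            # 9.9 s
stop = t2 + 0.5 * diffr             # 10.3 s

start_idx = round(start * samplerate)    # 198000
stop_idx = round(stop * samplerate)      # 206000
center_idx = round(chirp * samplerate)   # 202000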

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.make_indices--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • data: gridtools.datasets.Dataset Dataset object containing the data
  • idx: int Index of the chirp in the chirp_df
  • chirp: float Chirp time
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.make_indices--returns","title":"Returns","text":"
  • start_idx: int Start index of the chirp window
  • stop_idx: int Stop index of the chirp window
  • center_idx: int Center index of the chirp window
Source code in chirpdetector/assign_chirps.py
def make_indices(\n    chirp_df: pd.DataFrame, data: Dataset, idx: int, chirp: float\n) -> Tuple[int, int, int]:\n    \"\"\"Make indices for the chirp window.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `data`: `gridtools.datasets.Dataset`\n        Dataset object containing the data\n    - `idx`: `int`\n        Index of the chirp in the chirp_df\n    - `chirp`: `float`\n        Chirp time\n\n    Returns\n    -------\n    - `start_idx`: `int`\n        Start index of the chirp window\n    - `stop_idx`: `int`\n        Stop index of the chirp window\n    - `center_idx`: `int`\n        Center index of the chirp window\n    \"\"\"\n    # determine start and stop index of time window on raw data\n    # using bounding box start and stop times of chirp detection\n    diffr = chirp_df.t2.to_numpy()[idx] - chirp_df.t1.to_numpy()[idx]\n    t1 = chirp_df.t1.to_numpy()[idx] - 0.5 * diffr\n    t2 = chirp_df.t2.to_numpy()[idx] + 0.5 * diffr\n\n    start_idx = int(np.round(t1 * data.grid.samplerate))\n    stop_idx = int(np.round(t2 * data.grid.samplerate))\n    center_idx = int(np.round(chirp * data.grid.samplerate))\n\n    return start_idx, stop_idx, center_idx\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.non_max_suppression_fast","title":"non_max_suppression_fast(chirp_df, overlapthresh)","text":"

Raster implementation of non-maximum suppression.

To remove overlapping bounding boxes.
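
A minimal usage sketch with toy boxes; note that, as in the source below, the threshold is compared against the intersection area divided by each remaining box's own area, not against a strict IoU:

import pandas as pd

from chirpdetector.assign_chirps import non_max_suppression_fast

# two heavily overlapping boxes plus one isolated box (t1, f1, t2, f2)
chirp_df = pd.DataFrame(
    {"t1": [0.0, 0.1, 5.0], "f1": [0.0, 0.1, 5.0],
     "t2": [1.0, 1.1, 6.0], "f2": [1.0, 1.1, 6.0]}
)
keep = non_max_suppression_fast(chirp_df, overlapthresh=0.5)
# the two overlapping boxes collapse to one pick; the isolated box survives
chirp_df_nms = chirp_df.loc[keep, :]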

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.non_max_suppression_fast--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • overlapthresh: float Threshold for overlap between bboxes
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.non_max_suppression_fast--returns","title":"Returns","text":"
  • pick: list List of indices of bboxes to keep
Source code in chirpdetector/assign_chirps.py
def non_max_suppression_fast(\n    chirp_df: pd.DataFrame,\n    overlapthresh: float,\n) -> list:\n    \"\"\"Raster implementation of non-maximum suppression.\n\n    To remove overlapping bounding boxes.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `overlapthresh`: `float`\n        Threshold for overlap between bboxes\n\n    Returns\n    -------\n    - `pick`: `list`\n        List of indices of bboxes to keep\n    \"\"\"\n    # slightly modified version of\n    # https://pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\n\n    # convert boxes to list of tuples and then to numpy array\n    boxes = chirp_df[[\"t1\", \"f1\", \"t2\", \"f2\"]].to_numpy()\n\n    # if there are no boxes, return an empty list\n    if len(boxes) == 0:\n        return []\n\n    # initialize the list of picked indexes\n    pick = []\n\n    # grab the coordinates of the bounding boxes\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n\n    # compute the area of the bounding boxes and sort the bounding\n    # boxes by the bottom-right y-coordinate of the bounding box\n    area = (x2 - x1) * (y2 - y1)\n    idxs = np.argsort(y2)\n\n    # keep looping while some indexes still remain in the indexes\n    # list\n    while len(idxs) > 0:\n        # grab the last index in the indexes list and add the\n        # index value to the list of picked indexes\n        last = len(idxs) - 1\n        i = idxs[last]\n        pick.append(i)\n\n        # find the largest (x, y) coordinates for the start of\n        # the bounding box and the smallest (x, y) coordinates\n        # for the end of the bounding box\n        xx1 = np.maximum(x1[i], x1[idxs[:last]])\n        yy1 = np.maximum(y1[i], y1[idxs[:last]])\n        xx2 = np.minimum(x2[i], x2[idxs[:last]])\n        yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n        # compute the width and height of the bounding box\n        w = np.maximum(0, xx2 - xx1)\n        h = np.maximum(0, yy2 - yy1)\n\n        # compute the ratio of overlap (intersection over union)\n        overlap = (w * h) / area[idxs[:last]]\n\n        # delete all indexes from the index list that have\n        idxs = np.delete(\n            idxs,\n            np.concatenate(([last], np.where(overlap > overlapthresh)[0])),\n        )\n        # return the indicies of the picked boxes\n    return pick\n
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.track_filter","title":"track_filter(chirp_df, minf, maxf)","text":"

Remove chirp bboxes that do not overlap with tracks.

"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.track_filter--parameters","title":"Parameters","text":"
  • chirp_df: pd.dataframe Dataframe containing the chirp bboxes
  • minf: float Minimum frequency of the range
  • maxf: float Maximum frequency of the range
"},{"location":"api/assign_chirps/#chirpdetector.assign_chirps.track_filter--returns","title":"Returns","text":"
  • chirp_df_tf: pd.dataframe Dataframe containing the chirp bboxes that overlap with the range
Source code in chirpdetector/assign_chirps.py
def track_filter(\n    chirp_df: pd.DataFrame,\n    minf: float,\n    maxf: float,\n) -> pd.DataFrame:\n    \"\"\"Remove chirp bboxes that do not overlap with tracks.\n\n    Parameters\n    ----------\n    - `chirp_df`: `pd.dataframe`\n        Dataframe containing the chirp bboxes\n    - `minf`: `float`\n        Minimum frequency of the range\n    - `maxf`: `float`\n        Maximum frequency of the range\n\n    Returns\n    -------\n    - `chirp_df_tf`: `pd.dataframe`\n        Dataframe containing the chirp bboxes that overlap with the range\n    \"\"\"\n    # remove all chirp bboxes that have no overlap with the range spanned by\n    # minf and maxf\n\n    # first build a box that spans the entire range\n    range_box = np.array([0, minf, np.max(chirp_df.t2), maxf])\n\n    # now compute the intersection between the range box and each chirp bboxes\n    # and keep only those that have an intersection area > 0\n    chirp_df_tf = chirp_df.copy()\n    intersection = chirp_df_tf.apply(\n        lambda row: (\n            max(0, min(row[\"t2\"], range_box[2]) - max(row[\"t1\"], range_box[0]))\n            * max(\n                0,\n                min(row[\"f2\"], range_box[3]) - max(row[\"f1\"], range_box[1]),\n            )\n        ),\n        axis=1,\n    )\n    return chirp_df_tf.loc[intersection > 0, :]\n
"},{"location":"api/convert_data/","title":"convert_data","text":"

Functions and classes for converting data.

"},{"location":"api/convert_data/#chirpdetector.convert_data.chirp_bounding_boxes","title":"chirp_bounding_boxes(data, nfft)","text":"

Make bounding boxes of simulated chirps using the chirp parameters.
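
The paddings follow directly from the spectrogram parameters; a quick worked example with an illustrative samplerate and nfft:

samplerate = 20_000   # Hz, illustrative
nfft = 4096           # samples, illustrative

pad_time = nfft / samplerate    # one NFFT window ~= 0.205 s of time padding
freq_res = samplerate / nfft    # ~= 4.88 Hz per frequency bin
pad_freq = freq_res * 50        # ~= 244 Hz of frequency padding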

"},{"location":"api/convert_data/#chirpdetector.convert_data.chirp_bounding_boxes--parameters","title":"Parameters","text":"
  • data : Dataset The dataset to make bounding boxes for.
  • nfft : int The number of samples in the FFT.
"},{"location":"api/convert_data/#chirpdetector.convert_data.chirp_bounding_boxes--returns","title":"Returns","text":"

pandas.DataFrame A dataframe with the bounding boxes.

Source code in chirpdetector/convert_data.py
def chirp_bounding_boxes(data: Dataset, nfft: int) -> pd.DataFrame:\n    \"\"\"Make bounding boxes of simulated chirps using the chirp parameters.\n\n    Parameters\n    ----------\n    - `data` : `Dataset`\n        The dataset to make bounding boxes for.\n    - `nfft` : int\n        The number of samples in the FFT.\n\n    Returns\n    -------\n    `pandas.DataFrame`\n        A dataframe with the bounding boxes.\n    \"\"\"\n    assert hasattr(\n        data.com.chirp,\n        \"params\",\n    ), \"Dataset must have a chirp attribute with a params attribute\"\n\n    # Time padding is one NFFT window\n    pad_time = nfft / data.grid.samplerate\n\n    # Freq padding is fixed by the frequency resolution\n    freq_res = data.grid.samplerate / nfft\n    pad_freq = freq_res * 50\n\n    boxes = []\n    ids = []\n    for fish_id in data.track.ids:\n        freqs = data.track.freqs[data.track.idents == fish_id]\n        times = data.track.times[\n            data.track.indices[data.track.idents == fish_id]\n        ]\n        chirps = data.com.chirp.times[data.com.chirp.idents == fish_id]\n        params = data.com.chirp.params[data.com.chirp.idents == fish_id]\n\n        for chirp, param in zip(chirps, params):\n            # take the two closest frequency points\n            f_closest = freqs[np.argsort(np.abs(times - chirp))[:2]]\n\n            # take the two closest time points\n            t_closest = times[np.argsort(np.abs(times - chirp))[:2]]\n\n            # compute the weighted average of the two closest frequency points\n            # using the dt between chirp time and sampled time as weights\n            f_closest = np.average(\n                f_closest,\n                weights=np.abs(t_closest - chirp),\n            )\n\n            # we now have baseline eodf and time point of the chirp. Now\n            # we get some parameters from the params to build the bounding box\n            # for the chirp\n            height = param[1]\n            width = param[2]\n\n            # now define bounding box as center coordinates, width and height\n            t_center = chirp\n            f_center = f_closest + height / 2\n\n            bbox_height = height + pad_freq\n            bbox_width = width + pad_time\n\n            boxes.append((t_center, f_center, bbox_width, bbox_height))\n            ids.append(fish_id)\n\n    dataframe = pd.DataFrame(\n        boxes,\n        columns=[\"t_center\", \"f_center\", \"width\", \"height\"],\n    )\n    dataframe[\"fish_id\"] = ids\n    return dataframe\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert","title":"convert(data, conf, output, label_mode)","text":"

Convert a gridtools dataset to a YOLO dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.convert--parameters","title":"Parameters","text":"
  • data : Dataset The dataset to convert.
  • conf : Config The configuration.
  • output : pathlib.Path The output directory.
  • label_mode : str The label mode. Can be one of 'none', 'synthetic' or 'detected'.
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert--returns","title":"Returns","text":"
  • None
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert--notes","title":"Notes","text":"

This function iterates through a raw recording in chunks and computes the sum spectrogram of each chunk. The chunk size needs to be chosen such that the images can be nicely fed to a detector. The function also computes the bounding boxes of chirps in that chunk and saves them to a dataframe and a txt file into a labels directory.
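
The chunking amounts to stepping through the raw recording in fixed windows, padded with overlap where possible; a simplified sketch of the index computation:

import numpy as np

def chunk_bounds(n_samples: int, chunksize: int, overlap: int):
    """Yield (start, stop) sample indices, padded by `overlap` where possible."""
    n_chunks = int(np.ceil(n_samples / chunksize))
    for i in range(n_chunks):
        start = max(0, i * chunksize - overlap)
        stop = min(n_samples, (i + 1) * chunksize + overlap)
        yield start, stop

# e.g. a 100 s recording at 20 kHz in 15 s chunks with 1 s of overlap
for start, stop in chunk_bounds(100 * 20_000, 15 * 20_000, 1 * 20_000):
    pass  # compute the sum spectrogram of data[start:stop] here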

Source code in chirpdetector/convert_data.py
def convert(\n    data: Dataset,\n    conf: Config,\n    output: pathlib.Path,\n    label_mode: str,\n) -> None:\n    \"\"\"Convert a gridtools dataset to a YOLO dataset.\n\n    Parameters\n    ----------\n    - `data` : `Dataset`\n        The dataset to convert.\n    - `conf` : `Config`\n        The configuration.\n    - `output` : `pathlib.Path`\n        The output directory.\n    - `label_mode` : `str`\n        The label mode. Can be one of 'none', 'synthetic' or 'detected'.\n\n    Returns\n    -------\n    - `None`\n\n    Notes\n    -----\n    This function iterates through a raw recording in chunks and computes the\n    sum spectrogram of each chunk. The chunk size needs to be chosen such that\n    the images can be nicely fed to a detector. The function also computes\n    the bounding boxes of chirps in that chunk and saves them to a dataframe\n    and a txt file into a labels directory.\n    \"\"\"\n    assert hasattr(data, \"grid\"), \"Dataset must have a grid attribute\"\n    assert label_mode in [\n        \"none\",\n        \"synthetic\",\n        \"detected\",\n    ], \"label_mode must be one of 'none', 'synthetic' or 'detected'\"\n\n    dataroot = output\n\n    n_electrodes = data.grid.rec.shape[1]\n\n    # How much time to put into each spectrogram\n    time_window = conf.spec.time_window  # seconds\n    window_overlap = conf.spec.spec_overlap  # seconds\n    freq_pad = conf.spec.freq_pad  # Hz\n    window_overlap_samples = window_overlap * data.grid.samplerate  # samples\n\n    # Spectrogram computation parameters\n    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples\n    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples\n    chunksize = time_window * data.grid.samplerate  # samples\n    n_chunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)\n\n    rprint(\n        \"Dividing recording of duration\"\n        f\"{data.grid.rec.shape[0] / data.grid.samplerate} into {n_chunks}\"\n        f\"chunks of {time_window} seconds each.\",\n    )\n\n    bbox_dfs = []\n\n    # shift the time of the tracks to start at 0\n    # because a subset starts at the orignal time\n    # TODO: Remove this when gridtools is fixed\n    data.track.times -= data.track.times[0]\n\n    for chunk_no in range(n_chunks):\n        # get start and stop indices for the current chunk\n        # including some overlap to compensate for edge effects\n        # this diffrers for the first and last chunk\n\n        if chunk_no == 0:\n            idx1 = sint(chunk_no * chunksize)\n            idx2 = sint((chunk_no + 1) * chunksize + window_overlap_samples)\n        elif chunk_no == n_chunks - 1:\n            idx1 = sint(chunk_no * chunksize - window_overlap_samples)\n            idx2 = sint((chunk_no + 1) * chunksize)\n        else:\n            idx1 = sint(chunk_no * chunksize - window_overlap_samples)\n            idx2 = sint((chunk_no + 1) * chunksize + window_overlap_samples)\n\n        # idx1 and idx2 now determine the window I cut out of the raw signal\n        # to compute the spectrogram of.\n\n        # compute the time and frequency axes of the spectrogram now that we\n        # include the start and stop indices of the current chunk and thus the\n        # right start and stop time. 
The `spectrogram` function does not know\n        # about this and would start every time axis at 0.\n        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate\n        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft\n\n        # create a subset from the grid dataset\n        if idx2 > data.grid.rec.shape[0]:\n            idx2 = data.grid.rec.shape[0] - 1\n\n        chunk = subset(data, idx1, idx2, mode=\"index\")\n\n        # compute the spectrogram for each electrode of the current chunk\n        spec = None\n        for el in range(n_electrodes):\n            # get the signal for the current electrode\n            sig = chunk.grid.rec[:, el]\n\n            # compute the spectrogram for the current electrode\n            chunk_spec, _, _ = spectrogram(\n                data=sig.copy(),\n                samplingrate=data.grid.samplerate,\n                nfft=nfft,\n                hop_length=hop_len,\n            )\n\n            # sum spectrogram over all electrodes\n            # the spec is a tensor\n            if el == 0:\n                spec = chunk_spec\n            else:\n                spec += chunk_spec\n\n        if spec is None:\n            msg = \"Failed to compute spectrogram.\"\n            raise ValueError(msg)\n\n        # normalize spectrogram by the number of electrodes\n        # the spec is still a tensor\n        spec /= n_electrodes\n\n        # convert the spectrogram to dB\n        # .. still a tensor\n        spec = decibel(spec)\n\n        # cut off everything outside the upper frequency limit\n        # the spec is still a tensor\n\n        spectrogram_freq_limits = (\n            np.min(chunk.track.freqs) - freq_pad,\n            np.max(chunk.track.freqs) + freq_pad,\n        )\n\n        spec = spec[\n            (spec_freqs >= spectrogram_freq_limits[0])\n            & (spec_freqs <= spectrogram_freq_limits[1]),\n            :,\n        ]\n        spec_freqs = spec_freqs[\n            (spec_freqs >= spectrogram_freq_limits[0])\n            & (spec_freqs <= spectrogram_freq_limits[1])\n        ]\n\n        # normalize the spectrogram to zero mean and unit variance\n        # the spec is still a tensor\n        spec = (spec - spec.mean()) / spec.std()\n\n        # convert the spectrogram to a PIL image\n        spec = spec.detach().cpu().numpy()\n        img = numpy_to_pil(spec)\n\n        imgname = f\"{chunk.path.name}.png\"\n        if label_mode == \"synthetic\":\n            bbox_df, img = synthetic_labels(\n                dataroot,\n                chunk,\n                nfft,\n                spec,\n                spec_times,\n                spec_freqs,\n                imgname,\n                chunk_no,\n                img,\n            )\n            if bbox_df is None:\n                continue\n            bbox_dfs.append(bbox_df)\n        elif label_mode == \"detected\":\n            detected_labels(dataroot, chunk, imgname, spec, spec_times)\n\n        # save image\n        img.save(dataroot / \"images\" / f\"{imgname}\")\n\n    if label_mode == \"synthetic\":\n        bbox_df = pd.concat(bbox_dfs, ignore_index=True)\n        bbox_df.to_csv(dataroot / f\"{data.path.name}_bboxes.csv\", index=False)\n\n    # save the classes.txt file\n    classes = [\"__background__\", \"chirp\"]\n    with pathlib.Path.open(dataroot / \"classes.txt\", \"w\") as f:\n        f.write(\"\\n\".join(classes))\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert_cli","title":"convert_cli(path, output, label_mode)","text":"

Parse all datasets in a directory and convert them to a YOLO dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.convert_cli--parameters","title":"Parameters","text":"
  • path : pathlib.Path The root directory of the datasets.
  • output : pathlib.Path The output directory for the converted dataset.
  • label_mode : str The label mode. Can be one of 'none', 'synthetic' or 'detected'.
"},{"location":"api/convert_data/#chirpdetector.convert_data.convert_cli--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/convert_data.py
def convert_cli(\n    path: pathlib.Path,\n    output: pathlib.Path,\n    label_mode: str,\n) -> None:\n    \"\"\"Parse all datasets in a directory and convert them to a YOLO dataset.\n\n    Parameters\n    ----------\n    - `path` : `pathlib.Path`\n        The root directory of the datasets.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    make_file_tree(output)\n    config = load_config(str(path / \"chirpdetector.toml\"))\n\n    for p in track(list(path.iterdir()), description=\"Building datasets\"):\n        if p.is_file():\n            continue\n        data = load(p)\n        convert(data, config, output, label_mode)\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.detected_labels","title":"detected_labels(output, chunk, imgname, spec, spec_times)","text":"

Use the output of detect_chirps to make a YOLO dataset.
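
The core of the conversion is turning pixel corner coordinates into the normalized center format that YOLO expects, flipping the y axis because the image origin is the top-left corner; a condensed sketch:

import numpy as np

def corners_to_yolo(x1, y1, x2, y2, img_w, img_h):
    """Convert pixel corner boxes to normalized YOLO (cx, cy, w, h)."""
    cx = (x1 + x2) / 2
    cy = img_h - (y1 + y2) / 2   # flip: image origin is the top-left corner
    w = x2 - x1
    h = y2 - y1
    return cx / img_w, cy / img_h, w / img_w, h / img_h

# e.g. a box spanning pixels (10, 20) to (50, 60) in a 200 x 100 image
print(corners_to_yolo(10, 20, 50, 60, img_w=200, img_h=100))
# -> (0.15, 0.6, 0.2, 0.4)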

"},{"location":"api/convert_data/#chirpdetector.convert_data.detected_labels--parameters","title":"Parameters","text":"
  • output : pathlib.Path The output directory.
  • chunk : Dataset The dataset to make bounding boxes for.
  • imgname : str The name of the image.
  • spec : np.ndarray The spectrogram.
  • spec_times : np.ndarray The time axis of the spectrogram.
"},{"location":"api/convert_data/#chirpdetector.convert_data.detected_labels--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/convert_data.py
def detected_labels(\n    output: pathlib.Path,\n    chunk: Dataset,\n    imgname: str,\n    spec: np.ndarray,\n    spec_times: np.ndarray,\n) -> None:\n    \"\"\"Use the detect_chirps to make a YOLO dataset.\n\n    Parameters\n    ----------\n    - `output` : `pathlib.Path`\n        The output directory.\n    - `chunk` : `Dataset`\n        The dataset to make bounding boxes for.\n    - `imgname` : `str`\n        The name of the image.\n    - `spec` : `np.ndarray`\n        The spectrogram.\n    - `spec_times` : `np.ndarray`\n        The time axis of the spectrogram.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # load the detected bboxes csv\n    # TODO: This is a workaround. Instead improve the subset naming convention\n    # in gridtools\n    source_dataset = chunk.path.name.split(\"_\")[1:-4]\n    source_dataset = \"_\".join(source_dataset)\n    source_dataset = chunk.path.parent / source_dataset\n\n    dataframe = pd.read_csv(source_dataset / \"chirpdetector_bboxes.csv\")\n\n    # get chunk start and stop time\n    start, stop = spec_times[0], spec_times[-1]\n\n    # get the bboxes for this chunk\n    bboxes = dataframe[(dataframe.t1 >= start) & (dataframe.t2 <= stop)]\n\n    # get the x and y coordinates of the bboxes in pixels as dataframe\n    bboxes_xy = bboxes[[\"x1\", \"y1\", \"x2\", \"y2\"]]\n\n    # convert from x1, y1, x2, y2 to centerx, centery, width, height\n    centerx = np.array((bboxes_xy[\"x1\"] + bboxes_xy[\"x2\"]) / 2)\n    centery = np.array((bboxes_xy[\"y1\"] + bboxes_xy[\"y2\"]) / 2)\n    width = np.array(bboxes_xy[\"x2\"] - bboxes_xy[\"x1\"])\n    height = np.array(bboxes_xy[\"y2\"] - bboxes_xy[\"y1\"])\n\n    # flip centery because origin is top left\n    centery = spec.shape[0] - centery\n\n    # make relative to image size\n    centerx = centerx / spec.shape[1]\n    centery = centery / spec.shape[0]\n    width = width / spec.shape[1]\n    height = height / spec.shape[0]\n    labels = np.ones_like(centerx, dtype=int)\n\n    # make a new dataframe with the relative coordinates\n    new_bboxes = pd.DataFrame(\n        {\"l\": labels, \"x\": centerx, \"y\": centery, \"w\": width, \"h\": height},\n    )\n\n    # save dataframe for every spec without headers as txt\n    new_bboxes.to_csv(\n        output / \"labels\" / f\"{imgname[:-4]}.txt\",\n        header=False,\n        index=False,\n        sep=\" \",\n    )\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.make_file_tree","title":"make_file_tree(path)","text":"

Build a file tree for the training dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.make_file_tree--parameters","title":"Parameters","text":"

path : pathlib.Path The root directory of the dataset.

Source code in chirpdetector/convert_data.py
def make_file_tree(path: pathlib.Path) -> None:\n    \"\"\"Build a file tree for the training dataset.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The root directory of the dataset.\n    \"\"\"\n    if path.parent.exists() and path.parent.is_file():\n        msg = (\n            f\"Parent directory of {path} is a file. \"\n            \"Please specify a directory.\"\n        )\n        raise ValueError(msg)\n\n    if path.exists():\n        shutil.rmtree(path)\n\n    path.mkdir(exist_ok=True, parents=True)\n\n    train_imgs = path / \"images\"\n    train_labels = path / \"labels\"\n    train_imgs.mkdir(exist_ok=True, parents=True)\n    train_labels.mkdir(exist_ok=True, parents=True)\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.numpy_to_pil","title":"numpy_to_pil(img)","text":"

Convert a 2D numpy array to a PIL image.
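
Usage is straightforward; any 2D array with more than one distinct value works:

import numpy as np

from chirpdetector.convert_data import numpy_to_pil

spec = np.random.rand(256, 512)   # stand-in for a spectrogram
img = numpy_to_pil(spec)
img.save("spectrogram.png")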

"},{"location":"api/convert_data/#chirpdetector.convert_data.numpy_to_pil--parameters","title":"Parameters","text":"

img : np.ndarray The input image.

"},{"location":"api/convert_data/#chirpdetector.convert_data.numpy_to_pil--returns","title":"Returns","text":"

PIL.Image The converted image.

Source code in chirpdetector/convert_data.py
def numpy_to_pil(img: np.ndarray) -> Image.Image:\n    \"\"\"Convert a 2D numpy array to a PIL image.\n\n    Parameters\n    ----------\n    img : np.ndarray\n        The input image.\n\n    Returns\n    -------\n    PIL.Image\n        The converted image.\n    \"\"\"\n    img_dimens = 2\n    if len(img.shape) != img_dimens:\n        msg = f\"Image must be {img_dimens}D\"\n        raise ValueError(msg)\n\n    if img.max() == img.min():\n        msg = \"Image must have more than one value\"\n        raise ValueError(msg)\n\n    img = np.flipud(img)\n    intimg = np.uint8((img - img.min()) / (img.max() - img.min()) * 255)\n    return Image.fromarray(intimg)\n
"},{"location":"api/convert_data/#chirpdetector.convert_data.synthetic_labels","title":"synthetic_labels(output, chunk, nfft, spec, spec_times, spec_freqs, imgname, chunk_no, img)","text":"

Generate labels of a simulated dataset.

"},{"location":"api/convert_data/#chirpdetector.convert_data.synthetic_labels--parameters","title":"Parameters","text":"
  • output : pathlib.Path The output directory.
  • chunk : Dataset The dataset to make bounding boxes for.
  • nfft : int The number of samples in the FFT.
  • spec : np.ndarray The spectrogram.
  • spec_times : np.ndarray The time axis of the spectrogram.
  • spec_freqs : np.ndarray The frequency axis of the spectrogram.
  • imgname : str The name of the image.
  • chunk_no : int The chunk number.
  • img : Image The image.
"},{"location":"api/convert_data/#chirpdetector.convert_data.synthetic_labels--returns","title":"Returns","text":"
  • pandas.DataFrame A dataframe with the bounding boxes.
Source code in chirpdetector/convert_data.py
def synthetic_labels(\n    output: pathlib.Path,\n    chunk: Dataset,\n    nfft: int,\n    spec: np.ndarray,\n    spec_times: np.ndarray,\n    spec_freqs: np.ndarray,\n    imgname: str,\n    chunk_no: int,\n    img: Image.Image,\n) -> Union[Tuple[pd.DataFrame, Image.Image], Tuple[None, None]]:\n    \"\"\"Generate labels of a simulated dataset.\n\n    Parameters\n    ----------\n    - `output` : `pathlib.Path`\n        The output directory.\n    - `chunk` : `Dataset`\n        The dataset to make bounding boxes for.\n    - `nfft` : `int`\n        The number of samples in the FFT.\n    - `spec` : `np.ndarray`\n        The spectrogram.\n    - `spec_times` : `np.ndarray`\n        The time axis of the spectrogram.\n    - `spec_freqs` : `np.ndarray`\n        The frequency axis of the spectrogram.\n    - `imgname` : `str`\n        The name of the image.\n    - `chunk_no` : `int`\n        The chunk number.\n    - `img` : `Image`\n        The image.\n\n    Returns\n    -------\n    - `pandas.DataFrame`\n        A dataframe with the bounding boxes.\n    \"\"\"\n    # compute the bounding boxes for this chunk\n    bboxes = chirp_bounding_boxes(chunk, nfft)\n\n    if len(bboxes) == 0:\n        return None, None\n\n    # convert bounding box center coordinates to spectrogram coordinates\n    # find the indices on the spec_times corresponding to the center times\n    x = np.searchsorted(spec_times, bboxes.t_center)\n    y = np.searchsorted(spec_freqs, bboxes.f_center)\n    widths = np.searchsorted(spec_times - spec_times[0], bboxes.width)\n    heights = np.searchsorted(spec_freqs - spec_freqs[0], bboxes.height)\n\n    # now we have center coordinates, widths and heights in indices. But PIL\n    # expects coordinates in pixels in the format\n    # (Upper left x coordinate, upper left y coordinate,\n    # lower right x coordinate, lower right y coordinate)\n    # In addition, an image starts in the top left corner so the bboxes\n    # need to be flipped vertically.\n\n    y = spec.shape[0] - y  # flip the y values to fit y=0 at the top\n    lxs, lys = x - widths / 2, y - heights / 2\n    rxs, rys = x + widths / 2, y + heights / 2\n\n    # add them to the bboxes dataframe\n    bboxes[\"upperleft_img_x\"] = lxs\n    bboxes[\"upperleft_img_y\"] = lys\n    bboxes[\"lowerright_img_x\"] = rxs\n    bboxes[\"lowerright_img_y\"] = rys\n\n    # yolo format is centerx, centery, width, height\n    # convert xmin, ymin, xmax, ymax to centerx, centery, width, height\n    centerx = (lxs + rxs) / 2\n    centery = (lys + rys) / 2\n    width = rxs - lxs\n    height = rys - lys\n\n    # most deep learning frameworks expect bounding box coordinates\n    # as relative to the image size. So we normalize the coordinates\n    # to the image size\n    centerx_norm = centerx / spec.shape[1]\n    centery_norm = centery / spec.shape[0]\n    width_norm = width / spec.shape[1]\n    height_norm = height / spec.shape[0]\n\n    # add them to the bboxes dataframe\n    bboxes[\"centerx_norm\"] = centerx_norm\n    bboxes[\"centery_norm\"] = centery_norm\n    bboxes[\"width_norm\"] = width_norm\n    bboxes[\"height_norm\"] = height_norm\n\n    # add chunk ID to the bboxes dataframe\n    bboxes[\"chunk_id\"] = chunk_no\n\n    # put them into a dataframe to save for each spectrogram\n    dataframe = pd.DataFrame(\n        {\n            \"cx\": centerx_norm,\n            \"cy\": centery_norm,\n            \"w\": width_norm,\n            \"h\": height_norm,\n        },\n    )\n\n    # add the instance id as the first column\n    dataframe.insert(0, \"instance_id\", np.ones_like(lxs, dtype=int))\n\n    # stash the bboxes dataframe for this chunk\n    bboxes[\"image\"] = imgname\n\n    # save dataframe for every spec without headers as txt\n    dataframe.to_csv(\n        output / \"labels\" / f\"{chunk.path.name}.txt\",\n        header=False,\n        index=False,\n        sep=\" \",\n    )\n    return bboxes, img\n
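A worked sketch of the normalization step above, with made-up numbers: a box centered at pixel (300, 150) with a size of 40 by 20 pixels on a spectrogram with 500 frequency bins and 1000 time bins:

spec_shape = (500, 1000)  # (frequency bins, time bins)
centerx, centery, width, height = 300, 150, 40, 20
print(centerx / spec_shape[1], centery / spec_shape[0],
      width / spec_shape[1], height / spec_shape[0])
# 0.3 0.3 0.04 0.04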
"},{"location":"api/dataset_utils/","title":"dataset_utils","text":"

Utility functions for training datasets in the YOLO format.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.clean_yolo_dataset","title":"clean_yolo_dataset(path, img_ext)","text":"

Remove images and labels when the label file is empty.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.clean_yolo_dataset--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the dataset. img_ext : str The image file extension, e.g. .png.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.clean_yolo_dataset--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def clean_yolo_dataset(path: pathlib.Path, img_ext: str) -> None:\n    \"\"\"Remove images and labels when the label file is empty.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the dataset.\n    img_ext : str\n        The image file extension, e.g. .png.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    img_path = path / \"images\"\n    lbl_path = path / \"labels\"\n\n    images = list(img_path.glob(f\"*{img_ext}\"))\n\n    for image in images:\n        lbl = lbl_path / f\"{image.stem}.txt\"\n        if lbl.stat().st_size == 0:\n            image.unlink()\n            lbl.unlink()\n
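A minimal usage sketch with a hypothetical dataset root:

import pathlib

clean_yolo_dataset(pathlib.Path("training_dataset"), img_ext=".png")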
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.load_img","title":"load_img(path)","text":"

Load an image from a path as a numpy array.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.load_img--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the image.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.load_img--returns","title":"Returns","text":"

img : np.ndarray The image as a numpy array.

Source code in chirpdetector/dataset_utils.py
def load_img(path: pathlib.Path) -> np.ndarray:\n    \"\"\"Load an image from a path as a numpy array.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the image.\n\n    Returns\n    -------\n    img : np.ndarray\n        The image as a numpy array.\n    \"\"\"\n    img = Image.open(path)\n    return np.asarray(img)\n
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.merge_yolo_datasets","title":"merge_yolo_datasets(dataset1, dataset2, output)","text":"

Merge two yolo-style datasets into one.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.merge_yolo_datasets--parameters","title":"Parameters","text":"

dataset1 : str The path to the first dataset. dataset2 : str The path to the second dataset. output : str The path to the output dataset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.merge_yolo_datasets--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def merge_yolo_datasets(\n    dataset1: pathlib.Path,\n    dataset2: pathlib.Path,\n    output: pathlib.Path,\n) -> None:\n    \"\"\"Merge two yolo-style datasets into one.\n\n    Parameters\n    ----------\n    dataset1 : str\n        The path to the first dataset.\n    dataset2 : str\n        The path to the second dataset.\n    output : str\n        The path to the output dataset.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    dataset1 = pathlib.Path(dataset1)\n    dataset2 = pathlib.Path(dataset2)\n    output = pathlib.Path(output)\n\n    if not dataset1.exists():\n        msg = f\"{dataset1} does not exist.\"\n        raise FileNotFoundError(msg)\n    if not dataset2.exists():\n        msg = f\"{dataset2} does not exist.\"\n        raise FileNotFoundError(msg)\n    if output.exists():\n        msg = f\"{output} already exists.\"\n        raise FileExistsError(msg)\n\n    output_images = output / \"images\"\n    output_images.mkdir(parents=True, exist_ok=False)\n    output_labels = output / \"labels\"\n    output_labels.mkdir(parents=True, exist_ok=False)\n\n    imgs1 = list((dataset1 / \"images\").iterdir())\n    labels1 = list((dataset1 / \"labels\").iterdir())\n    imgs2 = list((dataset2 / \"images\").iterdir())\n    labels2 = list((dataset2 / \"labels\").iterdir())\n\n    print(f\"Found {len(imgs1)} images in {dataset1}.\")\n    print(f\"Found {len(imgs2)} images in {dataset2}.\")\n\n    print(f\"Copying images and labels to {output}...\")\n    for idx, _ in enumerate(imgs1):\n        shutil.copy(imgs1[idx], output_images / imgs1[idx].name)\n        shutil.copy(labels1[idx], output_labels / labels1[idx].name)\n\n    for idx, _ in enumerate(imgs2):\n        shutil.copy(imgs2[idx], output_images / imgs2[idx].name)\n        shutil.copy(labels2[idx], output_labels / labels2[idx].name)\n\n    classes = dataset1 / \"classes.txt\"\n    shutil.copy(classes, output / classes.name)\n\n    print(f\"Done. Merged {len(imgs1) + len(imgs2)} images.\")\n
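A minimal usage sketch with hypothetical paths; since images and labels from both datasets are copied into shared folders, the two datasets should not contain files with identical names, and the output directory must not exist yet:

import pathlib

merge_yolo_datasets(
    pathlib.Path("synthetic_dataset"),
    pathlib.Path("labeled_dataset"),
    pathlib.Path("merged_dataset"),
)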
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.plot_yolo_dataset","title":"plot_yolo_dataset(path, n)","text":"

Plot n random images from a YOLO-style dataset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.plot_yolo_dataset--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the dataset. n : int The number of images to plot.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.plot_yolo_dataset--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def plot_yolo_dataset(path: pathlib.Path, n: int) -> None:\n    \"\"\"Plot n random images from a YOLO-style dataset.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the dataset.\n    n : int\n        The number of images to plot.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    mpl.use(\"TkAgg\")\n    labelpath = path / \"labels\"\n    imgpath = path / \"images\"\n\n    label_paths = np.array(list(labelpath.glob(\"*.txt\")))\n    label_paths = np.random.choice(label_paths, n)\n\n    for lp in label_paths:\n        imgp = imgpath / (lp.stem + \".png\")\n        img = load_img(imgp)\n        labs = np.loadtxt(lp, dtype=np.float32).reshape(-1, 5)\n\n        coords = labs[:, 1:]\n\n        # scale the normalized coords to absolute pixel values\n        coords[:, 0] *= img.shape[1]\n        coords[:, 1] *= img.shape[0]\n        coords[:, 2] *= img.shape[1]\n        coords[:, 3] *= img.shape[0]\n\n        # turn centerx, centery, width, height into xmin, ymin, xmax, ymax\n        xmin = coords[:, 0] - coords[:, 2] / 2\n        ymin = coords[:, 1] - coords[:, 3] / 2\n        xmax = coords[:, 0] + coords[:, 2] / 2\n        ymax = coords[:, 1] + coords[:, 3] / 2\n\n        # plot the image\n        _, ax = plt.subplots(figsize=(15, 5), constrained_layout=True)\n        ax.imshow(img, cmap=\"magma\")\n        for i in range(len(xmin)):\n            ax.add_patch(\n                Rectangle(\n                    (xmin[i], ymin[i]),\n                    xmax[i] - xmin[i],\n                    ymax[i] - ymin[i],\n                    fill=False,\n                    color=\"white\",\n                ),\n            )\n        ax.set_title(imgp.stem)\n        plt.axis(\"off\")\n        plt.show()\n
"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.subset_yolo_dataset","title":"subset_yolo_dataset(path, img_ext, n)","text":"

Subset a YOLO dataset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.subset_yolo_dataset--parameters","title":"Parameters","text":"

path : pathlib.Path The path to the dataset root. img_ext : str The image extension, e.g. .png or .jpg. n : int The size of the subset.

"},{"location":"api/dataset_utils/#chirpdetector.dataset_utils.subset_yolo_dataset--returns","title":"Returns","text":"

None

Source code in chirpdetector/dataset_utils.py
def subset_yolo_dataset(path: pathlib.Path, img_ext: str, n: int) -> None:\n    \"\"\"Subset a YOLO dataset.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        The path to the dataset root.\n    img_ext : str\n        The image extension, e.g. .png or .jpg.\n    n : int\n        The size of the subset.\n\n    Returns\n    -------\n    None\n    \"\"\"\n    img_path = path / \"images\"\n    lbl_path = path / \"labels\"\n\n    # glob returns a generator, so materialize it before shuffling\n    images = np.array(list(img_path.glob(f\"*{img_ext}\")))\n    np.random.shuffle(images)\n\n    images = images[:n]\n\n    subset_dir = path.parent / f\"{path.name}_subset\"\n    subset_dir.mkdir(exist_ok=True)\n\n    subset_img_path = subset_dir / \"images\"\n    subset_img_path.mkdir(exist_ok=True)\n    subset_lbl_path = subset_dir / \"labels\"\n    subset_lbl_path.mkdir(exist_ok=True)\n\n    shutil.copy(path / \"classes.txt\", subset_dir)\n\n    for image in images:\n        shutil.copy(image, subset_img_path)\n        shutil.copy(lbl_path / f\"{image.stem}.txt\", subset_lbl_path)\n
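A minimal usage sketch with a hypothetical dataset root; this copies n random image/label pairs into a new sibling folder with the _subset suffix:

import pathlib

subset_yolo_dataset(pathlib.Path("training_dataset"), img_ext=".png", n=100)
# copies 100 random pairs into training_dataset_subset/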
"},{"location":"api/detect_chirps/","title":"detect_chirps","text":"

Detect chirps on a spectrogram.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.coords_to_mpl_rectangle","title":"coords_to_mpl_rectangle(boxes)","text":"

Convert a normal bounding box to matplotlib.patches.Rectangle format.

Convert box defined by corner coordinates (x1, y1, x2, y2) to box defined by lower left, width and height (x1, y1, w, h).

The corner coordinates are the model output, but the lower-left corner plus width and height are needed by the matplotlib.patches.Rectangle object for plotting.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.coords_to_mpl_rectangle--parameters","title":"Parameters","text":"
  • boxes : numpy.ndarray The boxes to be converted.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.coords_to_mpl_rectangle--returns","title":"Returns","text":"
  • numpy.ndarray The converted boxes.
Source code in chirpdetector/detect_chirps.py
def coords_to_mpl_rectangle(boxes: np.ndarray) -> np.ndarray:\n    \"\"\"Convert a normal bounding box to matplotlib.patches.Rectangle format.\n\n    Convert box defined by corner coordinates (x1, y1, x2, y2)\n    to box defined by lower left, width and height (x1, y1, w, h).\n\n    The corner coordinates are the model output, but the lower-left corner\n    plus width and height are needed by the `matplotlib.patches.Rectangle`\n    object for plotting.\n\n    Parameters\n    ----------\n    - `boxes` : `numpy.ndarray`\n        The boxes to be converted.\n\n    Returns\n    -------\n    - `numpy.ndarray`\n        The converted boxes.\n    \"\"\"\n    boxes_dims = 2\n    if len(boxes.shape) != boxes_dims:\n        msg = (\n            \"The boxes array must be 2-dimensional.\\n\"\n            f\"Shape of boxes: {boxes.shape}\"\n        )\n        raise ValueError(msg)\n    boxes_cols = 4\n    if boxes.shape[1] != boxes_cols:\n        msg = (\n            \"The boxes array must have 4 columns.\\n\"\n            f\"Shape of boxes: {boxes.shape}\"\n        )\n        raise ValueError(msg)\n\n    new_boxes = np.zeros_like(boxes)\n    new_boxes[:, 0] = boxes[:, 0]\n    new_boxes[:, 1] = boxes[:, 1]\n    new_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]\n    new_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]\n\n    return new_boxes\n
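A quick sanity check of the conversion: a box with corners (10, 20) and (50, 80) becomes the lower-left corner (10, 20) with width 40 and height 60:

import numpy as np

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])
print(coords_to_mpl_rectangle(boxes))
# [[10. 20. 40. 60.]]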
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_chirps","title":"detect_chirps(conf, data)","text":"

Detect chirps on a spectrogram.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_chirps--parameters","title":"Parameters","text":"
  • conf : Config The configuration object.
  • data : Dataset The gridtools dataset to detect chirps on.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_chirps--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/detect_chirps.py
def detect_chirps(conf: Config, data: Dataset) -> None:\n    \"\"\"Detect chirps on a spectrogram.\n\n    Parameters\n    ----------\n    - `conf` : `Config`\n        The configuration object.\n    - `data` : `Dataset`\n        The gridtools dataset to detect chirps on.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # get the number of electrodes\n    n_electrodes = data.grid.rec.shape[1]\n\n    # load the model and the checkpoint, and set it to evaluation mode\n    device = get_device()\n    model = load_fasterrcnn(num_classes=len(conf.hyper.classes))\n    checkpoint = torch.load(\n        f\"{conf.hyper.modelpath}/model.pt\",\n        map_location=device,\n    )\n    model.load_state_dict(checkpoint[\"model_state_dict\"])\n    model.to(device).eval()\n\n    # make spec config\n    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples\n    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples\n    chunksize = conf.spec.time_window * data.grid.samplerate  # samples\n    nchunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)\n    window_overlap_samples = int(conf.spec.spec_overlap * data.grid.samplerate)\n\n    bbox_dfs = []\n\n    # iterate over the chunks\n    overwritten = False\n    for chunk_no in range(nchunks):\n        # get start and stop indices for the current chunk\n        # including some overlap to compensate for edge effects\n        # this differs for the first and last chunk\n\n        if chunk_no == 0:\n            idx1 = int(chunk_no * chunksize)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n        elif chunk_no == nchunks - 1:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize)\n        else:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n\n        # idx1 and idx2 now determine the window I cut out of the raw signal\n        # to compute the spectrogram of.\n\n        # compute the time and frequency axes of the spectrogram now that we\n        # include the start and stop indices of the current chunk and thus the\n        # right start and stop time. The `spectrogram` function does not know\n        # about this and would start every time axis at 0.\n        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate\n        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft\n\n        # create a subset from the grid dataset\n        if idx2 > data.grid.rec.shape[0]:\n            idx2 = data.grid.rec.shape[0] - 1\n\n        # This bit should alleviate the edge effects of the tracks\n        # by limiting the start and stop times of the spectrogram\n        # to the start and stop times of the track.\n        start_t = idx1 / data.grid.samplerate\n        stop_t = idx2 / data.grid.samplerate\n        if data.track.times[-1] < stop_t:\n            stop_t = data.track.times[-1]\n            idx2 = int(stop_t * data.grid.samplerate)\n        if data.track.times[0] > start_t:\n            start_t = data.track.times[0]\n            idx1 = int(start_t * data.grid.samplerate)\n        if start_t > data.track.times[-1] or stop_t < data.track.times[0]:\n            continue\n\n        chunk = subset(data, idx1, idx2, mode=\"index\")\n        if len(chunk.track.indices) == 0:\n            continue\n\n        # compute the spectrogram for each electrode of the current chunk\n        spec = torch.zeros((len(spec_freqs), len(spec_times)))\n        for el in range(n_electrodes):\n            # get the signal for the current electrode\n            sig = chunk.grid.rec[:, el]\n\n            # compute the spectrogram for the current electrode\n            chunk_spec, _, _ = spectrogram(\n                data=sig.copy(),\n                samplingrate=data.grid.rec.samplerate,\n                nfft=nfft,\n                hop_length=hop_len,\n            )\n\n            # sum spectrogram over all electrodes\n            # the spec is a tensor\n            if el == 0:\n                spec = chunk_spec\n            else:\n                spec += chunk_spec\n\n        # normalize spectrogram by the number of electrodes\n        # the spec is still a tensor\n        spec /= n_electrodes\n\n        # convert the spectrogram to dB\n        # .. still a tensor\n        spec = decibel(spec)\n\n        # cut off everything outside the upper frequency limit\n        # the spec is still a tensor\n        # TODO: THIS IS SKETCHY AS HELL! As a result, only time and frequency\n        # bounding boxes can be used later! The spectrogram limits change\n        # for every window!\n        flims = (\n            np.min(chunk.track.freqs) - conf.spec.freq_pad,\n            np.max(chunk.track.freqs) + conf.spec.freq_pad,\n        )\n        spec = spec[(spec_freqs >= flims[0]) & (spec_freqs <= flims[1]), :]\n        spec_freqs = spec_freqs[\n            (spec_freqs >= flims[0]) & (spec_freqs <= flims[1])\n        ]\n\n        # make a path to save the spectrogram\n        path = data.path / \"chirpdetections\"\n        if path.exists() and overwritten is False:\n            shutil.rmtree(path)\n            overwritten = True\n        path.mkdir(exist_ok=True)\n        path /= f\"chunk{chunk_no:05d}.png\"\n\n        # add the 3 channels, normalize to 0-1, etc\n        img = spec_to_image(spec)\n\n        # perform the detection\n        with torch.inference_mode():\n            outputs = model([img])\n\n        # put the boxes, scores and labels into the dataset\n        bboxes = outputs[0][\"boxes\"].detach().cpu().numpy()\n        scores = outputs[0][\"scores\"].detach().cpu().numpy()\n        labels = outputs[0][\"labels\"].detach().cpu().numpy()\n\n        # remove all boxes with a score below the threshold\n        bboxes = bboxes[scores > conf.det.threshold]\n        labels = labels[scores > conf.det.threshold]\n        scores = scores[scores > conf.det.threshold]\n\n        # if np.any(scores > conf.det.threshold):\n        #     plot_detections(img, outputs[0], conf.det.threshold, path, conf)\n\n        # save the bboxes to a dataframe\n        bbox_df = pd.DataFrame(\n            data=bboxes,\n            columns=[\"x1\", \"y1\", \"x2\", \"y2\"],\n        )\n        bbox_df[\"score\"] = scores\n        bbox_df[\"label\"] = labels\n\n        # convert x values to time on spec_times\n        spec_times_index = np.arange(0, len(spec_times))\n        bbox_df[\"t1\"] = float_index_interpolation(\n            bbox_df[\"x1\"].to_numpy(),\n            spec_times_index,\n            spec_times,\n        )\n        bbox_df[\"t2\"] = float_index_interpolation(\n            bbox_df[\"x2\"].to_numpy(),\n            spec_times_index,\n            spec_times,\n        )\n\n        # convert y values to frequency on spec_freqs\n        spec_freqs_index = np.arange(len(spec_freqs))\n        bbox_df[\"f1\"] = float_index_interpolation(\n            bbox_df[\"y1\"].to_numpy(),\n            spec_freqs_index,\n            spec_freqs,\n        )\n        bbox_df[\"f2\"] = float_index_interpolation(\n            bbox_df[\"y2\"].to_numpy(),\n            spec_freqs_index,\n            spec_freqs,\n        )\n\n        # save df to list\n        bbox_dfs.append(bbox_df)\n\n    # concatenate all dataframes\n    bbox_df = pd.concat(bbox_dfs)\n    bbox_reset = bbox_df.reset_index(drop=True)\n\n    # sort the dataframe by t1\n    bbox_sorted = bbox_reset.sort_values(by=\"t1\")\n\n    # sort the columns\n    bbox_sorted = bbox_sorted[\n        [\"label\", \"score\", \"x1\", \"y1\", \"x2\", \"y2\", \"t1\", \"f1\", \"t2\", \"f2\"]\n    ]\n\n    # save the dataframe\n    bbox_sorted.to_csv(data.path / \"chirpdetector_bboxes.csv\", index=False)\n
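A minimal usage sketch for a single dataset folder, assuming the same load_config and load helpers used by detect_cli below, with hypothetical paths; the call writes a chirpdetector_bboxes.csv with pixel (x1, y1, x2, y2) and time/frequency (t1, f1, t2, f2) coordinates into the dataset directory:

import pathlib

conf = load_config("recordings/chirpdetector.toml")  # hypothetical path
data = load(pathlib.Path("recordings/grid_2023-01-01"))  # hypothetical path
detect_chirps(conf, data)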
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_cli","title":"detect_cli(input_path)","text":"

Terminal interface for the detection function.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_cli--parameters","title":"Parameters","text":"
  • input_path : pathlib.Path The path to the directory containing the datasets.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.detect_cli--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/detect_chirps.py
def detect_cli(input_path: pathlib.Path) -> None:\n    \"\"\"Terminal interface for the detection function.\n\n    Parameters\n    ----------\n    - `input_path` : `pathlib.Path`\n        The path to the directory containing the datasets.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # make the global logger object\n    # global logger  # pylint: disable=global-statement\n    path = pathlib.Path(input_path)\n    logger = make_logger(__name__, path / \"chirpdetector.log\")\n    datasets = [folder for folder in path.iterdir() if folder.is_dir()]\n    confpath = path / \"chirpdetector.toml\"\n\n    # load the config file and print a warning if it does not exist\n    if confpath.exists():\n        config = load_config(str(confpath))\n    else:\n        msg = (\n            \"The configuration file could not be found in the specified \"\n            \"path. Please run `chirpdetector copyconfig` and change the \"\n            \"configuration file to your needs.\"\n        )\n        raise FileNotFoundError(msg)\n\n    # detect chirps in all datasets in the specified path\n    # and show a progress bar\n    prog.console.rule(\"Starting detection\")\n    with prog:\n        task = prog.add_task(\"Detecting chirps...\", total=len(datasets))\n        for dataset in datasets:\n            msg = f\"Detecting chirps in {dataset.name}...\"\n            prog.console.log(msg)\n            logger.info(msg)\n\n            data = load(dataset)\n            detect_chirps(config, data)\n            prog.update(task, advance=1)\n        prog.update(task, completed=len(datasets))\n
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation","title":"float_index_interpolation(values, index_arr, data_arr)","text":"

Convert float indices to values by linear interpolation.

Interpolates a set of float indices within the given index array to obtain corresponding values from the data array using linear interpolation.

Given a set of float indices (values), this function determines the corresponding values in the data_arr by linearly interpolating between adjacent indices in the index_arr. Linear interpolation involves calculating weighted averages based on the fractional parts of the float indices.

This function is useful for transforming float coordinates on a spectrogram matrix into the corresponding time and frequency values. The reason is that the model outputs bounding boxes in float coordinates, i.e. it does not care about the exact pixel location of the bounding box.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--parameters","title":"Parameters","text":"
  • values : np.ndarray The float index values to interpolate.
  • index_arr : numpy.ndarray The array of indices on the data array.
  • data_arr : numpy.ndarray The array of data.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--returns","title":"Returns","text":"
  • numpy.ndarray The interpolated value.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--raises","title":"Raises","text":"
  • ValueError If any of the input float indices (values) are outside the range of the provided index_arr.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.float_index_interpolation--examples","title":"Examples","text":"

values = np.array([2.5, 3.2, 4.8])
index_arr = np.array([2, 3, 4, 5])
data_arr = np.array([10, 15, 20, 25])
result = float_index_interpolation(values, index_arr, data_arr)
print(result)
array([12.5, 16. , 22.5])

Source code in chirpdetector/detect_chirps.py
def float_index_interpolation(\n    values: np.ndarray,\n    index_arr: np.ndarray,\n    data_arr: np.ndarray,\n) -> np.ndarray:\n    \"\"\"Convert float indices to values by linear interpolation.\n\n    Interpolates a set of float indices within the given index\n    array to obtain corresponding values from the data\n    array using linear interpolation.\n\n    Given a set of float indices (`values`), this function determines\n    the corresponding values in the `data_arr` by linearly interpolating\n    between adjacent indices in the `index_arr`. Linear interpolation\n    involves calculating weighted averages based on the fractional\n    parts of the float indices.\n\n    This function is useful to transform float coordinates on a spectrogram\n    matrix to the corresponding time and frequency values. The reason for\n    this is, that the model outputs bounding boxes in float coordinates,\n    i.e. it does not care about the exact pixel location of the bounding\n    box.\n\n    Parameters\n    ----------\n    - `values` : `np.ndarray`\n        The index value as a float that should be interpolated.\n    - `index_arr` : `numpy.ndarray`\n        The array of indices on the data array.\n    - `data_arr` : `numpy.ndarray`\n        The array of data.\n\n    Returns\n    -------\n    - `numpy.ndarray`\n        The interpolated value.\n\n    Raises\n    ------\n    - `ValueError`\n        If any of the input float indices (`values`) are outside\n        the range of the provided `index_arr`.\n\n    Examples\n    --------\n    >>> values = np.array([2.5, 3.2, 4.8])\n    >>> index_arr = np.array([2, 3, 4, 5])\n    >>> data_arr = np.array([10, 15, 20, 25])\n    >>> result = float_index_interpolation(values, index_arr, data_arr)\n    >>> print(result)\n    array([12.5, 16. , 22.5])\n    \"\"\"\n    # Check if the values are within the range of the index array\n    if np.any(values < (np.min(index_arr) - 1)) or np.any(\n        values > (np.max(index_arr) + 1),\n    ):\n        msg = (\n            \"Values outside the range of index array\\n\"\n            f\"Target values: {values}\\n\"\n            f\"Index array: {index_arr}\\n\"\n            f\"Data array: {data_arr}\"\n        )\n        raise ValueError(msg)\n\n    # Find the indices corresponding to the values\n    lower_indices = np.floor(values).astype(int)\n    upper_indices = np.ceil(values).astype(int)\n\n    # Ensure upper indices are within the array bounds\n    upper_indices = np.minimum(upper_indices, len(index_arr) - 1)\n    lower_indices = np.minimum(lower_indices, len(index_arr) - 1)\n\n    # Calculate the interpolation weights\n    weights = values - lower_indices\n\n    # Linear interpolation\n    return (1 - weights) * data_arr[lower_indices] + weights * data_arr[\n        upper_indices\n    ]\n
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.plot_detections","title":"plot_detections(img_tensor, output, threshold, save_path, conf)","text":"

Plot the detections on the spectrogram.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.plot_detections--parameters","title":"Parameters","text":"
  • img_tensor : torch.Tensor The spectrogram.
  • output : torch.Tensor The output of the model.
  • threshold : float The threshold for the detections.
  • save_path : pathlib.Path The path to save the plot to.
  • conf : Config The configuration object.
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.plot_detections--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/detect_chirps.py
def plot_detections(\n    img_tensor: torch.Tensor,\n    output: torch.Tensor,\n    threshold: float,\n    save_path: pathlib.Path,\n    conf: Config,\n) -> None:\n    \"\"\"Plot the detections on the spectrogram.\n\n    Parameters\n    ----------\n    - `img_tensor` : `torch.Tensor`\n        The spectrogram.\n    - `output` : `torch.Tensor`\n        The output of the model.\n    - `threshold` : `float`\n        The threshold for the detections.\n    - `save_path` : `pathlib.Path`\n        The path to save the plot to.\n    - `conf` : `Config`\n        The configuration object.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    # retrieve all the data from the output and convert\n    # spectrogram to numpy array\n    img = img_tensor.detach().cpu().numpy().transpose(1, 2, 0)[..., 0]\n    boxes = output[\"boxes\"].detach().cpu().numpy()\n    boxes = coords_to_mpl_rectangle(boxes)\n    scores = output[\"scores\"].detach().cpu().numpy()\n    labels = output[\"labels\"].detach().cpu().numpy()\n    labels = [conf.hyper.classes[i] for i in labels]\n\n    _, ax = plt.subplots(figsize=(20, 10))\n\n    ax.pcolormesh(img, cmap=\"magma\")\n\n    for i, box in enumerate(boxes):\n        if scores[i] > threshold:\n            ax.scatter(\n                box[0],\n                box[1],\n            )\n            ax.add_patch(\n                Rectangle(\n                    box[:2],\n                    box[2],\n                    box[3],\n                    fill=False,\n                    color=\"white\",\n                    linewidth=1,\n                ),\n            )\n            ax.text(\n                box[0],\n                box[1],\n                f\"{scores[i]:.2f}\",\n                color=\"black\",\n                fontsize=8,\n                bbox={\"facecolor\":\"white\", \"alpha\":1},\n            )\n    plt.axis(\"off\")\n    plt.savefig(save_path, dpi=300, bbox_inches=\"tight\", pad_inches=0)\n    plt.close()\n
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.spec_to_image","title":"spec_to_image(spec)","text":"

Convert a spectrogram to an image.

Add 3 color channels, normalize to 0-1, etc.

"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.spec_to_image--parameters","title":"Parameters","text":"
  • spec : torch.Tensor
"},{"location":"api/detect_chirps/#chirpdetector.detect_chirps.spec_to_image--returns","title":"Returns","text":"
  • torch.Tensor
Source code in chirpdetector/detect_chirps.py
def spec_to_image(spec: torch.Tensor) -> torch.Tensor:\n    \"\"\"Convert a spectrogram to an image.\n\n    Add 3 color channels, normalize to 0-1, etc.\n\n    Parameters\n    ----------\n    - `spec` : `torch.Tensor`\n\n    Returns\n    -------\n    - `torch.Tensor`\n    \"\"\"\n    # make sure the spectrogram is a tensor\n    if not isinstance(spec, torch.Tensor):\n        msg = (\n            \"The spectrogram must be a torch.Tensor.\\n\"\n            f\"Type of spectrogram: {type(spec)}\"\n        )\n        raise TypeError(msg)\n\n    # make sure the spectrogram is 2-dimensional\n    spec_dims = 2\n    if len(spec.size()) != spec_dims:\n        msg = (\n            \"The spectrogram must be a 2-dimensional matrix.\\n\"\n            f\"Shape of spectrogram: {spec.size()}\"\n        )\n        raise ValueError(msg)\n\n    # make sure the spectrogram contains some data\n    if (\n        np.max(spec.detach().cpu().numpy())\n        - np.min(spec.detach().cpu().numpy())\n        == 0\n    ):\n        msg = (\n            \"The spectrogram must contain some data.\\n\"\n            f\"Max value: {np.max(spec.detach().cpu().numpy())}\\n\"\n            f\"Min value: {np.min(spec.detach().cpu().numpy())}\"\n        )\n        raise ValueError(msg)\n\n    # Get the dimensions of the original matrix\n    original_shape = spec.size()\n\n    # Calculate the number of rows and columns in the matrix\n    num_rows, num_cols = original_shape\n\n    # duplicate the matrix 3 times\n    spec = spec.repeat(3, 1, 1)\n\n    # Reshape the matrix to the desired shape (3, num_rows, num_cols)\n    desired_shape = (3, num_rows, num_cols)\n    reshaped_tensor = spec.view(desired_shape)\n\n    # normalize the spectrogram to be between 0 and 1\n    normalized_tensor = (reshaped_tensor - reshaped_tensor.min()) / (\n        reshaped_tensor.max() - reshaped_tensor.min()\n    )\n\n    # make sure image is float32\n    return normalized_tensor.float()\n
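A minimal usage sketch: a 2D spectrogram tensor becomes a 3-channel float32 image tensor, min-max normalized to the 0-1 range:

import torch

spec = torch.rand(256, 512)
img = spec_to_image(spec)
print(img.shape, img.dtype, float(img.min()), float(img.max()))
# torch.Size([3, 256, 512]) torch.float32 0.0 1.0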
"},{"location":"api/plot_detections/","title":"plot_detections","text":"

Functions to visualize detections on images.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_all_plots_cli","title":"clean_all_plots_cli(path)","text":"

Remove all plots from the chirpdetections folder.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_all_plots_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the config file.

Source code in chirpdetector/plot_detections.py
def clean_all_plots_cli(path: pathlib.Path) -> None:\n    \"\"\"Remove all plots from the chirpdetections folder.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the config file.\n    \"\"\"\n    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]\n    with prog:\n        task = prog.add_task(\"Cleaning plots...\", total=len(dirs))\n        for dataset in dirs:\n            prog.console.log(f\"Cleaning plots for {dataset.name}\")\n            clean_plots_cli(dataset)\n            prog.advance(task)\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_plots_cli","title":"clean_plots_cli(path)","text":"

Remove all plots from the chirpdetections folder.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.clean_plots_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the config file.

Source code in chirpdetector/plot_detections.py
def clean_plots_cli(path: pathlib.Path) -> None:\n    \"\"\"Remove all plots from the chirpdetections folder.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the config file.\n    \"\"\"\n    savepath = path / \"chirpdetections\"\n    for f in savepath.iterdir():\n        f.unlink()\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_all_detections_cli","title":"plot_all_detections_cli(path)","text":"

Plot detections on images.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_all_detections_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the config file.

Source code in chirpdetector/plot_detections.py
def plot_all_detections_cli(path: pathlib.Path) -> None:\n    \"\"\"Plot detections on images.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the config file.\n    \"\"\"\n    conf = load_config(path / \"chirpdetector.toml\")\n\n    dirs = [dataset for dataset in path.iterdir() if dataset.is_dir()]\n    with prog:\n        task = prog.add_task(\"Plotting detections...\", total=len(dirs))\n        for dataset in dirs:\n            prog.console.log(f\"Plotting detections for {dataset.name}\")\n            data = load(dataset)\n            chirp_df = pd.read_csv(dataset / \"chirpdetector_bboxes.csv\")\n            plot_detections(data, chirp_df, conf)\n            prog.advance(task)\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections","title":"plot_detections(data, chirp_df, conf)","text":"

Plot detections on spectrograms.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections--parameters","title":"Parameters","text":"

data : Dataset The dataset. chirp_df : pd.DataFrame The dataframe containing the chirp detections. conf : Config The config file.

Source code in chirpdetector/plot_detections.py
def plot_detections(\n    data: Dataset,\n    chirp_df: pd.DataFrame,\n    conf: Config,\n) -> None:\n    \"\"\"Plot detections on spectrograms.\n\n    Parameters\n    ----------\n    data : Dataset\n        The dataset.\n    chirp_df : pd.DataFrame\n        The dataframe containing the chirp detections.\n    conf : Config\n        The config file.\n    \"\"\"\n    time_window = 15\n    n_electrodes = data.grid.rec.shape[1]\n\n    nfft = freqres_to_nfft(conf.spec.freq_res, data.grid.samplerate)  # samples\n    hop_len = overlap_to_hoplen(conf.spec.overlap_frac, nfft)  # samples\n    chunksize = time_window * data.grid.samplerate  # samples\n    nchunks = np.ceil(data.grid.rec.shape[0] / chunksize).astype(int)\n    window_overlap_samples = int(conf.spec.spec_overlap * data.grid.samplerate)\n\n    for chunk_no in range(nchunks):\n        # get start and stop indices for the current chunk\n        # including some overlap to compensate for edge effects\n        # this differs for the first and last chunk\n\n        if chunk_no == 0:\n            idx1 = int(chunk_no * chunksize)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n        elif chunk_no == nchunks - 1:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize)\n        else:\n            idx1 = int(chunk_no * chunksize - window_overlap_samples)\n            idx2 = int((chunk_no + 1) * chunksize + window_overlap_samples)\n\n        # idx1 and idx2 now determine the window I cut out of the raw signal\n        # to compute the spectrogram of.\n\n        # compute the time and frequency axes of the spectrogram now that we\n        # include the start and stop indices of the current chunk and thus the\n        # right start and stop time. The `spectrogram` function does not know\n        # about this and would start every time axis at 0.\n        spec_times = np.arange(idx1, idx2 + 1, hop_len) / data.grid.samplerate\n        spec_freqs = np.arange(0, nfft / 2 + 1) * data.grid.samplerate / nfft\n\n        # create a subset from the grid dataset\n        if idx2 > data.grid.rec.shape[0]:\n            idx2 = data.grid.rec.shape[0] - 1\n        chunk = subset(data, idx1, idx2, mode=\"index\")\n\n        # don't plot chunks without chirps\n        if len(chunk.com.chirp.times) == 0:\n            continue\n\n        # compute the spectrogram for each electrode of the current chunk\n        spec = torch.zeros((len(spec_freqs), len(spec_times)))\n        for el in range(n_electrodes):\n            # get the signal for the current electrode\n            sig = chunk.grid.rec[:, el]\n\n            # compute the spectrogram for the current electrode\n            chunk_spec, _, _ = spectrogram(\n                data=sig.copy(),\n                samplingrate=data.grid.samplerate,\n                nfft=nfft,\n                hop_length=hop_len,\n            )\n\n            # sum spectrogram over all electrodes\n            if el == 0:\n                spec = chunk_spec\n            else:\n                spec += chunk_spec\n\n        # normalize spectrogram by the number of electrodes\n        spec /= n_electrodes\n\n        # convert the spectrogram to dB\n        spec = decibel(spec)\n        spec = spec.detach().cpu().numpy()\n\n        # Set y limits\n        flims = (\n            np.min(data.track.freqs) - 200,\n            np.max(data.track.freqs) + 700,\n        )\n        spec = spec[(spec_freqs >= flims[0]) & (spec_freqs <= flims[1]), :]\n        spec_freqs = spec_freqs[\n            (spec_freqs >= flims[0]) & (spec_freqs <= flims[1])\n        ]\n\n        # Extract the bounding boxes for the current chunk\n        chunk_t1 = idx1 / data.grid.samplerate\n        chunk_t2 = idx2 / data.grid.samplerate\n        chunk_df = chirp_df[\n            (chirp_df[\"t1\"] >= chunk_t1) & (chirp_df[\"t2\"] <= chunk_t2)\n        ]\n\n        # get t1, t2, f1, f2 from chunk_df\n        bboxes = chunk_df[[\"score\", \"t1\", \"f1\", \"t2\", \"f2\"]].to_numpy()\n\n        # get chirp times and chirp ids\n        chirp_times = chunk_df[\"envelope_trough_time\"]\n        chirp_ids = chunk_df[\"assigned_track\"]\n\n        _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)\n\n        # plot bounding boxes\n        ax.imshow(\n            spec,\n            aspect=\"auto\",\n            origin=\"lower\",\n            interpolation=\"gaussian\",\n            extent=[\n                spec_times[0],\n                spec_times[-1],\n                spec_freqs[0],\n                spec_freqs[-1],\n            ],\n            cmap=\"magma\",\n            vmin=-80,\n            vmax=-45,\n        )\n        for bbox in bboxes:\n            ax.add_patch(\n                Rectangle(\n                    (bbox[1], bbox[2]),\n                    bbox[3] - bbox[1],\n                    bbox[4] - bbox[2],\n                    fill=False,\n                    color=\"gray\",\n                    linewidth=1,\n                    label=\"faster-R-CNN predictions\",\n                ),\n            )\n            ax.text(\n                bbox[1],\n                bbox[4] + 15,\n                f\"{bbox[0]:.2f}\",\n                color=\"gray\",\n                fontsize=10,\n                verticalalignment=\"bottom\",\n                horizontalalignment=\"left\",\n                rotation=90,\n            )\n\n        # plot chirp times and frequency traces\n        for track_id in np.unique(data.track.idents):\n            ctimes = chirp_times[chirp_ids == track_id]\n\n            freqs = data.track.freqs[data.track.idents == track_id]\n            times = data.track.times[\n                data.track.indices[data.track.idents == track_id]\n            ]\n            freqs = freqs[\n                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)\n            ]\n            times = times[\n                (times >= spec_times[0] - 10) & (times <= spec_times[-1] + 10)\n            ]\n\n            # get freqs where times are closest to ctimes\n            cfreqs = np.zeros_like(ctimes)\n            for i, ctime in enumerate(ctimes):\n                try:\n                    indx = np.argmin(np.abs(times - ctime))\n                    cfreqs[i] = freqs[indx]\n                except ValueError:\n                    msg = (\n                        \"Failed to find track time closest to chirp time \"\n                        f\"in chunk {chunk_no}, check the plots.\"\n                    )\n                    prog.console.log(msg)\n\n            if len(times) != 0:\n                ax.plot(\n                    times,\n                    freqs,\n                    lw=2,\n                    color=\"black\",\n                    label=\"Frequency traces\",\n                )\n\n            ax.scatter(\n                ctimes,\n                cfreqs,\n                marker=\"o\",\n                lw=1,\n                facecolor=\"white\",\n                edgecolor=\"black\",\n                s=25,\n                zorder=10,\n                label=\"Chirp assignments\",\n            )\n\n        ax.set_ylim(flims[0] + 5, flims[1] - 5)\n        ax.set_xlim([spec_times[0], spec_times[-1]])\n        ax.set_xlabel(\"Time [s]\", fontsize=12)\n        ax.set_ylabel(\"Frequency [Hz]\", fontsize=12)\n\n        handles, labels = plt.gca().get_legend_handles_labels()\n        by_label = dict(zip(labels, handles))\n        plt.legend(\n            by_label.values(),\n            by_label.keys(),\n            bbox_to_anchor=(0.5, 1.02),\n            loc=\"lower center\",\n            mode=\"None\",\n            borderaxespad=0,\n            ncol=3,\n            fancybox=False,\n            framealpha=0,\n        )\n\n        savepath = data.path / \"chirpdetections\"\n        savepath.mkdir(exist_ok=True)\n        plt.savefig(\n            savepath / f\"cpd_{chunk_no}.png\",\n            dpi=300,\n            bbox_inches=\"tight\",\n        )\n\n        plt.close()\n        plt.clf()\n        plt.cla()\n        plt.close(\"all\")\n
"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections_cli","title":"plot_detections_cli(path)","text":"

Plot detections on images.

"},{"location":"api/plot_detections/#chirpdetector.plot_detections.plot_detections_cli--parameters","title":"Parameters","text":"

path : pathlib.Path Path to the config file.

Source code in chirpdetector/plot_detections.py
def plot_detections_cli(path: pathlib.Path) -> None:\n    \"\"\"Plot detections on images.\n\n    Parameters\n    ----------\n    path : pathlib.Path\n        Path to the config file.\n    \"\"\"\n    conf = load_config(path.parent / \"chirpdetector.toml\")\n    data = load(path)\n    chirp_df = pd.read_csv(path / \"chirpdetector_bboxes.csv\")\n    plot_detections(data, chirp_df, conf)\n
"},{"location":"api/train_model/","title":"train_model","text":""},{"location":"api/train_model/#chirpdetector.train_model--train-the-faster-r-cnn-model","title":"Train the faster-R-CNN model.","text":"

Train and test the neural network specified in the config file.

"},{"location":"api/train_model/#chirpdetector.train_model.plot_epochs","title":"plot_epochs(epoch_train_loss, epoch_val_loss, epoch_avg_train_loss, epoch_avg_val_loss, path)","text":"

Plot the loss for each epoch.

"},{"location":"api/train_model/#chirpdetector.train_model.plot_epochs--parameters","title":"Parameters","text":"
  • epoch_train_loss: list The training loss for each epoch.
  • epoch_val_loss: list The validation loss for each epoch.
  • epoch_avg_train_loss: list The average training loss for each epoch.
  • epoch_avg_val_loss: list The average validation loss for each epoch.
  • path: pathlib.Path The path to save the plot to.
"},{"location":"api/train_model/#chirpdetector.train_model.plot_epochs--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def plot_epochs(\n    epoch_train_loss: list,\n    epoch_val_loss: list,\n    epoch_avg_train_loss: list,\n    epoch_avg_val_loss: list,\n    path: pathlib.Path,\n) -> None:\n    \"\"\"Plot the loss for each epoch.\n\n    Parameters\n    ----------\n    - `epoch_train_loss`: `list`\n        The training loss for each epoch.\n    - `epoch_val_loss`: `list`\n        The validation loss for each epoch.\n    - `epoch_avg_train_loss`: `list`\n        The average training loss for each epoch.\n    - `epoch_avg_val_loss`: `list`\n        The average validation loss for each epoch.\n    - `path`: `pathlib.Path`\n        The path to save the plot to.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    _, ax = plt.subplots(1, 2, figsize=(10, 5), constrained_layout=True)\n\n    x_train = np.arange(len(epoch_train_loss[0])) + 1\n    x_val = np.arange(len(epoch_val_loss[0])) + len(epoch_train_loss[0]) + 1\n\n    for train_loss, val_loss in zip(epoch_train_loss, epoch_val_loss):\n        ax[0].plot(x_train, train_loss, c=\"tab:blue\", label=\"_\")\n        ax[0].plot(x_val, val_loss, c=\"tab:orange\", label=\"_\")\n        x_train = np.arange(len(epoch_train_loss[0])) + x_val[-1]\n        x_val = np.arange(len(epoch_val_loss[0])) + x_train[-1]\n\n    x_avg = np.arange(len(epoch_avg_train_loss)) + 1\n    ax[1].plot(\n        x_avg,\n        epoch_avg_train_loss,\n        label=\"Training Loss\",\n        c=\"tab:blue\",\n    )\n    ax[1].plot(\n        x_avg,\n        epoch_avg_val_loss,\n        label=\"Validation Loss\",\n        c=\"tab:orange\",\n    )\n\n    ax[0].set_ylabel(\"Loss\")\n    ax[0].set_xlabel(\"Batch\")\n    ax[0].set_ylim(bottom=0)\n    ax[0].set_title(\"Loss per batch\")\n\n    ax[1].set_ylabel(\"Loss\")\n    ax[1].set_xlabel(\"Epoch\")\n    ax[1].legend()\n    ax[1].set_ylim(bottom=0)\n    ax[1].set_title(\"Avg loss per epoch\")\n\n    plt.savefig(path)\n    plt.close()\n
"},{"location":"api/train_model/#chirpdetector.train_model.plot_folds","title":"plot_folds(fold_avg_train_loss, fold_avg_val_loss, path)","text":"

Plot the loss for each fold.

"},{"location":"api/train_model/#chirpdetector.train_model.plot_folds--parameters","title":"Parameters","text":"
  • fold_avg_train_loss: list The average training loss for each fold.
  • fold_avg_val_loss: list The average validation loss for each fold.
  • path: pathlib.Path The path to save the plot to.
"},{"location":"api/train_model/#chirpdetector.train_model.plot_folds--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def plot_folds(\n    fold_avg_train_loss: list,\n    fold_avg_val_loss: list,\n    path: pathlib.Path,\n) -> None:\n    \"\"\"Plot the loss for each fold.\n\n    Parameters\n    ----------\n    - `fold_avg_train_loss`: `list`\n        The average training loss for each fold.\n    - `fold_avg_val_loss`: `list`\n        The average validation loss for each fold.\n    - `path`: `pathlib.Path`\n        The path to save the plot to.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    _, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)\n\n    for train_loss, val_loss in zip(fold_avg_train_loss, fold_avg_val_loss):\n        x = np.arange(len(train_loss)) + 1\n        ax.plot(x, train_loss, c=\"tab:blue\", alpha=0.3, label=\"_\")\n        ax.plot(x, val_loss, c=\"tab:orange\", alpha=0.3, label=\"_\")\n\n    avg_train = np.mean(fold_avg_train_loss, axis=0)\n    avg_val = np.mean(fold_avg_val_loss, axis=0)\n    x = np.arange(len(avg_train)) + 1\n    ax.plot(\n        x,\n        avg_train,\n        label=\"Training Loss\",\n        c=\"tab:blue\",\n    )\n    ax.plot(\n        x,\n        avg_val,\n        label=\"Validation Loss\",\n        c=\"tab:orange\",\n    )\n\n    ax.set_ylabel(\"Loss\")\n    ax.set_xlabel(\"Epoch\")\n    ax.legend()\n    ax.set_ylim(bottom=0)\n\n    plt.savefig(path)\n    plt.close()\n
"},{"location":"api/train_model/#chirpdetector.train_model.save_model","title":"save_model(epoch, model, optimizer, path)","text":"

Save the model state dict.

"},{"location":"api/train_model/#chirpdetector.train_model.save_model--parameters","title":"Parameters","text":"
  • epoch: int The current epoch.
  • model: torch.nn.Module The model to save.
  • optimizer: torch.optim.Optimizer The optimizer to save.
  • path: str The path to save the model to.
"},{"location":"api/train_model/#chirpdetector.train_model.save_model--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def save_model(\n    epoch: int,\n    model: torch.nn.Module,\n    optimizer: torch.optim.Optimizer,\n    path: str,\n) -> None:\n    \"\"\"Save the model state dict.\n\n    Parameters\n    ----------\n    - `epoch`: `int`\n        The current epoch.\n    - `model`: `torch.nn.Module`\n        The model to save.\n    - `optimizer`: `torch.optim.Optimizer`\n        The optimizer to save.\n    - `path`: `str`\n        The path to save the model to.\n\n    Returns\n    -------\n    - `None`\n    \"\"\"\n    path = pathlib.Path(path)\n    path.mkdir(parents=True, exist_ok=True)\n    torch.save(\n        {\n            \"epoch\": epoch,\n            \"model_state_dict\": model.state_dict(),\n            \"optimizer_state_dict\": optimizer.state_dict(),\n        },\n        path / \"model.pt\",\n    )\n
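A minimal sketch of loading such a checkpoint back, mirroring what detect_chirps does above; the path is hypothetical and "model" is assumed to be a network built via load_fasterrcnn:

import torch

checkpoint = torch.load("models/model.pt", map_location="cpu")  # hypothetical path
model.load_state_dict(checkpoint["model_state_dict"])  # model from load_fasterrcnn
resume_epoch = checkpoint["epoch"] + 1  # continue counting after the saved epoch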
"},{"location":"api/train_model/#chirpdetector.train_model.train","title":"train(config, mode='pretrain')","text":"

Train the model.

"},{"location":"api/train_model/#chirpdetector.train_model.train--parameters","title":"Parameters","text":"
  • config: Config The config file.
  • mode: str The mode to train in. Either pretrain or finetune.
"},{"location":"api/train_model/#chirpdetector.train_model.train--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def train(config: Config, mode: str = "pretrain") -> None:
    """Train the model.

    Parameters
    ----------
    - `config`: `Config`
        The config file.
    - `mode`: `str`
        The mode to train in. Either `pretrain` or `finetune`.

    Returns
    -------
    - `None`
    """
    # Load a pretrained model from pytorch if in pretrain mode,
    # otherwise open an already trained model from the
    # model state dict.
    assert mode in ["pretrain", "finetune"]
    if mode == "pretrain":
        assert config.train.datapath is not None
        datapath = config.train.datapath
    elif mode == "finetune":
        assert config.finetune.datapath is not None
        datapath = config.finetune.datapath

    # Check if the path to the data actually exists
    if not pathlib.Path(datapath).exists():
        raise FileNotFoundError(f"Path {datapath} does not exist.")

    # Initialize the logger and make it global
    global logger
    logger = make_logger(
        __name__,
        pathlib.Path(config.path).parent / "chirpdetector.log",
    )

    # Get the device (e.g. GPU or CPU)
    device = get_device()

    # Print information about starting training
    progress.console.rule("Starting training")
    msg = (
        f"Device: {device}, Config: {config.path},"
        f" Mode: {mode}, Data: {datapath}"
    )
    progress.console.log(msg)
    logger.info(msg)

    # Initialize the dataset
    data = CustomDataset(
        path=datapath,
        classes=config.hyper.classes,
    )

    # Initialize the k-fold cross-validation
    splits = KFold(n_splits=config.hyper.kfolds, shuffle=True, random_state=42)

    # Initialize the best validation loss to infinity
    best_val_loss = float("inf")

    # Iterate over the folds for k-fold cross-validation
    with progress:
        # Save losses across all epochs and folds
        fold_train_loss = []
        fold_val_loss = []
        fold_avg_train_loss = []
        fold_avg_val_loss = []

        # Add a k-folds progress bar that runs alongside the epochs bar
        task_folds = progress.add_task(
            f"[blue]{config.hyper.kfolds}-Fold Crossvalidation",
            total=config.hyper.kfolds,
        )

        # Iterate over the folds
        for fold, (train_idx, val_idx) in enumerate(
            splits.split(np.arange(len(data))),
        ):
            # Initialize the model
            model = load_fasterrcnn(num_classes=len(config.hyper.classes)).to(
                device,
            )

            # If the mode is finetune, load the model state dict from
            # previous training
            if mode == "finetune":
                modelpath = pathlib.Path(config.hyper.modelpath) / "model.pt"
                checkpoint = torch.load(modelpath, map_location=device)
                model.load_state_dict(checkpoint["model_state_dict"])

            # Initialize the stochastic gradient descent optimizer
            params = [p for p in model.parameters() if p.requires_grad]
            optimizer = torch.optim.SGD(
                params,
                lr=config.hyper.learning_rate,
                momentum=config.hyper.momentum,
                weight_decay=config.hyper.weight_decay,
            )

            # Make train and validation dataloaders for the current fold
            train_data = torch.utils.data.Subset(data, train_idx)
            val_data = torch.utils.data.Subset(data, val_idx)

            # This one is for training
            train_loader = DataLoader(
                train_data,
                batch_size=config.hyper.batch_size,
                shuffle=True,
                num_workers=config.hyper.num_workers,
                collate_fn=collate_fn,
            )

            # This one is only for validation
            val_loader = DataLoader(
                val_data,
                batch_size=config.hyper.batch_size,
                shuffle=True,
                num_workers=config.hyper.num_workers,
                collate_fn=collate_fn,
            )

            # Save losses across all epochs of this fold
            epoch_avg_train_loss = []
            epoch_avg_val_loss = []
            epoch_train_loss = []
            epoch_val_loss = []

            # Train the model for the specified number of epochs
            task_epochs = progress.add_task(
                f"{config.hyper.num_epochs} Epochs for fold k={fold + 1}",
                total=config.hyper.num_epochs,
            )

            # Iterate across n epochs
            for epoch in range(config.hyper.num_epochs):
                # Print information about the current epoch
                msg = (
                    f"Training epoch {epoch + 1} of {config.hyper.num_epochs} "
                    f"for fold {fold + 1} of {config.hyper.kfolds}"
                )
                progress.console.log(msg)
                logger.info(msg)

                # Train for one epoch
                train_loss = train_epoch(
                    dataloader=train_loader,
                    device=device,
                    model=model,
                    optimizer=optimizer,
                )

                # Validate for one epoch
                _, val_loss = val_epoch(
                    dataloader=val_loader,
                    device=device,
                    model=model,
                )

                # Save the per-batch losses for this epoch
                epoch_train_loss.append(train_loss)
                epoch_val_loss.append(val_loss)

                # Save the median loss for this epoch
                epoch_avg_train_loss.append(np.median(train_loss))
                epoch_avg_val_loss.append(np.median(val_loss))

                # Save the model if it is the best so far
                if np.mean(val_loss) < best_val_loss:
                    best_val_loss = np.mean(val_loss)

                    msg = (
                        f"New best validation loss: {best_val_loss:.4f}, "
                        "saving model..."
                    )
                    progress.console.log(msg)
                    logger.info(msg)

                    save_model(
                        epoch=epoch,
                        model=model,
                        optimizer=optimizer,
                        path=config.hyper.modelpath,
                    )

                # Plot the losses for this epoch
                plot_epochs(
                    epoch_train_loss=epoch_train_loss,
                    epoch_val_loss=epoch_val_loss,
                    epoch_avg_train_loss=epoch_avg_train_loss,
                    epoch_avg_val_loss=epoch_avg_val_loss,
                    path=pathlib.Path(config.hyper.modelpath)
                    / f"fold{fold + 1}.png",
                )

                # Update the progress bar for the epochs
                progress.update(task_epochs, advance=1)

            # Hide the epochs progress bar when the fold is done
            progress.update(task_epochs, visible=False)

            # Save the losses for this fold
            fold_train_loss.append(epoch_train_loss)
            fold_val_loss.append(epoch_val_loss)
            fold_avg_train_loss.append(epoch_avg_train_loss)
            fold_avg_val_loss.append(epoch_avg_val_loss)

            # Plot the losses across folds
            plot_folds(
                fold_avg_train_loss=fold_avg_train_loss,
                fold_avg_val_loss=fold_avg_val_loss,
                path=pathlib.Path(config.hyper.modelpath) / "losses.png",
            )

            # Update the progress bar for the folds
            progress.update(task_folds, advance=1)

        # Hide the folds progress bar when training is done
        progress.update(task_folds, visible=False)

        # Print summary information about the training
        msg = (
            "Average validation loss across epochs and folds: "
            f"{np.mean(fold_val_loss):.4f}"
        )
        progress.console.log(msg)
        logger.info(msg)
        progress.console.rule("[bold blue]Finished training")
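For orientation, here is a minimal usage sketch of how `train` is driven. The config filename is hypothetical, and the import location of `load_config` is an assumption based on `train_cli` below; only `train` itself is documented on this page.

# Hedged usage sketch: drive train() the same way train_cli() does.
# Assumptions: load_config is importable from chirpdetector.train_model
# (train_cli below calls it), and "chirpdetector.toml" is a placeholder
# config filename.
import pathlib

from chirpdetector.train_model import load_config, train

config = load_config(pathlib.Path("chirpdetector.toml"))

# Pretrain on config.train.datapath, saving the best checkpoint to
# config.hyper.modelpath ...
train(config, mode="pretrain")

# ... then finetune on config.finetune.datapath, starting from that
# checkpoint.
train(config, mode="finetune")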
"},{"location":"api/train_model/#chirpdetector.train_model.train_cli","title":"train_cli(config_path, mode)","text":"

Train the model from the command line.

"},{"location":"api/train_model/#chirpdetector.train_model.train_cli--parameters","title":"Parameters","text":"
  • config_path: pathlib.Path
    The path to the config file.
  • mode: str
    The mode to train in. Either pretrain or finetune.
"},{"location":"api/train_model/#chirpdetector.train_model.train_cli--returns","title":"Returns","text":"
  • None
Source code in chirpdetector/train_model.py
def train_cli(config_path: pathlib.Path, mode: str) -> None:
    """Train the model from the command line.

    Parameters
    ----------
    - `config_path`: `pathlib.Path`
        The path to the config file.
    - `mode`: `str`
        The mode to train in. Either `pretrain` or `finetune`.

    Returns
    -------
    - `None`
    """
    config = load_config(config_path)
    train(config, mode=mode)
"},{"location":"api/train_model/#chirpdetector.train_model.train_epoch","title":"train_epoch(dataloader, device, model, optimizer)","text":"

Train the model for one epoch.

"},{"location":"api/train_model/#chirpdetector.train_model.train_epoch--parameters","title":"Parameters","text":"
  • dataloader: DataLoader
    The dataloader for the training data.
  • device: torch.device
    The device to train on.
  • model: torch.nn.Module
    The model to train.
  • optimizer: torch.optim.Optimizer
    The optimizer to use.
"},{"location":"api/train_model/#chirpdetector.train_model.train_epoch--returns","title":"Returns","text":"
  • train_loss: List
    The training loss for each batch.
Source code in chirpdetector/train_model.py
def train_epoch(
    dataloader: DataLoader,
    device: torch.device,
    model: torch.nn.Module,
    optimizer: torch.optim.Optimizer,
) -> List:
    """Train the model for one epoch.

    Parameters
    ----------
    - `dataloader`: `DataLoader`
        The dataloader for the training data.
    - `device`: `torch.device`
        The device to train on.
    - `model`: `torch.nn.Module`
        The model to train.
    - `optimizer`: `torch.optim.Optimizer`
        The optimizer to use.

    Returns
    -------
    - `train_loss`: `List`
        The training loss for each batch.
    """
    train_loss = []

    for samples, targets in dataloader:
        images = [sample.to(device) for sample in samples]
        targets = [
            {k: v.to(device) for k, v in t.items() if k != "image_name"}
            for t in targets
        ]

        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        train_loss.append(losses.item())

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()

    return train_loss
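The dataloaders built in `train` pass a `collate_fn` that is not shown in this excerpt. Detection batches cannot be stacked into a single tensor because image sizes and the number of boxes per target vary, so a typical collate function simply regroups the pairs. The following is a plausible sketch of such a function, not necessarily the project's exact implementation:

from typing import Any, Dict, List, Tuple


def collate_fn(batch: List[Tuple[Any, Dict]]) -> Tuple[tuple, ...]:
    # Regroup a list of (image, target) pairs into (images, targets)
    # without stacking: each image tensor and target dict stays intact.
    return tuple(zip(*batch))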
"},{"location":"api/train_model/#chirpdetector.train_model.val_epoch","title":"val_epoch(dataloader, device, model)","text":"

Validate the model for one epoch.

"},{"location":"api/train_model/#chirpdetector.train_model.val_epoch--parameters","title":"Parameters","text":"
  • dataloader: DataLoader
    The dataloader for the validation data.
  • device: torch.device
    The device to run validation on.
  • model: torch.nn.Module
    The model to validate.
"},{"location":"api/train_model/#chirpdetector.train_model.val_epoch--returns","title":"Returns","text":"
  • loss_dict: dict
    The loss dictionary of the last processed batch.
  • val_loss: List
    The validation loss for each batch.
Source code in chirpdetector/train_model.py
def val_epoch(
    dataloader: DataLoader,
    device: torch.device,
    model: torch.nn.Module,
) -> Tuple[Dict, List]:
    """Validate the model for one epoch.

    Parameters
    ----------
    - `dataloader`: `DataLoader`
        The dataloader for the validation data.
    - `device`: `torch.device`
        The device to run validation on.
    - `model`: `torch.nn.Module`
        The model to validate.

    Returns
    -------
    - `loss_dict`: `dict`
        The loss dictionary of the last processed batch.
    - `val_loss`: `List`
        The validation loss for each batch.
    """
    val_loss = []
    for samples, targets in dataloader:
        images = [sample.to(device) for sample in samples]
        targets = [
            {k: v.to(device) for k, v in t.items() if k != "image_name"}
            for t in targets
        ]

        with torch.inference_mode():
            loss_dict = model(images, targets)

        losses = sum(loss for loss in loss_dict.values())
        val_loss.append(losses.item())

    return loss_dict, val_loss
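Note that `val_epoch` leaves the model in training mode: torchvision's detection models return a loss dictionary only when called with targets in training mode, while in eval mode they return predictions instead. The `torch.inference_mode()` context still disables gradient tracking, so no weights are updated. A small illustration of the two behaviors, using a stock torchvision model as a stand-in (shapes and class count are arbitrary):

import torch
from torchvision.models.detection import fasterrcnn_resnet50_fpn

model = fasterrcnn_resnet50_fpn(weights=None, num_classes=2)
images = [torch.rand(3, 256, 256)]
targets = [{
    "boxes": torch.tensor([[10.0, 10.0, 50.0, 50.0]]),
    "labels": torch.tensor([1]),
}]

model.train()
with torch.inference_mode():
    loss_dict = model(images, targets)  # dict of named loss tensors

model.eval()
with torch.inference_mode():
    predictions = model(images)  # list of dicts: boxes, labels, scores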
"}]} \ No newline at end of file diff --git a/setup/index.html b/setup/index.html index c9355a8..796c190 100644 --- a/setup/index.html +++ b/setup/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Setup

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Setup

- + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index e6a87ac..8c1be3e 100644 Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ diff --git a/training/index.html b/training/index.html index 4cf75a7..ae5217c 100644 --- a/training/index.html +++ b/training/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Training

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Training

- + \ No newline at end of file diff --git a/visualization/index.html b/visualization/index.html index 390f737..378759c 100644 --- a/visualization/index.html +++ b/visualization/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Visualization

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Visualization

- + \ No newline at end of file diff --git a/yolo-helpers/index.html b/yolo-helpers/index.html index 011cc24..e88bf27 100644 --- a/yolo-helpers/index.html +++ b/yolo-helpers/index.html @@ -55,7 +55,18 @@ - + @@ -970,7 +981,7 @@

Helper commands

- +

Wow, such empty 😮

@@ -1058,5 +1069,5 @@

Helper commands

- + \ No newline at end of file