# === mozaik/experiments/vision.py ===

class MeasureNaturalImagesWithEyeMovement(VisualExperiment):
    """
    Stimulate the model with natural images with simulated eye movement.

    Each trial presents a static natural image translated along a
    pre-recorded eye-movement path, so successive trials can show
    different images and different paths.  Eye paths are read from a
    Junji-format HDF5 file, which allows integration with data from real
    eye-tracking experiments; the positions and their sampling period
    are passed directly to the stimulus.

    Parameters
    ----------
    model : Model
        The model on which to execute the experiment.

    Other parameters
    ----------------
    stimulus_duration : float
        The maximum duration (ms) of a single stimulus presentation.

    num_trials : int
        Number of trials; each trial shows the image and eye path
        recorded for the corresponding trial in the data file.

    size : float
        The size of the image in degrees of visual field.

    data_dir : str
        Directory containing the HDF5 eye-position data file.

    data_filename : str
        Name of the HDF5 file holding, per trial, the eye positions,
        trial start/end times, success flag and stimulus image id.

    stim_dir : str
        Root directory of the stimulus image sets; a trial's image is
        resolved as ``<stim_dir>/<stimulus_set_name>/<image_id>.png``.

    Notes
    -----
    Trials flagged as unsuccessful in the data file are skipped.  The
    eye-movement sampling period of each trial is derived from its
    start/end times divided by the number of recorded samples.
    """

    required_parameters = ParameterSet({
        'stimulus_duration': float,
        'num_trials': int,
        'size': float,
        'data_dir': str,
        'data_filename': str,
        'stim_dir': str,
    })

    def __init__(self, model, parameters):
        VisualExperiment.__init__(self, model, parameters)
        # Imported locally so that models not using this experiment do
        # not require h5py at module import time.
        import os
        import h5py
        import numpy as np

        data_path = os.path.join(self.parameters.data_dir,
                                 self.parameters.data_filename)
        # Open the data file once for all trials rather than re-opening
        # it on every loop iteration.
        with h5py.File(data_path, 'r') as data:
            stimsetname = data['/stimulus_set_name'][...].astype('str')
            for k in range(self.parameters.num_trials):
                # Trials are stored 1-based in the data file.
                trial_label = f"/trial{k + 1}"
                # Skip unsuccessful trials before reading the rest of
                # the trial's datasets.
                if not data[trial_label + "/success"][...]:
                    continue

                eyepos = data[trial_label + "/eye_position"][...]
                num_samples = eyepos.shape[0]
                t_start = data[trial_label + "/trial_start_time"][...]
                t_end = data[trial_label + "/trial_end_time"][...]
                # Sampling period (ms) of the recorded eye positions.
                period = (t_end - t_start) / num_samples
                image_id = (
                    f"{self.parameters.stim_dir}/{stimsetname}/"
                    f"{data[trial_label + '/stimulus_image_id'][...]}.png")

                # TODO(review): this mask only drops the two endpoint
                # samples of linspace; positions outside the image
                # presentation window (img_on/img_off) are not yet
                # filtered out, as the original TODO intended.
                times = np.linspace(t_start, t_end, num_samples)
                mask_fv = (t_start < times) & (times < t_end)
                filtered_eyepos = eyepos[mask_fv]
                eye_path = MozaikExtendedParameterSet({
                    "x": [pos[0] for pos in filtered_eyepos],
                    "y": [pos[1] for pos in filtered_eyepos],
                })

                # NOTE(review): the stray `img_id` keyword previously
                # passed here is not a declared parameter of
                # NaturalImageWithEyeMovement and was dropped;
                # `image_location` already carries the image path.
                self.stimuli.append(
                    topo.NaturalImageWithEyeMovement(
                        frame_duration=self.frame_duration,
                        size_x=model.visual_field.size_x,
                        size_y=model.visual_field.size_y,
                        location_x=0.0,
                        location_y=0.0,
                        background_luminance=self.background_luminance,
                        # Maximum length of the trial.
                        duration=self.parameters.stimulus_duration,
                        density=self.density,
                        trial=k,
                        size=self.parameters.size,  # x size of image
                        eye_movement_period=period,
                        eye_path=eye_path,
                        image_location=image_id))

    def do_analysis(self, data_store):
        pass


# === mozaik/stimuli/vision/topographica_based.py ===
# (class body of NaturalImageWithEyeMovement(TopographicaBasedVisualStimulus))

    size = SNumber(degrees, doc="The length of the longer axis of the image in visual degrees")
    eye_movement_period = SNumber(ms, doc="The time between two consecutive recorded eye positions")
    image_location = SString(doc="Location of the image")
    # The eye path is now passed in directly (a dict-like parameter set
    # with "x" and "y" lists of positions) instead of being unpickled
    # from a file path, as the stale doc string previously claimed.
    eye_path = SParameterSet(doc="Eye path as a parameter set with 'x' and 'y' lists of positions")
= list of numpy arrays -> pouzit SParameterSet (funguje ako dictionary) + def frames(self): self.time = 0 - f = open(self.eye_path_location, 'rb') - self.eye_path = pickle.load(f, encoding="latin1") self.pattern_sampler = imagen.image.PatternSampler( size_normalization='fit_longest', whole_pattern_output_fns=[MaximumDynamicRange()]) @@ -393,9 +393,18 @@ def frames(self): pattern_sampler=self.pattern_sampler) while True: - location = self.eye_path[int(numpy.floor(self.frame_duration * self.time / self.eye_movement_period))] - image.x = location[0] - image.y = location[1] + x = self.eye_path["x"] + y = self.eye_path["y"] + + + print(self.frame_duration, self.time, self.eye_movement_period) + print("EYE PATH, ", x) + index = int(numpy.floor(self.frame_duration * self.time / self.eye_movement_period)) + + if index > len(x)-1: + index = len(x)-1 + image.x = x[index] + image.y = y[index] yield (image(), [self.time]) self.time += 1