Config file with paths to source and destination folders + paths joining fixes for Windows #1

Open · wants to merge 1 commit into main
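Summary: this PR replaces the hard-coded data folder '/home/fetterhoff/Graded_Remapping/' in all four analysis scripts with a shared config.json holding the input folder ('datasource') and output folder ('results'), and swaps string-concatenated paths for os.path.join so the scripts also run on Windows.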
4 changes: 4 additions & 0 deletions config.json
@@ -0,0 +1,4 @@
+{
+    "datasource": "/tmp/data",
+    "results": "/tmp/Graded_Remapping"
+}
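Review note: for anyone testing the branch, the scripts expect config.json in the working directory and read it as below. This is only a sketch of how the new file is consumed; the key check is an illustrative addition, not code from this PR.

import json
import os

# Sketch: how the PR's scripts consume config.json.
with open("config.json") as f:
    config = json.load(f)

# Illustrative guard, not part of the diff.
for key in ("datasource", "results"):
    if key not in config:
        raise KeyError("config.json is missing '{}'".format(key))

hdf5Dir = config["datasource"]  # folder holding the session subfolders
print(os.path.join(config["results"], "waveform_stats"))  # e.g. /tmp/Graded_Remapping/waveform_stats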
15 changes: 9 additions & 6 deletions iso_dist_hist.py
@@ -11,6 +11,7 @@
 """
 import os
 import glob
+import json
 import numpy as np
 import matplotlib.pyplot as pl
 from scipy.io import loadmat
@@ -36,10 +37,12 @@
     ['g2784_d3']
     ]

-# Load data from this folder
-hdf5Dir = '/home/fetterhoff/Graded_Remapping/'
+with open("config.json") as f:
+    config = json.load(f)

-combinedResultDir = hdf5Dir+'waveform_stats/' # Save in subdirectory
+# Load data from this folder
+hdf5Dir = config['datasource']
+combinedResultDir = os.path.join(config['results'], 'waveform_stats') # Save in subdirectory
 if not os.path.exists(combinedResultDir):
     os.makedirs(combinedResultDir)

@@ -53,9 +56,9 @@
     session = s[0]
     print(session) # current session

-    sd = hdf5Dir+session+'/' # session directory
+    sd = os.path.join(hdf5Dir, session) # session directory

-    for mat_name in glob.glob(sd+'*TT*.mat'): # loop through all neuron files
+    for mat_name in glob.glob(os.path.join(sd, '*TT*.mat')): # loop through all neuron files
         m = loadmat(mat_name)
         iso_dist = np.append(iso_dist, m['isolation_distance'][0][0]) # save isolation distances

@@ -73,5 +76,5 @@
 pl.ylabel('Cumulative Distribution')
 pl.xlim([0,100])

-pl.savefig(combinedResultDir+'Fig_S1B_IsolationDistance.pdf',format='pdf', dpi=300, bbox_inches = 'tight', pad_inches = 0.05)
+pl.savefig(os.path.join(combinedResultDir, 'Fig_S1B_IsolationDistance.pdf'), format='pdf', dpi=300, bbox_inches = 'tight', pad_inches = 0.05)
 pl.close()
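Review note: a standalone sketch (not code from this PR) of why the join fix matters. The old concatenation relied on the folder string ending in '/' and hard-coded that separator; os.path.join inserts whichever separator the running OS expects.

import os

# Hypothetical Windows-style value for the 'datasource' entry.
base = r"C:\data"
session = "g2784_d3"

print(base + session + "/")         # 'C:\datag2784_d3/' -- separator missing, wrong folder
print(os.path.join(base, session))  # 'C:\data\g2784_d3' on Windows, 'C:\data/g2784_d3' on POSIX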
24 changes: 13 additions & 11 deletions maximum_likelihood_estimation.py
@@ -12,6 +12,7 @@

 import os
 import glob
+import json
 import numpy as np
 import matplotlib.pyplot as pl
 import pandas as pd
@@ -84,11 +85,12 @@ def bin_spikes(spike_times, dT, wdw_start, wdw_end):
 colors = ('r', 'b', 'm', 'c') # Colors for each maze-type
 mazeSegList = ['Entire Maze', 'First Hallway', 'First Corner', 'Middle Hallway', 'Last Corner', 'Last Hallway']

-# Load data from this folder
-hdf5Dir = '/home/fetterhoff/Graded_Remapping/'
+with open("config.json") as f:
+    config = json.load(f)

-# Create a results subfolder inside the data folder
-combinedResultDir = hdf5Dir+'mle_results_{}gamma/'.format(gamma) # Save in subdirectory
+# Load data from this folder
+hdf5Dir = config['datasource']
+combinedResultDir = os.path.join(config['results'], 'mle_results_{}gamma/'.format(gamma)) # Save in subdirectory
 if not os.path.exists(combinedResultDir):
     os.makedirs(combinedResultDir)

@@ -119,11 +121,11 @@ def bin_spikes(spike_times, dT, wdw_start, wdw_end):
     session = s[0]
     print(session) # current session

-    sd = hdf5Dir+session+'/' # session directory
+    sd = os.path.join(hdf5Dir, session) # session directory

     # Build a DataFrame using all tetrode (TT) files
     spikeDF = pd.DataFrame()
-    for mat_name in glob.glob(sd+'*TT*.mat'):
+    for mat_name in glob.glob(os.path.join(sd, '*TT*.mat')): # loop through all neuron files
         m = loadmat(mat_name)

         frame = pd.DataFrame([[m['file'][0], m['times'][0], m['vr_x'][0], m['vr_y'][0], m['real_cm'][0], m['speed_cms'][0], m['lap_num'][0],
@@ -134,7 +136,7 @@ def bin_spikes(spike_times, dT, wdw_start, wdw_end):
         spikeDF = spikeDF.append(frame)
     spikeDF.sort_index(inplace=True)

-    f2 = sd+session+'_laps_traj.h5'
+    f2 = os.path.join(sd, session+'_laps_traj.h5')
     trajDF = pd.read_hdf(f2, 'trj') # DataFrame of times/places/speed for each lap in VR
     # LapsDF maze_type dictionary: {1:R, -1:L, 2: R*, -2: L*}
     lapsDF = pd.read_hdf(f2, 'lapsDF')
@@ -363,7 +365,7 @@ def get_log_likelihood(z_source_dat, z_pattern_dat, Gamma):
     ax[0].set_yticklabels(mazeTypeList)
     ax[0].set_xticks([0, xlim0/20, xlim0/10])

-    fig.savefig(combinedResultDir+'Fig3A_mle_timesteps_{}.pdf'.format(session), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    fig.savefig(os.path.join(combinedResultDir, 'Fig3A_mle_timesteps_{}.pdf'.format(session)), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close(fig)

 #%% MLE while excluding the same maze as pattern (ex = excluding same maze-type)
@@ -427,7 +429,7 @@ def get_log_likelihood(z_source_dat, z_pattern_dat, Gamma):
 #%% Plot MLE over all sessions
 if toPlotAllSessions:

-    df_count.to_csv(combinedResultDir+'table_S1_place_cell_field_counts.csv')
+    df_count.to_csv(os.path.join(combinedResultDir, 'table_S1_place_cell_field_counts.csv'))

     # plot as percentages
     pctAll = [mle_sess.mean(axis=0), mle_sess_fh.mean(axis=0), mle_sess_fc.mean(axis=0), mle_sess_mh.mean(axis=0), mle_sess_lc.mean(axis=0), mle_sess_lh.mean(axis=0)]
@@ -457,7 +459,7 @@ def get_log_likelihood(z_source_dat, z_pattern_dat, Gamma):
     pl.ylabel("Maximum Likelihood Estimate from Pattern Data", labelpad=1)
     pl.xlabel("Maximum Likelihood Estimate from Source Data", labelpad=0)

-    fig.savefig(combinedResultDir+'Fig3B_allSessions_withReal_mle_gamma{}.pdf'.format(gamma), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    fig.savefig(os.path.join(combinedResultDir, 'Fig3B_allSessions_withReal_mle_gamma{}.pdf'.format(gamma)), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close(fig)

 #%% Plot MLE excluding same maze-type
@@ -486,5 +488,5 @@ def get_log_likelihood(z_source_dat, z_pattern_dat, Gamma):
     pl.ylabel("Maximum Likelihood Estimate from Pattern Data", labelpad=1)
     pl.xlabel("Maximum Likelihood Estimate from Source Data", labelpad=0)

-    fig.savefig(combinedResultDir+'Fig3C_allSessions_noReal_mle_gamma{}.pdf'.format(gamma), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    fig.savefig(os.path.join(combinedResultDir, 'Fig3C_allSessions_noReal_mle_gamma{}.pdf'.format(gamma)), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close(fig)
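Review note, minor: 'mle_results_{}gamma/' keeps its trailing slash inside os.path.join, which works but yields mixed separators on Windows. Dropping the slash, and using the exist_ok flag of os.makedirs (Python 3.2+), would tighten this block. A sketch only, with a stand-in config:

import os

gamma = 1  # placeholder for this sketch; the script defines gamma earlier
config = {'results': '/tmp/Graded_Remapping'}  # stand-in for the loaded config.json

combinedResultDir = os.path.join(config['results'], 'mle_results_{}gamma'.format(gamma))  # no trailing '/'
os.makedirs(combinedResultDir, exist_ok=True)  # replaces the os.path.exists() guard (Python >= 3.2)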
21 changes: 12 additions & 9 deletions plot_place_cells.py
@@ -10,6 +10,7 @@
 """
 import os
 import glob
+import json
 import numpy as np
 import matplotlib.pyplot as pl
 import pandas as pd
@@ -40,10 +41,12 @@
 fileList = fileList
 speedThresh = 5 # cm/s, to discard spikes during stillness

-# Load data from this folder
-hdf5Dir = '/home/fetterhoff/Graded_Remapping/'
+with open("config.json") as f:
+    config = json.load(f)

-combinedResultDir = hdf5Dir+'place_cells/' # Save in subdirectory
+# Load data from this folder
+hdf5Dir = config['datasource']
+combinedResultDir = os.path.join(config['results'], 'place_cells') # Save in subdirectory
 if not os.path.exists(combinedResultDir):
     os.makedirs(combinedResultDir)

@@ -63,11 +66,11 @@
     session = s[0]
     print(session) # current session

-    sd = hdf5Dir+session+'/' # session directory
+    sd = os.path.join(hdf5Dir, session) # session directory

     # Build a DataFrame using all tetrode (TT) files
     spikeDF = pd.DataFrame()
-    for mat_name in glob.glob(sd+'*TT*.mat'):
+    for mat_name in glob.glob(os.path.join(sd, '*TT*.mat')): # loop through all neuron files
         m = loadmat(mat_name)

         frame = pd.DataFrame([[m['file'][0], m['times'][0], m['vr_x'][0], m['vr_y'][0], m['real_cm'][0], m['speed_cms'][0], m['lap_num'][0],
@@ -78,13 +81,13 @@
         spikeDF = spikeDF.append(frame)
     spikeDF.sort_index(inplace=True)

-    f3 = sd+session+'_PCresultsDB.h5'
+    f3 = os.path.join(sd, session+'_PCresultsDB.h5')
     cellResultsDB = pd.read_hdf(f3, 'cellResultsDB')

 #%% Plot neurons as examples
     for q, cell_id in enumerate(spikeDF.T):
         sp = spikeDF.loc[cell_id]
-        title = combinedResultDir + session + '_' + sp.file[:-2]
+        title = os.path.join(combinedResultDir, session + '_' + sp.file[:-2])
         session_list.append(session)
         maze_seg_code.append(sp.segment_types)
         if toPlotAllNeurons:
@@ -202,7 +205,7 @@
     box.x0, box.y0, box.y1 = 0.7, 0.26, 0.5
     ax[5].set_position(box)

-    pl.savefig(combinedResultDir+'fig_1HI_pieChart.pdf', format='pdf', bbox_inches='tight', pad_inches=0.01)
+    pl.savefig(os.path.join(combinedResultDir, 'fig_1HI_pieChart.pdf'), format='pdf', bbox_inches='tight', pad_inches=0.01)
     pl.close()

 #%% Pie for each gerbil
@@ -264,5 +267,5 @@
     ax[5].legend(['RR*-dir', 'LL*-dir', 'RL*-im', 'LR*-im'], loc='upper right')
     ax[1].legend(labels, loc='upper left', bbox_to_anchor=(0.3, 0.83))

-    pl.savefig(combinedResultDir+'fig_S3_pieChartByGerbil_{}.pdf'.format(gid), format='pdf', bbox_inches='tight', pad_inches=0.01)
+    pl.savefig(os.path.join(combinedResultDir, 'fig_S3_pieChartByGerbil_{}.pdf'.format(gid)), format='pdf', bbox_inches='tight', pad_inches=0.01)
     pl.close()
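Review note, optional: pathlib (Python 3.4+) expresses the same joins with the '/' operator and folds the directory creation into one call. A sketch of an equivalent, not a requested change; the session name is taken from the file list above:

import json
from pathlib import Path

with open("config.json") as f:
    config = json.load(f)

hdf5Dir = Path(config['datasource'])
combinedResultDir = Path(config['results']) / 'place_cells'
combinedResultDir.mkdir(parents=True, exist_ok=True)  # create results subdirectory if missing

sd = hdf5Dir / 'g2784_d3'             # session directory
for mat_name in sd.glob('*TT*.mat'):  # Path.glob replaces glob.glob
    print(mat_name)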
43 changes: 23 additions & 20 deletions pop_vec_analysis.py
@@ -13,6 +13,7 @@
 """
 import os
 import glob
+import json
 import itertools as it
 import numpy as np
 import matplotlib.pyplot as pl
@@ -49,10 +50,12 @@
     ['g2784_ss1']
     ]

-# Load data from this folder
-hdf5Dir = '/home/fetterhoff/Graded_Remapping/'
+with open("config.json") as f:
+    config = json.load(f)

-combinedResultDir = hdf5Dir+'pop_vec_analysis/' # Save in subdirectory
+# Load data from this folder
+hdf5Dir = config['datasource']
+combinedResultDir = os.path.join(config['results'], 'pop_vec_analysis') # Save in subdirectory
 if not os.path.exists(combinedResultDir):
     os.makedirs(combinedResultDir)

@@ -97,11 +100,11 @@
     session = s[0]
     print(session) # current session

-    sd = hdf5Dir+session+'/' # session directory
+    sd = os.path.join(hdf5Dir, session) # session directory

     # Build a DataFrame using all tetrode (TT) files
     spikeDF = pd.DataFrame()
-    for mat_name in glob.glob(sd+'*TT*.mat'):
+    for mat_name in glob.glob(os.path.join(sd, '*TT*.mat')): # loop through all neuron files
         m = loadmat(mat_name)

         frame = pd.DataFrame([[m['file'][0], m['times'][0], m['vr_x'][0], m['vr_y'][0], m['real_cm'][0], m['speed_cms'][0], m['lap_num'][0],
@@ -112,13 +115,13 @@
         spikeDF = spikeDF.append(frame)
     spikeDF.sort_index(inplace=True)

-    f2 = sd+session+'_laps_traj.h5'
+    f2 = os.path.join(sd, session+'_laps_traj.h5')
     trajDF = pd.read_hdf(f2, 'trj') # DataFrame of times/places/speed for each lap in VR
     # LapsDF maze_type dictionary: {1:R, -1:L, 2: R*, -2: L*}
     lapsDF = pd.read_hdf(f2, 'lapsDF')
     lapsDB = np.array(lapsDF) # Keep values as matrix

-    f3 = sd+session+'_PCresultsDB.h5'
+    f3 = os.path.join(sd, session+'_PCresultsDB.h5')
     cellResultsDB = pd.read_hdf(f3, 'cellResultsDB')

     nPlaceFields = 0 # Count the number of place fields
@@ -214,8 +217,8 @@
 #%% Plot place field matrices sorted by maze R

 if not (simSwap or best6 or toExcludeImageCells):
-    pop_dir = combinedResultDir + 'population_plots/'
-
+    pop_dir = os.path.join(combinedResultDir, 'population_plots')
     if not os.path.exists(pop_dir):
         os.makedirs(pop_dir)

@@ -241,7 +244,7 @@
     axm3.set_xlabel('Track Position (cm)'); axm4.set_xlabel('Track Position (cm)')
     axm1.set_ylabel('Neuron #'); axm3.set_ylabel('Neuron #')

-    pl.savefig(pop_dir+'all_neural_pop_viridis_Rsort_{}.png'.format(session), format='png', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    pl.savefig(os.path.join(pop_dir, 'all_neural_pop_viridis_Rsort_{}.png'.format(session)), format='png', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close()

 # Plot place field matrices sorted by maze L
@@ -268,13 +271,13 @@
     axm3.set_xlabel('Track Position (cm)'); axm4.set_xlabel('Track Position (cm)')
     axm1.set_ylabel('Neuron #'); axm3.set_ylabel('Neuron #')

-    pl.savefig(pop_dir+'all_neural_pop_viridis_Lsort_{}.png'.format(session), format='png', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    pl.savefig(os.path.join(pop_dir, 'all_neural_pop_viridis_Lsort_{}.png'.format(session)), format='png', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close()

 #%% pop_vec correlation - all vs shuffled

 if not (simSwap or best6 or toExcludeImageCells):
-    df_count.to_csv(combinedResultDir+'table_S1_place_cell_counts_pop_vec.csv')
+    df_count.to_csv(os.path.join(combinedResultDir, 'table_S1_place_cell_counts_pop_vec.csv'))

 if (best6 or simSwap):
     fig, axx = pl.subplots(1, 2, figsize=(3.4, 1.4), sharey=True)
@@ -402,13 +405,13 @@
     axx[1].legend()

     if toExcludeImageCells:
-        pl.savefig(combinedResultDir+'fig_4B_pop_vec_correlation_noImageCells.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+        pl.savefig(os.path.join(combinedResultDir, 'fig_4B_pop_vec_correlation_noImageCells.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     elif simSwap:
-        pl.savefig(combinedResultDir+'fig_S5A_pop_vec_correlation.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+        pl.savefig(os.path.join(combinedResultDir, 'fig_S5A_pop_vec_correlation.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     elif best6:
-        pl.savefig(combinedResultDir+'fig_S5C_pop_vec_correlation.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+        pl.savefig(os.path.join(combinedResultDir, 'fig_S5C_pop_vec_correlation.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     else:
-        pl.savefig(combinedResultDir+'fig_4A_pop_vec_correlation.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+        pl.savefig(os.path.join(combinedResultDir, 'fig_4A_pop_vec_correlation.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close(fig)

 #%% pop_vec correlation - all vs shuffled for each gerbil
@@ -472,7 +475,7 @@
         aq[ai].fill_between([bd[0], bd[1]], -.2, 1, facecolor='k', alpha=0.2)
         aq[ai].fill_between([bd[2], bd[3]], -.2, 1, facecolor='k', alpha=0.2)

-    pl.savefig(combinedResultDir+'fig_S5E_pop_vec_correlation_g{}.pdf'.format(gid), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    pl.savefig(os.path.join(combinedResultDir, 'fig_S5E_pop_vec_correlation_g{}.pdf'.format(gid)), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close(fig)

 #%% Plot the matrix for the population vector correlation divided by maze segment
@@ -527,7 +530,7 @@
     cbar_ax.tick_params(labelsize=6)
     cbar_ax.set_ylabel("r", rotation=0, labelpad=-13)

-    pl.savefig(combinedResultDir+'fig_4C_population_vector_correlation_bySegment_v.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+    pl.savefig(os.path.join(combinedResultDir, 'fig_4C_population_vector_correlation_bySegment_v.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
     pl.close()

 #%% Plot the matrix for the population vector correlation divided by maze segment
@@ -582,8 +585,8 @@
         axw[i].grid(False)

     if simSwap:
-        pl.savefig(combinedResultDir+'fig_S5B_population_vector_correlation_bySegment_simSwap.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+        pl.savefig(os.path.join(combinedResultDir, 'fig_S5B_population_vector_correlation_bySegment_simSwap.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
         pl.close()
     if best6:
-        pl.savefig(combinedResultDir+'fig_S5D_population_vector_correlation_bySegment_best6.pdf', format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
+        pl.savefig(os.path.join(combinedResultDir, 'fig_S5D_population_vector_correlation_bySegment_best6.pdf'), format='pdf', dpi=300, bbox_inches='tight', pad_inches=0.05)
         pl.close()
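Review note, follow-up idea: the same config-loading block now opens all four scripts, so it could live in one small shared module. The file name paths.py and both functions below are hypothetical, sketched only to show the shape:

# paths.py -- hypothetical shared helper, not part of this PR
import json
import os

def load_config(path="config.json"):
    """Return the (datasource, results) folders from the JSON config."""
    with open(path) as f:
        config = json.load(f)
    return config["datasource"], config["results"]

def result_dir(results_root, subdir):
    """Create (if missing) and return a results subdirectory."""
    out = os.path.join(results_root, subdir)
    if not os.path.exists(out):
        os.makedirs(out)
    return out

Each script's header would then reduce to: hdf5Dir, resultsRoot = load_config() followed by combinedResultDir = result_dir(resultsRoot, 'pop_vec_analysis').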