Skip to content

Commit 0e39e2c

Browse files
committed
build: added Windows OS support and compatibility
- Added Windows-specific installation instructions - Fixed path separators to use OS-dependent ones - Updated dependency packages for simulation examples
1 parent cad54db commit 0e39e2c

File tree

16 files changed

+76
-50
lines changed

16 files changed

+76
-50
lines changed

README.md

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ parameter inference techniques based on [sbi](https://sbi-dev.github.io/sbi)) wi
3535
inverse modelling pipeline. Designed for modularity, it adapts to your needs without constraints.
3636

3737

38-
# Installation
38+
# Installation on Linux
3939

4040
`ncpi` requires Python 3.10 or higher. To install `ncpi`, you can use pip. The package is available on PyPI, so you can
4141
install it directly from there.
@@ -73,6 +73,31 @@ pip uninstall scikit-learn numpy -y
7373
pip install scikit-learn==1.5.0 numpy
7474
```
7575

76+
# Installation on Windows
77+
78+
To install all dependencies of `ncpi` on Windows, you first have to install [Windows Subsystem for Linux (WSL)](https://documentation.ubuntu.com/wsl/stable/howto/install-ubuntu-wsl2/).
79+
80+
After the WSL installation, we strongly recommend installing the latest updates by running the following commands within the Ubuntu terminal:
81+
82+
```bash
83+
$ sudo apt update
84+
$ sudo apt upgrade -y
85+
```
86+
87+
Once Ubuntu is up and running in WSL, [conda can be installed there](https://docs.conda.io/projects/conda/en/stable/user-guide/install/linux.html). Now you can follow the rest of the instructions for the [installation of `ncpi` on Linux](#Installation-on-Linux).
88+
89+
If you encounter the error *Failed building wheel for pycatch22* when running `pip install ncpi`, install:
90+
91+
```bash
92+
$ conda install -c conda-forge pycatch22
93+
```
94+
95+
If the error persists, install the following dependencies:
96+
97+
```bash
98+
$ sudo apt install -y build-essential python3-dev
99+
```
100+
76101
# Folder Structure
77102

78103
- `ncpi/`: Contains the source code for the library, organized into modules and classes.

examples/EEG_AD/EEG_AD.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
# Select either raw EEG data or source-reconstructed EEG data. This study used the raw EEG data for all analyses.
1212
raw = True
1313
if raw:
14-
data_path = '/DATA/empirical_datasets/POCTEP_data/CLEAN/SENSORS'
14+
data_path = os.path.join(os.sep, 'DATA', 'empirical_datasets', 'POCTEP_data', 'CLEAN', 'SENSORS')
1515
else:
16-
data_path = '/DATA/empirical_datasets/POCTEP_data/CLEAN/SOURCES/dSPM/DK'
16+
data_path = os.path.join(os.sep, 'DATA', 'empirical_datasets', 'POCTEP_data', 'CLEAN', 'SOURCES', 'dSPM', 'DK')
1717

1818
# Choose to either download data from Zenodo (True) or load it from a local path (False).
1919
# Important: the zenodo downloads will take a while, so if you have already downloaded the data, set this to False and
@@ -208,12 +208,12 @@ def create_POCTEP_dataframe(data_path):
208208
print(f'--- Sensor: {sensor}')
209209

210210
shutil.copy(
211-
os.path.join(zenodo_dir_sim, 'ML_models/EEG', sensor, method, 'model'),
211+
os.path.join(zenodo_dir_sim, 'ML_models', 'EEG', sensor, method, 'model'),
212212
os.path.join('data', 'model.pkl')
213213
)
214214

215215
shutil.copy(
216-
os.path.join(zenodo_dir_sim, 'ML_models/EEG', sensor, method, 'scaler'),
216+
os.path.join(zenodo_dir_sim, 'ML_models', 'EEG', sensor, method, 'scaler'),
217217
os.path.join('data', 'scaler.pkl')
218218
)
219219

@@ -237,5 +237,4 @@ def create_POCTEP_dataframe(data_path):
237237
np.array(others.tolist())), axis=1)).tolist()
238238

239239
# Save the data including predictions of (E/I)_net
240-
emp_data.to_pickle(os.path.join('data', method, 'emp_data_reduced.pkl'))
241-
240+
emp_data.to_pickle(os.path.join('data', method, 'emp_data_reduced.pkl'))

examples/EEG_AD/figures/EEG_predictions.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
import ncpi
77

88
# Set the path to the results folder
9-
results_path = '../data'
9+
results_path = os.path.join('..', 'data')
1010

1111
# Select the statistical analysis method ('cohen', 'lmer')
1212
statistical_analysis = 'lmer'
@@ -82,7 +82,7 @@ def append_lmer_results(lmer_results, group, elec, p_value_th, data_lmer):
8282
if row == 2 or row == 3:
8383
method = 'power_spectrum_parameterization_1'
8484
try:
85-
data = pd.read_pickle(os.path.join('../data', method, 'emp_data_reduced.pkl'))
85+
data = pd.read_pickle(os.path.join(results_path, method, 'emp_data_reduced.pkl'))
8686

8787
except Exception as e:
8888
print(f'Error loading data for {method}: {e}')
@@ -238,6 +238,6 @@ def append_lmer_results(lmer_results, group, elec, p_value_th, data_lmer):
238238
fig1.add_artist(tauexc_line_f)
239239
fig1.add_artist(tauinh_line_f)
240240

241-
fig1.savefig('EEG_predictions.png')
241+
fig1.savefig(f'EEG_predictions_{statistical_analysis}.png')
242242

243243

examples/LFP_developing_brain/LFP_developing_brain.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@
2222
zenodo_URL_emp = "https://zenodo.org/api/records/15382047"
2323

2424
# Paths to zenodo files
25-
zenodo_dir_sim = "zenodo_sim_files"
26-
zenodo_dir_emp= "zenodo_emp_files"
25+
zenodo_dir_sim = os.path.join("zenodo_sim_files")
26+
zenodo_dir_emp= os.path.join("zenodo_emp_files")
2727

2828
# Methods used to compute the features
2929
all_methods = ['catch22','power_spectrum_parameterization_1']
@@ -79,18 +79,18 @@ def load_inference_data(method, X, theta, zenodo_dir_sim, ML_model):
7979
folder = 'SBI'
8080

8181
shutil.copy(
82-
os.path.join(zenodo_dir_sim, 'ML_models/4_param', folder, method, 'scaler'),
82+
os.path.join(zenodo_dir_sim, 'ML_models', '4_param', folder, method, 'scaler'),
8383
os.path.join('data', 'scaler.pkl')
8484
)
8585

8686
shutil.copy(
87-
os.path.join(zenodo_dir_sim, 'ML_models/4_param', folder, method, 'model'),
87+
os.path.join(zenodo_dir_sim, 'ML_models', '4_param', folder, method, 'model'),
8888
os.path.join('data', 'model.pkl')
8989
)
9090

9191
if ML_model == 'NPE':
9292
shutil.copy(
93-
os.path.join(zenodo_dir_sim, 'ML_models/4_param', folder, method,
93+
os.path.join(zenodo_dir_sim, 'ML_models', '4_param', folder, method,
9494
'density_estimator'),
9595
os.path.join('data', 'density_estimator.pkl')
9696
)
@@ -101,13 +101,13 @@ def load_inference_data(method, X, theta, zenodo_dir_sim, ML_model):
101101
@timer("Loading LFP data.")
102102
def load_empirical_data(zenodo_dir_emp):
103103
# Load empirical data
104-
file_list = os.listdir(os.path.join(zenodo_dir_emp, 'development_EI_decorrelation/baseline/LFP'))
104+
file_list = os.listdir(os.path.join(zenodo_dir_emp, 'development_EI_decorrelation', 'baseline', 'LFP'))
105105
emp_data = {'LFP': [], 'fs': [], 'age': []}
106106

107107
for i,file_name in enumerate(file_list):
108108
print(f'\r Progress: {i+1} of {len(file_list)} files loaded', end='', flush=True)
109109
structure = scipy.io.loadmat(os.path.join(os.path.join(zenodo_dir_emp,
110-
'development_EI_decorrelation/baseline/LFP'),
110+
'development_EI_decorrelation', 'baseline', 'LFP'),
111111
file_name))
112112
LFP = structure['LFP']['LFP'][0,0]
113113
sum_LFP = np.sum(LFP, axis=0) # sum LFP across channels

examples/LFP_developing_brain/figures/LFP_predictions.py

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -7,17 +7,17 @@
77
import ncpi
88

99
# Folder with parameters of LIF model simulations
10-
sys.path.append(os.path.join(os.path.dirname(__file__), '../../simulation/Hagen_model/simulation/params'))
10+
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'simulation', 'Hagen_model', 'simulation', 'params'))
1111

1212
# Path to the folder with prediction results
13-
pred_results = '../data'
13+
pred_results = os.path.join('..', 'data')
1414

1515
# Calculate new firing rates (True) or load them from file if they already exist (False). If firing rates do not
1616
# exist, they will not be plotted.
1717
compute_firing_rate = False
1818

1919
# Path to saved firing rates
20-
fr_path = './data'
20+
fr_path = os.path.join('.', 'data')
2121

2222
# Number of samples to draw from the predictions for computing the firing rates
2323
n_samples = 50
@@ -118,23 +118,23 @@
118118
LIF_params['J_ext'] = J_ext
119119

120120
# Create a Simulation object
121-
sim = ncpi.Simulation(param_folder='../../simulation/Hagen_model/simulation/params',
122-
python_folder='../../simulation/Hagen_model/simulation/python',
123-
output_folder='../../simulation/Hagen_model/simulation/output')
121+
sim = ncpi.Simulation(param_folder = os.path.join('../../simulation/Hagen_model/simulation/params'),
122+
python_folder = os.path.join('../../simulation/Hagen_model/simulation/python'),
123+
output_folder = os.path.join('../../simulation/Hagen_model/simulation/output'))
124124

125125
# Save parameters to a pickle file
126-
with open(os.path.join('../../simulation/Hagen_model/simulation/output', 'network.pkl'), 'wb') as f:
126+
with open(os.path.join('..', '..', 'simulation', 'Hagen_model', 'simulation', 'output', 'network.pkl'), 'wb') as f:
127127
pickle.dump(LIF_params, f)
128128

129129
# Run the simulation
130130
sim.simulate('simulation.py', 'simulation_params.py')
131131

132132
# Load spike times
133-
with open(os.path.join('../../simulation/Hagen_model/simulation/output', 'times.pkl'), 'rb') as f:
133+
with open(os.path.join('..', '..', 'simulation', 'Hagen_model', 'simulation', 'output', 'times.pkl'), 'rb') as f:
134134
times = pickle.load(f)
135135

136136
# Load tstop
137-
with open(os.path.join('../../simulation/Hagen_model/simulation/output', 'tstop.pkl'), 'rb') as f:
137+
with open(os.path.join('..', '..', 'simulation', 'Hagen_model', 'simulation', 'output', 'tstop.pkl'), 'rb') as f:
138138
tstop = pickle.load(f)
139139

140140
# Transient period
@@ -155,15 +155,15 @@
155155
if compute_firing_rate:
156156
if not os.path.exists('data'):
157157
os.makedirs('data')
158-
with open('data/firing_rates_preds.pkl', 'wb') as f:
158+
with open('data', 'firing_rates_preds.pkl', 'wb') as f:
159159
pickle.dump(firing_rates, f)
160-
with open('data/IDs.pkl', 'wb') as f:
160+
with open('data', 'IDs.pkl', 'wb') as f:
161161
pickle.dump(IDs, f)
162162
else:
163163
try:
164-
with open(os.path.join(fr_path,'firing_rates_preds.pkl'), 'rb') as f:
164+
with open(os.path.join(fr_path, 'firing_rates_preds.pkl'), 'rb') as f:
165165
firing_rates = pickle.load(f)
166-
with open(os.path.join(fr_path,'IDs.pkl'), 'rb') as f:
166+
with open(os.path.join(fr_path, 'IDs.pkl'), 'rb') as f:
167167
IDs = pickle.load(f)
168168
except FileNotFoundError:
169169
print('Firing rates not found.')
@@ -383,5 +383,5 @@
383383
ax.text(0.5, 0.49, y_labels[1], color = 'blue', alpha = 0.5, fontsize = 10, ha='center')
384384

385385
# Save the figure
386-
plt.savefig('LFP_predictions.png', bbox_inches='tight')
386+
plt.savefig(f'LFP_predictions_{statistical_analysis}.png', bbox_inches='tight')
387387
# plt.show()

examples/LFP_developing_brain/figures/emp_features.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import matplotlib.pyplot as plt
44

55
# Path to the folder with prediction results
6-
pred_results = '../data'
6+
pred_results = os.path.join('..', 'data')
77

88
# Names of catch22 features
99
try:

examples/simulation/Hagen_model/figures/SBI_results.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
compute_metrics = True
1818

1919
# Path to the local directory where the metrics and posteriors will be saved
20-
result_folder = 'SBI_results'
20+
result_folder = os.path.join('SBI_results')
2121

2222
# Choose whether to use a held-out dataset or folds from RepeatedKFold
2323
use_held_out_data = True
@@ -34,7 +34,7 @@
3434
zenodo_URL_sim = "https://zenodo.org/api/records/15351118"
3535

3636
# Paths to zenodo files
37-
zenodo_dir_sim = "/DATA/zenodo_sim_files"
37+
zenodo_dir_sim = os.path.join(os.sep, 'DATA', 'zenodo_sim_files')
3838

3939
# Download simulation data and ML models
4040
if zenodo_dw_sim:
@@ -105,10 +105,10 @@ def create_white_to_color_cmap(color):
105105

106106
# Path to ML models trained based on a held-out dataset approach
107107
if use_held_out_data:
108-
ML_path = os.path.join(zenodo_dir_sim, 'ML_models/held_out_data_models')
108+
ML_path = os.path.join(zenodo_dir_sim, 'ML_models', 'held_out_data_models')
109109
# Path to ML models trained based on a RepeatedKFold approach
110110
else:
111-
ML_path = os.path.join(zenodo_dir_sim, 'ML_models/4_param')
111+
ML_path = os.path.join(zenodo_dir_sim, 'ML_models', '4_param')
112112

113113
# Limits of histograms
114114
lims = [[-15, 15], [-2, 5], [-2, 12], [0, 60]]

examples/simulation/Hagen_model/figures/example_full_pipeline.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
from ncpi import tools
1111

1212
# Path to parameters of the LIF network model
13-
sys.path.append(os.path.join(os.path.dirname(__file__), '../simulation/params'))
13+
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'simulation', 'params'))
1414

1515
# Choose to either download files and precomputed outputs used in simulations of the reference multicompartment neuron
1616
# network model (True) or load them from a local path (False)
@@ -20,7 +20,7 @@
2020
zenodo_URL_mult = "https://zenodo.org/api/records/15429373"
2121

2222
# Zenodo directory where the data is stored (must be an absolute path to correctly load morphologies in NEURON)
23-
zenodo_dir = '/DATA/multicompartment_neuron_network'
23+
zenodo_dir = os.path.join(os.sep, 'DATA', 'multicompartment_neuron_network')
2424

2525
# Set to True to run new simulations of the LIF network model, or False to load precomputed results from a pickle file
2626
# located in a 'data' folder.

examples/simulation/Hagen_model/figures/sim_features.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
zenodo_URL_sim = "https://zenodo.org/api/records/15351118"
1818

1919
# Paths to zenodo files
20-
zenodo_dir_sim = "zenodo_sim_files"
20+
zenodo_dir_sim = os.path.join("zenodo_sim_files")
2121

2222
# Download simulation data and ML models
2323
if zenodo_dw_sim:

examples/simulation/Hagen_model/figures/sim_predictions.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
zenodo_URL_sim = "https://zenodo.org/api/records/15351118"
1717

1818
# Paths to zenodo files
19-
zenodo_dir_sim = "zenodo_sim_files"
19+
zenodo_dir_sim = os.path.join("zenodo_sim_files")
2020

2121
# ML model used to compute the predictions (MLPRegressor or Ridge)
2222
ML_model = 'MLPRegressor'
@@ -69,10 +69,10 @@ def hellinger_distance(p, q):
6969
all_theta = {}
7070
for method in all_methods:
7171
try:
72-
with open(os.path.join(zenodo_dir_sim,'ML_models/held_out_data_models', folder, method,
72+
with open(os.path.join(zenodo_dir_sim,'ML_models', 'held_out_data_models', folder, method,
7373
'predictions'), 'rb') as file:
7474
all_preds[method] = np.array(pickle.load(file))
75-
with open(os.path.join(zenodo_dir_sim,'ML_models/held_out_data_models', 'datasets', method,
75+
with open(os.path.join(zenodo_dir_sim,'ML_models', 'held_out_data_models', 'datasets', method,
7676
'held_out_dataset'), 'rb') as file:
7777
X_test, theta_test = pickle.load(file)
7878
all_theta[method] = np.array(theta_test)

0 commit comments

Comments
 (0)