From c7311607e2dbd46de61bf2f51b41b4758236c70f Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Thu, 19 Sep 2019 12:34:22 +0100 Subject: [PATCH 001/123] Skeleton implementation with updated data structures --- .../makefiles/neuron/eprop_adaptive/Makefile | 23 + .../neuron_impl_eprop_adaptive.h | 383 ++++++++++++++ .../models/neuron_model_eprop_adaptive_impl.c | 94 ++++ .../models/neuron_model_eprop_adaptive_impl.h | 68 +++ .../synapse_dynamics_eprop_adaptive_impl.c | 493 ++++++++++++++++++ .../synapse_type_eprop_adaptive.h | 196 +++++++ .../threshold_types/threshold_type_adaptive.h | 33 ++ .../pyNN/models/neuron/builds/__init__.py | 2 +- .../models/neuron/builds/eprop_adaptive.py | 46 ++ .../neuron/builds/if_curr_dual_exp_base.py | 4 +- .../models/neuron/neuron_models/__init__.py | 2 +- .../neuron_models/neuron_model_eprop.py | 180 +++++++ .../models/neuron/synapse_types/__init__.py | 3 +- .../synapse_types/synapse_type_eprop.py | 186 +++++++ .../models/neuron/threshold_types/__init__.py | 2 +- .../threshold_type_adaptive.py | 74 +++ .../threshold_types/threshold_type_eprop.py | 74 +++ 17 files changed, 1857 insertions(+), 6 deletions(-) create mode 100644 neural_modelling/makefiles/neuron/eprop_adaptive/Makefile create mode 100644 neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h create mode 100644 neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c create mode 100644 neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h create mode 100644 neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c create mode 100644 neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h create mode 100644 neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h create mode 100644 spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py create mode 100644 spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py create mode 100644 
spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py create mode 100644 spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py create mode 100644 spynnaker/pyNN/models/neuron/threshold_types/threshold_type_eprop.py diff --git a/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile b/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile new file mode 100644 index 00000000000..d14f67e4fca --- /dev/null +++ b/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile @@ -0,0 +1,23 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +APP = $(notdir $(CURDIR)) + +OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_eprop_adaptive_impl.c +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_eprop_adaptive.h +//SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c + +include ../neural_build.mk \ No newline at end of file diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h new file mode 100644 index 00000000000..b020965d073 --- /dev/null +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -0,0 +1,383 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _NEURON_IMPL_STANDARD_H_ +#define _NEURON_IMPL_STANDARD_H_ + +#include "neuron_impl.h" + +// Includes for model parts used in this implementation +#include +#include +#include +#include +#include + + +// Further includes +#include +#include +#include + +#define V_RECORDING_INDEX 0 +#define GSYN_EXCITATORY_RECORDING_INDEX 1 +#define GSYN_INHIBITORY_RECORDING_INDEX 2 + +#ifndef NUM_EXCITATORY_RECEPTORS +#define NUM_EXCITATORY_RECEPTORS 1 +#error NUM_EXCITATORY_RECEPTORS was undefined. 
It should be defined by a synapse\ + shaping include +#endif + +#ifndef NUM_INHIBITORY_RECEPTORS +#define NUM_INHIBITORY_RECEPTORS 1 +#error NUM_INHIBITORY_RECEPTORS was undefined. It should be defined by a synapse\ + shaping include +#endif + +//! Array of neuron states +static neuron_pointer_t neuron_array; + +//! Input states array +static input_type_pointer_t input_type_array; + +//! Additional input array +static additional_input_pointer_t additional_input_array; + +//! Threshold states array +static threshold_type_pointer_t threshold_type_array; + +//! Global parameters for the neurons +static global_neuron_params_pointer_t global_parameters; + +// The synapse shaping parameters +static synapse_param_t *neuron_synapse_shaping_params; + +static bool neuron_impl_initialise(uint32_t n_neurons) { + // allocate DTCM for the global parameter details + if (sizeof(global_neuron_params_t)) { + global_parameters = spin1_malloc(sizeof(global_neuron_params_t)); + if (global_parameters == NULL) { + log_error("Unable to allocate global neuron parameters" + "- Out of DTCM"); + return false; + } + } + + // Allocate DTCM for neuron array + if (sizeof(neuron_t)) { + neuron_array = spin1_malloc(n_neurons * sizeof(neuron_t)); + if (neuron_array == NULL) { + log_error("Unable to allocate neuron array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for input type array and copy block of data + if (sizeof(input_type_t)) { + input_type_array = spin1_malloc(n_neurons * sizeof(input_type_t)); + if (input_type_array == NULL) { + log_error("Unable to allocate input type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for additional input array and copy block of data + if (sizeof(additional_input_t)) { + additional_input_array = + spin1_malloc(n_neurons * sizeof(additional_input_t)); + if (additional_input_array == NULL) { + log_error("Unable to allocate additional input array" + " - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for 
threshold type array and copy block of data + if (sizeof(threshold_type_t)) { + threshold_type_array = + spin1_malloc(n_neurons * sizeof(threshold_type_t)); + if (threshold_type_array == NULL) { + log_error("Unable to allocate threshold type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for synapse shaping parameters + if (sizeof(synapse_param_t)) { + neuron_synapse_shaping_params = + spin1_malloc(n_neurons * sizeof(synapse_param_t)); + if (neuron_synapse_shaping_params == NULL) { + log_error("Unable to allocate synapse parameters array" + " - Out of DTCM"); + return false; + } + } + + return true; +} + +static void neuron_impl_add_inputs( + index_t synapse_type_index, index_t neuron_index, + input_t weights_this_timestep) { + // simple wrapper to synapse type input function + synapse_param_pointer_t parameters = + &neuron_synapse_shaping_params[neuron_index]; + synapse_types_add_neuron_input(synapse_type_index, + parameters, weights_this_timestep); +} + +static uint32_t n_words_needed(uint32_t size) { + return (size + (sizeof(uint32_t) - 1)) / sizeof(uint32_t); +} + +static void neuron_impl_load_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("reading parameters, next is %u, n_neurons is %u ", + next, n_neurons); + + if (sizeof(global_neuron_params_t)) { + log_debug("writing neuron global parameters"); + spin1_memcpy(global_parameters, &address[next], + sizeof(global_neuron_params_t)); + next += n_words_needed(sizeof(global_neuron_params_t)); + } + + if (sizeof(neuron_t)) { + log_debug("reading neuron local parameters"); + spin1_memcpy(neuron_array, &address[next], + n_neurons * sizeof(neuron_t)); + next += n_words_needed(n_neurons * sizeof(neuron_t)); + } + + if (sizeof(input_type_t)) { + log_debug("reading input type parameters"); + spin1_memcpy(input_type_array, &address[next], + n_neurons * sizeof(input_type_t)); + next += n_words_needed(n_neurons * sizeof(input_type_t)); + } + + if 
(sizeof(threshold_type_t)) { + log_debug("reading threshold type parameters"); + spin1_memcpy(threshold_type_array, &address[next], + n_neurons * sizeof(threshold_type_t)); + next += n_words_needed(n_neurons * sizeof(threshold_type_t)); + } + + if (sizeof(synapse_param_t)) { + log_debug("reading synapse parameters"); + spin1_memcpy(neuron_synapse_shaping_params, &address[next], + n_neurons * sizeof(synapse_param_t)); + next += n_words_needed(n_neurons * sizeof(synapse_param_t)); + } + + if (sizeof(additional_input_t)) { + log_debug("reading additional input type parameters"); + spin1_memcpy(additional_input_array, &address[next], + n_neurons * sizeof(additional_input_t)); + next += n_words_needed(n_neurons * sizeof(additional_input_t)); + } + + neuron_model_set_global_neuron_params(global_parameters); + +#if LOG_LEVEL >= LOG_DEBUG + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + log_debug("-------------------------------------\n"); +#endif // LOG_LEVEL >= LOG_DEBUG +} + +static bool neuron_impl_do_timestep_update(index_t neuron_index, + input_t external_bias, state_t *recorded_variable_values) { + // Get the neuron itself + neuron_pointer_t neuron = &neuron_array[neuron_index]; + + // Get the input_type parameters and voltage for this neuron + input_type_pointer_t input_type = &input_type_array[neuron_index]; + + // Get threshold and additional input parameters for this neuron + threshold_type_pointer_t threshold_type = + &threshold_type_array[neuron_index]; + additional_input_pointer_t additional_input = + &additional_input_array[neuron_index]; + synapse_param_pointer_t synapse_type = + &neuron_synapse_shaping_params[neuron_index]; + + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + recorded_variable_values[V_RECORDING_INDEX] = voltage; + + // Get the exc and inh values from the synapses + input_t* exc_value = 
synapse_types_get_excitatory_input(synapse_type); + input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); + + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_value, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_value, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording + REAL total_exc = 0; + REAL total_inh = 0; + + for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { + total_exc += exc_input_values[i]; + } + for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { + total_inh += inh_input_values[i]; + } + + // Call functions to get the input values to be recorded + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + + // Call functions to convert exc_input and inh_input to current + input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); + + external_bias += additional_input_get_input_value_as_current( + additional_input, voltage); + + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron); + + // determine if a spike should occur + bool spike = threshold_type_is_above_threshold(result, threshold_type); + + // If spike occurs, communicate to relevant parts of model + if (spike) { + // Call relevant model-based functions + // Tell the neuron model + neuron_model_has_spiked(neuron); + + // Tell the additional input + additional_input_has_spiked(additional_input); + } + + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); + +#if LOG_LEVEL >= LOG_DEBUG + 
neuron_model_print_state_variables(neuron); +#endif // LOG_LEVEL >= LOG_DEBUG + + // Return the boolean to the model timestep update + return spike; +} + +//! \brief stores neuron parameter back into sdram +//! \param[in] address: the address in sdram to start the store +static void neuron_impl_store_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("writing parameters"); + //if (global_parameters == NULL) { + // log_error("global parameter storage not allocated"); + // rt_error(RTE_SWERR); + // return; + //} + + if (sizeof(global_neuron_params_t)) { + log_debug("writing neuron global parameters"); + spin1_memcpy(&address[next], global_parameters, + sizeof(global_neuron_params_t)); + next += n_words_needed(sizeof(global_neuron_params_t)); + } + + if (sizeof(neuron_t)) { + log_debug("writing neuron local parameters"); + spin1_memcpy(&address[next], neuron_array, + n_neurons * sizeof(neuron_t)); + next += n_words_needed(n_neurons * sizeof(neuron_t)); + } + + if (sizeof(input_type_t)) { + log_debug("writing input type parameters"); + spin1_memcpy(&address[next], input_type_array, + n_neurons * sizeof(input_type_t)); + next += n_words_needed(n_neurons * sizeof(input_type_t)); + } + + if (sizeof(threshold_type_t)) { + log_debug("writing threshold type parameters"); + spin1_memcpy(&address[next], threshold_type_array, + n_neurons * sizeof(threshold_type_t)); + next += n_words_needed(n_neurons * sizeof(threshold_type_t)); + } + + if (sizeof(synapse_param_t)) { + log_debug("writing synapse parameters"); + spin1_memcpy(&address[next], neuron_synapse_shaping_params, + n_neurons * sizeof(synapse_param_t)); + next += n_words_needed(n_neurons * sizeof(synapse_param_t)); + } + + if (sizeof(additional_input_t)) { + log_debug("writing additional input type parameters"); + spin1_memcpy(&address[next], additional_input_array, + n_neurons * sizeof(additional_input_t)); + next += n_words_needed(n_neurons * sizeof(additional_input_t)); + } +} + 
+#if LOG_LEVEL >= LOG_DEBUG +void neuron_impl_print_inputs(uint32_t n_neurons) { + bool empty = true; + for (index_t i = 0; i < n_neurons; i++) { + empty = empty && (0 == bitsk( + synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) + - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]))); + } + + if (!empty) { + log_debug("-------------------------------------\n"); + + for (index_t i = 0; i < n_neurons; i++) { + input_t input = + synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) + - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]); + if (bitsk(input) != 0) { + log_debug("%3u: %12.6k (= ", i, input); + synapse_types_print_input(&neuron_synapse_shaping_params[i]); + log_debug(")\n"); + } + } + log_debug("-------------------------------------\n"); + } +} + +void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + synapse_types_print_parameters(&neuron_synapse_shaping_params[n]); + } + log_debug("-------------------------------------\n"); +} + +const char *neuron_impl_get_synapse_type_char(uint32_t synapse_type) { + return synapse_types_get_type_char(synapse_type); +} +#endif // LOG_LEVEL >= LOG_DEBUG + +#endif // _NEURON_IMPL_STANDARD_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c new file mode 100644 index 00000000000..3bac73de40d --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "neuron_model_lif_impl.h" + +#include + +// simple Leaky I&F ODE +static inline void lif_neuron_closed_form( + neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + + // update membrane voltage + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +} + +void neuron_model_set_global_neuron_params( + global_neuron_params_pointer_t params) { + use(params); + // Does Nothing - no params +} + +state_t neuron_model_state_update( + uint16_t num_excitatory_inputs, input_t* exc_input, + uint16_t num_inhibitory_inputs, input_t* inh_input, + input_t external_bias, neuron_pointer_t neuron) { + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); + log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { + REAL total_exc = 0; + REAL total_inh = 0; + + for (int i=0; i < num_excitatory_inputs; i++) { + total_exc += exc_input[i]; + } + for (int i=0; i< num_inhibitory_inputs; i++) { + total_inh += inh_input[i]; + } + // Get the input in nA + input_t input_this_timestep = + total_exc - total_inh + external_bias + neuron->I_offset; + + lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep); + } else { + // countdown refractory timer + neuron->refract_timer--; + } + return neuron->V_membrane; +} + +void neuron_model_has_spiked(neuron_pointer_t neuron) { + // reset membrane voltage + neuron->V_membrane = neuron->V_reset; + + // reset refractory timer + neuron->refract_timer 
= neuron->T_refract; +} + +state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { + return neuron->V_membrane; +} + +void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { + log_debug("V membrane = %11.4k mv", neuron->V_membrane); +} + +void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { + log_debug("V reset = %11.4k mv", neuron->V_reset); + log_debug("V rest = %11.4k mv", neuron->V_rest); + + log_debug("I offset = %11.4k nA", neuron->I_offset); + log_debug("R membrane = %11.4k Mohm", neuron->R_membrane); + + log_debug("exp(-ms/(RC)) = %11.4k [.]", neuron->exp_TC); + + log_debug("T refract = %u timesteps", neuron->T_refract); +} diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h new file mode 100644 index 00000000000..6abc2c3eab3 --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _NEURON_MODEL_LIF_CURR_IMPL_H_ +#define _NEURON_MODEL_LIF_CURR_IMPL_H_ + +#include "neuron_model.h" + + +typedef struct eprop_syn_state_t { + uint16_t z_bar; // low-pass filtered spike train + uint32_t ep_a; // adaptive component of eligibility vector + uint32_t e_bar; // low-pass filtered eligibility trace + uint16_t delta_w; // weight change to apply +}eprop_syn_state_t; + +///////////////////////////////////////////////////////////// +// definition for LIF neuron parameters +typedef struct neuron_t { + // membrane voltage [mV] + REAL V_membrane; + + // membrane resting voltage [mV] + REAL V_rest; + + // membrane resistance [MOhm] + REAL R_membrane; + + // 'fixed' computation parameter - time constant multiplier for + // closed-form solution + // exp(-(machine time step in ms)/(R * C)) [.] + REAL exp_TC; + + // offset current [nA] + REAL I_offset; + + // countdown to end of next refractory period [timesteps] + int32_t refract_timer; + + // post-spike reset membrane voltage [mV] + REAL V_reset; + + // refractory time of neuron [timesteps] + int32_t T_refract; + + // array of synaptic states - peak fan-in of 250 for this case + eprop_syn_state_t syn_state[250]; + +} neuron_t; + +typedef struct global_neuron_params_t { +} global_neuron_params_t; + +#endif // _NEURON_MODEL_LIF_CURR_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c new file mode 100644 index 00000000000..043b7cae77d --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +// Spinn_common includes +#include "static-assert.h" + +// sPyNNaker neural modelling includes +#include + +// Plasticity includes +#include "maths.h" +#include "post_events.h" + +#include "weight_dependence/weight.h" +#include "timing_dependence/timing.h" +#include +#include +#include + +static uint32_t synapse_type_index_bits; +static uint32_t synapse_index_bits; +static uint32_t synapse_index_mask; +static uint32_t synapse_type_index_mask; +static uint32_t synapse_delay_index_type_bits; +static uint32_t synapse_type_mask; + +uint32_t num_plastic_pre_synaptic_events = 0; +uint32_t plastic_saturation_count = 0; + +//--------------------------------------- +// Macros +//--------------------------------------- +// The plastic control words used by Morrison synapses store an axonal delay +// in the upper 3 bits. 
+// Assuming a maximum of 16 delay slots, this is all that is required as: +// +// 1) Dendritic + Axonal <= 15 +// 2) Dendritic >= Axonal +// +// Therefore: +// +// * Maximum value of dendritic delay is 15 (with axonal delay of 0) +// - It requires 4 bits +// * Maximum value of axonal delay is 7 (with dendritic delay of 8) +// - It requires 3 bits +// +// | Axonal delay | Dendritic delay | Type | Index | +// |---------------------------|--------------------|-------------------|--------------------| +// | SYNAPSE_AXONAL_DELAY_BITS | SYNAPSE_DELAY_BITS | SYNAPSE_TYPE_BITS | SYNAPSE_INDEX_BITS | +// | | | SYNAPSE_TYPE_INDEX_BITS | +// |---------------------------|--------------------|----------------------------------------| +#ifndef SYNAPSE_AXONAL_DELAY_BITS +#define SYNAPSE_AXONAL_DELAY_BITS 3 +#endif + +#define SYNAPSE_AXONAL_DELAY_MASK \ + ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1) + +//--------------------------------------- +// Structures +//--------------------------------------- +typedef struct { + pre_trace_t prev_trace; + uint32_t prev_time; +} pre_event_history_t; + +post_event_history_t *post_event_history; + +/* PRIVATE FUNCTIONS */ + +//--------------------------------------- +// Synapse update loop +//--------------------------------------- +static inline final_state_t plasticity_update_synapse( + uint32_t time, + const uint32_t last_pre_time, const pre_trace_t last_pre_trace, + const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, + const uint32_t delay_axonal, update_state_t current_state, + const post_event_history_t *post_event_history) { + // Apply axonal delay to time of last presynaptic spike + const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; + + // Get the post-synaptic window of events to be processed + const uint32_t window_begin_time = + (delayed_last_pre_time >= delay_dendritic) + ? 
(delayed_last_pre_time - delay_dendritic) : 0; + const uint32_t window_end_time = time + delay_axonal - delay_dendritic; + post_event_window_t post_window = post_events_get_window_delayed( + post_event_history, window_begin_time, window_end_time); + + log_debug("\tPerforming deferred synapse update at time:%u", time); + log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", + window_begin_time, window_end_time, post_window.prev_time, + post_window.num_events); + + // print_event_history(post_event_history); + // print_delayed_window_events(post_event_history, window_begin_time, + // window_end_time, delay_dendritic); + + // Process events in post-synaptic window + while (post_window.num_events > 0) { + const uint32_t delayed_post_time = + *post_window.next_time + delay_dendritic; + log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", + delayed_post_time); + + // Apply spike to state + current_state = timing_apply_post_spike( + delayed_post_time, *post_window.next_trace, delayed_last_pre_time, + last_pre_trace, post_window.prev_time, post_window.prev_trace, + current_state); + + // Go onto next event + post_window = post_events_next_delayed(post_window, delayed_post_time); + } + + const uint32_t delayed_pre_time = time + delay_axonal; + log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", + delayed_pre_time, post_window.prev_time); + + // Apply spike to state + // **NOTE** dendritic delay is subtracted + current_state = timing_apply_pre_spike( + delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, + post_window.prev_time, post_window.prev_trace, current_state); + + // Return final synaptic word and weight + return synapse_structure_get_final_state(current_state); +} + +//--------------------------------------- +// Synaptic row plastic-region implementation +//--------------------------------------- +static inline plastic_synapse_t* plastic_synapses( + address_t plastic_region_address) { + 
const uint32_t pre_event_history_size_words = + sizeof(pre_event_history_t) / sizeof(uint32_t); + static_assert( + pre_event_history_size_words * sizeof(uint32_t) == sizeof(pre_event_history_t), + "Size of pre_event_history_t structure should be a multiple" + " of 32-bit words"); + + return (plastic_synapse_t *) + &plastic_region_address[pre_event_history_size_words]; +} + +//--------------------------------------- +static inline pre_event_history_t *plastic_event_history( + address_t plastic_region_address) { + return (pre_event_history_t *) &plastic_region_address[0]; +} + +void synapse_dynamics_print_plastic_synapses( + address_t plastic_region_address, address_t fixed_region_address, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + use(plastic_region_address); + use(fixed_region_address); + use(ring_buffer_to_input_buffer_left_shifts); + +#if LOG_LEVEL >= LOG_DEBUG + // Extract separate arrays of weights (from plastic region), + // Control words (from fixed region) and number of plastic synapses + plastic_synapse_t *plastic_words = plastic_synapses(plastic_region_address); + const control_t *control_words = + synapse_row_plastic_controls(fixed_region_address); + size_t plastic_synapse = + synapse_row_num_plastic_controls(fixed_region_address); + + log_debug("Plastic region %u synapses\n", plastic_synapse); + + // Loop through plastic synapses + for (uint32_t i = 0; i < plastic_synapse; i++) { + // Get next control word (auto incrementing control word) + uint32_t control_word = *control_words++; + uint32_t synapse_type = synapse_row_sparse_type( + control_word, synapse_index_bits, synapse_type_mask); + + // Get weight + update_state_t update_state = synapse_structure_get_update_state( + *plastic_words++, synapse_type); + final_state_t final_state = synapse_structure_get_final_state( + update_state); + weight_t weight = synapse_structure_get_final_weight(final_state); + + log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); + 
synapses_print_weight(
+            weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]);
+        log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n",
+            synapse_row_sparse_delay(control_word, synapse_type_index_bits),
+            synapse_types_get_type_char(synapse_type),
+            synapse_row_sparse_index(control_word, synapse_index_mask),
+            SYNAPSE_DELAY_MASK, synapse_type_index_bits);
+    }
+#endif // LOG_LEVEL >= LOG_DEBUG
+}
+
+//---------------------------------------
+static inline index_t sparse_axonal_delay(uint32_t x) {
+#if 1
+    use(x);
+    return 0;
+#else
+    return (x >> synapse_delay_index_type_bits) & SYNAPSE_AXONAL_DELAY_MASK;
+#endif
+}
+
+address_t synapse_dynamics_initialise(
+        address_t address, uint32_t n_neurons, uint32_t n_synapse_types,
+        uint32_t *ring_buffer_to_input_buffer_left_shifts) {
+    // Load timing dependence data
+    address_t weight_region_address = timing_initialise(address);
+    if (weight_region_address == NULL) {
+        return NULL;
+    }
+
+    // Load weight dependence data
+    address_t weight_result = weight_initialise(
+        weight_region_address, n_synapse_types,
+        ring_buffer_to_input_buffer_left_shifts);
+    if (weight_result == NULL) {
+        return NULL;
+    }
+
+    post_event_history = post_events_init_buffers(n_neurons);
+    if (post_event_history == NULL) {
+        return NULL;
+    }
+
+    uint32_t n_neurons_power_2 = n_neurons;
+    uint32_t log_n_neurons = 1;
+    if (n_neurons != 1) {
+        if (!is_power_of_2(n_neurons)) {
+            n_neurons_power_2 = next_power_of_2(n_neurons);
+        }
+        log_n_neurons = ilog_2(n_neurons_power_2);
+    }
+
+    uint32_t n_synapse_types_power_2 = n_synapse_types;
+    if (!is_power_of_2(n_synapse_types)) {
+        n_synapse_types_power_2 = next_power_of_2(n_synapse_types);
+    }
+    uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2);
+
+    synapse_type_index_bits = log_n_neurons + log_n_synapse_types;
+    synapse_type_index_mask = (1 << synapse_type_index_bits) - 1;
+    synapse_index_bits = log_n_neurons;
+    synapse_index_mask = (1 << synapse_index_bits) - 1;
+    synapse_delay_index_type_bits =
SYNAPSE_DELAY_BITS + synapse_type_index_bits; + synapse_type_mask = (1 << log_n_synapse_types) - 1; + + return weight_result; +} + +bool synapse_dynamics_process_plastic_synapses( + address_t plastic_region_address, address_t fixed_region_address, + weight_t *ring_buffers, uint32_t time) { + // Extract separate arrays of plastic synapses (from plastic region), + // Control words (from fixed region) and number of plastic synapses + plastic_synapse_t *plastic_words = + plastic_synapses(plastic_region_address); + const control_t *control_words = + synapse_row_plastic_controls(fixed_region_address); + size_t plastic_synapse = + synapse_row_num_plastic_controls(fixed_region_address); + + num_plastic_pre_synaptic_events += plastic_synapse; + + // Get event history from synaptic row + pre_event_history_t *event_history = + plastic_event_history(plastic_region_address); + + // Get last pre-synaptic event from event history + const uint32_t last_pre_time = event_history->prev_time; + const pre_trace_t last_pre_trace = event_history->prev_trace; + + // Update pre-synaptic trace + log_debug("Adding pre-synaptic event to trace at time:%u", time); + event_history->prev_time = time; + event_history->prev_trace = + timing_add_pre_spike(time, last_pre_time, last_pre_trace); + + // Loop through plastic synapses + for (; plastic_synapse > 0; plastic_synapse--) { + // Get next control word (auto incrementing) + uint32_t control_word = *control_words++; + + // Extract control-word components + // **NOTE** cunningly, control word is just the same as lower + // 16-bits of 32-bit fixed synapse so same functions can be used + uint32_t delay_axonal = sparse_axonal_delay(control_word); + uint32_t delay_dendritic = synapse_row_sparse_delay( + control_word, synapse_type_index_bits); + uint32_t type = synapse_row_sparse_type( + control_word, synapse_index_bits, synapse_type_mask); + uint32_t index = + synapse_row_sparse_index(control_word, synapse_index_mask); + uint32_t type_index = 
synapse_row_sparse_type_index( + control_word, synapse_type_index_mask); + + // Create update state from the plastic synaptic word + update_state_t current_state = + synapse_structure_get_update_state(*plastic_words, type); + + // Update the synapse state + final_state_t final_state = plasticity_update_synapse( + time, last_pre_time, last_pre_trace, event_history->prev_trace, + delay_dendritic, delay_axonal, current_state, + &post_event_history[index]); + + // Convert into ring buffer offset + uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + delay_axonal + delay_dendritic + time, type_index, + synapse_type_index_bits); + + // Add weight to ring-buffer entry + // **NOTE** Dave suspects that this could be a + // potential location for overflow + + uint32_t accumulation = ring_buffers[ring_buffer_index] + + synapse_structure_get_final_weight(final_state); + + uint32_t sat_test = accumulation & 0x10000; + if (sat_test) { + accumulation = sat_test - 1; + plastic_saturation_count++; + } + + ring_buffers[ring_buffer_index] = accumulation; + + // Write back updated synaptic word to plastic region + *plastic_words++ = + synapse_structure_get_final_synaptic_word(final_state); + } + return true; +} + +void synapse_dynamics_process_post_synaptic_event( + uint32_t time, index_t neuron_index) { + log_debug("Adding post-synaptic event to trace at time:%u", time); + + // Add post-event + post_event_history_t *history = &post_event_history[neuron_index]; + const uint32_t last_post_time = history->times[history->count_minus_one]; + const post_trace_t last_post_trace = + history->traces[history->count_minus_one]; + post_events_add(time, history, + timing_add_post_spike(time, last_post_time, last_post_trace)); +} + +input_t synapse_dynamics_get_intrinsic_bias( + uint32_t time, index_t neuron_index) { + use(time); + use(neuron_index); + return 0.0k; +} + +uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { + return num_plastic_pre_synaptic_events; 
+} + +uint32_t synapse_dynamics_get_plastic_saturation_count(void) { + return plastic_saturation_count; +} + +#if SYNGEN_ENABLED == 1 + +//! \brief Searches the synaptic row for the the connection with the +//! specified post-synaptic ID +//! \param[in] id: the (core-local) ID of the neuron to search for in the +//! synaptic row +//! \param[in] row: the core-local address of the synaptic row +//! \param[out] sp_data: the address of a struct through which to return +//! weight, delay information +//! \return bool: was the search successful? +bool find_plastic_neuron_with_id( + uint32_t id, address_t row, structural_plasticity_data_t *sp_data) { + address_t fixed_region = synapse_row_fixed_region(row); + address_t plastic_region_address = synapse_row_plastic_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(plastic_region_address); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + plastic_synapse_t weight; + uint32_t delay; + + // Loop through plastic synapses + for (; plastic_synapse > 0; plastic_synapse--) { + // Get next control word (auto incrementing) + weight = *plastic_words++; + uint32_t control_word = *control_words++; + + // Check if index is the one I'm looking for + delay = synapse_row_sparse_delay(control_word, synapse_type_index_bits); + if (synapse_row_sparse_index(control_word, synapse_index_mask) == id) { + sp_data->weight = weight; + sp_data->offset = + synapse_row_num_plastic_controls(fixed_region) + - plastic_synapse; + sp_data->delay = delay; + return true; + } + } + + sp_data->weight = -1; + sp_data->offset = -1; + sp_data->delay = -1; + return false; +} + +//! \brief Remove the entry at the specified offset in the synaptic row +//! \param[in] offset: the offset in the row at which to remove the entry +//! \param[in] row: the core-local address of the synaptic row +//! \return bool: was the removal successful? 
+bool remove_plastic_neuron_at_offset(uint32_t offset, address_t row) { + address_t fixed_region = synapse_row_fixed_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(synapse_row_plastic_region(row)); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + + // Delete weight at offset + plastic_words[offset] = plastic_words[plastic_synapse - 1]; + plastic_words[plastic_synapse - 1] = 0; + + // Delete control word at offset + control_words[offset] = control_words[plastic_synapse - 1]; + control_words[plastic_synapse - 1] = 0; + + // Decrement FP + fixed_region[1]--; + + return true; +} + +//! ensuring the weight is of the correct type and size +static inline plastic_synapse_t weight_conversion(uint32_t weight) { + return (plastic_synapse_t) (0xFFFF & weight); +} + +//! packing all of the information into the required plastic control word +static inline control_t control_conversion( + uint32_t id, uint32_t delay, uint32_t type) { + control_t new_control = + (delay & ((1 << SYNAPSE_DELAY_BITS) - 1)) << synapse_type_index_bits; + new_control |= (type & ((1 << synapse_type_index_bits) - 1)) << synapse_index_bits; + new_control |= id & ((1 << synapse_index_bits) - 1); + return new_control; +} + +//! \brief Add a plastic entry in the synaptic row +//! \param[in] id: the (core-local) ID of the post-synaptic neuron to be added +//! \param[in] row: the core-local address of the synaptic row +//! \param[in] weight: the initial weight associated with the connection +//! \param[in] delay: the delay associated with the connection +//! \param[in] type: the type of the connection (e.g. inhibitory) +//! \return bool: was the addition successful? 
+bool add_plastic_neuron_with_id(uint32_t id, address_t row, + uint32_t weight, uint32_t delay, uint32_t type) { + plastic_synapse_t new_weight = weight_conversion(weight); + control_t new_control = control_conversion(id, delay, type); + + address_t fixed_region = synapse_row_fixed_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(synapse_row_plastic_region(row)); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + + // Add weight at offset + plastic_words[plastic_synapse] = new_weight; + + // Add control word at offset + control_words[plastic_synapse] = new_control; + + // Increment FP + fixed_region[1]++; + return true; +} +#endif diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h new file mode 100644 index 00000000000..7e37a89aba3 --- /dev/null +++ b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/*! \file +* +* \brief implementation of synapse_types.h for a simple duel exponential decay +* to synapses. 
+* +* \details If we have combined excitatory_one/excitatory_two/inhibitory +* synapses it will be because both excitatory and inhibitory synaptic +* time-constants (and thus propogators) are identical. +*/ + +#ifndef _SYNAPSE_TYPES_DUAL_EXCITATORY_EXPONENTIAL_IMPL_H_ +#define _SYNAPSE_TYPES_DUAL_EXCITATORY_EXPONENTIAL_IMPL_H_ + + +//--------------------------------------- +// Macros +//--------------------------------------- +#define SYNAPSE_TYPE_BITS 2 +#define SYNAPSE_TYPE_COUNT 3 + +#define NUM_EXCITATORY_RECEPTORS 2 +#define NUM_INHIBITORY_RECEPTORS 1 + +#include +#include +#include "synapse_types.h" + + +//--------------------------------------- +// Synapse parameters +//--------------------------------------- +input_t excitatory_response[NUM_EXCITATORY_RECEPTORS]; +input_t inhibitory_response[NUM_INHIBITORY_RECEPTORS]; + +typedef struct exp_params_t { + decay_t decay; + decay_t init; + input_t synaptic_input_value; +} exp_params_t; + +typedef struct synapse_param_t { + exp_params_t exc; + exp_params_t exc2; + exp_params_t inh; +} synapse_param_t; + + +//! human readable definition for the positions in the input regions for the +//! different synapse types. +typedef enum input_buffer_regions { + EXCITATORY_ONE, EXCITATORY_TWO, INHIBITORY, +} input_buffer_regions; + +//--------------------------------------- +// Synapse shaping inline implementation +//--------------------------------------- + +//! \brief decays the stuff thats sitting in the input buffers +//! (to compensate for the valve behaviour of a synapse +//! in biology (spike goes in, synapse opens, then closes slowly) plus the +//! leaky aspect of a neuron). as these have not yet been processed and applied +//! to the neuron. +//! \param[in] parameter: the pointer to the parameters to use +//! 
\return nothing +static inline void exp_shaping(exp_params_t* exp_params) { + // decay value according to decay constant + exp_params->synaptic_input_value = + decay_s1615(exp_params->synaptic_input_value, + exp_params->decay); +} + +static inline void synapse_types_shape_input( + synapse_param_pointer_t parameter) { + exp_shaping(¶meter->exc); + exp_shaping(¶meter->exc2); + exp_shaping(¶meter->inh); +} + +//! \brief helper function to add input for a given timer period to a given +//! neuron +//! \param[in] parameter: the pointer to the parameters to use +//! \param[in] input the inputs to add. +//! \return None +static inline void add_input_exp(exp_params_t* exp_params, input_t input) { + exp_params->synaptic_input_value = exp_params->synaptic_input_value + + decay_s1615(input, exp_params->init); +} + +//! \brief adds the inputs for a give timer period to a given neuron that is +//! being simulated by this model +//! \param[in] synapse_type_index the type of input that this input is to be +//! considered (aka excitatory or inhibitory etc) +//! \param[in] parameter: the pointer to the parameters to use +//! \param[in] input the inputs for that given synapse_type. +//! \return None +static inline void synapse_types_add_neuron_input( + index_t synapse_type_index, synapse_param_pointer_t parameter, + input_t input) { + if (synapse_type_index == EXCITATORY_ONE) { + add_input_exp(¶meter->exc, input); + } else if (synapse_type_index == EXCITATORY_TWO) { + add_input_exp(¶meter->exc2, input); + } else if (synapse_type_index == INHIBITORY) { + add_input_exp(¶meter->inh, input); + } +} + +//! \brief extracts the excitatory input buffers from the buffers available +//! for a given parameter set +//! \param[in] parameter: the pointer to the parameters to use +//! \return the excitatory input buffers for a given neuron ID. 
+static inline input_t* synapse_types_get_excitatory_input( + synapse_param_pointer_t parameter) { + excitatory_response[0] = parameter->exc.synaptic_input_value; + excitatory_response[1] = parameter->exc2.synaptic_input_value; + return &excitatory_response[0]; +} + +//! \brief extracts the inhibitory input buffers from the buffers available +//! for a given parameter set +//! \param[in] parameter: the pointer to the parameters to use +//! \return the inhibitory input buffers for a given neuron ID. +static inline input_t* synapse_types_get_inhibitory_input( + synapse_param_pointer_t parameter) { + inhibitory_response[0] = parameter->inh.synaptic_input_value; + return &inhibitory_response[0]; +} + +//! \brief returns a human readable character for the type of synapse. +//! examples would be X = excitatory types, I = inhibitory types etc etc. +//! \param[in] synapse_type_index the synapse type index +//! (there is a specific index interpretation in each synapse type) +//! \return a human readable character representing the synapse type. +static inline const char *synapse_types_get_type_char( + index_t synapse_type_index) { + if (synapse_type_index == EXCITATORY_ONE) { + return "X1"; + } else if (synapse_type_index == EXCITATORY_TWO) { + return "X2"; + } else if (synapse_type_index == INHIBITORY) { + return "I"; + } else { + log_debug("did not recognise synapse type %i", synapse_type_index); + return "?"; + } +} + +//! \brief prints the input for a neuron ID given the available inputs +//! currently only executed when the models are in debug mode, as the prints are +//! controlled from the synapses.c print_inputs method. +//! \param[in] parameter: the pointer to the parameters to use +//! \return Nothing +static inline void synapse_types_print_input( + synapse_param_pointer_t parameter) { + io_printf(IO_BUF, "%12.6k + %12.6k - %12.6k", + parameter->exc.synaptic_input_value, + parameter->exc2.synaptic_input_value, + parameter->inh.synaptic_input_value); +} + +//! 
\brief printer call +//! \param[in] parameter: the pointer to the parameters to print +static inline void synapse_types_print_parameters( + synapse_param_pointer_t parameter) { + log_info("exc_decay = %11.4k\n", parameter->exc.decay); + log_info("exc_init = %11.4k\n", parameter->exc.init); + log_info("exc2_decay = %11.4k\n", parameter->exc2.decay); + log_info("exc2_init = %11.4k\n", parameter->exc2.init); + log_info("inh_decay = %11.4k\n", parameter->inh.decay); + log_info("inh_init = %11.4k\n", parameter->inh.init); + log_info("gsyn_excitatory_initial_value = %11.4k\n", + parameter->exc.synaptic_input_value); + log_info("gsyn_excitatory2_initial_value = %11.4k\n", + parameter->exc2.synaptic_input_value); + log_info("gsyn_inhibitory_initial_value = %11.4k\n", + parameter->inh.synaptic_input_value); +} + +#endif // _SYNAPSE_TYPES_DUAL_EXCITATORY_EXPONENTIAL_IMPL_H_ diff --git a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h new file mode 100644 index 00000000000..1e022d7b07b --- /dev/null +++ b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _THRESHOLD_TYPE_STATIC_H_ +#define _THRESHOLD_TYPE_STATIC_H_ + +#include "threshold_type.h" + +typedef struct threshold_type_t { + // The value of the static threshold + REAL threshold_value; +} threshold_type_t; + +static inline bool threshold_type_is_above_threshold( + state_t value, threshold_type_pointer_t threshold_type) { + return REAL_COMPARE(value, >=, threshold_type->threshold_value); +} + +#endif // _THRESHOLD_TYPE_STATIC_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index feffbc1be10..fbfa1b42569 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -32,4 +32,4 @@ "IFCondExpBase", "IFCurrAlpha", "IFCurrDualExpBase", "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", "IzkCurrExpBase", "IFCondExpStoc", - "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", ] + "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", "EPropAdaptive"] diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py new file mode 100644 index 00000000000..7e450d56437 --- /dev/null +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -0,0 +1,46 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+
+from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard
+from spynnaker.pyNN.models.defaults import default_initial_values
+from spynnaker.pyNN.models.neuron.neuron_models import (
+    NeuronModelEProp)
+from spynnaker.pyNN.models.neuron.synapse_types import (
+    SynapseTypeEProp)
+from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent
+from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeAdaptive
+
+class EPropAdaptive(AbstractPyNNNeuronModelStandard):
+    """ Adaptive threshold neuron with eprop support
+    """
+
+    @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh"})
+    def __init__(
+            self, tau_m=20.0, cm=1.0, v_rest=-65.0, v_reset=-65.0,
+            v_thresh=-50.0, tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0,
+            tau_refrac=0.1, i_offset=0.0, v=-65.0, isyn_exc=0.0, isyn_inh=0.0,
+            isyn_exc2=0.0):
+        # pylint: disable=too-many-arguments, too-many-locals
+        neuron_model = NeuronModelEProp(
+            v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac)
+        synapse_type = SynapseTypeEProp(
+            tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2, isyn_inh)
+        input_type = InputTypeCurrent()
+        threshold_type = ThresholdTypeAdaptive(v_thresh)
+
+        super(EPropAdaptive, self).__init__(
+            model_name="eprop_adaptive", binary="eprop_adaptive.aplx",
+            neuron_model=neuron_model, input_type=input_type,
+            synapse_type=synapse_type, threshold_type=threshold_type)
diff --git a/spynnaker/pyNN/models/neuron/builds/if_curr_dual_exp_base.py b/spynnaker/pyNN/models/neuron/builds/if_curr_dual_exp_base.py
index 25bc29ec397..cde23c00e82 100644
--- a/spynnaker/pyNN/models/neuron/builds/if_curr_dual_exp_base.py
+++ b/spynnaker/pyNN/models/neuron/builds/if_curr_dual_exp_base.py
@@ -38,9 +38,9 @@ def __init__(
         # pylint: disable=too-many-arguments, too-many-locals
         neuron_model = NeuronModelLeakyIntegrateAndFire(
             v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac)
-        synapse_type = SynapseTypeDualExponential(
+        synapse_type = SynapseTypeDualExponential( 
tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2, isyn_inh)
-        input_type = InputTypeCurrent()
+        input_type = InputTypeCurrent()
         threshold_type = ThresholdTypeStatic(v_thresh)
 
         super(IFCurrDualExpBase, self).__init__(
diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py
index 2e5c36952d1..1b6e76428e7 100644
--- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py
+++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py
@@ -19,4 +19,4 @@
     NeuronModelLeakyIntegrateAndFire)
 
 __all__ = ["AbstractNeuronModel", "NeuronModelIzh",
-           "NeuronModelLeakyIntegrateAndFire"]
+           "NeuronModelLeakyIntegrateAndFire", "NeuronModelEProp"]
diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py
new file mode 100644
index 00000000000..ca1d9e61084
--- /dev/null
+++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py
@@ -0,0 +1,180 @@
+# Copyright (c) 2017-2019 The University of Manchester
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ +import numpy +from spinn_utilities.overrides import overrides +from data_specification.enums import DataType +from pacman.executor.injection_decorator import inject_items +from .abstract_neuron_model import AbstractNeuronModel + +V = "v" +V_REST = "v_rest" +TAU_M = "tau_m" +CM = "cm" +I_OFFSET = "i_offset" +V_RESET = "v_reset" +TAU_REFRAC = "tau_refrac" +COUNT_REFRAC = "count_refrac" + +UNITS = { + V: 'mV', + V_REST: 'mV', + TAU_M: 'ms', + CM: 'nF', + I_OFFSET: 'nA', + V_RESET: 'mV', + TAU_REFRAC: 'ms' +} + + +class NeuronModelEProp(AbstractNeuronModel): + __slots__ = [ + "__v_init", + "__v_rest", + "__tau_m", + "__cm", + "__i_offset", + "__v_reset", + "__tau_refrac"] + + def __init__( + self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac): + super(NeuronModelEProp, self).__init__( + [DataType.S1615, # v + DataType.S1615, # v_rest + DataType.S1615, # r_membrane (= tau_m / cm) + DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.S1615, # i_offset + DataType.INT32, # count_refrac + DataType.S1615, # v_reset + DataType.INT32]) # tau_refrac + + if v_init is None: + v_init = v_rest + self.__v_init = v_init + self.__v_rest = v_rest + self.__tau_m = tau_m + self.__cm = cm + self.__i_offset = i_offset + self.__v_reset = v_reset + self.__tau_refrac = tau_refrac + + @overrides(AbstractNeuronModel.get_n_cpu_cycles) + def get_n_cpu_cycles(self, n_neurons): + # A bit of a guess + return 100 * n_neurons + + @overrides(AbstractNeuronModel.add_parameters) + def add_parameters(self, parameters): + parameters[V_REST] = self.__v_rest + parameters[TAU_M] = self.__tau_m + parameters[CM] = self.__cm + parameters[I_OFFSET] = self.__i_offset + parameters[V_RESET] = self.__v_reset + parameters[TAU_REFRAC] = self.__tau_refrac + + @overrides(AbstractNeuronModel.add_state_variables) + def add_state_variables(self, state_variables): + state_variables[V] = self.__v_init + state_variables[COUNT_REFRAC] = 0 + + @overrides(AbstractNeuronModel.get_units) + def get_units(self, 
variable): + return UNITS[variable] + + @overrides(AbstractNeuronModel.has_variable) + def has_variable(self, variable): + return variable in UNITS + + @inject_items({"ts": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + def get_values(self, parameters, state_variables, vertex_slice, ts): + + # Add the rest of the data + return [state_variables[V], parameters[V_REST], + parameters[TAU_M] / parameters[CM], + parameters[TAU_M].apply_operation( + operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), + parameters[I_OFFSET], state_variables[COUNT_REFRAC], + parameters[V_RESET], + parameters[TAU_REFRAC].apply_operation( + operation=lambda x: int(numpy.ceil(x / (ts / 1000.0))))] + + @overrides(AbstractNeuronModel.update_values) + def update_values(self, values, parameters, state_variables): + + # Read the data + (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, + _v_reset, _tau_refrac) = values + + # Copy the changed data only + state_variables[V] = v + state_variables[COUNT_REFRAC] = count_refrac + + @property + def v_init(self): + return self.__v_init + + @v_init.setter + def v_init(self, v_init): + self.__v_init = v_init + + @property + def v_rest(self): + return self.__v_rest + + @v_rest.setter + def v_rest(self, v_rest): + self.__v_rest = v_rest + + @property + def tau_m(self): + return self.__tau_m + + @tau_m.setter + def tau_m(self, tau_m): + self.__tau_m = tau_m + + @property + def cm(self): + return self.__cm + + @cm.setter + def cm(self, cm): + self.__cm = cm + + @property + def i_offset(self): + return self.__i_offset + + @i_offset.setter + def i_offset(self, i_offset): + self.__i_offset = i_offset + + @property + def v_reset(self): + return self.__v_reset + + @v_reset.setter + def v_reset(self, v_reset): + self.__v_reset = v_reset + + @property + def tau_refrac(self): + return self.__tau_refrac + + @tau_refrac.setter + def tau_refrac(self, tau_refrac): + self.__tau_refrac = tau_refrac diff --git 
a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py index ded06684d3a..c261a6bf9b4 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py @@ -20,4 +20,5 @@ from .synapse_type_alpha import SynapseTypeAlpha __all__ = ["AbstractSynapseType", "SynapseTypeDualExponential", - "SynapseTypeExponential", "SynapseTypeDelta", "SynapseTypeAlpha"] + "SynapseTypeExponential", "SynapseTypeDelta", "SynapseTypeAlpha", + "SynapseTypeEProp"] diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py new file mode 100644 index 00000000000..a863b7d33e2 --- /dev/null +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py @@ -0,0 +1,186 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
import numpy
from spinn_utilities.overrides import overrides
from data_specification.enums import DataType
from pacman.executor.injection_decorator import inject_items
from .abstract_synapse_type import AbstractSynapseType

# Keys used for parameters and state variables
TAU_SYN_E = 'tau_syn_E'
TAU_SYN_E2 = 'tau_syn_E2'
TAU_SYN_I = 'tau_syn_I'
ISYN_EXC = "isyn_exc"
ISYN_EXC2 = "isyn_exc2"
ISYN_INH = "isyn_inh"

# Synaptic time constants are times, so their unit is "ms"; the original
# declared them as "mV", a copy-paste slip from the voltage parameters.
UNITS = {
    TAU_SYN_E: "ms",
    TAU_SYN_E2: "ms",
    TAU_SYN_I: "ms",
    ISYN_EXC: "",
    ISYN_EXC2: "",
    ISYN_INH: "",
}


class SynapseTypeEProp(AbstractSynapseType):
    """ Synapse type for the eprop model: two excitatory and one\
        inhibitory exponentially-shaped current inputs.
    """
    __slots__ = [
        "__tau_syn_E",
        "__tau_syn_E2",
        "__tau_syn_I",
        "__isyn_exc",
        "__isyn_exc2",
        "__isyn_inh"]

    def __init__(
            self, tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2,
            isyn_inh):
        """
        :param tau_syn_E: decay time constant of the first excitatory\
            input (ms)
        :param tau_syn_E2: decay time constant of the second excitatory\
            input (ms)
        :param tau_syn_I: decay time constant of the inhibitory input (ms)
        :param isyn_exc: initial first excitatory input current
        :param isyn_exc2: initial second excitatory input current
        :param isyn_inh: initial inhibitory input current
        """
        # One (decay, init, current) triple per receptor; order must match
        # the machine-side synapse_param_t layout
        super(SynapseTypeEProp, self).__init__(
            [DataType.U032,    # decay_E
             DataType.U032,    # init_E
             DataType.S1615,   # isyn_exc
             DataType.U032,    # decay_E2
             DataType.U032,    # init_E2
             DataType.S1615,   # isyn_exc2
             DataType.U032,    # decay_I
             DataType.U032,    # init_I
             DataType.S1615])  # isyn_inh
        self.__tau_syn_E = tau_syn_E
        self.__tau_syn_E2 = tau_syn_E2
        self.__tau_syn_I = tau_syn_I
        self.__isyn_exc = isyn_exc
        self.__isyn_exc2 = isyn_exc2
        self.__isyn_inh = isyn_inh

    @overrides(AbstractSynapseType.get_n_cpu_cycles)
    def get_n_cpu_cycles(self, n_neurons):
        # Rough per-neuron cost estimate for placement
        return 100 * n_neurons

    @overrides(AbstractSynapseType.add_parameters)
    def add_parameters(self, parameters):
        parameters[TAU_SYN_E] = self.__tau_syn_E
        parameters[TAU_SYN_E2] = self.__tau_syn_E2
        parameters[TAU_SYN_I] = self.__tau_syn_I

    @overrides(AbstractSynapseType.add_state_variables)
    def add_state_variables(self, state_variables):
        state_variables[ISYN_EXC] = self.__isyn_exc
        state_variables[ISYN_EXC2] = self.__isyn_exc2
        state_variables[ISYN_INH] = self.__isyn_inh

    @overrides(AbstractSynapseType.get_units)
    def get_units(self, variable):
        return UNITS[variable]

    @overrides(AbstractSynapseType.has_variable)
    def has_variable(self, variable):
        return variable in UNITS

    @inject_items({"ts": "MachineTimeStep"})
    @overrides(AbstractSynapseType.get_values, additional_arguments={'ts'})
    def get_values(self, parameters, state_variables, vertex_slice, ts):
        """ Build the machine data: (decay, init, current) per receptor.

        :param ts: machine time step (us)
        """
        ts_ms = float(ts) / 1000.0

        # Per-time-step exponential decay multiplier for a time constant
        # tau (ms)
        def decay(tau):
            return numpy.exp(-ts_ms / tau)

        # Input scaling that preserves total injected charge regardless of
        # the time step
        def init(tau):
            return (tau / ts_ms) * (1.0 - numpy.exp(-ts_ms / tau))

        return [parameters[TAU_SYN_E].apply_operation(decay),
                parameters[TAU_SYN_E].apply_operation(init),
                state_variables[ISYN_EXC],
                parameters[TAU_SYN_E2].apply_operation(decay),
                parameters[TAU_SYN_E2].apply_operation(init),
                state_variables[ISYN_EXC2],
                parameters[TAU_SYN_I].apply_operation(decay),
                parameters[TAU_SYN_I].apply_operation(init),
                state_variables[ISYN_INH]]

    @overrides(AbstractSynapseType.update_values)
    def update_values(self, values, parameters, state_variables):
        # Unpack in the same order as get_values; only the synaptic
        # currents are state and need copying back
        (_decay_E, _init_E, isyn_exc, _decay_E2, _init_E2, isyn_exc2,
         _decay_I, _init_I, isyn_inh) = values

        state_variables[ISYN_EXC] = isyn_exc
        state_variables[ISYN_EXC2] = isyn_exc2
        state_variables[ISYN_INH] = isyn_inh

    @overrides(AbstractSynapseType.get_n_synapse_types)
    def get_n_synapse_types(self):
        return 3

    @overrides(AbstractSynapseType.get_synapse_id_by_target)
    def get_synapse_id_by_target(self, target):
        # IDs must match the machine-side input_buffer_regions enum
        if target == "excitatory":
            return 0
        elif target == "excitatory2":
            return 1
        elif target == "inhibitory":
            return 2
        return None

    @overrides(AbstractSynapseType.get_synapse_targets)
    def get_synapse_targets(self):
        return "excitatory", "excitatory2", "inhibitory"

    @property
    def tau_syn_E(self):
        return self.__tau_syn_E

    @tau_syn_E.setter
    def tau_syn_E(self, tau_syn_E):
        self.__tau_syn_E = tau_syn_E

    @property
    def tau_syn_E2(self):
        return self.__tau_syn_E2

    @tau_syn_E2.setter
    def tau_syn_E2(self, tau_syn_E2):
        self.__tau_syn_E2 = tau_syn_E2

    @property
    def tau_syn_I(self):
        return self.__tau_syn_I

    @tau_syn_I.setter
    def tau_syn_I(self, tau_syn_I):
        self.__tau_syn_I = tau_syn_I

    @property
    def isyn_exc(self):
        return self.__isyn_exc

    @isyn_exc.setter
    def isyn_exc(self, isyn_exc):
        self.__isyn_exc = isyn_exc

    @property
    def isyn_inh(self):
        return self.__isyn_inh

    @isyn_inh.setter
    def isyn_inh(self, isyn_inh):
        self.__isyn_inh = isyn_inh

    @property
    def isyn_exc2(self):
        return self.__isyn_exc2

    @isyn_exc2.setter
    def isyn_exc2(self, isyn_exc2):
        self.__isyn_exc2 = isyn_exc2
from spinn_utilities.overrides import overrides
from data_specification.enums import DataType
from .abstract_threshold_type import AbstractThresholdType

V_THRESH = "v_thresh"

UNITS = {V_THRESH: "mV"}


class ThresholdTypeAdaptive(AbstractThresholdType):
    """ Threshold type for the eprop_adaptive model.

    Currently behaves as a plain static threshold; the adaptive component
    is still to be implemented on the machine side.

    Fix: the class was originally (mis)named ``ThresholdTypeStatic``,
    which broke the package ``__init__`` import of
    ``ThresholdTypeAdaptive`` and shadowed the real static threshold type.
    """
    __slots__ = ["__v_thresh"]

    def __init__(self, v_thresh):
        """
        :param v_thresh: the threshold voltage (mV)
        """
        super(ThresholdTypeAdaptive, self).__init__([
            DataType.S1615])  # v_thresh
        self.__v_thresh = v_thresh

    @overrides(AbstractThresholdType.get_n_cpu_cycles)
    def get_n_cpu_cycles(self, n_neurons):
        # Just a comparison, but 2 just in case!
        return 2 * n_neurons

    @overrides(AbstractThresholdType.add_parameters)
    def add_parameters(self, parameters):
        parameters[V_THRESH] = self.__v_thresh

    @overrides(AbstractThresholdType.add_state_variables)
    def add_state_variables(self, state_variables):
        # No state variables for a static threshold
        pass

    @overrides(AbstractThresholdType.get_units)
    def get_units(self, variable):
        return UNITS[variable]

    @overrides(AbstractThresholdType.has_variable)
    def has_variable(self, variable):
        return variable in UNITS

    @overrides(AbstractThresholdType.get_values)
    def get_values(self, parameters, state_variables, vertex_slice):
        # Single machine word: the threshold itself
        return [parameters[V_THRESH]]

    @overrides(AbstractThresholdType.update_values)
    def update_values(self, values, parameters, state_variables):
        # Nothing is state, so nothing to copy back
        (_v_thresh,) = values

    @property
    def v_thresh(self):
        return self.__v_thresh

    @v_thresh.setter
    def v_thresh(self, v_thresh):
        self.__v_thresh = v_thresh
from spinn_utilities.overrides import overrides
from data_specification.enums import DataType
from .abstract_threshold_type import AbstractThresholdType

V_THRESH = "v_thresh"

UNITS = {V_THRESH: "mV"}


class ThresholdTypeEProp(AbstractThresholdType):
    """ Threshold type for the eprop model; currently a static value.

    Fix: ``__init__`` called ``super(ThresholdTypeStatic, self)`` —
    ``ThresholdTypeStatic`` is not imported in this module, so every
    construction raised ``NameError``.  The super call must name this
    class, ``ThresholdTypeEProp``.
    """
    __slots__ = ["__v_thresh"]

    def __init__(self, v_thresh):
        """
        :param v_thresh: the threshold voltage (mV)
        """
        super(ThresholdTypeEProp, self).__init__([
            DataType.S1615])  # v_thresh
        self.__v_thresh = v_thresh

    @overrides(AbstractThresholdType.get_n_cpu_cycles)
    def get_n_cpu_cycles(self, n_neurons):
        # Just a comparison, but 2 just in case!
        return 2 * n_neurons

    @overrides(AbstractThresholdType.add_parameters)
    def add_parameters(self, parameters):
        parameters[V_THRESH] = self.__v_thresh

    @overrides(AbstractThresholdType.add_state_variables)
    def add_state_variables(self, state_variables):
        # No state variables for a static threshold
        pass

    @overrides(AbstractThresholdType.get_units)
    def get_units(self, variable):
        return UNITS[variable]

    @overrides(AbstractThresholdType.has_variable)
    def has_variable(self, variable):
        return variable in UNITS

    @overrides(AbstractThresholdType.get_values)
    def get_values(self, parameters, state_variables, vertex_slice):
        # Single machine word: the threshold itself
        return [parameters[V_THRESH]]

    @overrides(AbstractThresholdType.update_values)
    def update_values(self, values, parameters, state_variables):
        # Nothing is state, so nothing to copy back
        (_v_thresh,) = values

    @property
    def v_thresh(self):
        return self.__v_thresh

    @v_thresh.setter
    def v_thresh(self, v_thresh):
        self.__v_thresh = v_thresh
b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -20,12 +20,14 @@ #include "neuron_model.h" +#define SYNAPSES_PER_NEURON 250 + typedef struct eprop_syn_state_t { + uint16_t delta_w; // weight change to apply uint16_t z_bar; // low-pass filtered spike train uint32_t ep_a; // adaptive component of eligibility vector uint32_t e_bar; // low-pass filtered eligibility trace - uint16_t delta_w; // weight change to apply }eprop_syn_state_t; ///////////////////////////////////////////////////////////// @@ -57,8 +59,11 @@ typedef struct neuron_t { // refractory time of neuron [timesteps] int32_t T_refract; + // pseudo derivative + REAL psi; + // array of synaptic states - peak fan-in of 250 for this case - eprop_syn_state_t syn_state[250]; + eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; } neuron_t; diff --git a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h index 1e022d7b07b..b3e8ff83634 100644 --- a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h +++ b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h @@ -20,7 +20,7 @@ #include "threshold_type.h" -typedef struct threshold_type_t { +typedef struct threshold_type_adaptive_t { // The value of the static threshold REAL threshold_value; } threshold_type_t; @@ -30,4 +30,4 @@ static inline bool threshold_type_is_above_threshold( return REAL_COMPARE(value, >=, threshold_type->threshold_value); } -#endif // _THRESHOLD_TYPE_STATIC_H_ +#endif // _THRESHOLD_TYPE_ADAPTIVE_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index fbfa1b42569..6e8004ef8ad 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -27,6 +27,7 @@ from .if_curr_delta import IFCurrDelta from .if_curr_exp_ca2_adaptive import IFCurrExpCa2Adaptive from .if_curr_exp_semd_base 
class EPropAdaptive(AbstractPyNNNeuronModelStandard):
    """ Adaptive threshold neuron with eprop support.

    Fix: the original ``__init__`` closed with
    ``super(IFCurrDualExpBase, self).__init__`` — a leftover from the
    model this build was copied from — so ``super`` walked the wrong MRO.
    It must name this class, ``EPropAdaptive``.
    """

    # psi (the pseudo-derivative) is state, so it must be listed as an
    # initial value alongside v and the synaptic currents
    @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh",
                             "psi"})
    def __init__(
            self, tau_m=20.0, cm=1.0, v_rest=-65.0, v_reset=-65.0,
            v_thresh=-50.0, tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0,
            tau_refrac=0.1, i_offset=0.0, v=-65.0, isyn_exc=0.0,
            isyn_inh=0.0, isyn_exc2=0.0, psi=0.0):
        """
        :param tau_m: membrane time constant (ms)
        :param cm: membrane capacitance (nF)
        :param v_rest: resting membrane voltage (mV)
        :param v_reset: post-spike reset voltage (mV)
        :param v_thresh: spike threshold voltage (mV)
        :param tau_syn_E: first excitatory input decay constant (ms)
        :param tau_syn_E2: second excitatory input decay constant (ms)
        :param tau_syn_I: inhibitory input decay constant (ms)
        :param tau_refrac: refractory period (ms)
        :param i_offset: constant input current (nA)
        :param v: initial membrane voltage (mV)
        :param isyn_exc: initial first excitatory input current
        :param isyn_inh: initial inhibitory input current
        :param isyn_exc2: initial second excitatory input current
        :param psi: initial pseudo-derivative value
        """
        # pylint: disable=too-many-arguments, too-many-locals
        neuron_model = NeuronModelEProp(
            v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi)
        synapse_type = SynapseTypeEProp(
            tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2,
            isyn_inh)
        input_type = InputTypeCurrent()
        threshold_type = ThresholdTypeAdaptive(v_thresh)

        super(EPropAdaptive, self).__init__(
            model_name="eprop_adaptive", binary="eprop_adaptive.aplx",
            neuron_model=neuron_model, input_type=input_type,
            synapse_type=synapse_type, threshold_type=threshold_type)
too-many-locals neuron_model = NeuronModelLeakyIntegrateAndFire( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac) - synapse_type = SynapseTypeDualExponential( + synapse_type = SynapseTypeDualExponential( tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2, isyn_inh) -input_type = InputTypeCurrent() + input_type = InputTypeCurrent() threshold_type = ThresholdTypeStatic(v_thresh) super(IFCurrDualExpBase, self).__init__( diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index 1b6e76428e7..ca6c796f13e 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -17,6 +17,7 @@ from .neuron_model_izh import NeuronModelIzh from .neuron_model_leaky_integrate_and_fire import ( NeuronModelLeakyIntegrateAndFire) +from .neuron_model_eprop import NeuronModelEProp __all__ = ["AbstractNeuronModel", "NeuronModelIzh", "NeuronModelLeakyIntegrateAndFire", "NeuronModelEProp"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py index ca1d9e61084..23654fee15f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py @@ -19,6 +19,11 @@ from pacman.executor.injection_decorator import inject_items from .abstract_neuron_model import AbstractNeuronModel +# constants +SYNAPSES_PER_NEURON = 250 + + + V = "v" V_REST = "v_rest" TAU_M = "tau_m" @@ -27,6 +32,7 @@ V_RESET = "v_reset" TAU_REFRAC = "tau_refrac" COUNT_REFRAC = "count_refrac" +PSI = "psi" UNITS = { V: 'mV', @@ -35,7 +41,8 @@ CM: 'nF', I_OFFSET: 'nA', V_RESET: 'mV', - TAU_REFRAC: 'ms' + TAU_REFRAC: 'ms', + PSI: 'N/A' } @@ -47,19 +54,44 @@ class NeuronModelEProp(AbstractNeuronModel): "__cm", "__i_offset", "__v_reset", - "__tau_refrac"] + "__tau_refrac", + "__psi"] def __init__( - self, v_init, v_rest, tau_m, 
cm, i_offset, v_reset, tau_refrac): - super(NeuronModelEProp, self).__init__( - [DataType.S1615, # v + self, + v_init, + v_rest, + tau_m, + cm, + i_offset, + v_reset, + tau_refrac, + psi + ): + + + datatype_list = [DataType.S1615, # v DataType.S1615, # v_rest DataType.S1615, # r_membrane (= tau_m / cm) DataType.S1615, # exp_tc (= e^(-ts / tau_m)) DataType.S1615, # i_offset DataType.INT32, # count_refrac DataType.S1615, # v_reset - DataType.INT32]) # tau_refrac + DataType.INT32, # tau_refrac + DataType.S1615 # psi, pseuo_derivative + ] + + # Synapse states - always initialise to zero + eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) + DataType.INT16, # delta_w + DataType.INT16, # z_bar + DataType.INT32, # ep_a + DataType.INT32, # e_bar + ] + # Extend to include fan-in for each neuron + datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) + + super(NeuronModelEProp, self).__init__(datatype_list) if v_init is None: v_init = v_rest @@ -70,6 +102,8 @@ def __init__( self.__i_offset = i_offset self.__v_reset = v_reset self.__tau_refrac = tau_refrac + self.__psi = psi # calculate from v and v_thresh (but will probably end up zero) + @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -89,6 +123,7 @@ def add_parameters(self, parameters): def add_state_variables(self, state_variables): state_variables[V] = self.__v_init state_variables[COUNT_REFRAC] = 0 + state_variables[PSI] = self.__psi @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -103,25 +138,40 @@ def has_variable(self, variable): def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data - return [state_variables[V], parameters[V_REST], + values = [state_variables[V], + parameters[V_REST], parameters[TAU_M] / parameters[CM], parameters[TAU_M].apply_operation( operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - parameters[I_OFFSET], state_variables[COUNT_REFRAC], + 
parameters[I_OFFSET], + state_variables[COUNT_REFRAC], parameters[V_RESET], parameters[TAU_REFRAC].apply_operation( - operation=lambda x: int(numpy.ceil(x / (ts / 1000.0))))] + operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + state_variables[PSI] + ] + + # create synaptic state - init to zero + eprop_syn_init = [0, + 0, + 0, + 0] + # extend to appropriate fan-in + values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + + return values @overrides(AbstractNeuronModel.update_values) def update_values(self, values, parameters, state_variables): # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac) = values + _v_reset, _tau_refrac, psi) = values # Copy the changed data only state_variables[V] = v state_variables[COUNT_REFRAC] = count_refrac + state_vairables[PSI] = psi @property def v_init(self): diff --git a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py index c261a6bf9b4..67060f057f1 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py @@ -18,6 +18,7 @@ from .synapse_type_exponential import SynapseTypeExponential from .synapse_type_delta import SynapseTypeDelta from .synapse_type_alpha import SynapseTypeAlpha +from .synapse_type_eprop import SynapseTypeEProp __all__ = ["AbstractSynapseType", "SynapseTypeDualExponential", "SynapseTypeExponential", "SynapseTypeDelta", "SynapseTypeAlpha", diff --git a/spynnaker/pyNN/models/neuron/threshold_types/__init__.py b/spynnaker/pyNN/models/neuron/threshold_types/__init__.py index 2c4671bc997..7ce908de545 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/__init__.py @@ -16,6 +16,7 @@ from .abstract_threshold_type import AbstractThresholdType from .threshold_type_static import ThresholdTypeStatic from .threshold_type_maass_stochastic import ThresholdTypeMaassStochastic 
+from .threshold_type_adaptive import ThresholdTypeAdaptive __all__ = ["AbstractThresholdType", "ThresholdTypeStatic", - "ThresholdTypeMaassStochastic", "ThresholdTypeEProp"] + "ThresholdTypeMaassStochastic", "ThresholdTypeAdaptive"] diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index 2c754a4f341..54817660818 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -22,13 +22,13 @@ UNITS = {V_THRESH: "mV"} -class ThresholdTypeStatic(AbstractThresholdType): +class ThresholdTypeAdaptive(AbstractThresholdType): """ A threshold that is a static value """ __slots__ = ["__v_thresh"] def __init__(self, v_thresh): - super(ThresholdTypeStatic, self).__init__([ + super(ThresholdTypeAdaptive, self).__init__([ DataType.S1615]) # v_thresh self.__v_thresh = v_thresh From e04b1c49061b89b9bc0cfa8de2f946f74b7fc564 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Thu, 19 Sep 2019 19:57:37 +0100 Subject: [PATCH 003/123] Add eprop_adaptive synapse type --- .../neuron_impl_eprop_adaptive.h | 2 +- .../synapse_type_eprop_adaptive.h | 158 +++++++++--------- .../threshold_types/threshold_type_adaptive.h | 8 +- .../models/neuron/builds/eprop_adaptive.py | 18 +- .../models/neuron/synapse_types/__init__.py | 4 +- ...prop.py => synapse_type_eprop_adaptive.py} | 144 +++++++++------- 6 files changed, 184 insertions(+), 150 deletions(-) rename spynnaker/pyNN/models/neuron/synapse_types/{synapse_type_eprop.py => synapse_type_eprop_adaptive.py} (58%) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index b020965d073..e9ecab14496 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -22,10 +22,10 @@ // Includes for model parts used in this implementation #include +#include #include #include #include -#include // Further includes diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h index 7e37a89aba3..f7d3647c80d 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h @@ -1,20 +1,3 @@ -/* - * Copyright (c) 2017-2019 The University of Manchester - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - /*! \file * * \brief implementation of synapse_types.h for a simple duel exponential decay @@ -25,18 +8,18 @@ * time-constants (and thus propogators) are identical. 
*/ -#ifndef _SYNAPSE_TYPES_DUAL_EXCITATORY_EXPONENTIAL_IMPL_H_ -#define _SYNAPSE_TYPES_DUAL_EXCITATORY_EXPONENTIAL_IMPL_H_ +#ifndef _SYNAPSE_TYPES_EPROP_ADPATIVE_IMPL_H_ +#define _SYNAPSE_TYPES_EPROP_ADAPTIVE_IMPL_H_ //--------------------------------------- // Macros //--------------------------------------- #define SYNAPSE_TYPE_BITS 2 -#define SYNAPSE_TYPE_COUNT 3 +#define SYNAPSE_TYPE_COUNT 4 #define NUM_EXCITATORY_RECEPTORS 2 -#define NUM_INHIBITORY_RECEPTORS 1 +#define NUM_INHIBITORY_RECEPTORS 2 #include #include @@ -49,23 +32,25 @@ input_t excitatory_response[NUM_EXCITATORY_RECEPTORS]; input_t inhibitory_response[NUM_INHIBITORY_RECEPTORS]; -typedef struct exp_params_t { - decay_t decay; - decay_t init; - input_t synaptic_input_value; -} exp_params_t; - typedef struct synapse_param_t { - exp_params_t exc; - exp_params_t exc2; - exp_params_t inh; + decay_t exc_decay; + decay_t exc_init; + decay_t exc2_decay; + decay_t exc2_init; + decay_t inh_decay; + decay_t inh_init; + decay_t inh2_decay; + decay_t inh2_init; + input_t input_buffer_excitatory_value; + input_t input_buffer_excitatory2_value; + input_t input_buffer_inhibitory_value; + input_t input_buffer_inhibitory2_value; } synapse_param_t; - //! human readable definition for the positions in the input regions for the //! different synapse types. typedef enum input_buffer_regions { - EXCITATORY_ONE, EXCITATORY_TWO, INHIBITORY, + EXCITATORY_ONE, EXCITATORY_TWO, INHIBITORY_ONE, INHIBITORY_TWO } input_buffer_regions; //--------------------------------------- @@ -79,28 +64,21 @@ typedef enum input_buffer_regions { //! to the neuron. //! \param[in] parameter: the pointer to the parameters to use //! 
\return nothing -static inline void exp_shaping(exp_params_t* exp_params) { - // decay value according to decay constant - exp_params->synaptic_input_value = - decay_s1615(exp_params->synaptic_input_value, - exp_params->decay); -} - static inline void synapse_types_shape_input( synapse_param_pointer_t parameter) { - exp_shaping(¶meter->exc); - exp_shaping(¶meter->exc2); - exp_shaping(¶meter->inh); -} -//! \brief helper function to add input for a given timer period to a given -//! neuron -//! \param[in] parameter: the pointer to the parameters to use -//! \param[in] input the inputs to add. -//! \return None -static inline void add_input_exp(exp_params_t* exp_params, input_t input) { - exp_params->synaptic_input_value = exp_params->synaptic_input_value + - decay_s1615(input, exp_params->init); + parameter->input_buffer_excitatory_value = decay_s1615( + parameter->input_buffer_excitatory_value, + parameter->exc_decay); + parameter->input_buffer_excitatory2_value = decay_s1615( + parameter->input_buffer_excitatory2_value, + parameter->exc2_decay); + parameter->input_buffer_inhibitory_value = decay_s1615( + parameter->input_buffer_inhibitory_value, + parameter->inh_decay); + parameter->input_buffer_inhibitory2_value = decay_s1615( + parameter->input_buffer_inhibitory2_value, + parameter->inh2_decay); } //! 
\brief adds the inputs for a give timer period to a given neuron that is @@ -114,11 +92,24 @@ static inline void synapse_types_add_neuron_input( index_t synapse_type_index, synapse_param_pointer_t parameter, input_t input) { if (synapse_type_index == EXCITATORY_ONE) { - add_input_exp(¶meter->exc, input); + parameter->input_buffer_excitatory_value = + parameter->input_buffer_excitatory_value + + decay_s1615(input, parameter->exc_init); + } else if (synapse_type_index == EXCITATORY_TWO) { - add_input_exp(¶meter->exc2, input); - } else if (synapse_type_index == INHIBITORY) { - add_input_exp(¶meter->inh, input); + parameter->input_buffer_excitatory2_value = + parameter->input_buffer_excitatory2_value + + decay_s1615(input, parameter->exc2_init); + + } else if (synapse_type_index == INHIBITORY_ONE) { + parameter->input_buffer_inhibitory_value = + parameter->input_buffer_inhibitory_value + + decay_s1615(input, parameter->inh_init); + + } else if (synapse_type_index == INHIBITORY_TWO) { + parameter->input_buffer_inhibitory2_value = + parameter->input_buffer_inhibitory2_value + + decay_s1615(input, parameter->inh2_init); } } @@ -128,8 +119,8 @@ static inline void synapse_types_add_neuron_input( //! \return the excitatory input buffers for a given neuron ID. static inline input_t* synapse_types_get_excitatory_input( synapse_param_pointer_t parameter) { - excitatory_response[0] = parameter->exc.synaptic_input_value; - excitatory_response[1] = parameter->exc2.synaptic_input_value; + excitatory_response[0] = parameter->input_buffer_excitatory_value; + excitatory_response[1] = parameter->input_buffer_excitatory2_value; return &excitatory_response[0]; } @@ -139,7 +130,8 @@ static inline input_t* synapse_types_get_excitatory_input( //! \return the inhibitory input buffers for a given neuron ID. 
static inline input_t* synapse_types_get_inhibitory_input( synapse_param_pointer_t parameter) { - inhibitory_response[0] = parameter->inh.synaptic_input_value; + inhibitory_response[0] = parameter->input_buffer_inhibitory_value; + inhibitory_response[1] = parameter->input_buffer_inhibitory2_value; return &inhibitory_response[0]; } @@ -154,8 +146,10 @@ static inline const char *synapse_types_get_type_char( return "X1"; } else if (synapse_type_index == EXCITATORY_TWO) { return "X2"; - } else if (synapse_type_index == INHIBITORY) { + } else if (synapse_type_index == INHIBITORY_ONE) { return "I"; + } else if (synapse_type_index == INHIBITORY_TWO) { + return "I2"; } else { log_debug("did not recognise synapse type %i", synapse_type_index); return "?"; @@ -164,33 +158,43 @@ static inline const char *synapse_types_get_type_char( //! \brief prints the input for a neuron ID given the available inputs //! currently only executed when the models are in debug mode, as the prints are -//! controlled from the synapses.c print_inputs method. +//! controlled from the synapses.c _print_inputs method. //! \param[in] parameter: the pointer to the parameters to use //! \return Nothing static inline void synapse_types_print_input( synapse_param_pointer_t parameter) { - io_printf(IO_BUF, "%12.6k + %12.6k - %12.6k", - parameter->exc.synaptic_input_value, - parameter->exc2.synaptic_input_value, - parameter->inh.synaptic_input_value); + io_printf( + IO_BUF, "%12.6k + %12.6k - %12.6k - %12.6k", + parameter->input_buffer_excitatory_value, + parameter->input_buffer_excitatory2_value, + parameter->input_buffer_inhibitory_value, + parameter->input_buffer_inhibitory2_value); } //! \brief printer call //! 
\param[in] parameter: the pointer to the parameters to print static inline void synapse_types_print_parameters( synapse_param_pointer_t parameter) { - log_info("exc_decay = %11.4k\n", parameter->exc.decay); - log_info("exc_init = %11.4k\n", parameter->exc.init); - log_info("exc2_decay = %11.4k\n", parameter->exc2.decay); - log_info("exc2_init = %11.4k\n", parameter->exc2.init); - log_info("inh_decay = %11.4k\n", parameter->inh.decay); - log_info("inh_init = %11.4k\n", parameter->inh.init); - log_info("gsyn_excitatory_initial_value = %11.4k\n", - parameter->exc.synaptic_input_value); - log_info("gsyn_excitatory2_initial_value = %11.4k\n", - parameter->exc2.synaptic_input_value); - log_info("gsyn_inhibitory_initial_value = %11.4k\n", - parameter->inh.synaptic_input_value); + log_info("exc_decay = %11.4k\n", parameter->exc_decay); + log_info("exc_init = %11.4k\n", parameter->exc_init); + log_info("exc2_decay = %11.4k\n", parameter->exc2_decay); + log_info("exc2_init = %11.4k\n", parameter->exc2_init); + log_info("inh_decay = %11.4k\n", parameter->inh_decay); + log_info("inh_init = %11.4k\n", parameter->inh_init); + log_info("inh2_decay = %11.4k\n", parameter->inh2_decay); + log_info("inh2_init = %11.4k\n", parameter->inh2_init); + log_info( + "gsyn_excitatory_initial_value = %11.4k\n", + parameter->input_buffer_excitatory_value); + log_info( + "gsyn_excitatory2_initial_value = %11.4k\n", + parameter->input_buffer_excitatory2_value); + log_info( + "gsyn_inhibitory_initial_value = %11.4k\n", + parameter->input_buffer_inhibitory_value); + log_info( + "gsyn_inhibitory2_initial_value = %11.4k\n", + parameter->input_buffer_inhibitory2_value); } -#endif // _SYNAPSE_TYPES_DUAL_EXCITATORY_EXPONENTIAL_IMPL_H_ +#endif // _SYNAPSE_TYPES_ERBP_IMPL_H_ diff --git a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h index b3e8ff83634..881e3aa99f3 100644 --- 
a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h +++ b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h @@ -15,18 +15,20 @@ * along with this program. If not, see . */ -#ifndef _THRESHOLD_TYPE_STATIC_H_ -#define _THRESHOLD_TYPE_STATIC_H_ +#ifndef _THRESHOLD_TYPE_ADAPTIVE_H_ +#define _THRESHOLD_TYPE_ADAPTIVE_H_ #include "threshold_type.h" -typedef struct threshold_type_adaptive_t { +typedef struct threshold_type_t { // The value of the static threshold REAL threshold_value; } threshold_type_t; static inline bool threshold_type_is_above_threshold( state_t value, threshold_type_pointer_t threshold_type) { + + return REAL_COMPARE(value, >=, threshold_type->threshold_value); } diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index fae5f477e76..78abb3d64d7 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -18,7 +18,7 @@ from spynnaker.pyNN.models.neuron.neuron_models import ( NeuronModelEProp) from spynnaker.pyNN.models.neuron.synapse_types import ( - SynapseTypeEProp) + SynapseTypeEPropAdaptive) from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeAdaptive @@ -28,14 +28,20 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", "psi"}) def __init__( - self, tau_m=20.0, cm=1.0, v_rest=-65.0, v_reset=-65.0, - v_thresh=-50.0, tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, - tau_refrac=0.1, i_offset=0.0, v=-65.0, isyn_exc=0.0, isyn_inh=0.0, - isyn_exc2=0.0, psi=0.0): + self, + # neuron model params + tau_m=20.0, cm=1.0, v_rest=-65.0, v_reset=-65.0, + v_thresh=-50.0, tau_refrac=0.1, i_offset=0.0, v=-65.0, psi=0.0, + + #synapse type params + tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, + isyn_exc=0.0, 
isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, + + ): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelEProp( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi) - synapse_type = SynapseTypeEProp( + synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2, isyn_inh) input_type = InputTypeCurrent() threshold_type = ThresholdTypeAdaptive(v_thresh) diff --git a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py index 67060f057f1..9b2d84403bc 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py @@ -18,8 +18,8 @@ from .synapse_type_exponential import SynapseTypeExponential from .synapse_type_delta import SynapseTypeDelta from .synapse_type_alpha import SynapseTypeAlpha -from .synapse_type_eprop import SynapseTypeEProp +from .synapse_type_eprop_adaptive import SynapseTypeEPropAdaptive __all__ = ["AbstractSynapseType", "SynapseTypeDualExponential", "SynapseTypeExponential", "SynapseTypeDelta", "SynapseTypeAlpha", - "SynapseTypeEProp"] + "SynapseTypeEPropAdaoptive"] diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py similarity index 58% rename from spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py rename to spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index a863b7d33e2..fbf7fd0c38e 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -1,30 +1,17 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# 
(at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import numpy from spinn_utilities.overrides import overrides -from data_specification.enums import DataType from pacman.executor.injection_decorator import inject_items from .abstract_synapse_type import AbstractSynapseType +from data_specification.enums import DataType +import numpy TAU_SYN_E = 'tau_syn_E' TAU_SYN_E2 = 'tau_syn_E2' TAU_SYN_I = 'tau_syn_I' +TAU_SYN_I2 = 'tau_syn_I2' ISYN_EXC = "isyn_exc" ISYN_EXC2 = "isyn_exc2" ISYN_INH = "isyn_inh" +ISYN_INH2 = "isyn_inh2" UNITS = { TAU_SYN_E: "mV", @@ -36,34 +23,45 @@ } -class SynapseTypeEProp(AbstractSynapseType): +class SynapseTypeEPropAdaptive(AbstractSynapseType): __slots__ = [ - "__tau_syn_E", - "__tau_syn_E2", - "__tau_syn_I", - "__isyn_exc", - "__isyn_exc2", - "__isyn_inh"] + "_tau_syn_E", + "_tau_syn_E2", + "_tau_syn_I", + "_tau_syn_I2", + "_isyn_exc", + "_isyn_exc2", + "_isyn_inh", + "_isyn_inh2"] def __init__( - self, tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2, - isyn_inh): - super(SynapseTypeEProp, self).__init__( + self, tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + isyn_exc, isyn_exc2, isyn_inh, isyn_inh2 + ): + super(SynapseTypeERBP, self).__init__( [DataType.U032, # decay_E DataType.U032, # init_E - DataType.S1615, # isyn_exc DataType.U032, # decay_E2 DataType.U032, # init_E2 - DataType.S1615, # isyn_exc2 DataType.U032, # decay_I DataType.U032, # init_I + DataType.U032, # decay_I2 + DataType.U032, # init_I2 + DataType.S1615, # isyn_exc + DataType.S1615, # isyn_exc2 + DataType.S1615, DataType.S1615]) # isyn_inh - self.__tau_syn_E = tau_syn_E - self.__tau_syn_E2 = tau_syn_E2 - self.__tau_syn_I 
= tau_syn_I - self.__isyn_exc = isyn_exc - self.__isyn_exc2 = isyn_exc2 - self.__isyn_inh = isyn_inh + + self._tau_syn_E = tau_syn_E + self._tau_syn_E2 = tau_syn_E2 + self._tau_syn_I = tau_syn_I + self._tau_syn_I2 = tau_syn_I2 + self._isyn_exc = isyn_exc + self._isyn_exc2 = isyn_exc2 + self._isyn_inh = isyn_inh + self._isyn_inh2 = isyn_inh2 + + @overrides(AbstractSynapseType.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -71,15 +69,17 @@ def get_n_cpu_cycles(self, n_neurons): @overrides(AbstractSynapseType.add_parameters) def add_parameters(self, parameters): - parameters[TAU_SYN_E] = self.__tau_syn_E - parameters[TAU_SYN_E2] = self.__tau_syn_E2 - parameters[TAU_SYN_I] = self.__tau_syn_I + parameters[TAU_SYN_E] = self._tau_syn_E + parameters[TAU_SYN_E2] = self._tau_syn_E2 + parameters[TAU_SYN_I] = self._tau_syn_I + parameters[TAU_SYN_I2] = self._tau_syn_I2 @overrides(AbstractSynapseType.add_state_variables) def add_state_variables(self, state_variables): - state_variables[ISYN_EXC] = self.__isyn_exc - state_variables[ISYN_EXC2] = self.__isyn_exc2 - state_variables[ISYN_INH] = self.__isyn_inh + state_variables[ISYN_EXC] = self._isyn_exc + state_variables[ISYN_EXC2] = self._isyn_exc2 + state_variables[ISYN_INH] = self._isyn_inh + state_variables[ISYN_INH2] = self._isyn_inh2 @overrides(AbstractSynapseType.get_units) def get_units(self, variable): @@ -100,87 +100,109 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data return [parameters[TAU_SYN_E].apply_operation(decay), parameters[TAU_SYN_E].apply_operation(init), - state_variables[ISYN_EXC], parameters[TAU_SYN_E2].apply_operation(decay), parameters[TAU_SYN_E2].apply_operation(init), - state_variables[ISYN_EXC2], parameters[TAU_SYN_I].apply_operation(decay), parameters[TAU_SYN_I].apply_operation(init), - state_variables[ISYN_INH]] + parameters[TAU_SYN_I2].apply_operation(decay), + parameters[TAU_SYN_I2].apply_operation(init), + state_variables[ISYN_EXC], + 
state_variables[ISYN_EXC2], + state_variables[ISYN_INH], + state_variables[ISYN_INH2]] @overrides(AbstractSynapseType.update_values) def update_values(self, values, parameters, state_variables): # Read the data - (_decay_E, _init_E, isyn_exc, _decay_E2, _init_E2, isyn_exc2, - _decay_I, _init_I, isyn_inh) = values + (_decay_E, _init_E, _decay_E2, _init_E2, _decay_I, _init_I, _decay_I2, _init_I2, + isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) = values state_variables[ISYN_EXC] = isyn_exc state_variables[ISYN_EXC2] = isyn_exc2 state_variables[ISYN_INH] = isyn_inh + state_variables[ISYN_INH2] = isyn_inh2 @overrides(AbstractSynapseType.get_n_synapse_types) def get_n_synapse_types(self): - return 3 + return 4 @overrides(AbstractSynapseType.get_synapse_id_by_target) def get_synapse_id_by_target(self, target): if target == "excitatory": return 0 - elif target == "excitatory2": + elif target == "exc_err": return 1 elif target == "inhibitory": return 2 + elif target == "inh_err": + return 3 return None @overrides(AbstractSynapseType.get_synapse_targets) def get_synapse_targets(self): - return "excitatory", "excitatory2", "inhibitory" + return "excitatory", "exc_err", "inhibitory", "inh_err" @property def tau_syn_E(self): - return self.__tau_syn_E + return self._tau_syn_E @tau_syn_E.setter def tau_syn_E(self, tau_syn_E): - self.__tau_syn_E = tau_syn_E + self._tau_syn_E = tau_syn_E @property def tau_syn_E2(self): - return self.__tau_syn_E2 + return self._tau_syn_E2 @tau_syn_E2.setter def tau_syn_E2(self, tau_syn_E2): - self.__tau_syn_E2 = tau_syn_E2 + self._tau_syn_E2 = tau_syn_E2 @property def tau_syn_I(self): - return self.__tau_syn_I + return self._tau_syn_I @tau_syn_I.setter def tau_syn_I(self, tau_syn_I): - self.__tau_syn_I = tau_syn_I + self._tau_syn_I = tau_syn_I + + @property + def tau_syn_I2(self): + return self._tau_syn_I2 + + @tau_syn_I2.setter + def tau_syn_I2(self, tau_syn_I2): + self._tau_syn_I2 = tau_syn_I2 @property def isyn_exc(self): - return self.__isyn_exc + 
return self._isyn_exc @isyn_exc.setter def isyn_exc(self, isyn_exc): - self.__isyn_exc = isyn_exc + self._isyn_exc = isyn_exc @property def isyn_inh(self): - return self.__isyn_inh + return self._isyn_inh @isyn_inh.setter def isyn_inh(self, isyn_inh): - self.__isyn_inh = isyn_inh + self._isyn_inh = isyn_inh + + @property + def isyn_inh2(self): + return self._isyn_inh2 + + @isyn_inh.setter + def isyn_inh(self, isyn_inh2): + self._isyn_inh2 = isyn_inh2 @property def isyn_exc2(self): - return self.__isyn_exc2 + return self._isyn_exc2 @isyn_exc2.setter def isyn_exc2(self, isyn_exc2): - self.__isyn_exc2 = isyn_exc2 + self._isyn_exc2 = isyn_exc2 \ No newline at end of file From c0911e3ac52e5fc79167a611a8de169ad57fe85c Mon Sep 17 00:00:00 2001 From: mbassor2 Date: Thu, 26 Sep 2019 14:19:09 +0100 Subject: [PATCH 004/123] added regularisation functionality --- .../neuron_impl_eprop_adaptive.h | 15 ++++- .../models/neuron_model_eprop_adaptive_impl.h | 3 + .../models/neuron/builds/eprop_adaptive.py | 17 +++-- .../neuron_models/neuron_model_eprop.py | 66 ++++++++++++++----- .../synapse_type_eprop_adaptive.py | 2 +- 5 files changed, 82 insertions(+), 21 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index e9ecab14496..556d5f79773 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -205,6 +205,15 @@ static void neuron_impl_load_neuron_parameters( static bool neuron_impl_do_timestep_update(index_t neuron_index, input_t external_bias, state_t *recorded_variable_values) { + + + if (neuron_index == 0) { + // Decay global rate trace (only done once per core per timestep) + global_parameters->core_pop_rate = global_parameters->core_pop_rate + * global_parameters->rate_exp_TC; + } + + // Get the neuron itself neuron_pointer_t neuron = 
&neuron_array[neuron_index]; @@ -246,7 +255,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Call functions to get the input values to be recorded recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + global_parameters->core_pop_rate; // Call functions to convert exc_input and inh_input to current input_type_convert_excitatory_input_to_current( @@ -274,6 +284,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Tell the additional input additional_input_has_spiked(additional_input); + + // Add contribution from this neuron's spike to global rate trace + global_parameters->core_pop_rate += 1.0k; } // Shape the existing input according to the included rule diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 6e7036ee5df..716f743811c 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -68,6 +68,9 @@ typedef struct neuron_t { } neuron_t; typedef struct global_neuron_params_t { + REAL core_pop_rate; + REAL core_target_rate; + REAL rate_exp_TC; } global_neuron_params_t; #endif // _NEURON_MODEL_LIF_CURR_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 78abb3d64d7..2e11c1275e4 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -26,7 +26,8 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): """ Adaptive threshold neuron with eprop support """ - @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", "psi"}) + @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", + 
"isyn_inh2", "psi", "target_rate", "tau_err"}) def __init__( self, # neuron model params @@ -36,14 +37,22 @@ def __init__( #synapse type params tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - + + # Regularisation params + target_rate=10, tau_err=1000 # fits with 1 ms timestep + ): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelEProp( - v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi) + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi, + target_rate, tau_err) + synapse_type = SynapseTypeEPropAdaptive( - tau_syn_E, tau_syn_E2, tau_syn_I, isyn_exc, isyn_exc2, isyn_inh) + tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) + input_type = InputTypeCurrent() + threshold_type = ThresholdTypeAdaptive(v_thresh) super(EPropAdaptive, self).__init__( diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py index 23654fee15f..169b7fda2e9 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py @@ -55,7 +55,9 @@ class NeuronModelEProp(AbstractNeuronModel): "__i_offset", "__v_reset", "__tau_refrac", - "__psi"] + "__psi", + "__target_rate", + "__tau_err"] def __init__( self, @@ -66,20 +68,23 @@ def __init__( i_offset, v_reset, tau_refrac, - psi + psi, + # regularisation params + target_rate, + tau_err ): - - datatype_list = [DataType.S1615, # v - DataType.S1615, # v_rest - DataType.S1615, # r_membrane (= tau_m / cm) - DataType.S1615, # exp_tc (= e^(-ts / tau_m)) - DataType.S1615, # i_offset - DataType.INT32, # count_refrac - DataType.S1615, # v_reset - DataType.INT32, # tau_refrac - DataType.S1615 # psi, pseuo_derivative - ] + datatype_list = [ + DataType.S1615, # v + DataType.S1615, # v_rest + DataType.S1615, # r_membrane (= tau_m / cm) + 
DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.S1615, # i_offset + DataType.INT32, # count_refrac + DataType.S1615, # v_reset + DataType.INT32, # tau_refrac + DataType.S1615 # psi, pseuo_derivative + ] # Synapse states - always initialise to zero eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) @@ -91,7 +96,15 @@ def __init__( # Extend to include fan-in for each neuron datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) - super(NeuronModelEProp, self).__init__(datatype_list) + + global_data_types = [ + DataType.S1615, # core_pop_rate + DataType.S1615, # core_target_rate + DataType.S1615 # rate_exp_TC + ] + + super(NeuronModelEProp, self).__init__(data_types=datatype_list, + global_data_types=global_data_types) if v_init is None: v_init = v_rest @@ -104,6 +117,9 @@ def __init__( self.__tau_refrac = tau_refrac self.__psi = psi # calculate from v and v_thresh (but will probably end up zero) + self.__target_rate = target_rate + self.__tau_err = tau_err + @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -151,7 +167,7 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): state_variables[PSI] ] - # create synaptic state - init to zero + # create synaptic state - init all state to zero eprop_syn_init = [0, 0, 0, @@ -161,6 +177,23 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): return values + @inject_items({"ts": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_global_values, + additional_arguments={'ts'}) + def get_global_values(self, ts): + glob_vals = [ + self.__target_rate, # initialise global pop rate to the target + self.__target_rate, # set target rate + numpy.exp(-float(ts/1000)/self.__tau_err) + ] + + print("\n ") + print(glob_vals) + print(ts) + print("\n") + return glob_vals + + @overrides(AbstractNeuronModel.update_values) def update_values(self, values, parameters, state_variables): @@ -172,7 +205,10 @@ def update_values(self, 
values, parameters, state_variables): state_variables[V] = v state_variables[COUNT_REFRAC] = count_refrac state_vairables[PSI] = psi + + + @property def v_init(self): return self.__v_init diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index fbf7fd0c38e..4682e57de68 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -38,7 +38,7 @@ def __init__( self, tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2 ): - super(SynapseTypeERBP, self).__init__( + super(SynapseTypeEPropAdaptive, self).__init__( [DataType.U032, # decay_E DataType.U032, # init_E DataType.U032, # decay_E2 From 65cde938e7468df92e7e5ba2aaf71102afbf2da2 Mon Sep 17 00:00:00 2001 From: mbassor2 Date: Thu, 26 Sep 2019 15:59:20 +0100 Subject: [PATCH 005/123] Added adaptive neuron --- .../neuron_impl_eprop_adaptive.h | 38 ++++- .../models/neuron_model_eprop_adaptive_impl.c | 47 ++++--- .../models/neuron_model_eprop_adaptive_impl.h | 7 + .../threshold_types/threshold_type_adaptive.h | 87 +++++++++--- .../models/neuron/builds/eprop_adaptive.py | 16 ++- .../neuron_models/neuron_model_eprop.py | 17 ++- .../threshold_type_adaptive.py | 131 +++++++++++++++--- .../threshold_types/threshold_type_eprop.py | 74 ---------- 8 files changed, 270 insertions(+), 147 deletions(-) delete mode 100644 spynnaker/pyNN/models/neuron/threshold_types/threshold_type_eprop.py diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 556d5f79773..a03c3a8c193 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -230,7 +230,10 @@ static bool 
neuron_impl_do_timestep_update(index_t neuron_index, // Get the voltage state_t voltage = neuron_model_get_membrane_voltage(neuron); - recorded_variable_values[V_RECORDING_INDEX] = voltage; + state_t B_t = threshold_type->B; + state_t z_t = neuron->z; + +// recorded_variable_values[V_RECORDING_INDEX] = voltage; // Get the exc and inh values from the synapses input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); @@ -253,10 +256,10 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, total_inh += inh_input_values[i]; } - // Call functions to get the input values to be recorded - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - global_parameters->core_pop_rate; +// // Call functions to get the input values to be recorded +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// global_parameters->core_pop_rate; // Call functions to convert exc_input and inh_input to current input_type_convert_excitatory_input_to_current( @@ -274,7 +277,30 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias, neuron); // determine if a spike should occur - bool spike = threshold_type_is_above_threshold(result, threshold_type); + threshold_type_update_threshold(neuron->z, threshold_type); + + + // Also update Z (including using refractory period information) + state_t nu = (voltage - threshold_type->B)/threshold_type->B; + + if (nu > ZERO){ + neuron->z = 1 * neuron->A; // implements refractory period + } + + bool spike = neuron->z; + + // ********************************************************* + // Record updated state + // Record V (just as cheap to set then to gate later) + recorded_variable_values[V_RECORDING_INDEX] = voltage; // result; + + // Record Z + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = z_t; + + // Record B + 
recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = B_t; // threshold_type->B; + // ********************************************************* + // If spike occurs, communicate to relevant parts of model if (spike) { diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 3bac73de40d..b26e026a1a2 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -15,7 +15,7 @@ * along with this program. If not, see . */ -#include "neuron_model_lif_impl.h" +#include "neuron_model_eprop_adaptive_impl.h" #include @@ -41,36 +41,43 @@ state_t neuron_model_state_update( log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); - // If outside of the refractory period - if (neuron->refract_timer <= 0) { - REAL total_exc = 0; - REAL total_inh = 0; - - for (int i=0; i < num_excitatory_inputs; i++) { - total_exc += exc_input[i]; - } - for (int i=0; i< num_inhibitory_inputs; i++) { - total_inh += inh_input[i]; - } - // Get the input in nA - input_t input_this_timestep = - total_exc - total_inh + external_bias + neuron->I_offset; + + REAL total_exc = 0; + REAL total_inh = 0; + + for (int i=0; i < num_excitatory_inputs; i++) { + total_exc += exc_input[i]; + } + for (int i=0; i< num_inhibitory_inputs; i++) { + total_inh += inh_input[i]; + } + // Get the input in nA + input_t input_this_timestep = + exc_input[0] - inh_input[0] + external_bias + neuron->I_offset; lif_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep); + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { + // Allow spiking again + neuron->A = 1; } else { + // Neuron cannot fire, as neuron->A=0; // countdown refractory timer - neuron->refract_timer--; + neuron->refract_timer -= 1; } + 
return neuron->V_membrane; } void neuron_model_has_spiked(neuron_pointer_t neuron) { - // reset membrane voltage - neuron->V_membrane = neuron->V_reset; + // reset z to zero + neuron->z = 0; - // reset refractory timer - neuron->refract_timer = neuron->T_refract; + // Set refractory timer + neuron->refract_timer = neuron->T_refract - 1; + neuron->A = 0; } state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 716f743811c..f6fb69d52a1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -59,6 +59,13 @@ typedef struct neuron_t { // refractory time of neuron [timesteps] int32_t T_refract; + // Neuron spike train + REAL z; + + // refractory multiplier - to allow evolution of neuronal dynamics during + // refractory period + REAL A; + // pseudo derivative REAL psi; diff --git a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h index 881e3aa99f3..90fd8ab71e2 100644 --- a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h +++ b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h @@ -1,35 +1,78 @@ -/* - * Copyright (c) 2017-2019 The University of Manchester - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - #ifndef _THRESHOLD_TYPE_ADAPTIVE_H_ #define _THRESHOLD_TYPE_ADAPTIVE_H_ #include "threshold_type.h" +#include +#include "debug.h" typedef struct threshold_type_t { - // The value of the static threshold - REAL threshold_value; + + REAL B; // Capital B(t) + REAL b; // b(t) + REAL b_0; // small b^0 + decay_t e_to_dt_on_tau_a; // rho + REAL beta; + decay_t adpt; // (1-rho) + REAL scalar; + } threshold_type_t; -static inline bool threshold_type_is_above_threshold( - state_t value, threshold_type_pointer_t threshold_type) { +static REAL var_scal = 1; + +static void _print_threshold_params(threshold_type_pointer_t threshold_type){ + io_printf(IO_BUF, "B: %k, " + "b: %k, " + "b_0: %k, " + "e_to_dt_on_tau_a: %u, " + "beta: %k, " + "adpt: %u, \n" + "scalar: %k, \n\n", + threshold_type->B, + threshold_type->b, + threshold_type->b_0, + threshold_type->e_to_dt_on_tau_a, + threshold_type->beta, + threshold_type->adpt, + threshold_type->scalar + ); +} - return REAL_COMPARE(value, >=, threshold_type->threshold_value); +static inline bool threshold_type_is_above_threshold(state_t value, + threshold_type_pointer_t threshold_type) { + + // Not used + use(value); + use(threshold_type); + + return false; } +static inline void threshold_type_update_threshold(state_t z, + threshold_type_pointer_t threshold_type){ + +// _print_threshold_params(threshold_type); + + + s1615 temp1 = decay_s1615(threshold_type->b, threshold_type->e_to_dt_on_tau_a); + s1615 temp2 = decay_s1615(threshold_type->scalar, threshold_type->adpt) * z; + + threshold_type->b = temp1 + + temp2; + + +// // Evolve threshold dynamics (decay to baseline) and adapt if z=nonzero +// // Update small b (same regardless of spike - uses z from previous timestep) +// threshold_type->b = +// decay_s1615(threshold_type->b, threshold_type->e_to_dt_on_tau_a) +// + decay_s1615(1000k, threshold_type->adpt) // fold 
scaling into decay to increase precision +// * z; // stored on neuron +// + // Update large B + threshold_type->B = threshold_type->b_0 + + threshold_type->beta*threshold_type->b; + +} + + #endif // _THRESHOLD_TYPE_ADAPTIVE_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 2e11c1275e4..83dbd1fe98c 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -27,19 +27,23 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): """ @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - "isyn_inh2", "psi", "target_rate", "tau_err"}) + "isyn_inh2", "psi", "target_rate", "tau_err", + "B", "small_b"}) def __init__( self, # neuron model params tau_m=20.0, cm=1.0, v_rest=-65.0, v_reset=-65.0, - v_thresh=-50.0, tau_refrac=0.1, i_offset=0.0, v=-65.0, psi=0.0, + tau_refrac=5, i_offset=0.0, v=-65.0, psi=0.0, #synapse type params tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, # Regularisation params - target_rate=10, tau_err=1000 # fits with 1 ms timestep + target_rate=10, tau_err=1000, # fits with 1 ms timestep + + # Threshold parameters + B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8 ): # pylint: disable=too-many-arguments, too-many-locals @@ -53,7 +57,11 @@ def __init__( input_type = InputTypeCurrent() - threshold_type = ThresholdTypeAdaptive(v_thresh) + threshold_type = ThresholdTypeAdaptive(B, + small_b, + small_b_0, + tau_a, + beta) super(EPropAdaptive, self).__init__( model_name="eprop_adaptive", binary="eprop_adaptive.aplx", diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py index 169b7fda2e9..7dace015237 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py +++ 
b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py @@ -33,6 +33,8 @@ TAU_REFRAC = "tau_refrac" COUNT_REFRAC = "count_refrac" PSI = "psi" +Z = "z" +A = "a" UNITS = { V: 'mV', @@ -42,6 +44,8 @@ I_OFFSET: 'nA', V_RESET: 'mV', TAU_REFRAC: 'ms', + Z: 'N/A', + A: 'N/A', PSI: 'N/A' } @@ -55,9 +59,12 @@ class NeuronModelEProp(AbstractNeuronModel): "__i_offset", "__v_reset", "__tau_refrac", + "__z", + "__a", "__psi", "__target_rate", - "__tau_err"] + "__tau_err" + ] def __init__( self, @@ -83,6 +90,8 @@ def __init__( DataType.INT32, # count_refrac DataType.S1615, # v_reset DataType.INT32, # tau_refrac + DataType.S1615, # Z + DataType.S1615, # A DataType.S1615 # psi, pseuo_derivative ] @@ -115,7 +124,7 @@ def __init__( self.__i_offset = i_offset self.__v_reset = v_reset self.__tau_refrac = tau_refrac - self.__psi = psi # calculate from v and v_thresh (but will probably end up zero) + self.__psi = psi # calculate from v and v_thresh (but will probably end up zero) self.__target_rate = target_rate self.__tau_err = tau_err @@ -140,6 +149,8 @@ def add_state_variables(self, state_variables): state_variables[V] = self.__v_init state_variables[COUNT_REFRAC] = 0 state_variables[PSI] = self.__psi + state_variables[Z] = 0 + state_variables[A] = 0 @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -164,6 +175,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): parameters[V_RESET], parameters[TAU_REFRAC].apply_operation( operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + state_variables[Z], + state_variables[A], state_variables[PSI] ] diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index 54817660818..64b00c2ddd3 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -14,23 +14,62 @@ # along with 
this program. If not, see . from spinn_utilities.overrides import overrides -from data_specification.enums import DataType from .abstract_threshold_type import AbstractThresholdType +from pacman.executor.injection_decorator import inject_items +from data_specification.enums import DataType + +import numpy + +BIG_B = "big_b" +SMALL_B = "small_b" +SMALL_B_0 = "small_b_0" +TAU_A = "tau_a" +BETA = "beta" +ADPT = "adpt" +SCALAR = "scalar" + +UNITS = { + BIG_B: "mV", + SMALL_B: "mV", + SMALL_B_0: "mV", + TAU_A: "ms", + BETA: "N/A", +# ADPT: "mV" + SCALAR: "dimensionless" + } -V_THRESH = "v_thresh" -UNITS = {V_THRESH: "mV"} class ThresholdTypeAdaptive(AbstractThresholdType): """ A threshold that is a static value """ - __slots__ = ["__v_thresh"] - - def __init__(self, v_thresh): + __slots__ = [ + "_B", + "_small_b", + "_small_b_0", + "_tau_a", + "_beta", +# "_adpt" + "_scalar" + ] + + def __init__(self, B, small_b, small_b_0, tau_a, beta): super(ThresholdTypeAdaptive, self).__init__([ - DataType.S1615]) # v_thresh - self.__v_thresh = v_thresh + DataType.S1615, + DataType.S1615, + DataType.S1615, + DataType.UINT32, + DataType.S1615, + DataType.UINT32, + DataType.S1615 + ]) + self._B = B + self._small_b = small_b + self._small_b_0 = small_b_0 + self._tau_a = tau_a + self._beta = beta + self._scalar = 1000 @overrides(AbstractThresholdType.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -39,11 +78,15 @@ def get_n_cpu_cycles(self, n_neurons): @overrides(AbstractThresholdType.add_parameters) def add_parameters(self, parameters): - parameters[V_THRESH] = self.__v_thresh + parameters[SMALL_B_0] = self._small_b_0 + parameters[TAU_A] = self._tau_a + parameters[BETA] = self._beta + parameters[SCALAR] = self._scalar @overrides(AbstractThresholdType.add_state_variables) def add_state_variables(self, state_variables): - pass + state_variables[BIG_B] = self._B + state_variables[SMALL_B] = self._small_b @overrides(AbstractThresholdType.get_units) def get_units(self, 
variable): @@ -53,22 +96,72 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @overrides(AbstractThresholdType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): + @inject_items({"ts": "MachineTimeStep"}) + @overrides(AbstractThresholdType.get_values, additional_arguments={'ts'}) + def get_values(self, parameters, state_variables, vertex_slice, ts): + + ulfract = pow(2, 32) # Add the rest of the data - return [parameters[V_THRESH]] + return [ + state_variables[BIG_B], + state_variables[SMALL_B], + parameters[SMALL_B_0], + parameters[TAU_A].apply_operation( + operation=lambda + x: numpy.exp(float(-ts) / (1000.0 * x)) * ulfract), + parameters[BETA], + parameters[TAU_A].apply_operation( + operation=lambda x: (1 - numpy.exp( + float(-ts) / (1000.0 * x))) * ulfract), # ADPT + parameters[SCALAR] + ] @overrides(AbstractThresholdType.update_values) def update_values(self, values, parameters, state_variables): # Read the data - (_v_thresh,) = values + (big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar) = values + + state_variables[BIG_B] = big_b + state_variables[SMALL_B] = small_b + + @property + def B(self): + return self._B + + @B.setter + def B(self, new_value): + self._B = new_value + + @property + def small_b(self): + return self._small_b + + @small_b.setter + def small_b(self, new_value): + self._small_b = new_value + + @property + def small_b_0(self): + return self._small_b_0 + + @small_b_0.setter + def small_b_0(self, new_value): + self._small_b_0 = new_value + + @property + def tau_a(self): + return self._tau_a + + @tau_a.setter + def tau_a(self, new_value): + self._tau_a = new_value @property - def v_thresh(self): - return self.__v_thresh + def beta(self): + return self._beta - @v_thresh.setter - def v_thresh(self, v_thresh): - self.__v_thresh = v_thresh + @beta.setter + def beta(self, new_value): + self._beta = new_value diff --git 
a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_eprop.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_eprop.py deleted file mode 100644 index b457c1103f7..00000000000 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_eprop.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from spinn_utilities.overrides import overrides -from data_specification.enums import DataType -from .abstract_threshold_type import AbstractThresholdType - -V_THRESH = "v_thresh" - -UNITS = {V_THRESH: "mV"} - - -class ThresholdTypeEProp(AbstractThresholdType): - """ A threshold that is a static value - """ - __slots__ = ["__v_thresh"] - - def __init__(self, v_thresh): - super(ThresholdTypeStatic, self).__init__([ - DataType.S1615]) # v_thresh - self.__v_thresh = v_thresh - - @overrides(AbstractThresholdType.get_n_cpu_cycles) - def get_n_cpu_cycles(self, n_neurons): - # Just a comparison, but 2 just in case! 
- return 2 * n_neurons - - @overrides(AbstractThresholdType.add_parameters) - def add_parameters(self, parameters): - parameters[V_THRESH] = self.__v_thresh - - @overrides(AbstractThresholdType.add_state_variables) - def add_state_variables(self, state_variables): - pass - - @overrides(AbstractThresholdType.get_units) - def get_units(self, variable): - return UNITS[variable] - - @overrides(AbstractThresholdType.has_variable) - def has_variable(self, variable): - return variable in UNITS - - @overrides(AbstractThresholdType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): - - # Add the rest of the data - return [parameters[V_THRESH]] - - @overrides(AbstractThresholdType.update_values) - def update_values(self, values, parameters, state_variables): - - # Read the data - (_v_thresh,) = values - - @property - def v_thresh(self): - return self.__v_thresh - - @v_thresh.setter - def v_thresh(self, v_thresh): - self.__v_thresh = v_thresh From b095d6e25322cea565b6163dcf418c1e7b7a5bb6 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Thu, 26 Sep 2019 20:49:02 +0100 Subject: [PATCH 006/123] Adaptive threshold working --- .../implementations/neuron_impl_eprop_adaptive.h | 12 +++++++----- neural_modelling/src/neuron/models/neuron_model.h | 3 ++- .../models/neuron_model_eprop_adaptive_impl.c | 14 +++++++++----- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index a03c3a8c193..9fe55ceca8c 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -270,24 +270,26 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); + // determine if a spike should occur + 
threshold_type_update_threshold(neuron->z, threshold_type); + // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron); + external_bias, neuron, B_t); + - // determine if a spike should occur - threshold_type_update_threshold(neuron->z, threshold_type); // Also update Z (including using refractory period information) state_t nu = (voltage - threshold_type->B)/threshold_type->B; if (nu > ZERO){ - neuron->z = 1 * neuron->A; // implements refractory period + neuron->z = 1.0k * neuron->A; // implements refractory period } - bool spike = neuron->z; + bool spike = z_t; // ********************************************************* // Record updated state diff --git a/neural_modelling/src/neuron/models/neuron_model.h b/neural_modelling/src/neuron/models/neuron_model.h index 6a65249e03d..48b3a78cde2 100644 --- a/neural_modelling/src/neuron/models/neuron_model.h +++ b/neural_modelling/src/neuron/models/neuron_model.h @@ -55,7 +55,8 @@ void neuron_model_set_global_neuron_params( state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, neuron_pointer_t neuron); + input_t external_bias, neuron_pointer_t neuron, + REAL B_t); //! \brief Indicates that the neuron has spiked //! 
\param[in] neuron pointer to a neuron parameter struct which contains all diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index b26e026a1a2..6931f298535 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -21,11 +21,14 @@ // simple Leaky I&F ODE static inline void lif_neuron_closed_form( - neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { + neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep, + REAL B_t) { + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; // update membrane voltage - neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) + - neuron->z * B_t; // this line achieves reset } void neuron_model_set_global_neuron_params( @@ -37,11 +40,12 @@ void neuron_model_set_global_neuron_params( state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, neuron_pointer_t neuron) { + input_t external_bias, neuron_pointer_t neuron, + REAL B_t) { + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); - REAL total_exc = 0; REAL total_inh = 0; @@ -56,7 +60,7 @@ state_t neuron_model_state_update( exc_input[0] - inh_input[0] + external_bias + neuron->I_offset; lif_neuron_closed_form( - neuron, neuron->V_membrane, input_this_timestep); + neuron, neuron->V_membrane, input_this_timestep, B_t); // If outside of the refractory period if (neuron->refract_timer <= 0) { From c813aff7afaeb2351cff35ebb9b03227ad386e01 Mon Sep 17 00:00:00 2001 From: mbassor2 Date: Fri, 27 Sep 2019 14:48:48 +0100 Subject: [PATCH 007/123] make regularisation target 
rates scalable with neurons per core --- .../implementations/neuron_impl_eprop_adaptive.h | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 9fe55ceca8c..f2b4b5f3443 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -194,6 +194,15 @@ static void neuron_impl_load_neuron_parameters( neuron_model_set_global_neuron_params(global_parameters); + // ********************************************** + // ******** for eprop regularisation ************ + // ********************************************** + global_parameters->core_target_rate = global_parameters->core_target_rate + * n_neurons; // scales target rate depending on number of neurons + global_parameters->core_pop_rate = global_parameters->core_pop_rate + * n_neurons; // scale initial value, too + + #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { @@ -291,13 +300,18 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, bool spike = z_t; + + // ********************************************************* // Record updated state // Record V (just as cheap to set then to gate later) recorded_variable_values[V_RECORDING_INDEX] = voltage; // result; // Record Z - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = z_t; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = z_t; + + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = + global_parameters->core_pop_rate; // Record B recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = B_t; // threshold_type->B; From 3e63017592154995db5d9e15c591f1da252aa6a4 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Wed, 2 Oct 2019 17:53:20 +0100 Subject: [PATCH 008/123] 
beginnings of store recall readout neuron, still broken and needs actual implementation --- .../implementations/store_recall_readout.h | 559 ++++++++++++++++++ .../neuron_model_store_recall_readout_impl.c | 98 +++ .../neuron_model_store_recall_readout_impl.h | 62 ++ .../neuron/builds/store_recall_readout.py | 42 ++ .../models/neuron/neuron_models/__init__.py | 4 +- .../neuron_model_store_recall_readout.py | 321 ++++++++++ 6 files changed, 1085 insertions(+), 1 deletion(-) create mode 100644 neural_modelling/src/neuron/implementations/store_recall_readout.h create mode 100644 neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c create mode 100644 neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h create mode 100644 spynnaker/pyNN/models/neuron/builds/store_recall_readout.py create mode 100644 spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py diff --git a/neural_modelling/src/neuron/implementations/store_recall_readout.h b/neural_modelling/src/neuron/implementations/store_recall_readout.h new file mode 100644 index 00000000000..6855d83fbb0 --- /dev/null +++ b/neural_modelling/src/neuron/implementations/store_recall_readout.h @@ -0,0 +1,559 @@ +#ifndef _NEURON_IMPL_STANDARD_H_ +#define _NEURON_IMPL_STANDARD_H_ + +#include "neuron_impl.h" + +// Includes for model parts used in this implementation +#include +#include +#include +#include +#include + +// Further includes +#include +#include +#include +#include +#include + +#define V_RECORDING_INDEX 0 +#define GSYN_EXCITATORY_RECORDING_INDEX 1 +#define GSYN_INHIBITORY_RECORDING_INDEX 2 + +#ifndef NUM_EXCITATORY_RECEPTORS +#define NUM_EXCITATORY_RECEPTORS 1 +#error NUM_EXCITATORY_RECEPTORS was undefined. It should be defined by a synapse\ + shaping include +#endif + +#ifndef NUM_INHIBITORY_RECEPTORS +#define NUM_INHIBITORY_RECEPTORS 1 +#error NUM_INHIBITORY_RECEPTORS was undefined. 
It should be defined by a synapse\ + shaping include +#endif + +//! Array of neuron states +static neuron_pointer_t neuron_array; + +//! Input states array +static input_type_pointer_t input_type_array; + +//! Additional input array +static additional_input_pointer_t additional_input_array; + +//! Threshold states array +static threshold_type_pointer_t threshold_type_array; + +//! Global parameters for the neurons +static global_neuron_params_pointer_t global_parameters; + +// The synapse shaping parameters +static synapse_param_t *neuron_synapse_shaping_params; + +static REAL next_spike_time = 0; +static uint32_t timer = 0; +static uint32_t target_ind = 0; + +// Store recall parameters +uint32_t store_recall_state = 0; //0: idle, 1: storing, 2:stored, 3:recall +uint32_t stored_value = 0; +accum softmax_0 = 0; +accum softmax_1 = 0; +accum cross_entropy = 0; + +static bool neuron_impl_initialise(uint32_t n_neurons) { + + // allocate DTCM for the global parameter details + if (sizeof(global_neuron_params_t) > 0) { + global_parameters = (global_neuron_params_t *) spin1_malloc( + sizeof(global_neuron_params_t)); + if (global_parameters == NULL) { + log_error("Unable to allocate global neuron parameters" + "- Out of DTCM"); + return false; + } + } + + // Allocate DTCM for neuron array + if (sizeof(neuron_t) != 0) { + neuron_array = (neuron_t *) spin1_malloc(n_neurons * sizeof(neuron_t)); + if (neuron_array == NULL) { + log_error("Unable to allocate neuron array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for input type array and copy block of data + if (sizeof(input_type_t) != 0) { + input_type_array = (input_type_t *) spin1_malloc( + n_neurons * sizeof(input_type_t)); + if (input_type_array == NULL) { + log_error("Unable to allocate input type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for additional input array and copy block of data + if (sizeof(additional_input_t) != 0) { + additional_input_array = 
(additional_input_pointer_t) spin1_malloc( + n_neurons * sizeof(additional_input_t)); + if (additional_input_array == NULL) { + log_error("Unable to allocate additional input array" + " - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for threshold type array and copy block of data + if (sizeof(threshold_type_t) != 0) { + threshold_type_array = (threshold_type_t *) spin1_malloc( + n_neurons * sizeof(threshold_type_t)); + if (threshold_type_array == NULL) { + log_error("Unable to allocate threshold type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for synapse shaping parameters + if (sizeof(synapse_param_t) != 0) { + neuron_synapse_shaping_params = (synapse_param_t *) spin1_malloc( + n_neurons * sizeof(synapse_param_t)); + if (neuron_synapse_shaping_params == NULL) { + log_error("Unable to allocate synapse parameters array" + " - Out of DTCM"); + return false; + } + } + + // Initialise pointers to Neuron parameters in STDP code + synapse_dynamics_set_neuron_array(neuron_array); + log_info("set pointer to neuron array in stdp code"); + + return true; +} + +static void neuron_impl_add_inputs( + index_t synapse_type_index, index_t neuron_index, + input_t weights_this_timestep) { + // simple wrapper to synapse type input function + synapse_param_pointer_t parameters = + &(neuron_synapse_shaping_params[neuron_index]); + synapse_types_add_neuron_input(synapse_type_index, + parameters, weights_this_timestep); +} + +static void neuron_impl_load_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("reading parameters, next is %u, n_neurons is %u ", + next, n_neurons); + + //log_debug("writing neuron global parameters"); + spin1_memcpy(global_parameters, &address[next], + sizeof(global_neuron_params_t)); + next += (sizeof(global_neuron_params_t) + 3) / 4; + + log_debug("reading neuron local parameters"); + spin1_memcpy(neuron_array, &address[next], n_neurons * sizeof(neuron_t)); + next += ((n_neurons * 
sizeof(neuron_t)) + 3) / 4; + + log_debug("reading input type parameters"); + spin1_memcpy(input_type_array, &address[next], + n_neurons * sizeof(input_type_t)); + next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + + log_debug("reading threshold type parameters"); + spin1_memcpy(threshold_type_array, &address[next], + n_neurons * sizeof(threshold_type_t)); + next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + + log_debug("reading synapse parameters"); + spin1_memcpy(neuron_synapse_shaping_params, &address[next], + n_neurons * sizeof(synapse_param_t)); + next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + + log_debug("reading additional input type parameters"); + spin1_memcpy(additional_input_array, &address[next], + n_neurons * sizeof(additional_input_t)); + next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + + neuron_model_set_global_neuron_params(global_parameters); + + io_printf(IO_BUF, "\nPrinting global params\n"); + io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); + io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); + io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); + io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); + io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); + io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); + io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); + io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); + + + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + + io_printf(IO_BUF, "size of global params: %u", + sizeof(global_neuron_params_t)); + + + + #if LOG_LEVEL >= LOG_DEBUG + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + 
log_debug("-------------------------------------\n"); + //} + #endif // LOG_LEVEL >= LOG_DEBUG +} + + +// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& + +// Poisson Spike Source Functions + +static inline REAL slow_spike_source_get_time_to_spike( + REAL mean_inter_spike_interval_in_ticks, neuron_pointer_t neuron) { + return exponential_dist_variate( + mars_kiss64_seed, +// neuron->spike_source_seed + global_parameters->spike_source_seed + ) + * mean_inter_spike_interval_in_ticks; +} + + + +void set_spike_source_rate(neuron_pointer_t neuron, REAL rate, + threshold_type_pointer_t threshold_type) { + + // clip rate to ensure divde by 0 and overflow don't occur + if (rate < 0.25){ + rate = 0.25; + } else if (rate > threshold_type->threshold_value) { + rate = threshold_type->threshold_value; + } + + REAL rate_diff = neuron->rate_at_last_setting - rate; + + // ensure rate_diff is absolute + if REAL_COMPARE(rate_diff, <, REAL_CONST(0.0)) { + rate_diff = -rate_diff; + } + + // Has rate changed by more than a predefined threshold since it was last + // used to update the mean isi ticks? + if ((rate_diff) > neuron->rate_update_threshold){ + // then update the rate + neuron->rate_at_last_setting = rate; + + // Update isi ticks based on new rate + neuron->mean_isi_ticks = + // rate * + //// global_parameters->ticks_per_second; // shouldn't this be ticks_per_second/rate? + // neuron->ticks_per_second ; // shouldn't this be ticks_per_second/rate? + (global_parameters->ticks_per_second / rate); // shouldn't this be ticks_per_second/rate? + + // Account for time that's already passed since previous spike + neuron->time_to_spike_ticks = neuron->mean_isi_ticks + - neuron->time_since_last_spike; + } // else stick with existing rate and isi ticks - they're within threshold +} + + +bool timer_update_determine_poisson_spiked(neuron_pointer_t neuron) { + // NOTE: ALL SOURCES TREATED AS SLOW SOURCES!!! 
+ // NOTE: NO SOURCE CAN SPIKE MORE THAN ONCE PER TIMESTEP + // If this spike source should spike now + + bool has_spiked = false; + + // Advance by one timestep + // Subtract tick + neuron->time_to_spike_ticks -= REAL_CONST(1.0); + + // Add tick to time since last spike (to enable for dynamic rate change) + neuron->time_since_last_spike += 1.0k; + +// io_printf(IO_BUF, " Time to next spike: %k\n", +// neuron->time_to_spike_ticks); + + if (REAL_COMPARE( + neuron->time_to_spike_ticks, <=, + REAL_CONST(0.0))) { + + // Update time to spike + next_spike_time = slow_spike_source_get_time_to_spike( + neuron->mean_isi_ticks, neuron); + + neuron->time_to_spike_ticks += next_spike_time; + + // Set time since last spike to zero, so we start counting from here + neuron->time_since_last_spike = 0; + + has_spiked = true; + } + + return has_spiked; +} + +// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& + + +static bool neuron_impl_do_timestep_update(index_t neuron_index, + input_t external_bias, state_t *recorded_variable_values) { + + // Get the neuron itself + neuron_pointer_t neuron = &neuron_array[neuron_index]; + bool spike = false; + + target_ind = timer & 0x3ff; // repeats on a cycle of 1024 entries in array + + if (timer % 200 == 0){ + if (rand() < global_parameters->prob_command){ + store_recall_state = (store_recall_state + 1) % 4; + } + } + +// io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); +// io_printf(IO_BUF, "Target: %k\n\n", +// global_parameters->target_V[target_ind]); + + // Get the input_type parameters and voltage for this neuron + input_type_pointer_t input_type = &input_type_array[neuron_index]; + + // Get threshold and additional input parameters for this neuron + threshold_type_pointer_t threshold_type = + &threshold_type_array[neuron_index]; + additional_input_pointer_t additional_input = + &additional_input_array[neuron_index]; + synapse_param_pointer_t synapse_type = + 
&neuron_synapse_shaping_params[neuron_index]; + + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + + + // Get the exc and inh values from the synapses + input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); + input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); + + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_value, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_value, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording + REAL total_exc = 0; + REAL total_inh = 0; + + for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ + total_exc += exc_input_values[i]; + } + for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ + total_inh += inh_input_values[i]; + } + + // Call functions to get the input values to be recorded + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + + // Softmax of the exc and inh inputs representing 1 and 0 respectively + // may need to keep a running total of choice over the recall command + accum exp_0 = exp(total_inh); + accum exp_1 = exp(total_exc); + softmax_0 = exp_0 / (exp_1 + exp_0); + softmax_1 = exp_1 / (exp_1 + exp_0); + // What to do if log(0) + if (stored_value){ + cross_entropy = -log(softmax_1); + } + else{ + cross_entropy = -log(softmax_0); + } + + // Call functions to convert exc_input and inh_input to current + input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); + + external_bias += additional_input_get_input_value_as_current( + additional_input, voltage); + + if (neuron_index == 0){ + recorded_variable_values[V_RECORDING_INDEX] = voltage; + // update neuron 
parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, -50k); +// io_printf(IO_BUF, "Readout membrane pot: %k\n", voltage); + // determine if a spike should occur + // bool spike = threshold_type_is_above_threshold(result, threshold_type); + + // Finally, set global membrane potential to updated value + global_parameters->readout_V = result; + + } else if (neuron_index == 1) { // this is the excitatory error source + + recorded_variable_values[V_RECORDING_INDEX] = + global_parameters->target_V[target_ind]; + + // Update Poisson neuron rate based on updated V + REAL rate = (global_parameters->target_V[target_ind] + - global_parameters->readout_V); // calc difference to +// io_printf(IO_BUF, "New Rate: %k", rate); +// rate = rate * 10; + rate = rate * 5.0k; + if (rate > 0) { // readout is below target, so set rate = diff. + // This will cause potentiation of excitatory synapses, + // and depression of inhibitory synapses + set_spike_source_rate(neuron, rate, + threshold_type); + } else { // readout is above target, so set rate = zero + set_spike_source_rate(neuron, 0, + threshold_type); + } + + // judge whether poisson neuron should have fired + spike = timer_update_determine_poisson_spiked(neuron); + + } else if (neuron_index == 2){ + // Update Poisson neuron rate based on updated V + REAL rate = (global_parameters->target_V[target_ind] + - global_parameters->readout_V); // calc difference to +// io_printf(IO_BUF, "New Rate: %k", rate); + + recorded_variable_values[V_RECORDING_INDEX] = rate; +// rate = rate * 10; + + rate = rate * 5.0k; + if (rate < 0) { + // readout is above target, send spikes from inhibitory neuron with rate = -diff: + // this will depress excitatory synapses, and potenitate inhibitory synapses + set_spike_source_rate(neuron, -rate, + threshold_type); + } else { // readout is below target, so set rate = 0; + 
set_spike_source_rate(neuron, 0, + threshold_type); + } + + // judge whether poisson neuron should have fired + spike = timer_update_determine_poisson_spiked(neuron); + timer++; // update this here, as needs to be done once per iteration over all the neurons + + } + + + + // If spike occurs, communicate to relevant parts of model + if (spike) { + // Call relevant model-based functions + // Tell the neuron model +// neuron_model_has_spiked(neuron); + + // Tell the additional input + additional_input_has_spiked(additional_input); + } + + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); + + #if LOG_LEVEL >= LOG_DEBUG + neuron_model_print_state_variables(neuron); + #endif // LOG_LEVEL >= LOG_DEBUG + + // Return the boolean to the model timestep update + return spike; +} + + + + + +//! \brief stores neuron parameter back into sdram +//! \param[in] address: the address in sdram to start the store +static void neuron_impl_store_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("writing parameters"); + + //log_debug("writing neuron global parameters"); + spin1_memcpy(&address[next], global_parameters, + sizeof(global_neuron_params_t)); + next += (sizeof(global_neuron_params_t) + 3) / 4; + + log_debug("writing neuron local parameters"); + spin1_memcpy(&address[next], neuron_array, + n_neurons * sizeof(neuron_t)); + next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + + log_debug("writing input type parameters"); + spin1_memcpy(&address[next], input_type_array, + n_neurons * sizeof(input_type_t)); + next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + + log_debug("writing threshold type parameters"); + spin1_memcpy(&address[next], threshold_type_array, + n_neurons * sizeof(threshold_type_t)); + next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + + log_debug("writing synapse parameters"); + spin1_memcpy(&address[next], neuron_synapse_shaping_params, + n_neurons * 
sizeof(synapse_param_t)); + next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + + log_debug("writing additional input type parameters"); + spin1_memcpy(&address[next], additional_input_array, + n_neurons * sizeof(additional_input_t)); + next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; +} + +#if LOG_LEVEL >= LOG_DEBUG +void neuron_impl_print_inputs(uint32_t n_neurons) { + bool empty = true; + for (index_t i = 0; i < n_neurons; i++) { + empty = empty + && (bitsk(synapse_types_get_excitatory_input( + &(neuron_synapse_shaping_params[i])) + - synapse_types_get_inhibitory_input( + &(neuron_synapse_shaping_params[i]))) == 0); + } + + if (!empty) { + log_debug("-------------------------------------\n"); + + for (index_t i = 0; i < n_neurons; i++) { + input_t input = + synapse_types_get_excitatory_input( + &(neuron_synapse_shaping_params[i])) + - synapse_types_get_inhibitory_input( + &(neuron_synapse_shaping_params[i])); + if (bitsk(input) != 0) { + log_debug("%3u: %12.6k (= ", i, input); + synapse_types_print_input( + &(neuron_synapse_shaping_params[i])); + log_debug(")\n"); + } + } + log_debug("-------------------------------------\n"); + } +} + +void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + synapse_types_print_parameters(&(neuron_synapse_shaping_params[n])); + } + log_debug("-------------------------------------\n"); +} + +const char *neuron_impl_get_synapse_type_char(uint32_t synapse_type) { + return synapse_types_get_type_char(synapse_type); +} +#endif // LOG_LEVEL >= LOG_DEBUG + +#endif // _NEURON_IMPL_STANDARD_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c new file mode 100644 index 00000000000..0c86a0f677a --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c @@ -0,0 
+1,98 @@ +#include "neuron_model_lif_poisson_readout_impl.h" + +#include + +// simple Leaky I&F ODE +static inline void _lif_neuron_closed_form( + neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { + + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + + // update membrane voltage + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +} + +void neuron_model_set_global_neuron_params( + global_neuron_params_pointer_t params) { + use(params); + + // Does Nothing - no params +} + +state_t neuron_model_state_update( + uint16_t num_excitatory_inputs, input_t* exc_input, + uint16_t num_inhibitory_inputs, input_t* inh_input, + input_t external_bias, neuron_pointer_t neuron, input_t B_t) { + + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); + log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); + + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { + REAL total_exc = 0; + REAL total_inh = 0; + + total_exc += exc_input[0]; + total_inh += inh_input[0]; +// for (int i=0; i < num_excitatory_inputs; i++){ +// total_exc += exc_input[i]; +// } +// for (int i=0; i< num_inhibitory_inputs; i++){ +// total_inh += inh_input[i]; +// } + // Get the input in nA + input_t input_this_timestep = + total_exc - total_inh + external_bias + neuron->I_offset; + + _lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep); + } else { + + // countdown refractory timer + neuron->refract_timer -= 1; + } + return neuron->V_membrane; +} + +void neuron_model_has_spiked(neuron_pointer_t neuron) { + + // reset membrane voltage + neuron->V_membrane = neuron->V_reset; + + // reset refractory timer + neuron->refract_timer = neuron->T_refract; +} + +state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { + return neuron->V_membrane; +} + +void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { + log_debug("V membrane = %11.4k mv", 
neuron->V_membrane); +} + +void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { + io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); + io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); + + io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); + io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); + + io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); + + io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); + io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); + io_printf(IO_BUF, "time_to_spike_ticks = %k \n", + neuron->time_to_spike_ticks); + +// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); +// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +} + + + diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h new file mode 100644 index 00000000000..3cd84c3838d --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h @@ -0,0 +1,62 @@ +#ifndef _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ +#define _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ + +#include "neuron_model.h" +#include "random.h" + +///////////////////////////////////////////////////////////// +// definition for LIF neuron parameters +typedef struct neuron_t { + // membrane voltage [mV] + REAL V_membrane; + + // membrane resting voltage [mV] + REAL V_rest; + + // membrane resistance [MOhm] + REAL R_membrane; + + // 'fixed' computation parameter - time constant multiplier for + // closed-form solution + // exp(-(machine time step in 
ms)/(R * C)) [.] + REAL exp_TC; + + // offset current [nA] + REAL I_offset; + + // countdown to end of next refractory period [timesteps] + int32_t refract_timer; + + // post-spike reset membrane voltage [mV] + REAL V_reset; + + // refractory time of neuron [timesteps] + int32_t T_refract; + + + // Poisson compartment params + REAL mean_isi_ticks; + REAL time_to_spike_ticks; + + int32_t time_since_last_spike; + REAL rate_at_last_setting; + REAL rate_update_threshold; + + +// // Should be in global params +// mars_kiss64_seed_t spike_source_seed; // array of 4 values +//// UFRACT seconds_per_tick; +// REAL ticks_per_second; + +} neuron_t; + +typedef struct global_neuron_params_t { + mars_kiss64_seed_t spike_source_seed; // array of 4 values + REAL ticks_per_second; + REAL readout_V; + REAL prob_command; + REAL rate_on; + REAL rate_off; +} global_neuron_params_t; + +#endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py b/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py new file mode 100644 index 00000000000..daf2f222b33 --- /dev/null +++ b/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py @@ -0,0 +1,42 @@ +from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard +from spynnaker.pyNN.models.defaults import default_initial_values +from spynnaker.pyNN.models.neuron.neuron_models import ( + NeuronModelStoreRecallReadout) +from spynnaker.pyNN.models.neuron.synapse_types import ( + SynapseTypeEPropAdaptive) +from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent +from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeStatic + + +class StoreRecallReadout(AbstractPyNNNeuronModelStandard): + """ Leaky integrate and fire neuron which fires Poisson spikes with rate + set by the neurons membrane potential + """ + + @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", + "isyn_inh2", "mean_isi_ticks", + "time_to_spike_ticks"}) 
+ def __init__( + self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, + v_thresh=100, tau_refrac=0.1, + isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, + tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, + mean_isi_ticks=65000, time_to_spike_ticks=65000, + i_offset=0.0, v=50, rate_update_threshold=0.25, + prob_command=1./6., rate_on=50, rate_off=0): + # pylint: disable=too-many-arguments, too-many-locals + neuron_model = NeuronModelStoreRecallReadout( + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, + mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, + prob_command, rate_on, rate_off) + synapse_type = SynapseTypeEPropAdaptive( + tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) + input_type = InputTypeCurrent() + threshold_type = ThresholdTypeStatic(v_thresh) + + super(StoreRecallReadout, self).__init__( + model_name="store_recall_readout_neuron", + binary="store_recall_readout_neuron.aplx", + neuron_model=neuron_model, input_type=input_type, + synapse_type=synapse_type, threshold_type=threshold_type) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index ca6c796f13e..6da3028db7d 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -18,6 +18,8 @@ from .neuron_model_leaky_integrate_and_fire import ( NeuronModelLeakyIntegrateAndFire) from .neuron_model_eprop import NeuronModelEProp +from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout __all__ = ["AbstractNeuronModel", "NeuronModelIzh", - "NeuronModelLeakyIntegrateAndFire", "NeuronModelEProp"] + "NeuronModelLeakyIntegrateAndFire", "NeuronModelEProp", + "NeuronModelStoreRecallReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py new file 
mode 100644 index 00000000000..2cf708cca5e --- /dev/null +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py @@ -0,0 +1,321 @@ +import numpy +from spinn_utilities.overrides import overrides +from data_specification.enums import DataType +from pacman.executor.injection_decorator import inject_items +from .abstract_neuron_model import AbstractNeuronModel + +MICROSECONDS_PER_SECOND = 1000000.0 +MICROSECONDS_PER_MILLISECOND = 1000.0 +V = "v" +V_REST = "v_rest" +TAU_M = "tau_m" +CM = "cm" +I_OFFSET = "i_offset" +V_RESET = "v_reset" +TAU_REFRAC = "tau_refrac" +COUNT_REFRAC = "count_refrac" +MEAN_ISI_TICKS = "mean_isi_ticks" +TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" +SEED1 = "seed1" +SEED2 = "seed2" +SEED3 = "seed3" +SEED4 = "seed4" +TICKS_PER_SECOND = "ticks_per_second" +TIME_SINCE_LAST_SPIKE = "time_since_last_spike" +RATE_AT_LAST_SETTING = "rate_at_last_setting" +RATE_UPDATE_THRESHOLD = "rate_update_threshold" +PROB_COMMAND = "prob_command" +RATE_ON = "rate_on" +RATE_OFF = "rate_off" + +UNITS = { + V: 'mV', + V_REST: 'mV', + TAU_M: 'ms', + CM: 'nF', + I_OFFSET: 'nA', + V_RESET: 'mV', + TAU_REFRAC: 'ms' +} + + +class NeuronModelStoreRecallReadout(AbstractNeuronModel): + __slots__ = [ + "_v_init", + "_v_rest", + "_tau_m", + "_cm", + "_i_offset", + "_v_reset", + "_tau_refrac", + "_mean_isi_ticks", + "_time_to_spike_ticks", + "_time_since_last_spike", + "_rate_at_last_setting", + "_rate_update_threshold", + "_prob_command", + "_rate_on", + "_rate_off" + ] + + def __init__( + self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, + mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, + prob_command, rate_on, rate_off): + + global_data_types=[ + DataType.UINT32, # MARS KISS seed + DataType.UINT32, # MARS KISS seed + DataType.UINT32, # MARS KISS seed + DataType.UINT32, # MARS KISS seed + DataType.S1615, # ticks_per_second + DataType.S1615 # global mem pot + ] + global_data_types.extend([DataType.S1615 for i in range(1024)]) 
+ + super(NeuronModelStoreRecallReadout, self).__init__( + data_types= [ + DataType.S1615, # v + DataType.S1615, # v_rest + DataType.S1615, # r_membrane (= tau_m / cm) + DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.S1615, # i_offset + DataType.INT32, # count_refrac + DataType.S1615, # v_reset + DataType.INT32, # tau_refrac + #### Poisson Compartment Params #### + DataType.S1615, # REAL mean_isi_ticks + DataType.S1615, # REAL time_to_spike_ticks + DataType.INT32, # int32_t time_since_last_spike s + DataType.S1615, # REAL rate_at_last_setting; s + DataType.S1615 # REAL rate_update_threshold; p + ], + + global_data_types=global_data_types + ) + + if v_init is None: + v_init = v_rest + + self._v_init = v_init + self._v_rest = v_rest + self._tau_m = tau_m + self._cm = cm + self._i_offset = i_offset + self._v_reset = v_reset + self._tau_refrac = tau_refrac + self._mean_isi_ticks = mean_isi_ticks + self._time_to_spike_ticks = time_to_spike_ticks + self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation + self._rate_at_last_setting = 0 + self._rate_update_threshold = 2 + self._prob_command = prob_command + self._rate_off = rate_off + self._rate_on = rate_on + + @overrides(AbstractNeuronModel.get_n_cpu_cycles) + def get_n_cpu_cycles(self, n_neurons): + # A bit of a guess + return 100 * n_neurons + + @overrides(AbstractNeuronModel.add_parameters) + def add_parameters(self, parameters): + parameters[V_REST] = self._v_rest + parameters[TAU_M] = self._tau_m + parameters[CM] = self._cm + parameters[I_OFFSET] = self._i_offset + parameters[V_RESET] = self._v_reset + parameters[TAU_REFRAC] = self._tau_refrac + parameters[SEED1] = 10065 + parameters[SEED2] = 232 + parameters[SEED3] = 3634 + parameters[SEED4] = 4877 + + parameters[PROB_COMMAND] = 1./6. 
+ parameters[RATE_ON] = 50 + parameters[RATE_OFF] = 0 + + parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() + parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold +# parameters[TARGET_DATA] = self._target_data + + @overrides(AbstractNeuronModel.add_state_variables) + def add_state_variables(self, state_variables): + state_variables[V] = self._v_init + state_variables[COUNT_REFRAC] = 0 + state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks + state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential + state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike + state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting + + + @overrides(AbstractNeuronModel.get_units) + def get_units(self, variable): + return UNITS[variable] + + @overrides(AbstractNeuronModel.has_variable) + def has_variable(self, variable): + return variable in UNITS + + @inject_items({"ts": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + def get_values(self, parameters, state_variables, vertex_slice, ts): + + # Add the rest of the data + return [state_variables[V], + parameters[V_REST], + parameters[TAU_M] / parameters[CM], + parameters[TAU_M].apply_operation( + operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), + parameters[I_OFFSET], state_variables[COUNT_REFRAC], + parameters[V_RESET], + parameters[TAU_REFRAC].apply_operation( + operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + state_variables[MEAN_ISI_TICKS], + state_variables[TIME_TO_SPIKE_TICKS], + state_variables[TIME_SINCE_LAST_SPIKE], + state_variables[RATE_AT_LAST_SETTING], + parameters[RATE_UPDATE_THRESHOLD] + ] + + @overrides(AbstractNeuronModel.update_values) + def update_values(self, values, parameters, state_variables): + + # Read the data + (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, + _v_reset, _tau_refrac, + mean_isi_ticks, time_to_spike_ticks, 
time_since_last_spike, + rate_at_last_setting, _rate_update_threshold +# _seed1, _seed2, _seed3, _seed4, _ticks_per_second + ) = values + + # Copy the changed data only + state_variables[V] = v + state_variables[COUNT_REFRAC] = count_refrac + state_variables[MEAN_ISI_TICKS] = mean_isi_ticks + state_variables[TIME_TO_SPIKE_TICKS] = time_to_spike_ticks + state_variables[TIME_SINCE_LAST_SPIKE] = time_since_last_spike + state_variables[RATE_AT_LAST_SETTING] = rate_at_last_setting + + # Global params + @inject_items({"machine_time_step": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_global_values, + additional_arguments={'machine_time_step'}) + def get_global_values(self, machine_time_step): + vals = [ + 1, # seed 1 + 2, # seed 2 + 3, # seed 3 + 4, # seed 4 + MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second + 0.0, # set to 0, as will be set in first timestep of model anyway + ] + +# target_data = [] +# +# for i in range(1024): +# target_data.append( +# # 4 +# 5 + 2 * numpy.sin(2 * i * 2* numpy.pi / 1024) \ +# + 5 * numpy.sin((4 * i * 2* numpy.pi / 1024)) +# ) + vals.extend(self._prob_command) + vals.extend(self._rate_on) + vals.extend(self._rate_off) + return vals + + @property + def prob_command(self): + return self._prob_command + + @prob_command.setter + def prob_command(self, prob_command): + self._prob_command = prob_command + + @property + def rate_on(self): + return self._rate_on + + @rate_on.setter + def rate_on(self, rate_on): + self._rate_on = rate_on + + @property + def rate_off(self): + return self._rate_off + + @rate_on.setter + def rate_on(self, rate_off): + self._rate_off = rate_off + + @property + def v_init(self): + return self._v + + @v_init.setter + def v_init(self, v_init): + self._v = v_init + + @property + def v_rest(self): + return self._v_rest + + @v_rest.setter + def v_rest(self, v_rest): + self._v_rest = v_rest + + @property + def tau_m(self): + return self._tau_m + + @tau_m.setter + def tau_m(self, tau_m): + 
self._tau_m = tau_m + + @property + def cm(self): + return self._cm + + @cm.setter + def cm(self, cm): + self._cm = cm + + @property + def i_offset(self): + return self._i_offset + + @i_offset.setter + def i_offset(self, i_offset): + self._i_offset = i_offset + + @property + def v_reset(self): + return self._v_reset + + @v_reset.setter + def v_reset(self, v_reset): + self._v_reset = v_reset + + @property + def tau_refrac(self): + return self._tau_refrac + + @tau_refrac.setter + def tau_refrac(self, tau_refrac): + self._tau_refrac = tau_refrac + + @property + def mean_isi_ticks(self): + return self._mean_isi_ticks + + @mean_isi_ticks.setter + def mean_isi_ticks(self, new_mean_isi_ticks): + self._mean_isi_ticks = new_mean_isi_ticks + + @property + def time_to_spike_ticks(self): + return self._time_to_spike_ticks + + @mean_isi_ticks.setter + def time_to_spike_ticks(self, new_time_to_spike_ticks): + self._time_to_spike_ticks = new_time_to_spike_ticks \ No newline at end of file From a7ee9d65506e635af70f3cc6cbb6327704d93c2f Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 3 Oct 2019 18:41:28 +0100 Subject: [PATCH 009/123] most of the structure of the store recall task is in place just waiting for poisson sources to be update-able from this code, all poisson stuff has been removed, neuron index 1 and 2 has V stored as diff variables, mean and entropy of values comes from average V over the recall period --- .../implementations/store_recall_readout.h | 265 ++++++------------ .../neuron_model_store_recall_readout_impl.h | 6 +- .../neuron_model_store_recall_readout.py | 6 +- 3 files changed, 86 insertions(+), 191 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/store_recall_readout.h b/neural_modelling/src/neuron/implementations/store_recall_readout.h index 6855d83fbb0..e8e51ead28c 100644 --- a/neural_modelling/src/neuron/implementations/store_recall_readout.h +++ b/neural_modelling/src/neuron/implementations/store_recall_readout.h @@ -16,6 +16,7 @@ 
#include #include #include +#include "random.h" #define V_RECORDING_INDEX 0 #define GSYN_EXCITATORY_RECORDING_INDEX 1 @@ -56,11 +57,10 @@ static uint32_t timer = 0; static uint32_t target_ind = 0; // Store recall parameters -uint32_t store_recall_state = 0; //0: idle, 1: storing, 2:stored, 3:recall +uint32_t store_recall_state = 0; // 0: idle, 1: storing, 2:stored, 3:recall uint32_t stored_value = 0; -accum softmax_0 = 0; -accum softmax_1 = 0; -accum cross_entropy = 0; +uint32_t broacast_value = 0; +REAL ticks_for_mean = 0; static bool neuron_impl_initialise(uint32_t n_neurons) { @@ -126,6 +126,9 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } } + // Seed the random input + validate_mars_kiss64_seed(kiss_seed); + // Initialise pointers to Neuron parameters in STDP code synapse_dynamics_set_neuron_array(neuron_array); log_info("set pointer to neuron array in stdp code"); @@ -209,118 +212,34 @@ static void neuron_impl_load_neuron_parameters( #endif // LOG_LEVEL >= LOG_DEBUG } - -// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& - -// Poisson Spike Source Functions - -static inline REAL slow_spike_source_get_time_to_spike( - REAL mean_inter_spike_interval_in_ticks, neuron_pointer_t neuron) { - return exponential_dist_variate( - mars_kiss64_seed, -// neuron->spike_source_seed - global_parameters->spike_source_seed - ) - * mean_inter_spike_interval_in_ticks; -} - - - -void set_spike_source_rate(neuron_pointer_t neuron, REAL rate, - threshold_type_pointer_t threshold_type) { - - // clip rate to ensure divde by 0 and overflow don't occur - if (rate < 0.25){ - rate = 0.25; - } else if (rate > threshold_type->threshold_value) { - rate = threshold_type->threshold_value; - } - - REAL rate_diff = neuron->rate_at_last_setting - rate; - - // ensure rate_diff is absolute - if REAL_COMPARE(rate_diff, <, REAL_CONST(0.0)) { - rate_diff = -rate_diff; - } - - // Has rate changed by more than a predefined threshold since it was last - // used 
to update the mean isi ticks? - if ((rate_diff) > neuron->rate_update_threshold){ - // then update the rate - neuron->rate_at_last_setting = rate; - - // Update isi ticks based on new rate - neuron->mean_isi_ticks = - // rate * - //// global_parameters->ticks_per_second; // shouldn't this be ticks_per_second/rate? - // neuron->ticks_per_second ; // shouldn't this be ticks_per_second/rate? - (global_parameters->ticks_per_second / rate); // shouldn't this be ticks_per_second/rate? - - // Account for time that's already passed since previous spike - neuron->time_to_spike_ticks = neuron->mean_isi_ticks - - neuron->time_since_last_spike; - } // else stick with existing rate and isi ticks - they're within threshold -} - - -bool timer_update_determine_poisson_spiked(neuron_pointer_t neuron) { - // NOTE: ALL SOURCES TREATED AS SLOW SOURCES!!! - // NOTE: NO SOURCE CAN SPIKE MORE THAN ONCE PER TIMESTEP - // If this spike source should spike now - - bool has_spiked = false; - - // Advance by one timestep - // Subtract tick - neuron->time_to_spike_ticks -= REAL_CONST(1.0); - - // Add tick to time since last spike (to enable for dynamic rate change) - neuron->time_since_last_spike += 1.0k; - -// io_printf(IO_BUF, " Time to next spike: %k\n", -// neuron->time_to_spike_ticks); - - if (REAL_COMPARE( - neuron->time_to_spike_ticks, <=, - REAL_CONST(0.0))) { - - // Update time to spike - next_spike_time = slow_spike_source_get_time_to_spike( - neuron->mean_isi_ticks, neuron); - - neuron->time_to_spike_ticks += next_spike_time; - - // Set time since last spike to zero, so we start counting from here - neuron->time_since_last_spike = 0; - - has_spiked = true; - } - - return has_spiked; -} - -// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& - - static bool neuron_impl_do_timestep_update(index_t neuron_index, input_t external_bias, state_t *recorded_variable_values) { // Get the neuron itself neuron_pointer_t neuron = &neuron_array[neuron_index]; - bool 
spike = false; - - target_ind = timer & 0x3ff; // repeats on a cycle of 1024 entries in array + // Change broadcasted value and state with probability + // State - 0: idle, 1: storing, 2:stored-idle, 3:recall if (timer % 200 == 0){ - if (rand() < global_parameters->prob_command){ + if (store_recall_state == 3 || store_recall_state == 1){ store_recall_state = (store_recall_state + 1) % 4; } + else{ + REAL random_number = (REAL)(mars_kiss64_seed(global_parameters->spike_source_seed) / (REAL)0xffffffff); + if (random_number < global_parameters->prob_command){ + store_recall_state = (store_recall_state + 1) % 4; + } + } + REAL switch_value = (REAL)(mars_kiss64_seed(global_parameters->spike_source_seed) / (REAL)0xffffffff); + if (switch_value < 0.5){ + broacast_value = (broacast_value + 1) % 2; + } + if (store_recall_state == 1){ + stored_value = broacast_value; + } + // send packets to the variable poissons with the updated states } -// io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); -// io_printf(IO_BUF, "Target: %k\n\n", -// global_parameters->target_V[target_ind]); - // Get the input_type parameters and voltage for this neuron input_type_pointer_t input_type = &input_type_array[neuron_index]; @@ -361,20 +280,6 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - // Softmax of the exc and inh inputs representing 1 and 0 respectively - // may need to keep a running total of choice over the recall command - accum exp_0 = exp(total_inh); - accum exp_1 = exp(total_exc); - softmax_0 = exp_0 / (exp_1 + exp_0); - softmax_1 = exp_1 / (exp_1 + exp_0); - // What to do if log(0) - if (stored_value){ - cross_entropy = -log(softmax_1); - } - else{ - cross_entropy = -log(softmax_0); - } - // Call functions to convert exc_input and inh_input to current input_type_convert_excitatory_input_to_current( 
exc_input_values, input_type, voltage); @@ -384,6 +289,32 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); + // If during recall calculate error + if (neuron_index == 2 && store_recall_state == 3){ + ticks_for_mean += 1; + // Softmax of the exc and inh inputs representing 1 and 0 respectively + // may need to scale to stop huge numbers going in the exp + global_parameters->mean_0 += global_parameters->readout_V_0; + global_parameters->mean_1 += global_parameters->readout_V_1; + accum exp_0 = exp(global_parameters->mean_0 / ticks_for_mean); + accum exp_1 = exp(global_parameters->mean_1 / ticks_for_mean); + accum softmax_0 = exp_0 / (exp_1 + exp_0); + accum softmax_1 = exp_1 / (exp_1 + exp_0); + // What to do if log(0)? + if (stored_value){ + global_parameters->cross_entropy = -log(softmax_1); + } + else{ + global_parameters->cross_entropy = -log(softmax_0); + } + } + // Reset values after recall + if (store_recall_state == 0){ + ticks_for_mean = 0; + global_parameters->mean_0 == 0; + global_parameters->mean_1 == 0; + } + if (neuron_index == 0){ recorded_variable_values[V_RECORDING_INDEX] = voltage; // update neuron parameters @@ -391,73 +322,37 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, external_bias, neuron, -50k); -// io_printf(IO_BUF, "Readout membrane pot: %k\n", voltage); - // determine if a spike should occur - // bool spike = threshold_type_is_above_threshold(result, threshold_type); - // Finally, set global membrane potential to updated value - global_parameters->readout_V = result; - - } else if (neuron_index == 1) { // this is the excitatory error source - - recorded_variable_values[V_RECORDING_INDEX] = - global_parameters->target_V[target_ind]; - - // Update Poisson neuron rate based on updated V - REAL rate = 
(global_parameters->target_V[target_ind] - - global_parameters->readout_V); // calc difference to -// io_printf(IO_BUF, "New Rate: %k", rate); -// rate = rate * 10; - rate = rate * 5.0k; - if (rate > 0) { // readout is below target, so set rate = diff. - // This will cause potentiation of excitatory synapses, - // and depression of inhibitory synapses - set_spike_source_rate(neuron, rate, - threshold_type); - } else { // readout is above target, so set rate = zero - set_spike_source_rate(neuron, 0, - threshold_type); - } + global_parameters->readout_V_0 = result; - // judge whether poisson neuron should have fired - spike = timer_update_determine_poisson_spiked(neuron); - - } else if (neuron_index == 2){ - // Update Poisson neuron rate based on updated V - REAL rate = (global_parameters->target_V[target_ind] - - global_parameters->readout_V); // calc difference to -// io_printf(IO_BUF, "New Rate: %k", rate); - - recorded_variable_values[V_RECORDING_INDEX] = rate; -// rate = rate * 10; - - rate = rate * 5.0k; - if (rate < 0) { - // readout is above target, send spikes from inhibitory neuron with rate = -diff: - // this will depress excitatory synapses, and potenitate inhibitory synapses - set_spike_source_rate(neuron, -rate, - threshold_type); - } else { // readout is below target, so set rate = 0; - set_spike_source_rate(neuron, 0, - threshold_type); - } + } else if (neuron_index == 1){ + recorded_variable_values[V_RECORDING_INDEX] = voltage; + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, -50k); - // judge whether poisson neuron should have fired - spike = timer_update_determine_poisson_spiked(neuron); + // Finally, set global membrane potential to updated value + global_parameters->readout_V_1 = result; + //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// + // maybe sign of the error isn't important 
anymore? // + //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// + } else if (neuron_index == 2){ // this is the excitatory error source + + recorded_variable_values[V_RECORDING_INDEX] = stored_value; + // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not + if (global_parameters->cross_entropy > -0.7){ + // it's correct so keep doing what you're doing or boost synapses? + } + + } else if (neuron_index == 3){ // this is the inhibitory error source + + // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not + if (global_parameters->cross_entropy < -0.7){ + // it's incorrect so change doing what you're doing or suppress synapses? + } timer++; // update this here, as needs to be done once per iteration over all the neurons - - } - - - - // If spike occurs, communicate to relevant parts of model - if (spike) { - // Call relevant model-based functions - // Tell the neuron model -// neuron_model_has_spiked(neuron); - - // Tell the additional input - additional_input_has_spiked(additional_input); } // Shape the existing input according to the included rule @@ -468,13 +363,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, #endif // LOG_LEVEL >= LOG_DEBUG // Return the boolean to the model timestep update - return spike; + return false; } - - - - //! \brief stores neuron parameter back into sdram //! 
\param[in] address: the address in sdram to start the store static void neuron_impl_store_neuron_parameters( diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h index 3cd84c3838d..1ed478e9cb4 100644 --- a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h @@ -53,10 +53,14 @@ typedef struct neuron_t { typedef struct global_neuron_params_t { mars_kiss64_seed_t spike_source_seed; // array of 4 values REAL ticks_per_second; - REAL readout_V; + REAL readout_V_0; + REAL readout_V_1; REAL prob_command; REAL rate_on; REAL rate_off; + REAL mean_0; + REAL mean_1; + REAL cross_entropy; } global_neuron_params_t; #endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py index 2cf708cca5e..0edcf85805c 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py @@ -131,9 +131,9 @@ def add_parameters(self, parameters): parameters[SEED3] = 3634 parameters[SEED4] = 4877 - parameters[PROB_COMMAND] = 1./6. 
- parameters[RATE_ON] = 50 - parameters[RATE_OFF] = 0 + parameters[PROB_COMMAND] = self._prob_command + parameters[RATE_ON] = self._rate_on + parameters[RATE_OFF] = self._rate_off parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold From 6dd206ee90aa092fd5f4da7ce0c7b8eeb9a74bba Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 4 Oct 2019 17:02:01 +0100 Subject: [PATCH 010/123] no longer broadcasting error with inhib and excite but as a packet --- .../src/neuron/implementations/store_recall_readout.h | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/store_recall_readout.h b/neural_modelling/src/neuron/implementations/store_recall_readout.h index e8e51ead28c..e6252b28907 100644 --- a/neural_modelling/src/neuron/implementations/store_recall_readout.h +++ b/neural_modelling/src/neuron/implementations/store_recall_readout.h @@ -338,15 +338,12 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// // maybe sign of the error isn't important anymore? // //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// - } else if (neuron_index == 2){ // this is the excitatory error source + } else if (neuron_index == 2){ // this is the error source recorded_variable_values[V_RECORDING_INDEX] = stored_value; - // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not - if (global_parameters->cross_entropy > -0.7){ - // it's correct so keep doing what you're doing or boost synapses? 
- } + // Switched to always broadcasting error but with packet - } else if (neuron_index == 3){ // this is the inhibitory error source + } else if (neuron_index == 3){ // this is the deprecated // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not if (global_parameters->cross_entropy < -0.7){ From c73a232018bf4d2e24b5f55080444a6ed75abde5 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 7 Oct 2019 16:30:33 +0100 Subject: [PATCH 011/123] c code compiles, packets transmitted to poisson spike source, includes are corrected, init corrected to include store_recall and eprop properly, test script adjusted to creat edge for poisson updating, still not tested as waiting for erbp plasticity components --- .../neuron/store_recall_readout/Makefile | 16 ++++ ...t.h => neuron_impl_store_recall_readout.h} | 78 +++++++++++++------ .../neuron_model_store_recall_readout_impl.c | 2 +- .../neuron_model_store_recall_readout_impl.h | 4 +- .../neuron/abstract_population_vertex.py | 20 ++++- .../pyNN/models/neuron/builds/__init__.py | 4 +- .../neuron/builds/store_recall_readout.py | 4 +- .../neuron_model_store_recall_readout.py | 32 +++++++- 8 files changed, 127 insertions(+), 33 deletions(-) create mode 100644 neural_modelling/makefiles/neuron/store_recall_readout/Makefile rename neural_modelling/src/neuron/implementations/{store_recall_readout.h => neuron_impl_store_recall_readout.h} (85%) diff --git a/neural_modelling/makefiles/neuron/store_recall_readout/Makefile b/neural_modelling/makefiles/neuron/store_recall_readout/Makefile new file mode 100644 index 00000000000..bf46778f57e --- /dev/null +++ b/neural_modelling/makefiles/neuron/store_recall_readout/Makefile @@ -0,0 +1,16 @@ +APP = $(notdir $(CURDIR)) + +OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.c +#NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.c +#NEURON_MODEL_H = 
$(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.h +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_store_recall_readout.h +#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c + +#TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.c +#TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.h +#WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.c +#WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.h + + +include ../neural_build.mk \ No newline at end of file diff --git a/neural_modelling/src/neuron/implementations/store_recall_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h similarity index 85% rename from neural_modelling/src/neuron/implementations/store_recall_readout.h rename to neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h index e6252b28907..41e7b24c333 100644 --- a/neural_modelling/src/neuron/implementations/store_recall_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h @@ -4,19 +4,20 @@ #include "neuron_impl.h" // Includes for model parts used in this implementation -#include -#include +#include +#include #include #include #include // Further includes #include -#include +#include #include #include #include -#include "random.h" +//#include "random.h" +#include #define V_RECORDING_INDEX 0 #define GSYN_EXCITATORY_RECORDING_INDEX 1 @@ -57,7 +58,16 @@ static uint32_t timer = 0; static uint32_t target_ind = 0; // Store recall parameters -uint32_t store_recall_state = 0; // 0: idle, 1: storing, 2:stored, 3:recall +typedef enum +{ + STATE_IDLE, + STATE_STORING, + STATE_STORED, + STATE_RECALL, + STATE_SHIFT, +} current_state_t; + 
+uint32_t store_recall_state = STATE_IDLE; // 0: idle, 1: storing, 2:stored, 3:recall uint32_t stored_value = 0; uint32_t broacast_value = 0; REAL ticks_for_mean = 0; @@ -127,10 +137,10 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Seed the random input - validate_mars_kiss64_seed(kiss_seed); + validate_mars_kiss64_seed(global_parameters->kiss_seed); // Initialise pointers to Neuron parameters in STDP code - synapse_dynamics_set_neuron_array(neuron_array); +// synapse_dynamics_set_neuron_array(neuron_array); log_info("set pointer to neuron array in stdp code"); return true; @@ -183,14 +193,18 @@ static void neuron_impl_load_neuron_parameters( neuron_model_set_global_neuron_params(global_parameters); io_printf(IO_BUF, "\nPrinting global params\n"); - io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); - io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); - io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); - io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); + io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); + io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); + io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); + io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); + io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); + io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); + io_printf(IO_BUF, "poisson key: %k \n\n", global_parameters->p_key); + io_printf(IO_BUF, "poisson pop size: %k \n\n", global_parameters->p_pop_size); for (index_t n = 0; n < n_neurons; 
n++) { @@ -221,23 +235,38 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Change broadcasted value and state with probability // State - 0: idle, 1: storing, 2:stored-idle, 3:recall if (timer % 200 == 0){ - if (store_recall_state == 3 || store_recall_state == 1){ - store_recall_state = (store_recall_state + 1) % 4; + if (store_recall_state == STATE_RECALL || store_recall_state == STATE_STORING){ + store_recall_state = (store_recall_state + 1) % STATE_SHIFT; } else{ - REAL random_number = (REAL)(mars_kiss64_seed(global_parameters->spike_source_seed) / (REAL)0xffffffff); + REAL random_number = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); if (random_number < global_parameters->prob_command){ - store_recall_state = (store_recall_state + 1) % 4; + store_recall_state = (store_recall_state + 1) % STATE_SHIFT; } } - REAL switch_value = (REAL)(mars_kiss64_seed(global_parameters->spike_source_seed) / (REAL)0xffffffff); + REAL switch_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); if (switch_value < 0.5){ broacast_value = (broacast_value + 1) % 2; } - if (store_recall_state == 1){ + if (store_recall_state == STATE_STORING){ stored_value = broacast_value; } // send packets to the variable poissons with the updated states + for (int i = 0; i < 4; i++){ + REAL payload = 10; + if ((broacast_value == i && i < 2) || + (i == 2 && store_recall_state == STATE_STORING) || + (i == 3 && store_recall_state == STATE_RECALL)){ + payload = global_parameters->rate_on; + } + else { + payload = global_parameters->rate_off; + } + for (int j = i*global_parameters->p_pop_size; + j < i*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + } + } } // Get the input_type parameters and voltage for this neuron @@ -290,26 +319,26 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, additional_input, voltage); 
// If during recall calculate error - if (neuron_index == 2 && store_recall_state == 3){ + if (neuron_index == 2 && store_recall_state == STATE_RECALL){ ticks_for_mean += 1; // Softmax of the exc and inh inputs representing 1 and 0 respectively // may need to scale to stop huge numbers going in the exp global_parameters->mean_0 += global_parameters->readout_V_0; global_parameters->mean_1 += global_parameters->readout_V_1; - accum exp_0 = exp(global_parameters->mean_0 / ticks_for_mean); - accum exp_1 = exp(global_parameters->mean_1 / ticks_for_mean); + accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); + accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); accum softmax_0 = exp_0 / (exp_1 + exp_0); accum softmax_1 = exp_1 / (exp_1 + exp_0); // What to do if log(0)? if (stored_value){ - global_parameters->cross_entropy = -log(softmax_1); + global_parameters->cross_entropy = -logk(softmax_1); } else{ - global_parameters->cross_entropy = -log(softmax_0); + global_parameters->cross_entropy = -logk(softmax_0); } } // Reset values after recall - if (store_recall_state == 0){ + if (store_recall_state == STATE_IDLE){ ticks_for_mean = 0; global_parameters->mean_0 == 0; global_parameters->mean_1 == 0; @@ -342,6 +371,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[V_RECORDING_INDEX] = stored_value; // Switched to always broadcasting error but with packet + if (store_recall_state == STATE_RECALL){ + // Broadcast error + } } else if (neuron_index == 3){ // this is the deprecated diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c index 0c86a0f677a..a3c424bd4cb 100644 --- a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c @@ -1,4 +1,4 @@ -#include "neuron_model_lif_poisson_readout_impl.h" 
+#include "neuron_model_store_recall_readout_impl.h" #include diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h index 1ed478e9cb4..08c1e8ee1f6 100644 --- a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h @@ -51,7 +51,7 @@ typedef struct neuron_t { } neuron_t; typedef struct global_neuron_params_t { - mars_kiss64_seed_t spike_source_seed; // array of 4 values + mars_kiss64_seed_t kiss_seed; // array of 4 values REAL ticks_per_second; REAL readout_V_0; REAL readout_V_1; @@ -61,6 +61,8 @@ typedef struct global_neuron_params_t { REAL mean_0; REAL mean_1; REAL cross_entropy; + uint32_t p_key; + REAL p_pop_size; } global_neuron_params_t; #endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index c3877e385f5..c84f7b1243c 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -27,7 +27,7 @@ AbstractChangableAfterRun, AbstractProvidesIncomingPartitionConstraints, AbstractProvidesOutgoingPartitionConstraints, AbstractHasAssociatedBinary, AbstractGeneratesDataSpecification, AbstractRewritesDataSpecification, - AbstractCanReset) + AbstractCanReset, AbstractProvidesNKeysForPartition) from spinn_front_end_common.abstract_models.impl import ( ProvidesKeyToAtomMappingImpl) from spinn_front_end_common.utilities import ( @@ -77,7 +77,7 @@ class AbstractPopulationVertex( AbstractChangableAfterRun, AbstractRewritesDataSpecification, AbstractReadParametersBeforeSet, AbstractAcceptsIncomingSynapses, ProvidesKeyToAtomMappingImpl, - AbstractCanReset): + AbstractCanReset, AbstractProvidesNKeysForPartition): """ Underlying vertex model for Neural 
Populations. """ __slots__ = [ @@ -499,6 +499,12 @@ def generate_data_specification( key = routing_info.get_first_key_from_pre_vertex( vertex, constants.SPIKE_PARTITION_ID) + # Get the poisson key + p_key = routing_info.get_first_key_from_pre_vertex( + vertex, constants.LIVE_POISSON_CONTROL_PARTITION_ID) + if hasattr(self.__neuron_impl, "set_poisson_key"): + self.__neuron_impl.set_poisson_key(p_key) + # Write the setup region spec.switch_write_focus( constants.POPULATION_BASED_REGIONS.SYSTEM.value) @@ -882,3 +888,13 @@ def reset_to_first_timestep(self): if self.__synapse_manager.synapse_dynamics.changes_during_run: self.__change_requires_data_generation = True self.__change_requires_neuron_parameters_reload = False + + def get_n_keys_for_partition(self, partition, graph_mapper): + if partition.identifier == constants.LIVE_POISSON_CONTROL_PARTITION_ID: + n_keys = 0 + for edge in partition.edges: + slice = graph_mapper.get_slice(edge.post_vertex) + n_keys += slice.n_atoms + return n_keys + else: + return self.n_atoms diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index 6e8004ef8ad..b799e0f62d6 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -28,9 +28,11 @@ from .if_curr_exp_ca2_adaptive import IFCurrExpCa2Adaptive from .if_curr_exp_semd_base import IFCurrExpSEMDBase from .eprop_adaptive import EPropAdaptive +from .store_recall_readout import StoreRecallReadout __all__ = ["EIFConductanceAlphaPopulation", "HHCondExp", "IFCondAlpha", "IFCondExpBase", "IFCurrAlpha", "IFCurrDualExpBase", "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", "IzkCurrExpBase", "IFCondExpStoc", - "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", "EPropAdaptive"] + "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", + "EPropAdaptive", "StoreRecallReadout"] diff --git a/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py 
b/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py index daf2f222b33..9861dc433b4 100644 --- a/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py @@ -23,12 +23,12 @@ def __init__( tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, mean_isi_ticks=65000, time_to_spike_ticks=65000, i_offset=0.0, v=50, rate_update_threshold=0.25, - prob_command=1./6., rate_on=50, rate_off=0): + prob_command=1./6., rate_on=50, rate_off=0, poisson_pop_size=25): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelStoreRecallReadout( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, - prob_command, rate_on, rate_off) + prob_command, rate_on, rate_off, poisson_pop_size) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py index 0edcf85805c..c0cd3d8cc6b 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py @@ -55,13 +55,18 @@ class NeuronModelStoreRecallReadout(AbstractNeuronModel): "_rate_update_threshold", "_prob_command", "_rate_on", - "_rate_off" + "_rate_off", + "_mean_0", + "_mean_1", + "_cross_entropy", + "_poisson_key", + "_poisson_pop_size" ] def __init__( self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, - prob_command, rate_on, rate_off): + prob_command, rate_on, rate_off, poisson_pop_size): global_data_types=[ DataType.UINT32, # MARS KISS seed @@ -69,7 +74,14 @@ def __init__( DataType.UINT32, # MARS KISS seed DataType.UINT32, # MARS KISS seed 
DataType.S1615, # ticks_per_second - DataType.S1615 # global mem pot + DataType.S1615, # global mem pot + DataType.S1615, # global mem pot 2 + DataType.S1615, # rate on + DataType.S1615, # rate off + DataType.S1615, # mean 0 activation + DataType.S1615, # mean 0 activation + DataType.S1615, # cross entropy + DataType.UINT32 # poisson key ] global_data_types.extend([DataType.S1615 for i in range(1024)]) @@ -112,6 +124,14 @@ def __init__( self._prob_command = prob_command self._rate_off = rate_off self._rate_on = rate_on + self._mean_0 = 0.0 + self._mean_1 = 0.0 + self._cross_entropy = 0.0 + self._poisson_key = None + self._poisson_pop_size = poisson_pop_size + + def set_poisson_key(self, p_key): + self._poisson_key = p_key @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -209,6 +229,7 @@ def get_global_values(self, machine_time_step): 4, # seed 4 MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second 0.0, # set to 0, as will be set in first timestep of model anyway + 0.0, # set to 0, as will be set in first timestep of model anyway ] # target_data = [] @@ -222,6 +243,11 @@ def get_global_values(self, machine_time_step): vals.extend(self._prob_command) vals.extend(self._rate_on) vals.extend(self._rate_off) + vals.extend(self._mean_0) + vals.extend(self._mean_1) + vals.extend(self._cross_entropy) + vals.extend(self._poisson_key) + vals.extend(self._poisson_pop_size) return vals @property From 532a6da160f0855fdeed4547acf978b8f3a8ba7b Mon Sep 17 00:00:00 2001 From: mbassor2 Date: Tue, 8 Oct 2019 13:58:40 +0100 Subject: [PATCH 012/123] add signed weights functionality --- .../implementations/neuron_impl_eprop_adaptive.h | 4 +++- neural_modelling/src/neuron/synapse_row.h | 8 ++++++++ neural_modelling/src/neuron/synapses.c | 16 +++++++++------- .../connectors/abstract_connector.py | 14 +++++++------- .../synapse_dynamics/synapse_dynamics_static.py | 4 ++-- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 6 
files changed, 30 insertions(+), 18 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index f2b4b5f3443..19fdc5b212e 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -314,7 +314,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, global_parameters->core_pop_rate; // Record B - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = B_t; // threshold_type->B; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + //B_t; // threshold_type->B; + total_exc; // total synaptic input from input layer // ********************************************************* diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index 3f7ac4c103d..afa9e754db2 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -58,10 +58,15 @@ //! the mask for the synapse delay in the row #define SYNAPSE_DELAY_MASK ((1 << SYNAPSE_DELAY_BITS) - 1) +#define SYNAPSE_WEIGHTS_SIGNED true + + + // Define the type of the weights #ifdef SYNAPSE_WEIGHTS_SIGNED typedef __int_t(SYNAPSE_WEIGHT_BITS) weight_t; #else +io_printf(IO_BUF, "Using signed weights!! \n "); typedef __uint_t(SYNAPSE_WEIGHT_BITS) weight_t; #endif typedef uint16_t control_t; @@ -69,6 +74,9 @@ typedef uint16_t control_t; #define N_SYNAPSE_ROW_HEADER_WORDS 3 + + + // The data structure layout supported by this API is designed for // mixed plastic and fixed synapse rows. 
// diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index dc3c5e4ec7d..7372ecaba94 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -178,7 +178,9 @@ static inline void process_fixed_synapses( synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); uint32_t combined_synapse_neuron_index = synapse_row_sparse_type_index( synaptic_word, synapse_type_index_mask); - uint32_t weight = synapse_row_sparse_weight(synaptic_word); + int32_t weight = synapse_row_sparse_weight(synaptic_word); + + io_printf(IO_BUF, "signed w: %d \n", weight); // Convert into ring buffer offset uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( @@ -186,17 +188,17 @@ static inline void process_fixed_synapses( synapse_type_index_bits); // Add weight to current ring buffer value - uint32_t accumulation = ring_buffers[ring_buffer_index] + weight; + int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) // **NOTE** 0x10000 can be expressed as an ARM literal, // but 0xFFFF cannot. 
Therefore, we use (0x10000 - 1) // to obtain this value - uint32_t sat_test = accumulation & 0x10000; - if (sat_test) { - accumulation = sat_test - 1; - saturation_count++; - } +// uint32_t sat_test = accumulation & 0x10000; +// if (sat_test) { +// accumulation = sat_test - 1; +// saturation_count++; +// } // Store saturated value back in ring-buffer ring_buffers[ring_buffer_index] = accumulation; diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 8f1bf726f6f..f31079c6fce 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -313,13 +313,13 @@ def _generate_weights(self, values, n_connections, connection_slices, if not weights.size: logger_utils.warn_once(logger, "No connection in " + str(self)) - elif numpy.amin(weights) < 0 < numpy.amax(weights): - raise Exception( - "Weights must be either all positive or all negative" - " in projection {}->{}".format( - self.__pre_population.label, - self.__post_population.label)) - return numpy.abs(weights) +# elif numpy.amin(weights) < 0 < numpy.amax(weights): +# raise Exception( +# "Weights must be either all positive or all negative" +# " in projection {}->{}".format( +# self.__pre_population.label, +# self.__post_population.label)) + return weights # numpy.abs(weights) def _clip_delays(self, delays): """ Clip delay values, keeping track of how many have been clipped. 
diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index cada65d96ad..efac76e4e4d 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -77,8 +77,8 @@ def get_static_synaptic_data( n_synapse_type_bits = get_n_bits(n_synapse_types) fixed_fixed = ( - ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & - 0xFFFF) << 16) | + ((numpy.rint(connections["weight"]).astype("uint16") & + 0xFFFF).astype("uint32") << 16) | ((connections["delay"].astype("uint32") & 0xF) << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype( diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 835c95d48c5..f3eb28ebac4 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -438,7 +438,7 @@ def _get_ring_buffer_to_input_left_shifts( delay_running_totals = [RunningStats() for _ in range(n_synapse_types)] total_weights = numpy.zeros(n_synapse_types) biggest_weight = numpy.zeros(n_synapse_types) - weights_signed = False + weights_signed = True rate_stats = [RunningStats() for _ in range(n_synapse_types)] steps_per_second = 1000000.0 / machine_timestep From 1ae007eb4aa6b19303d508be3f035b6b6c9a9ee4 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Tue, 8 Oct 2019 16:53:56 +0100 Subject: [PATCH 013/123] Update to allow correct reading of weights (static) --- .../models/neuron/synapse_dynamics/synapse_dynamics_static.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index efac76e4e4d..74dc5c83480 100644 --- 
a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -134,7 +134,7 @@ def read_static_synaptic_data( [numpy.repeat(i, ff_size[i]) for i in range(len(ff_size))]) connections["target"] = ( (data & neuron_id_mask) + post_vertex_slice.lo_atom) - connections["weight"] = (data >> 16) & 0xFFFF + connections["weight"] = ((data >> 16) & 0xFFFF).astype("int16") connections["delay"] = (data >> (n_neuron_id_bits + n_synapse_type_bits)) & 0xF connections["delay"][connections["delay"] == 0] = 16 From f8a2309947daf2a5eb38e8f588e8993a1c5cc8aa Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Fri, 18 Oct 2019 15:59:49 +0100 Subject: [PATCH 014/123] Add learning signal param and skeleton synapse state updates --- neural_modelling/makefiles/neuron/Makefile | 7 ++-- .../neuron_impl_eprop_adaptive.h | 11 ++--- .../models/neuron_model_eprop_adaptive_impl.c | 40 +++++++++++++++++++ .../models/neuron_model_eprop_adaptive_impl.h | 2 + .../models/neuron/builds/eprop_adaptive.py | 20 ++++++---- .../neuron_models/neuron_model_eprop.py | 22 ++++++---- 6 files changed, 80 insertions(+), 22 deletions(-) diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index 7fd23215667..090fa3385be 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -13,7 +13,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-MODELS = IF_curr_exp \ +MODELS = eprop_adaptive \ + # IF_curr_exp \ IF_cond_exp \ IZK_curr_exp \ IZK_cond_exp \ @@ -33,8 +34,8 @@ MODELS = IF_curr_exp \ IF_curr_exp_stdp_mad_nearest_pair_multiplicative \ IF_curr_exp_stdp_mad_pfister_triplet_additive \ IF_cond_exp_stdp_mad_nearest_pair_additive \ - IF_curr_alpha \ - IF_curr_alpha_stdp_mad_pair_additive \ + IF_curr_alpha \ + IF_curr_alpha_stdp_mad_pair_additive \ IF_cond_exp_structural \ IF_curr_exp_stdp_mad_pair_additive_structural \ IF_curr_exp_structural \ diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 19fdc5b212e..5c3ade3c69d 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -308,15 +308,16 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[V_RECORDING_INDEX] = voltage; // result; // Record Z -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = z_t; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = z_t; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - global_parameters->core_pop_rate; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = +// global_parameters->core_pop_rate; // Record B recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - //B_t; // threshold_type->B; - total_exc; // total synaptic input from input layer + B_t; // threshold_type->B; +// global_parameters->core_target_rate; +// total_exc; // total synaptic input from input layer // ********************************************************* diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 6931f298535..2a3e1f42c5c 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ 
b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -72,6 +72,46 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } + + // ****************************************************************** + // Update Psi (pseudo-derivative) (done once for each postsynaptic neuron) + // ****************************************************************** + // REAL temp1 = (neuron->V_membrane - v_threshold_baseline) * (1/v_thresh) + // REAL temp2 = ((1/v_th) * 0.3 * 1-(abs(temp1)) + // neuron->psi = (temp2 > 0)? temp2 , 0; + + // All operations now need doing once per eprop synapse +// for (int syn=0; syn < total_synapses_per_neuron; syn++){ + // ****************************************************************** + // Update eligibility vector + // ****************************************************************** +// neuron->syn_state[syn_ind].ep_a; = neuron->psi * neuron->syn_state[syn_ind].z_bar + +// (global_params->rho - neuron->psi * global_params->beta) * +// neuron->syn_state[syn_ind].ep_a; + + + // ****************************************************************** + // Update eligibility trace + // ****************************************************************** +// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +// global_params->beta * neuron->syn_state[syn_ind].ep_a); +// neuron->syn_state[syn_ind].e_bar = "low pass filtered temp_elig_trace" + + + // ****************************************************************** + // Update total weight change + // ****************************************************************** +// uint16_t this_dt_weight_change = -global_params->eta * neuron->learning_sig * neuron->syn_state[syn_ind].e_bar; +// neuron->syn_state[syn_ind].delta_w +=this_dt_weight_change; + +// } + + + + + + + return neuron->V_membrane; } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 
f6fb69d52a1..e9d306ab883 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -69,6 +69,8 @@ typedef struct neuron_t { // pseudo derivative REAL psi; + REAL L; // learning signal + // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 83dbd1fe98c..764082b8847 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -26,14 +26,17 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): """ Adaptive threshold neuron with eprop support """ - @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - "isyn_inh2", "psi", "target_rate", "tau_err", - "B", "small_b"}) + @default_initial_values({"v", + "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", + "psi", "target_rate", "tau_err", + "B", "small_b", + "l" + }) def __init__( self, # neuron model params - tau_m=20.0, cm=1.0, v_rest=-65.0, v_reset=-65.0, - tau_refrac=5, i_offset=0.0, v=-65.0, psi=0.0, + tau_m=20.0, cm=1.0, v_rest=0, v_reset=0, + tau_refrac=5, i_offset=0.0, v=0.0, psi=0.0, #synapse type params tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, @@ -43,13 +46,16 @@ def __init__( target_rate=10, tau_err=1000, # fits with 1 ms timestep # Threshold parameters - B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8 + B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, + + # Learning signal and weight update constants + l=0 ): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelEProp( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi, - target_rate, tau_err) + target_rate, tau_err, l) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py index 7dace015237..23194ea0a56 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py @@ -35,6 +35,7 @@ PSI = "psi" Z = "z" A = "a" +L = "learning_signal" UNITS = { V: 'mV', @@ -63,7 +64,8 @@ class NeuronModelEProp(AbstractNeuronModel): "__a", "__psi", "__target_rate", - "__tau_err" + "__tau_err", + "__l" ] def __init__( @@ -78,7 +80,8 @@ def __init__( psi, # regularisation params target_rate, - tau_err + tau_err, + l ): datatype_list = [ @@ -90,9 +93,10 @@ def __init__( DataType.INT32, # count_refrac DataType.S1615, # v_reset DataType.INT32, # tau_refrac - DataType.S1615, # Z - DataType.S1615, # A - DataType.S1615 # psi, pseuo_derivative + DataType.S1615, # Z + DataType.S1615, # A + DataType.S1615, # psi, pseuo_derivative + DataType.S1615 # L ] # Synapse states - always initialise to zero @@ -128,6 +132,7 @@ def __init__( self.__target_rate = target_rate self.__tau_err = tau_err + self.__l = l @overrides(AbstractNeuronModel.get_n_cpu_cycles) @@ -151,6 +156,7 @@ def add_state_variables(self, state_variables): state_variables[PSI] = self.__psi state_variables[Z] = 0 state_variables[A] = 0 + state_variables[L] = 0 @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -177,7 +183,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), state_variables[Z], state_variables[A], - state_variables[PSI] + state_variables[PSI], + state_variables[L] ] # create synaptic state - init all state to zero @@ -212,12 +219,13 @@ def update_values(self, values, parameters, state_variables): # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac, psi) = values + _v_reset, _tau_refrac, psi, l) = values # Not sure this will 
work with the new array of synapse!!! # Copy the changed data only state_variables[V] = v state_variables[COUNT_REFRAC] = count_refrac state_vairables[PSI] = psi + state_variables[L] = l From 54f715594aad4c2d36e2987ac27c36b07adf6201 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Mon, 21 Oct 2019 14:27:00 +0100 Subject: [PATCH 015/123] add note to replace saturation check --- neural_modelling/src/neuron/synapses.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 7372ecaba94..2b46f670bac 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -188,7 +188,7 @@ static inline void process_fixed_synapses( synapse_type_index_bits); // Add weight to current ring buffer value - int32_t accumulation = ring_buffers[ring_buffer_index] + weight; + int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // switch to saturated arithmetic to avoid complicated saturation check, will it check saturation at both ends? 
// If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) // **NOTE** 0x10000 can be expressed as an ARM literal, From 04a67367e96ac0724c896219ba59771c8fccaf7a Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Fri, 15 Nov 2019 11:33:49 +0000 Subject: [PATCH 016/123] Put threshold in neuron model for easier access during synaptic state updates --- .../neuron_impl_eprop_adaptive.h | 14 +- .../models/neuron_model_eprop_adaptive_impl.h | 40 ++++ .../models/neuron/builds/eprop_adaptive.py | 42 ++-- .../models/neuron/neuron_models/__init__.py | 4 +- ...prop.py => neuron_model_eprop_adaptive.py} | 179 +++++++++++++++--- .../models/neuron/threshold_types/__init__.py | 4 +- .../threshold_types/threshold_type_none.py | 66 +++++++ 7 files changed, 289 insertions(+), 60 deletions(-) rename spynnaker/pyNN/models/neuron/neuron_models/{neuron_model_eprop.py => neuron_model_eprop_adaptive.py} (68%) create mode 100644 spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 5c3ade3c69d..03e769b194d 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -22,7 +22,7 @@ // Includes for model parts used in this implementation #include -#include +#include #include #include #include @@ -230,8 +230,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, input_type_pointer_t input_type = &input_type_array[neuron_index]; // Get threshold and additional input parameters for this neuron - threshold_type_pointer_t threshold_type = - &threshold_type_array[neuron_index]; +// threshold_type_pointer_t threshold_type = +// &threshold_type_array[neuron_index]; additional_input_pointer_t additional_input = &additional_input_array[neuron_index]; synapse_param_pointer_t synapse_type = @@ -239,7 
+239,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Get the voltage state_t voltage = neuron_model_get_membrane_voltage(neuron); - state_t B_t = threshold_type->B; + state_t B_t = neuron->B; // cache last timestep threshold level state_t z_t = neuron->z; // recorded_variable_values[V_RECORDING_INDEX] = voltage; @@ -280,7 +280,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, additional_input, voltage); // determine if a spike should occur - threshold_type_update_threshold(neuron->z, threshold_type); + threshold_type_update_threshold(neuron->z, neuron); // update neuron parameters state_t result = neuron_model_state_update( @@ -292,7 +292,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Also update Z (including using refractory period information) - state_t nu = (voltage - threshold_type->B)/threshold_type->B; + state_t nu = (voltage - neuron->B)/neuron->B; if (nu > ZERO){ neuron->z = 1.0k * neuron->A; // implements refractory period @@ -315,7 +315,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - B_t; // threshold_type->B; + B_t; // neuron->B; // global_parameters->core_target_rate; // total_exc; // total synaptic input from input layer // ********************************************************* diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index e9d306ab883..75d95d9f38c 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -19,6 +19,7 @@ #define _NEURON_MODEL_LIF_CURR_IMPL_H_ #include "neuron_model.h" +#include #define SYNAPSES_PER_NEURON 250 @@ -69,6 +70,15 @@ typedef struct neuron_t { // pseudo derivative REAL psi; + // Threshold paramters + REAL B; // Capital B(t) + REAL b; // b(t) + 
REAL b_0; // small b^0 + decay_t e_to_dt_on_tau_a; // rho + REAL beta; + decay_t adpt; // (1-rho) + REAL scalar; + REAL L; // learning signal // array of synaptic states - peak fan-in of 250 for this case @@ -82,4 +92,34 @@ typedef struct global_neuron_params_t { REAL rate_exp_TC; } global_neuron_params_t; + +static inline void threshold_type_update_threshold(state_t z, + neuron_pointer_t threshold_type){ + +// _print_threshold_params(threshold_type); + + + s1615 temp1 = decay_s1615(threshold_type->b, threshold_type->e_to_dt_on_tau_a); + s1615 temp2 = decay_s1615(threshold_type->scalar, threshold_type->adpt) * z; + + threshold_type->b = temp1 + + temp2; + + +// // Evolve threshold dynamics (decay to baseline) and adapt if z=nonzero +// // Update small b (same regardless of spike - uses z from previous timestep) +// threshold_type->b = +// decay_s1615(threshold_type->b, threshold_type->e_to_dt_on_tau_a) +// + decay_s1615(1000k, threshold_type->adpt) // fold scaling into decay to increase precision +// * z; // stored on neuron +// + // Update large B + threshold_type->B = threshold_type->b_0 + + threshold_type->beta*threshold_type->b; + +} + + + + #endif // _NEURON_MODEL_LIF_CURR_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 764082b8847..ab53996462b 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -16,20 +16,20 @@ from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard from spynnaker.pyNN.models.defaults import default_initial_values from spynnaker.pyNN.models.neuron.neuron_models import ( - NeuronModelEProp) + NeuronModelEPropAdaptive) from spynnaker.pyNN.models.neuron.synapse_types import ( SynapseTypeEPropAdaptive) from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent -from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeAdaptive +from 
spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeNone class EPropAdaptive(AbstractPyNNNeuronModelStandard): """ Adaptive threshold neuron with eprop support """ - @default_initial_values({"v", - "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", + @default_initial_values({"v", + "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", "psi", "target_rate", "tau_err", - "B", "small_b", + "B", "small_b", "l" }) def __init__( @@ -41,33 +41,35 @@ def __init__( #synapse type params tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - + # Regularisation params target_rate=10, tau_err=1000, # fits with 1 ms timestep - + # Threshold parameters B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, - + # Learning signal and weight update constants l=0 - + ): # pylint: disable=too-many-arguments, too-many-locals - neuron_model = NeuronModelEProp( - v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi, + neuron_model = NeuronModelEPropAdaptive( + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi, + # threshold params + B, + small_b, + small_b_0, + tau_a, + beta, target_rate, tau_err, l) - + synapse_type = SynapseTypeEPropAdaptive( - tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) - + input_type = InputTypeCurrent() - - threshold_type = ThresholdTypeAdaptive(B, - small_b, - small_b_0, - tau_a, - beta) + + threshold_type = ThresholdTypeNone() super(EPropAdaptive, self).__init__( model_name="eprop_adaptive", binary="eprop_adaptive.aplx", diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index 6da3028db7d..75a64a1132f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -17,9 +17,9 @@ from .neuron_model_izh import NeuronModelIzh from 
.neuron_model_leaky_integrate_and_fire import ( NeuronModelLeakyIntegrateAndFire) -from .neuron_model_eprop import NeuronModelEProp +from .neuron_model_eprop_adaptive import NeuronModelEPropAdaptive from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout __all__ = ["AbstractNeuronModel", "NeuronModelIzh", - "NeuronModelLeakyIntegrateAndFire", "NeuronModelEProp", + "NeuronModelLeakyIntegrateAndFire", "NeuronModelEPropAdaptive", "NeuronModelStoreRecallReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py similarity index 68% rename from spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py rename to spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 23194ea0a56..5d67e73e09d 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -35,6 +35,15 @@ PSI = "psi" Z = "z" A = "a" +# Threshold +BIG_B = "big_b" +SMALL_B = "small_b" +SMALL_B_0 = "small_b_0" +TAU_A = "tau_a" +BETA = "beta" +ADPT = "adpt" +SCALAR = "scalar" +# Learning signal L = "learning_signal" UNITS = { @@ -47,11 +56,18 @@ TAU_REFRAC: 'ms', Z: 'N/A', A: 'N/A', - PSI: 'N/A' + PSI: 'N/A', + BIG_B: "mV", + SMALL_B: "mV", + SMALL_B_0: "mV", + TAU_A: "ms", + BETA: "N/A", +# ADPT: "mV" + SCALAR: "dimensionless" } -class NeuronModelEProp(AbstractNeuronModel): +class NeuronModelEPropAdaptive(AbstractNeuronModel): __slots__ = [ "__v_init", "__v_rest", @@ -63,28 +79,45 @@ class NeuronModelEProp(AbstractNeuronModel): "__z", "__a", "__psi", + # threshold params + "__B", + "__small_b", + "__small_b_0", + "__tau_a", + "__beta", + # "_adpt" + "__scalar", + # reg params "__target_rate", "__tau_err", + # learning signal "__l" ] def __init__( - self, - v_init, - v_rest, - tau_m, - cm, - i_offset, - v_reset, + self, + v_init, + v_rest, + tau_m, + cm, + 
i_offset, + v_reset, tau_refrac, psi, + # threshold params + B, + small_b, + small_b_0, + tau_a, + beta, # regularisation params target_rate, tau_err, l ): - + datatype_list = [ + # neuron params DataType.S1615, # v DataType.S1615, # v_rest DataType.S1615, # r_membrane (= tau_m / cm) @@ -96,9 +129,18 @@ def __init__( DataType.S1615, # Z DataType.S1615, # A DataType.S1615, # psi, pseuo_derivative + # threshold params + DataType.S1615, + DataType.S1615, + DataType.S1615, + DataType.UINT32, + DataType.S1615, + DataType.UINT32, + DataType.S1615, + # Learning signal DataType.S1615 # L - ] - + ] + # Synapse states - always initialise to zero eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) DataType.INT16, # delta_w @@ -108,15 +150,14 @@ def __init__( ] # Extend to include fan-in for each neuron datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) - - + global_data_types = [ - DataType.S1615, # core_pop_rate + DataType.S1615, # core_pop_rate DataType.S1615, # core_target_rate DataType.S1615 # rate_exp_TC ] - - super(NeuronModelEProp, self).__init__(data_types=datatype_list, + + super(NeuronModelEPropAdaptive, self).__init__(data_types=datatype_list, global_data_types=global_data_types) if v_init is None: @@ -129,11 +170,22 @@ def __init__( self.__v_reset = v_reset self.__tau_refrac = tau_refrac self.__psi = psi # calculate from v and v_thresh (but will probably end up zero) - + + # threshold params + self.__B = B + self.__small_b = small_b + self.__small_b_0 = small_b_0 + self.__tau_a = tau_a + self.__beta = beta + self.__scalar = 1000 + + # Regularisation params self.__target_rate = target_rate self.__tau_err = tau_err + + # learning signal self.__l = l - + @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -149,6 +201,11 @@ def add_parameters(self, parameters): parameters[V_RESET] = self.__v_reset parameters[TAU_REFRAC] = self.__tau_refrac + parameters[SMALL_B_0] = self.__small_b_0 + parameters[TAU_A] = 
self.__tau_a + parameters[BETA] = self.__beta + parameters[SCALAR] = self.__scalar + @overrides(AbstractNeuronModel.add_state_variables) def add_state_variables(self, state_variables): state_variables[V] = self.__v_init @@ -158,6 +215,9 @@ def add_state_variables(self, state_variables): state_variables[A] = 0 state_variables[L] = 0 + state_variables[BIG_B] = self.__B + state_variables[SMALL_B] = self.__small_b + @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): return UNITS[variable] @@ -170,13 +230,15 @@ def has_variable(self, variable): @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) def get_values(self, parameters, state_variables, vertex_slice, ts): + ulfract = pow(2, 32) + # Add the rest of the data - values = [state_variables[V], + values = [state_variables[V], parameters[V_REST], parameters[TAU_M] / parameters[CM], parameters[TAU_M].apply_operation( operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - parameters[I_OFFSET], + parameters[I_OFFSET], state_variables[COUNT_REFRAC], parameters[V_RESET], parameters[TAU_REFRAC].apply_operation( @@ -184,9 +246,22 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): state_variables[Z], state_variables[A], state_variables[PSI], + + state_variables[BIG_B], + state_variables[SMALL_B], + parameters[SMALL_B_0], + parameters[TAU_A].apply_operation( + operation=lambda + x: numpy.exp(float(-ts) / (1000.0 * x)) * ulfract), + parameters[BETA], + parameters[TAU_A].apply_operation( + operation=lambda x: (1 - numpy.exp( + float(-ts) / (1000.0 * x))) * ulfract), # ADPT + parameters[SCALAR], + state_variables[L] ] - + # create synaptic state - init all state to zero eprop_syn_init = [0, 0, @@ -194,11 +269,11 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): 0] # extend to appropriate fan-in values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) - + return values @inject_items({"ts": "MachineTimeStep"}) - 
@overrides(AbstractNeuronModel.get_global_values, + @overrides(AbstractNeuronModel.get_global_values, additional_arguments={'ts'}) def get_global_values(self, ts): glob_vals = [ @@ -206,30 +281,34 @@ def get_global_values(self, ts): self.__target_rate, # set target rate numpy.exp(-float(ts/1000)/self.__tau_err) ] - + print("\n ") print(glob_vals) print(ts) print("\n") return glob_vals - + @overrides(AbstractNeuronModel.update_values) def update_values(self, values, parameters, state_variables): # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac, psi, l) = values # Not sure this will work with the new array of synapse!!! + _v_reset, _tau_refrac, psi, + big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, + l) = values # Not sure this will work with the new array of synapse!!! # Copy the changed data only state_variables[V] = v state_variables[COUNT_REFRAC] = count_refrac state_vairables[PSI] = psi + + state_variables[BIG_B] = big_b + state_variables[SMALL_B] = small_b + state_variables[L] = l - - - + @property def v_init(self): return self.__v_init @@ -285,3 +364,43 @@ def tau_refrac(self): @tau_refrac.setter def tau_refrac(self, tau_refrac): self.__tau_refrac = tau_refrac + + @property + def B(self): + return self.__B + + @B.setter + def B(self, new_value): + self.__B = new_value + + @property + def small_b(self): + return self.__small_b + + @small_b.setter + def small_b(self, new_value): + self.__small_b = new_value + + @property + def small_b_0(self): + return self.__small_b_0 + + @small_b_0.setter + def small_b_0(self, new_value): + self.__small_b_0 = new_value + + @property + def tau_a(self): + return self.__tau_a + + @tau_a.setter + def tau_a(self, new_value): + self.__tau_a = new_value + + @property + def beta(self): + return self.__beta + + @beta.setter + def beta(self, new_value): + self.__beta = new_value diff --git a/spynnaker/pyNN/models/neuron/threshold_types/__init__.py 
b/spynnaker/pyNN/models/neuron/threshold_types/__init__.py index 7ce908de545..f7a9dc3c99e 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/__init__.py @@ -17,6 +17,8 @@ from .threshold_type_static import ThresholdTypeStatic from .threshold_type_maass_stochastic import ThresholdTypeMaassStochastic from .threshold_type_adaptive import ThresholdTypeAdaptive +from .threshold_type_none import ThresholdTypeNone __all__ = ["AbstractThresholdType", "ThresholdTypeStatic", - "ThresholdTypeMaassStochastic", "ThresholdTypeAdaptive"] + "ThresholdTypeMaassStochastic", "ThresholdTypeAdaptive", + "ThresholdTypeNone"] diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py new file mode 100644 index 00000000000..db087d9a18e --- /dev/null +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py @@ -0,0 +1,66 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from spinn_utilities.overrides import overrides +from data_specification.enums import DataType +from .abstract_threshold_type import AbstractThresholdType + + + +UNITS = {} + + +class ThresholdTypeNone(AbstractThresholdType): + """ A threshold that is empty of parameters and unused + """ + __slots__ = [] + + def __init__(self): + super(ThresholdTypeNone, self).__init__( + [] # no params + ) + + + @overrides(AbstractThresholdType.get_n_cpu_cycles) + def get_n_cpu_cycles(self, n_neurons): + # Just a comparison, but 2 just in case! + return 2 * n_neurons + + @overrides(AbstractThresholdType.add_parameters) + def add_parameters(self, parameters): + pass + + @overrides(AbstractThresholdType.add_state_variables) + def add_state_variables(self, state_variables): + pass + + @overrides(AbstractThresholdType.get_units) + def get_units(self, variable): + return UNITS[variable] + + @overrides(AbstractThresholdType.has_variable) + def has_variable(self, variable): + return variable in UNITS + + @overrides(AbstractThresholdType.get_values) + def get_values(self, parameters, state_variables, vertex_slice): + # Add the rest of the data + return [] + + @overrides(AbstractThresholdType.update_values) + def update_values(self, values, parameters, state_variables): + pass + + From 36adad170241c01127f3f57e515073c796dbaa1f Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Fri, 15 Nov 2019 12:45:11 +0000 Subject: [PATCH 017/123] check test scripts still run correctly --- .../implementations/neuron_impl_eprop_adaptive.h | 13 ++++++------- .../models/neuron_model_eprop_adaptive_impl.h | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 03e769b194d..f5f9ade0be4 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -307,17 +307,16 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record V (just as cheap to set then to gate later) recorded_variable_values[V_RECORDING_INDEX] = voltage; // result; - // Record Z - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = z_t; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// global_parameters->core_pop_rate; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = +// z_t; + global_parameters->core_pop_rate; // Record B recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - B_t; // neuron->B; -// global_parameters->core_target_rate; -// total_exc; // total synaptic input from input layer +// B_t; // neuron->B; + global_parameters->core_target_rate; +// total_inh; // total synaptic input from input layer // ********************************************************* diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 75d95d9f38c..8080c18c521 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -104,7 +104,7 @@ static inline void threshold_type_update_threshold(state_t z, threshold_type->b = temp1 + temp2; - + // io_printf(IO_BUF, "temp1: %k; temp2: %k\n", temp1, temp2); // // Evolve threshold dynamics (decay to baseline) and adapt if z=nonzero // // Update small b (same regardless of spike - uses z from previous timestep) From f7cc1a35686b9113fe536c168678e4fc522227a6 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Tue, 26 Nov 2019 14:15:07 +0000 Subject: [PATCH 018/123] Remove delays from spike processing, and use field to index synapse array --- .../makefiles/neuron/eprop_adaptive/Makefile | 1 - .../neuron_impl_eprop_adaptive.h | 24 ++++++-- .../models/neuron_model_eprop_adaptive_impl.c 
| 59 +++++++++++-------- .../models/neuron_model_eprop_adaptive_impl.h | 9 +-- .../src/neuron/spike_processing.c | 12 +++- neural_modelling/src/neuron/synapse_row.h | 2 +- neural_modelling/src/neuron/synapses.c | 20 +++++-- neural_modelling/src/neuron/synapses.h | 4 +- .../connectors/abstract_connector.py | 2 +- .../neuron_model_eprop_adaptive.py | 10 ++-- .../synapse_dynamics_static.py | 4 +- .../neuron/synapse_io/synapse_io_row_based.py | 2 +- spynnaker/pyNN/utilities/constants.py | 8 +-- 13 files changed, 103 insertions(+), 54 deletions(-) diff --git a/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile b/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile index d14f67e4fca..65ee2006ca7 100644 --- a/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile +++ b/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile @@ -17,7 +17,6 @@ APP = $(notdir $(CURDIR)) OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_eprop_adaptive_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_eprop_adaptive.h -//SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c include ../neural_build.mk \ No newline at end of file diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index f5f9ade0be4..57103f4efd8 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -282,6 +282,15 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // determine if a spike should occur threshold_type_update_threshold(neuron->z, neuron); + + // Record B + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// B_t; // neuron->B; +// global_parameters->core_target_rate; +// neuron->syn_state[0].e_bar; +// 
neuron->syn_state[0].el_a; + total_inh; + // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, @@ -309,13 +318,18 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = + total_exc; // z_t; - global_parameters->core_pop_rate; +// global_parameters->core_pop_rate; +// neuron->psi; +// neuron->syn_state[0].z_bar; - // Record B - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// B_t; // neuron->B; - global_parameters->core_target_rate; +// // Record B +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +//// B_t; // neuron->B; +//// global_parameters->core_target_rate; +//// neuron->syn_state[0].e_bar; +// neuron->syn_state[0].el_a; // total_inh; // total synaptic input from input layer // ********************************************************* diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 2a3e1f42c5c..0ff822a1151 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -76,41 +76,52 @@ state_t neuron_model_state_update( // ****************************************************************** // Update Psi (pseudo-derivative) (done once for each postsynaptic neuron) // ****************************************************************** - // REAL temp1 = (neuron->V_membrane - v_threshold_baseline) * (1/v_thresh) - // REAL temp2 = ((1/v_th) * 0.3 * 1-(abs(temp1)) - // neuron->psi = (temp2 > 0)? temp2 , 0; + REAL psi_temp1 = (neuron->V_membrane - neuron->B) * (1/neuron->b_0); + REAL psi_temp2 = ((absk(psi_temp1))); + neuron->psi = ((1.0k - psi_temp2) > 0.0k)? 
+ (1.0k/neuron->b_0) * 0.3k * 1.0k * (1.0k - psi_temp2) : 0.0k; - // All operations now need doing once per eprop synapse -// for (int syn=0; syn < total_synapses_per_neuron; syn++){ - // ****************************************************************** - // Update eligibility vector - // ****************************************************************** -// neuron->syn_state[syn_ind].ep_a; = neuron->psi * neuron->syn_state[syn_ind].z_bar + -// (global_params->rho - neuron->psi * global_params->beta) * -// neuron->syn_state[syn_ind].ep_a; + uint32_t total_synapses_per_neuron = 1; + REAL rho = 0.998; + // All operations now need doing once per eprop synapse + for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ - // ****************************************************************** - // Update eligibility trace - // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// global_params->beta * neuron->syn_state[syn_ind].ep_a); -// neuron->syn_state[syn_ind].e_bar = "low pass filtered temp_elig_trace" - + // ****************************************************************** + // Low-pass filter incoming spike train + // ****************************************************************** +// neuron->syn_state[syn_ind].z_bar_old = neuron->syn_state[syn_ind].z_bar; +// neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + neuron->syn_state[syn_ind].z_bar_old; // ToDo - // ****************************************************************** - // Update total weight change - // ****************************************************************** -// uint16_t this_dt_weight_change = -global_params->eta * neuron->learning_sig * neuron->syn_state[syn_ind].e_bar; -// neuron->syn_state[syn_ind].delta_w +=this_dt_weight_change; -// } + // ****************************************************************** + // Update eligibility vector + // 
****************************************************************** + // updating z_bar is problematic, if spike could come and interrupt neuron update + // (you won't know whether spike arrived before or after update) + // (also need to reset if it was 1 - otherwise it will never be cleared) + neuron->syn_state[syn_ind].el_a = + (neuron->psi * neuron->syn_state[syn_ind].z_bar) + + (rho - neuron->psi * neuron->beta) * + neuron->syn_state[syn_ind].el_a; + // ****************************************************************** + // Update eligibility trace + // ****************************************************************** +// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +// neuron->beta * neuron->syn_state[syn_ind].el_a); +// neuron->syn_state[syn_ind].e_bar = temp_elig_trace; + // ****************************************************************** + // Update cached total weight change + // ****************************************************************** + // uint16_t this_dt_weight_change = -global_params->eta * neuron->learning_sig * neuron->syn_state[syn_ind].e_bar; + // neuron->syn_state[syn_ind].delta_w +=this_dt_weight_change; + } return neuron->V_membrane; } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 8080c18c521..98749a903ef 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -25,10 +25,11 @@ typedef struct eprop_syn_state_t { - uint16_t delta_w; // weight change to apply - uint16_t z_bar; // low-pass filtered spike train - uint32_t ep_a; // adaptive component of eligibility vector - uint32_t e_bar; // low-pass filtered eligibility trace + uint32_t delta_w; // weight change to apply + REAL z_bar_old; + REAL z_bar; // low-pass filtered spike train + REAL el_a; // adaptive component of eligibility vector + 
REAL e_bar; // low-pass filtered eligibility trace }eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 08e29346c13..2f47bc1fb8a 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -167,6 +167,7 @@ static inline void setup_synaptic_dma_write(uint32_t dma_buffer_index) { static void multicast_packet_received_callback(uint key, uint payload) { use(payload); any_spike = true; + io_printf(IO_BUF, "mc packet received \n"); log_debug("Received spike %x at %d, DMA Busy = %d", key, time, dma_busy); // If there was space to add spike to incoming spike queue @@ -182,7 +183,7 @@ static void multicast_packet_received_callback(uint key, uint payload) { } } } else { - log_debug("Could not add spike"); + io_printf(IO_BUF, "Could not add spike\n"); } } @@ -190,6 +191,9 @@ static void multicast_packet_received_callback(uint key, uint payload) { static void user_event_callback(uint unused0, uint unused1) { use(unused0); use(unused1); + + io_printf(IO_BUF, "user callback triggered \n"); + setup_synaptic_dma_read(); } @@ -199,6 +203,9 @@ static void dma_complete_callback(uint unused, uint tag) { log_debug("DMA transfer complete at time %u with tag %u", time, tag); + io_printf(IO_BUF, "Entering DMA Complete...\n"); + log_info("Entering DMA Complete...\n"); + // Get pointer to current buffer uint32_t current_buffer_index = buffer_being_read; dma_buffer *current_buffer = &dma_buffers[current_buffer_index]; @@ -240,6 +247,9 @@ static void dma_complete_callback(uint unused, uint tag) { bool spike_processing_initialise( // EXPORTED size_t row_max_n_words, uint mc_packet_callback_priority, uint user_event_priority, uint incoming_spike_buffer_size) { + + io_printf(IO_BUF, "Initialising spike_processing.c....\n"); + // Allocate the DMA buffers for (uint32_t i = 0; i < N_DMA_BUFFERS; i++) { 
dma_buffers[i].row = spin1_malloc(row_max_n_words * sizeof(uint32_t)); diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index afa9e754db2..f9298a38ae3 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -51,7 +51,7 @@ //! how many bits the synapse delay will take #ifndef SYNAPSE_DELAY_BITS -#define SYNAPSE_DELAY_BITS 4 +#define SYNAPSE_DELAY_BITS 8 #endif // Create some masks based on the number of bits diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 2b46f670bac..37e8b98f16b 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -174,12 +174,24 @@ static inline void process_fixed_synapses( uint32_t synaptic_word = *synaptic_words++; // Extract components from this word - uint32_t delay = - synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); + uint32_t delay = 1; + uint32_t syn_ind_from_delay = + synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); + uint32_t combined_synapse_neuron_index = synapse_row_sparse_type_index( synaptic_word, synapse_type_index_mask); int32_t weight = synapse_row_sparse_weight(synaptic_word); + int32_t neuron_ind = synapse_row_sparse_index(synaptic_word, synapse_type_mask); + + // Use postsynaptic neuron index to access neuron struct, + // and delay field to access correct synapse + // neuron_pointer_t neuron = neuron_array[neuron_ind]->syn_state[syn_ind_from_delay].z_bar; + + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u \n", neuron_ind, syn_ind_from_delay); + + + io_printf(IO_BUF, "signed w: %d \n", weight); // Convert into ring buffer offset @@ -275,7 +287,7 @@ bool synapses_initialise( uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); uint32_t n_ring_buffer_bits = - log_n_neurons + log_n_synapse_types + SYNAPSE_DELAY_BITS; + log_n_neurons + log_n_synapse_types + 1; // SYNAPSE_DELAY_BITS; Fix at 
delays of 1 timestep, as this means we get memory back, and we don't need delays to prove the concept uint32_t ring_buffer_size = 1 << (n_ring_buffer_bits); ring_buffers = spin1_malloc(ring_buffer_size * sizeof(weight_t)); @@ -339,7 +351,7 @@ bool synapses_process_synaptic_row( // Get address of non-plastic region from row address_t fixed_region_address = synapse_row_fixed_region(row); - + io_printf(IO_BUF, "Processing Spike...\n"); // **TODO** multiple optimised synaptic row formats //if (plastic_tag(row) == 0) { // If this row has a plastic region diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h index 09d089740b1..b9f1b6522ca 100644 --- a/neural_modelling/src/neuron/synapses.h +++ b/neural_modelling/src/neuron/synapses.h @@ -29,7 +29,7 @@ static inline index_t synapses_get_ring_buffer_index( uint32_t simuation_timestep, uint32_t synapse_type_index, uint32_t neuron_index, uint32_t synapse_type_index_bits, uint32_t synapse_index_bits) { - return ((simuation_timestep & SYNAPSE_DELAY_MASK) << synapse_type_index_bits) + return ((simuation_timestep & 1) << synapse_type_index_bits) //SYNAPSE_DELAY_MASK) << synapse_type_index_bits) | (synapse_type_index << synapse_index_bits) | neuron_index; } @@ -40,7 +40,7 @@ static inline index_t synapses_get_ring_buffer_index_combined( uint32_t simulation_timestep, uint32_t combined_synapse_neuron_index, uint32_t synapse_type_index_bits) { - return ((simulation_timestep & SYNAPSE_DELAY_MASK) << synapse_type_index_bits) + return ((simulation_timestep & 1) << synapse_type_index_bits) //) << synapse_type_index_bits) | combined_synapse_neuron_index; } diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index f31079c6fce..5643b3b1b81 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ 
b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -88,7 +88,7 @@ def set_projection_information( self._n_pre_neurons = pre_population.size self._n_post_neurons = post_population.size self._rng = (self._rng or rng or get_simulator().get_pynn_NumpyRNG()()) - self.__min_delay = machine_time_step / 1000.0 + self.__min_delay = 0 # machine_time_step / 1000.0 def _check_parameter(self, values, name, allow_lists): """ Check that the types of the values is supported. diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 5d67e73e09d..43f77fb8421 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -143,10 +143,11 @@ def __init__( # Synapse states - always initialise to zero eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) - DataType.INT16, # delta_w - DataType.INT16, # z_bar - DataType.INT32, # ep_a - DataType.INT32, # e_bar + DataType.UINT32, # delta_w + DataType.S1615, # z_bar_old + DataType.S1615, # z_bar + DataType.S1615, # ep_a + DataType.S1615, # e_bar ] # Extend to include fan-in for each neuron datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -266,6 +267,7 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): eprop_syn_init = [0, 0, 0, + 1, 0] # extend to appropriate fan-in values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index 74dc5c83480..0a3035ab68a 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -79,7 +79,7 @@ def get_static_synaptic_data( fixed_fixed = ( 
((numpy.rint(connections["weight"]).astype("uint16") & 0xFFFF).astype("uint32") << 16) | - ((connections["delay"].astype("uint32") & 0xF) << + ((connections["delay"].astype("uint32") & 0xFF) << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype( "uint32") << n_neuron_id_bits) | @@ -136,7 +136,7 @@ def read_static_synaptic_data( (data & neuron_id_mask) + post_vertex_slice.lo_atom) connections["weight"] = ((data >> 16) & 0xFFFF).astype("int16") connections["delay"] = (data >> (n_neuron_id_bits + - n_synapse_type_bits)) & 0xF + n_synapse_type_bits)) & 0xFF connections["delay"][connections["delay"] == 0] = 16 return connections diff --git a/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py b/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py index 40824c6aa55..c8afdeb2610 100644 --- a/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py +++ b/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py @@ -42,7 +42,7 @@ class SynapseIORowBased(AbstractSynapseIO): @overrides(AbstractSynapseIO.get_maximum_delay_supported_in_ms) def get_maximum_delay_supported_in_ms(self, machine_time_step): # There are 16 slots, one per time step - return 16 * (machine_time_step / 1000.0) + return 255 # * (machine_time_step / 1000.0) def _n_words(self, n_bytes): return math.ceil(float(n_bytes) / 4.0) diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 167e39b66dd..be4c90ce926 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -52,12 +52,12 @@ SCALE = WEIGHT_FLOAT_TO_FIXED_SCALE * NA_TO_PA_SCALE # natively supported delays for all abstract_models -MAX_SUPPORTED_DELAY_TICS = 16 -MAX_DELAY_BLOCKS = 8 -MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 +MAX_SUPPORTED_DELAY_TICS = 256 +MAX_DELAY_BLOCKS = 0 +MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 256 # the minimum supported delay slot between two neurons -MIN_SUPPORTED_DELAY = 1 
+MIN_SUPPORTED_DELAY = 0 # Regions for populations POPULATION_BASED_REGIONS = Enum( From b61026a01971214bbba1191a9065b087bfe341b7 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Tue, 26 Nov 2019 15:54:17 +0000 Subject: [PATCH 019/123] Skeleton sinusoid readout neuron --- .../neuron/sinusoid_readout/Makefile | 13 + .../neuron/store_recall_readout/Makefile | 3 - .../neuron_impl_sinusoid_readout.h | 546 ++++++++++++++++++ .../neuron_model_sinusoid_readout_impl.c | 95 +++ .../neuron_model_sinusoid_readout_impl.h | 60 ++ 5 files changed, 714 insertions(+), 3 deletions(-) create mode 100644 neural_modelling/makefiles/neuron/sinusoid_readout/Makefile create mode 100644 neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h create mode 100644 neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c create mode 100644 neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile new file mode 100644 index 00000000000..dcc0e30326e --- /dev/null +++ b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile @@ -0,0 +1,13 @@ +APP = $(notdir $(CURDIR)) + +OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinsuoid_readout.h +SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c + +#TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.c +#TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.h +#WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.c +#WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.h + + +include ../neural_build.mk \ No newline at end of file diff --git 
a/neural_modelling/makefiles/neuron/store_recall_readout/Makefile b/neural_modelling/makefiles/neuron/store_recall_readout/Makefile index bf46778f57e..9863615e594 100644 --- a/neural_modelling/makefiles/neuron/store_recall_readout/Makefile +++ b/neural_modelling/makefiles/neuron/store_recall_readout/Makefile @@ -1,10 +1,7 @@ APP = $(notdir $(CURDIR)) OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.c -#NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.c -#NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_store_recall_readout.h -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c #TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.c diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h new file mode 100644 index 00000000000..dea26b5fc53 --- /dev/null +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -0,0 +1,546 @@ +#ifndef _NEURON_IMPL_SINUSOID_READOUT_H_ +#define _NEURON_IMPL_SINUSOID_READOUT_H_ + +#include "neuron_impl.h" + +// Includes for model parts used in this implementation +#include +#include +#include +#include +#include + +// Further includes +#include +#include +#include +#include + +#define V_RECORDING_INDEX 0 +#define GSYN_EXCITATORY_RECORDING_INDEX 1 +#define GSYN_INHIBITORY_RECORDING_INDEX 2 + +#ifndef NUM_EXCITATORY_RECEPTORS +#define NUM_EXCITATORY_RECEPTORS 1 +#error NUM_EXCITATORY_RECEPTORS was undefined. 
It should be defined by a synapse\ + shaping include +#endif + +#ifndef NUM_INHIBITORY_RECEPTORS +#define NUM_INHIBITORY_RECEPTORS 1 +#error NUM_INHIBITORY_RECEPTORS was undefined. It should be defined by a synapse\ + shaping include +#endif + +//! Array of neuron states +static neuron_pointer_t neuron_array; + +//! Input states array +static input_type_pointer_t input_type_array; + +//! Additional input array +static additional_input_pointer_t additional_input_array; + +//! Threshold states array +static threshold_type_pointer_t threshold_type_array; + +//! Global parameters for the neurons +static global_neuron_params_pointer_t global_parameters; + +// The synapse shaping parameters +static synapse_param_t *neuron_synapse_shaping_params; + +static REAL next_spike_time = 0; +static uint32_t timer = 0; +static uint32_t target_ind = 0; + +static bool neuron_impl_initialise(uint32_t n_neurons) { + + // allocate DTCM for the global parameter details + if (sizeof(global_neuron_params_t) > 0) { + global_parameters = (global_neuron_params_t *) spin1_malloc( + sizeof(global_neuron_params_t)); + if (global_parameters == NULL) { + log_error("Unable to allocate global neuron parameters" + "- Out of DTCM"); + return false; + } + } + + // Allocate DTCM for neuron array + if (sizeof(neuron_t) != 0) { + neuron_array = (neuron_t *) spin1_malloc(n_neurons * sizeof(neuron_t)); + if (neuron_array == NULL) { + log_error("Unable to allocate neuron array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for input type array and copy block of data + if (sizeof(input_type_t) != 0) { + input_type_array = (input_type_t *) spin1_malloc( + n_neurons * sizeof(input_type_t)); + if (input_type_array == NULL) { + log_error("Unable to allocate input type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for additional input array and copy block of data + if (sizeof(additional_input_t) != 0) { + additional_input_array = (additional_input_pointer_t) spin1_malloc( + 
n_neurons * sizeof(additional_input_t)); + if (additional_input_array == NULL) { + log_error("Unable to allocate additional input array" + " - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for threshold type array and copy block of data + if (sizeof(threshold_type_t) != 0) { + threshold_type_array = (threshold_type_t *) spin1_malloc( + n_neurons * sizeof(threshold_type_t)); + if (threshold_type_array == NULL) { + log_error("Unable to allocate threshold type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for synapse shaping parameters + if (sizeof(synapse_param_t) != 0) { + neuron_synapse_shaping_params = (synapse_param_t *) spin1_malloc( + n_neurons * sizeof(synapse_param_t)); + if (neuron_synapse_shaping_params == NULL) { + log_error("Unable to allocate synapse parameters array" + " - Out of DTCM"); + return false; + } + } + + // Initialise pointers to Neuron parameters in STDP code + synapse_dynamics_set_neuron_array(neuron_array); + log_info("set pointer to neuron array in stdp code"); + + return true; +} + +static void neuron_impl_add_inputs( + index_t synapse_type_index, index_t neuron_index, + input_t weights_this_timestep) { + // simple wrapper to synapse type input function + synapse_param_pointer_t parameters = + &(neuron_synapse_shaping_params[neuron_index]); + synapse_types_add_neuron_input(synapse_type_index, + parameters, weights_this_timestep); +} + +static void neuron_impl_load_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("reading parameters, next is %u, n_neurons is %u ", + next, n_neurons); + + //log_debug("writing neuron global parameters"); + spin1_memcpy(global_parameters, &address[next], + sizeof(global_neuron_params_t)); + next += (sizeof(global_neuron_params_t) + 3) / 4; + + log_debug("reading neuron local parameters"); + spin1_memcpy(neuron_array, &address[next], n_neurons * sizeof(neuron_t)); + next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + + 
log_debug("reading input type parameters"); + spin1_memcpy(input_type_array, &address[next], + n_neurons * sizeof(input_type_t)); + next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + + log_debug("reading threshold type parameters"); + spin1_memcpy(threshold_type_array, &address[next], + n_neurons * sizeof(threshold_type_t)); + next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + + log_debug("reading synapse parameters"); + spin1_memcpy(neuron_synapse_shaping_params, &address[next], + n_neurons * sizeof(synapse_param_t)); + next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + + log_debug("reading additional input type parameters"); + spin1_memcpy(additional_input_array, &address[next], + n_neurons * sizeof(additional_input_t)); + next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + + neuron_model_set_global_neuron_params(global_parameters); + +// io_printf(IO_BUF, "\nPrinting global params\n"); +// io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); +// io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); +// io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); +// io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); +// io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); + + + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + +// io_printf(IO_BUF, "size of global params: %u", +// sizeof(global_neuron_params_t)); + + + + #if LOG_LEVEL >= LOG_DEBUG + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + log_debug("-------------------------------------\n"); + //} + #endif // LOG_LEVEL >= LOG_DEBUG +} + + +// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& + +//// Poisson Spike Source Functions +// +//static inline REAL 
slow_spike_source_get_time_to_spike( +// REAL mean_inter_spike_interval_in_ticks, neuron_pointer_t neuron) { +// return exponential_dist_variate( +// mars_kiss64_seed, +//// neuron->spike_source_seed +// global_parameters->spike_source_seed +// ) +// * mean_inter_spike_interval_in_ticks; +//} +// +// +// +//void set_spike_source_rate(neuron_pointer_t neuron, REAL rate, +// threshold_type_pointer_t threshold_type) { +// +// // clip rate to ensure divde by 0 and overflow don't occur +// if (rate < 0.25){ +// rate = 0.25; +// } else if (rate > threshold_type->threshold_value) { +// rate = threshold_type->threshold_value; +// } +// +// REAL rate_diff = neuron->rate_at_last_setting - rate; +// +// // ensure rate_diff is absolute +// if (rate_diff < 0.0k) { +// rate_diff = -rate_diff; +// } +// +// // Has rate changed by more than a predefined threshold since it was last +// // used to update the mean isi ticks? +// if ((rate_diff) > neuron->rate_update_threshold){ +// // then update the rate +// neuron->rate_at_last_setting = rate; +// +// // Update isi ticks based on new rate +// neuron->mean_isi_ticks = +// // rate * +// //// global_parameters->ticks_per_second; // shouldn't this be ticks_per_second/rate? +// // neuron->ticks_per_second ; // shouldn't this be ticks_per_second/rate? +// (global_parameters->ticks_per_second / rate); // shouldn't this be ticks_per_second/rate? +// +// // Account for time that's already passed since previous spike +// neuron->time_to_spike_ticks = neuron->mean_isi_ticks +// - neuron->time_since_last_spike; +// } // else stick with existing rate and isi ticks - they're within threshold +//} +// +// +//bool timer_update_determine_poisson_spiked(neuron_pointer_t neuron) { +// // NOTE: ALL SOURCES TREATED AS SLOW SOURCES!!! 
+// // NOTE: NO SOURCE CAN SPIKE MORE THAN ONCE PER TIMESTEP +// // If this spike source should spike now +// +// bool has_spiked = false; +// +// // Advance by one timestep +// // Subtract tick +// neuron->time_to_spike_ticks -= REAL_CONST(1.0); +// +// // Add tick to time since last spike (to enable for dynamic rate change) +// neuron->time_since_last_spike += 1.0k; +// +//// io_printf(IO_BUF, " Time to next spike: %k\n", +//// neuron->time_to_spike_ticks); +// +// if (REAL_COMPARE( +// neuron->time_to_spike_ticks, <=, +// REAL_CONST(0.0))) { +// +// // Update time to spike +// next_spike_time = slow_spike_source_get_time_to_spike( +// neuron->mean_isi_ticks, neuron); +// +// neuron->time_to_spike_ticks += next_spike_time; +// +// // Set time since last spike to zero, so we start counting from here +// neuron->time_since_last_spike = 0; +// +// has_spiked = true; +// } +// +// return has_spiked; +//} + +// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& + + +static bool neuron_impl_do_timestep_update(index_t neuron_index, + input_t external_bias, state_t *recorded_variable_values) { + + // Get the neuron itself + neuron_pointer_t neuron = &neuron_array[neuron_index]; + bool spike = false; + + target_ind = timer & 0x3ff; // repeats on a cycle of 1024 entries in array + +// io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); +// io_printf(IO_BUF, "Target: %k\n\n", +// global_parameters->target_V[target_ind]); + + // Get the input_type parameters and voltage for this neuron + input_type_pointer_t input_type = &input_type_array[neuron_index]; + + // Get threshold and additional input parameters for this neuron + threshold_type_pointer_t threshold_type = + &threshold_type_array[neuron_index]; + additional_input_pointer_t additional_input = + &additional_input_array[neuron_index]; + synapse_param_pointer_t synapse_type = + &neuron_synapse_shaping_params[neuron_index]; + + // Get the voltage + state_t voltage = 
neuron_model_get_membrane_voltage(neuron); + + + // Get the exc and inh values from the synapses + input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); + input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); + + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_value, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_value, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording + REAL total_exc = 0; + REAL total_inh = 0; + + for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ + total_exc += exc_input_values[i]; + } + for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ + total_inh += inh_input_values[i]; + } + + // Call functions to get the input values to be recorded + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + + // Call functions to convert exc_input and inh_input to current + input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); + + external_bias += additional_input_get_input_value_as_current( + additional_input, voltage); + + if (neuron_index == 0){ + recorded_variable_values[V_RECORDING_INDEX] = voltage; + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron); +// io_printf(IO_BUF, "Readout membrane pot: %k\n", voltage); + // determine if a spike should occur + // bool spike = threshold_type_is_above_threshold(result, threshold_type); + + // Finally, set global membrane potential to updated value + global_parameters->readout_V = result; + + } else if (neuron_index == 1) { // this is the excitatory 
error source + + recorded_variable_values[V_RECORDING_INDEX] = + global_parameters->target_V[target_ind]; + + // Update Poisson neuron rate based on updated V + REAL rate = (global_parameters->target_V[target_ind] + - global_parameters->readout_V); // calc difference to +// io_printf(IO_BUF, "New Rate: %k", rate); +// rate = rate * 10; + rate = rate * 1.0k; + if (rate > 0) { // readout is below target, so set rate = diff. + // This will cause potentiation of excitatory synapses, + // and depression of inhibitory synapses +// io_printf(IO_BUF, "\t\t\tTime: %u, logging error: %k\n", timer, rate); + +//// set_spike_source_rate(neuron, rate, +//// threshold_type); +//// +//// // record error in postsynaptic history for use in plasticity calcs +//// synapse_dynamics_process_post_synaptic_event( +//// timer, //time, +//// 0, // neuron_index +//// bitsk(rate<<4)); //weight); +// +// } else { // readout is above target, so set rate = zero +// set_spike_source_rate(neuron, 0, +// threshold_type); + } + +// // judge whether poisson neuron should have fired +// spike = timer_update_determine_poisson_spiked(neuron); + + } else if (neuron_index == 2){ + // Update Poisson neuron rate based on updated V + REAL rate = (global_parameters->target_V[target_ind] + - global_parameters->readout_V); // calc difference to +// io_printf(IO_BUF, "New Rate: %k", rate); + + recorded_variable_values[V_RECORDING_INDEX] = rate; +// rate = rate * 10; + + rate = rate * 1.0k; + if (rate < 0) { + // readout is above target, send spikes from inhibitory neuron with rate = -diff: +// // this will depress excitatory synapses, and potenitate inhibitory synapses +// set_spike_source_rate(neuron, -rate, +// threshold_type); +// +//// io_printf(IO_BUF, "\t\t\tTime: %u, logging error: %k\n", timer, rate); +// // record error in postsynaptic history for use in plasticity calcs +// synapse_dynamics_process_post_synaptic_event( +// timer, //time, +// 0, // neuron_index +// bitsk(rate<<4)); //weight); +// +// } 
else { // readout is below target, so set rate = 0; +// set_spike_source_rate(neuron, 0, +// threshold_type); + } + + // judge whether poisson neuron should have fired +// spike = timer_update_determine_poisson_spiked(neuron); + + timer++; // update this here, as needs to be done once per iteration over all the neurons + + } + + + + // If spike occurs, communicate to relevant parts of model + if (spike) { + // Call relevant model-based functions + // Tell the neuron model +// neuron_model_has_spiked(neuron); + + // Tell the additional input + additional_input_has_spiked(additional_input); + } + + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); + + #if LOG_LEVEL >= LOG_DEBUG + neuron_model_print_state_variables(neuron); + #endif // LOG_LEVEL >= LOG_DEBUG + + // Return the boolean to the model timestep update + return spike; +} + + + + + +//! \brief stores neuron parameter back into sdram +//! \param[in] address: the address in sdram to start the store +static void neuron_impl_store_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("writing parameters"); + + //log_debug("writing neuron global parameters"); + spin1_memcpy(&address[next], global_parameters, + sizeof(global_neuron_params_t)); + next += (sizeof(global_neuron_params_t) + 3) / 4; + + log_debug("writing neuron local parameters"); + spin1_memcpy(&address[next], neuron_array, + n_neurons * sizeof(neuron_t)); + next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + + log_debug("writing input type parameters"); + spin1_memcpy(&address[next], input_type_array, + n_neurons * sizeof(input_type_t)); + next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + + log_debug("writing threshold type parameters"); + spin1_memcpy(&address[next], threshold_type_array, + n_neurons * sizeof(threshold_type_t)); + next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + + log_debug("writing synapse parameters"); + 
spin1_memcpy(&address[next], neuron_synapse_shaping_params, + n_neurons * sizeof(synapse_param_t)); + next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + + log_debug("writing additional input type parameters"); + spin1_memcpy(&address[next], additional_input_array, + n_neurons * sizeof(additional_input_t)); + next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; +} + +#if LOG_LEVEL >= LOG_DEBUG +void neuron_impl_print_inputs(uint32_t n_neurons) { + bool empty = true; + for (index_t i = 0; i < n_neurons; i++) { + empty = empty + && (bitsk(synapse_types_get_excitatory_input( + &(neuron_synapse_shaping_params[i])) + - synapse_types_get_inhibitory_input( + &(neuron_synapse_shaping_params[i]))) == 0); + } + + if (!empty) { + log_debug("-------------------------------------\n"); + + for (index_t i = 0; i < n_neurons; i++) { + input_t input = + synapse_types_get_excitatory_input( + &(neuron_synapse_shaping_params[i])) + - synapse_types_get_inhibitory_input( + &(neuron_synapse_shaping_params[i])); + if (bitsk(input) != 0) { + log_debug("%3u: %12.6k (= ", i, input); + synapse_types_print_input( + &(neuron_synapse_shaping_params[i])); + log_debug(")\n"); + } + } + log_debug("-------------------------------------\n"); + } +} + +void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + synapse_types_print_parameters(&(neuron_synapse_shaping_params[n])); + } + log_debug("-------------------------------------\n"); +} + +const char *neuron_impl_get_synapse_type_char(uint32_t synapse_type) { + return synapse_types_get_type_char(synapse_type); +} +#endif // LOG_LEVEL >= LOG_DEBUG + +#endif // _NEURON_IMPL_SINUSOID_READOUT_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c new file mode 100644 index 00000000000..47b493ee5de --- /dev/null +++ 
b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -0,0 +1,95 @@ +#include "neuron_model_sinusoid_readout_impl.h" + +#include + +// simple Leaky I&F ODE +static inline void _lif_neuron_closed_form( + neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { + + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + + // update membrane voltage + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +} + +void neuron_model_set_global_neuron_params( + global_neuron_params_pointer_t params) { + use(params); + + // Does Nothing - no params +} + +state_t neuron_model_state_update( + uint16_t num_excitatory_inputs, input_t* exc_input, + uint16_t num_inhibitory_inputs, input_t* inh_input, + input_t external_bias, neuron_pointer_t neuron) { + + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); + log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); + + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { + REAL total_exc = 0; + REAL total_inh = 0; + + total_exc += exc_input[0]; + total_inh += inh_input[0]; +// for (int i=0; i < num_excitatory_inputs; i++){ +// total_exc += exc_input[i]; +// } +// for (int i=0; i< num_inhibitory_inputs; i++){ +// total_inh += inh_input[i]; +// } + // Get the input in nA + input_t input_this_timestep = + total_exc - total_inh + external_bias + neuron->I_offset; + + _lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep); + } else { + + // countdown refractory timer + neuron->refract_timer -= 1; + } + return neuron->V_membrane; +} + +void neuron_model_has_spiked(neuron_pointer_t neuron) { + + // reset membrane voltage + neuron->V_membrane = neuron->V_reset; + + // reset refractory timer + neuron->refract_timer = neuron->T_refract; +} + +state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { + return neuron->V_membrane; +} + +void neuron_model_print_state_variables(restrict 
neuron_pointer_t neuron) { + log_debug("V membrane = %11.4k mv", neuron->V_membrane); +} + +void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { + io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); + io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); + + io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); + io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); + + io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); + + io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); + io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); + io_printf(IO_BUF, "time_to_spike_ticks = %k \n", + neuron->time_to_spike_ticks); + +// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); +// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +} diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h new file mode 100644 index 00000000000..bc3f697de43 --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -0,0 +1,60 @@ +#ifndef _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ +#define _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ + +#include "neuron_model.h" +#include "random.h" + +///////////////////////////////////////////////////////////// +// definition for LIF neuron parameters +typedef struct neuron_t { + // membrane voltage [mV] + REAL V_membrane; + + // membrane resting voltage [mV] + REAL V_rest; + + // membrane resistance [MOhm] + REAL R_membrane; + + // 'fixed' computation parameter - time constant multiplier for + // closed-form solution + 
// exp(-(machine time step in ms)/(R * C)) [.] + REAL exp_TC; + + // offset current [nA] + REAL I_offset; + + // countdown to end of next refractory period [timesteps] + int32_t refract_timer; + + // post-spike reset membrane voltage [mV] + REAL V_reset; + + // refractory time of neuron [timesteps] + int32_t T_refract; + + + // Poisson compartment params +// REAL mean_isi_ticks; +// REAL time_to_spike_ticks; +// +// int32_t time_since_last_spike; +// REAL rate_at_last_setting; +// REAL rate_update_threshold; + + +// // Should be in global params +// mars_kiss64_seed_t spike_source_seed; // array of 4 values +//// UFRACT seconds_per_tick; +// REAL ticks_per_second; + +} neuron_t; + +typedef struct global_neuron_params_t { +// mars_kiss64_seed_t spike_source_seed; // array of 4 values +// REAL ticks_per_second; + REAL readout_V; + REAL target_V[1024]; +} global_neuron_params_t; + +#endif // _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ From f55810eb4e6dac98e0a789e891f682adaa41e826 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Wed, 27 Nov 2019 08:48:11 +0000 Subject: [PATCH 020/123] skeleton sinusoid readout neuron --- .../neuron/sinusoid_readout/Makefile | 9 +- .../neuron_impl_sinusoid_readout.h | 6 +- .../neuron_impl_store_recall_readout.h | 1 - .../neuron_model_sinusoid_readout_impl.c | 10 +- .../pyNN/models/neuron/builds/__init__.py | 3 +- .../models/neuron/builds/sinusoid_readout.py | 43 +++ .../models/neuron/neuron_models/__init__.py | 3 +- .../neuron_model_sinusoid_readout.py | 296 ++++++++++++++++++ 8 files changed, 354 insertions(+), 17 deletions(-) create mode 100644 spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py create mode 100644 spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile index dcc0e30326e..a33a904b6b8 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile +++ 
b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile @@ -1,13 +1,8 @@ APP = $(notdir $(CURDIR)) OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c -NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinsuoid_readout.h +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c -#TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.c -#TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.h -#WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.c -#WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.h - -include ../neural_build.mk \ No newline at end of file +include ../neural_build.mk diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index dea26b5fc53..46bacd46f46 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -12,9 +12,11 @@ // Further includes #include +#include #include #include #include +#include #define V_RECORDING_INDEX 0 #define GSYN_EXCITATORY_RECORDING_INDEX 1 @@ -119,7 +121,7 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Initialise pointers to Neuron parameters in STDP code - synapse_dynamics_set_neuron_array(neuron_array); +// synapse_dynamics_set_neuron_array(neuron_array); log_info("set pointer to neuron array in stdp code"); return true; @@ -359,7 +361,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron); + 
external_bias, neuron, 0.0k); // io_printf(IO_BUF, "Readout membrane pot: %k\n", voltage); // determine if a spike should occur // bool spike = threshold_type_is_above_threshold(result, threshold_type); diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h index 41e7b24c333..0adec47e1e6 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h @@ -16,7 +16,6 @@ #include #include #include -//#include "random.h" #include #define V_RECORDING_INDEX 0 diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index 47b493ee5de..0cc2f55b075 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -22,11 +22,11 @@ void neuron_model_set_global_neuron_params( state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, neuron_pointer_t neuron) { + input_t external_bias, neuron_pointer_t neuron, REAL dummy) { log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); - + use(dummy); // If outside of the refractory period if (neuron->refract_timer <= 0) { @@ -82,9 +82,9 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); - io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); - io_printf(IO_BUF, "time_to_spike_ticks = %k \n", - neuron->time_to_spike_ticks); +// io_printf(IO_BUF, 
"mean_isi_ticks = %k\n", neuron->mean_isi_ticks); +// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", +// neuron->time_to_spike_ticks); // io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); // io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index b799e0f62d6..aa6f31179b8 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -29,10 +29,11 @@ from .if_curr_exp_semd_base import IFCurrExpSEMDBase from .eprop_adaptive import EPropAdaptive from .store_recall_readout import StoreRecallReadout +from .sinusoid_readout import SinusoidReadout __all__ = ["EIFConductanceAlphaPopulation", "HHCondExp", "IFCondAlpha", "IFCondExpBase", "IFCurrAlpha", "IFCurrDualExpBase", "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", "IzkCurrExpBase", "IFCondExpStoc", "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", - "EPropAdaptive", "StoreRecallReadout"] + "EPropAdaptive", "StoreRecallReadout", "SinusoidReadout"] diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py new file mode 100644 index 00000000000..99830cd095d --- /dev/null +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -0,0 +1,43 @@ +from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard +from spynnaker.pyNN.models.defaults import default_initial_values +from spynnaker.pyNN.models.neuron.neuron_models import ( + NeuronModelLeakyIntegrateAndFireSinusoidReadout) +from spynnaker.pyNN.models.neuron.synapse_types import ( + SynapseTypeEPropAdaptive) +from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent +from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeStatic + + +class SinusoidReadout(AbstractPyNNNeuronModelStandard): + """ + """ + + @default_initial_values({"v", 
"isyn_exc", "isyn_exc2", "isyn_inh", + "isyn_inh2"}) + def __init__( + self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, + v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, + + isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, + tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, +# mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, + + target_data =[]): + + # pylint: disable=too-many-arguments, too-many-locals + neuron_model = NeuronModelLeakyIntegrateAndFireSinusoidReadout( + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, target_data) + + synapse_type = SynapseTypeEPropAdaptive( + tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) + + input_type = InputTypeCurrent() + + threshold_type = ThresholdTypeStatic(v_thresh) + + super(SinusoidReadout, self).__init__( + model_name="sinusoid_readout", + binary="sinusoid_readout.aplx", + neuron_model=neuron_model, input_type=input_type, + synapse_type=synapse_type, threshold_type=threshold_type) \ No newline at end of file diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index 75a64a1132f..0e6e25d8ff1 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -19,7 +19,8 @@ NeuronModelLeakyIntegrateAndFire) from .neuron_model_eprop_adaptive import NeuronModelEPropAdaptive from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout +from .neuron_model_sinusoid_readout import NeuronModelLeakyIntegrateAndFireSinusoidReadout __all__ = ["AbstractNeuronModel", "NeuronModelIzh", "NeuronModelLeakyIntegrateAndFire", "NeuronModelEPropAdaptive", - "NeuronModelStoreRecallReadout"] + "NeuronModelStoreRecallReadout", "NeuronModelLeakyIntegrateAndFireSinusoidReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py 
b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py new file mode 100644 index 00000000000..3c86a46408b --- /dev/null +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -0,0 +1,296 @@ +import numpy +from spinn_utilities.overrides import overrides +from data_specification.enums import DataType +from pacman.executor.injection_decorator import inject_items +from .abstract_neuron_model import AbstractNeuronModel + +MICROSECONDS_PER_SECOND = 1000000.0 +MICROSECONDS_PER_MILLISECOND = 1000.0 + +V = "v" +V_REST = "v_rest" +TAU_M = "tau_m" +CM = "cm" +I_OFFSET = "i_offset" +V_RESET = "v_reset" +TAU_REFRAC = "tau_refrac" +COUNT_REFRAC = "count_refrac" +# MEAN_ISI_TICKS = "mean_isi_ticks" +# TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" +# SEED1 = "seed1" +# SEED2 = "seed2" +# SEED3 = "seed3" +# SEED4 = "seed4" +# TICKS_PER_SECOND = "ticks_per_second" +# TIME_SINCE_LAST_SPIKE = "time_since_last_spike" +# RATE_AT_LAST_SETTING = "rate_at_last_setting" +# RATE_UPDATE_THRESHOLD = "rate_update_threshold" +TARGET_DATA = "target_data" + +UNITS = { + V: 'mV', + V_REST: 'mV', + TAU_M: 'ms', + CM: 'nF', + I_OFFSET: 'nA', + V_RESET: 'mV', + TAU_REFRAC: 'ms' +} + + +class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): + __slots__ = [ + "_v_init", + "_v_rest", + "_tau_m", + "_cm", + "_i_offset", + "_v_reset", + "_tau_refrac", + "_mean_isi_ticks", + "_time_to_spike_ticks", + "_time_since_last_spike", + "_rate_at_last_setting", + "_rate_update_threshold", + "_target_data" + ] + + def __init__( + self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, +# mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, + target_data): + + global_data_types=[ +# DataType.UINT32, # MARS KISS seed +# DataType.UINT32, # MARS KISS seed +# DataType.UINT32, # MARS KISS seed +# DataType.UINT32, # MARS KISS seed +# DataType.S1615, # ticks_per_second + DataType.S1615 # global mem pot + ] + 
global_data_types.extend([DataType.S1615 for i in range(1024)]) + + + super(NeuronModelLeakyIntegrateAndFireSinusoidReadout, self).__init__( + data_types= [ + DataType.S1615, # v + DataType.S1615, # v_rest + DataType.S1615, # r_membrane (= tau_m / cm) + DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.S1615, # i_offset + DataType.INT32, # count_refrac + DataType.S1615, # v_reset + DataType.INT32, # tau_refrac + #### Poisson Compartment Params #### +# DataType.S1615, # REAL mean_isi_ticks +# DataType.S1615, # REAL time_to_spike_ticks +# DataType.INT32, # int32_t time_since_last_spike s +# DataType.S1615, # REAL rate_at_last_setting; s +# DataType.S1615 # REAL rate_update_threshold; p + ], + + global_data_types=global_data_types + ) + + if v_init is None: + v_init = v_rest + + self._v_init = v_init + self._v_rest = v_rest + self._tau_m = tau_m + self._cm = cm + self._i_offset = i_offset + self._v_reset = v_reset + self._tau_refrac = tau_refrac +# self._mean_isi_ticks = mean_isi_ticks +# self._time_to_spike_ticks = time_to_spike_ticks +# self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation +# self._rate_at_last_setting = 0 +# self._rate_update_threshold = 2 + self._target_data = target_data + + @overrides(AbstractNeuronModel.get_n_cpu_cycles) + def get_n_cpu_cycles(self, n_neurons): + # A bit of a guess + return 100 * n_neurons + + @overrides(AbstractNeuronModel.add_parameters) + def add_parameters(self, parameters): + parameters[V_REST] = self._v_rest + parameters[TAU_M] = self._tau_m + parameters[CM] = self._cm + parameters[I_OFFSET] = self._i_offset + parameters[V_RESET] = self._v_reset + parameters[TAU_REFRAC] = self._tau_refrac +# parameters[SEED1] = 10065 +# parameters[SEED2] = 232 +# parameters[SEED3] = 3634 +# parameters[SEED4] = 4877 + parameters[TARGET_DATA] = 0.0 + +# parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() +# parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold 
+# parameters[TARGET_DATA] = self._target_data + + @overrides(AbstractNeuronModel.add_state_variables) + def add_state_variables(self, state_variables): + state_variables[V] = self._v_init + state_variables[COUNT_REFRAC] = 0 +# state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks +# state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential +# state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike +# state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting + + + @overrides(AbstractNeuronModel.get_units) + def get_units(self, variable): + return UNITS[variable] + + @overrides(AbstractNeuronModel.has_variable) + def has_variable(self, variable): + return variable in UNITS + + @inject_items({"ts": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + def get_values(self, parameters, state_variables, vertex_slice, ts): + + # Add the rest of the data + return [state_variables[V], + parameters[V_REST], + parameters[TAU_M] / parameters[CM], + parameters[TAU_M].apply_operation( + operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), + parameters[I_OFFSET], state_variables[COUNT_REFRAC], + parameters[V_RESET], + parameters[TAU_REFRAC].apply_operation( + operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), +# state_variables[MEAN_ISI_TICKS], +# state_variables[TIME_TO_SPIKE_TICKS], +# state_variables[TIME_SINCE_LAST_SPIKE], +# state_variables[RATE_AT_LAST_SETTING], +# parameters[RATE_UPDATE_THRESHOLD] + ] + + @overrides(AbstractNeuronModel.update_values) + def update_values(self, values, parameters, state_variables): + + # Read the data + (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, + _v_reset, _tau_refrac, +# mean_isi_ticks, time_to_spike_ticks, time_since_last_spike, +# rate_at_last_setting, _rate_update_threshold +# _seed1, _seed2, _seed3, _seed4, _ticks_per_second + ) = values + + # Copy the changed data only + 
state_variables[V] = v +# state_variables[COUNT_REFRAC] = count_refrac +# state_variables[MEAN_ISI_TICKS] = mean_isi_ticks +# state_variables[TIME_TO_SPIKE_TICKS] = time_to_spike_ticks +# state_variable[TIME_SINCE_LAST_SPIKE] = time_since_last_spike +# state_variabels[RATE_AT_LAST_SETTING] = rate_at_last_setting + + # Global params + @inject_items({"machine_time_step": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_global_values, + additional_arguments={'machine_time_step'}) + def get_global_values(self, machine_time_step): + vals = [ +# 1, # seed 1 +# 2, # seed 2 +# 3, # seed 3 +# 4, # seed 4 +# MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second + 0.0, # set to 0, as will be set in first timestep of model anyway (membrane potential) + ] + +# target_data = [] +# +# for i in range(1024): +# target_data.append( +# # 4 +# 5 + 2 * numpy.sin(2 * i * 2* numpy.pi / 1024) \ +# + 5 * numpy.sin((4 * i * 2* numpy.pi / 1024)) +# ) + vals.extend(self._target_data) + return vals + + @property + def target_data(self): + return self._target_data + + @target_data.setter + def target_data(self, target_data): + self._target_data = target_data + + @property + def v_init(self): + return self._v + + @v_init.setter + def v_init(self, v_init): + self._v = v_init + + @property + def v_rest(self): + return self._v_rest + + @v_rest.setter + def v_rest(self, v_rest): + self._v_rest = v_rest + + @property + def tau_m(self): + return self._tau_m + + @tau_m.setter + def tau_m(self, tau_m): + self._tau_m = tau_m + + @property + def cm(self): + return self._cm + + @cm.setter + def cm(self, cm): + self._cm = cm + + @property + def i_offset(self): + return self._i_offset + + @i_offset.setter + def i_offset(self, i_offset): + self._i_offset = i_offset + + @property + def v_reset(self): + return self._v_reset + + @v_reset.setter + def v_reset(self, v_reset): + self._v_reset = v_reset + + @property + def tau_refrac(self): + return self._tau_refrac + + @tau_refrac.setter + 
def tau_refrac(self, tau_refrac): + self._tau_refrac = tau_refrac + +# @property +# def mean_isi_ticks(self): +# return self._mean_isi_ticks +# +# @mean_isi_ticks.setter +# def mean_isi_ticks(self, new_mean_isi_ticks): +# self._mean_isi_ticks = new_mean_isi_ticks +# +# @property +# def time_to_spike_ticks(self): +# return self._time_to_spike_ticks +# +# @mean_isi_ticks.setter +# def time_to_spike_ticks(self, new_time_to_spike_ticks): +# self._time_to_spike_ticks = new_time_to_spike_ticks \ No newline at end of file From dfa0a5593e0ed78e2155fa2b93c4df93f0ad1096 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Wed, 27 Nov 2019 09:33:36 +0000 Subject: [PATCH 021/123] sinusoid readout implemented --- .../neuron_impl_sinusoid_readout.h | 190 ++---------------- .../neuron_model_sinusoid_readout_impl.h | 2 +- .../neuron_model_sinusoid_readout.py | 4 +- 3 files changed, 22 insertions(+), 174 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 46bacd46f46..4b626078707 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -4,7 +4,7 @@ #include "neuron_impl.h" // Includes for model parts used in this implementation -#include +#include #include #include #include @@ -53,7 +53,7 @@ static global_neuron_params_pointer_t global_parameters; static synapse_param_t *neuron_synapse_shaping_params; static REAL next_spike_time = 0; -static uint32_t timer = 0; +extern uint32_t time; static uint32_t target_ind = 0; static bool neuron_impl_initialise(uint32_t n_neurons) { @@ -201,96 +201,6 @@ static void neuron_impl_load_neuron_parameters( } -// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& - -//// Poisson Spike Source Functions -// -//static inline REAL slow_spike_source_get_time_to_spike( -// REAL 
mean_inter_spike_interval_in_ticks, neuron_pointer_t neuron) { -// return exponential_dist_variate( -// mars_kiss64_seed, -//// neuron->spike_source_seed -// global_parameters->spike_source_seed -// ) -// * mean_inter_spike_interval_in_ticks; -//} -// -// -// -//void set_spike_source_rate(neuron_pointer_t neuron, REAL rate, -// threshold_type_pointer_t threshold_type) { -// -// // clip rate to ensure divde by 0 and overflow don't occur -// if (rate < 0.25){ -// rate = 0.25; -// } else if (rate > threshold_type->threshold_value) { -// rate = threshold_type->threshold_value; -// } -// -// REAL rate_diff = neuron->rate_at_last_setting - rate; -// -// // ensure rate_diff is absolute -// if (rate_diff < 0.0k) { -// rate_diff = -rate_diff; -// } -// -// // Has rate changed by more than a predefined threshold since it was last -// // used to update the mean isi ticks? -// if ((rate_diff) > neuron->rate_update_threshold){ -// // then update the rate -// neuron->rate_at_last_setting = rate; -// -// // Update isi ticks based on new rate -// neuron->mean_isi_ticks = -// // rate * -// //// global_parameters->ticks_per_second; // shouldn't this be ticks_per_second/rate? -// // neuron->ticks_per_second ; // shouldn't this be ticks_per_second/rate? -// (global_parameters->ticks_per_second / rate); // shouldn't this be ticks_per_second/rate? -// -// // Account for time that's already passed since previous spike -// neuron->time_to_spike_ticks = neuron->mean_isi_ticks -// - neuron->time_since_last_spike; -// } // else stick with existing rate and isi ticks - they're within threshold -//} -// -// -//bool timer_update_determine_poisson_spiked(neuron_pointer_t neuron) { -// // NOTE: ALL SOURCES TREATED AS SLOW SOURCES!!! 
-// // NOTE: NO SOURCE CAN SPIKE MORE THAN ONCE PER TIMESTEP -// // If this spike source should spike now -// -// bool has_spiked = false; -// -// // Advance by one timestep -// // Subtract tick -// neuron->time_to_spike_ticks -= REAL_CONST(1.0); -// -// // Add tick to time since last spike (to enable for dynamic rate change) -// neuron->time_since_last_spike += 1.0k; -// -//// io_printf(IO_BUF, " Time to next spike: %k\n", -//// neuron->time_to_spike_ticks); -// -// if (REAL_COMPARE( -// neuron->time_to_spike_ticks, <=, -// REAL_CONST(0.0))) { -// -// // Update time to spike -// next_spike_time = slow_spike_source_get_time_to_spike( -// neuron->mean_isi_ticks, neuron); -// -// neuron->time_to_spike_ticks += next_spike_time; -// -// // Set time since last spike to zero, so we start counting from here -// neuron->time_since_last_spike = 0; -// -// has_spiked = true; -// } -// -// return has_spiked; -//} - -// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& static bool neuron_impl_do_timestep_update(index_t neuron_index, @@ -300,7 +210,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, neuron_pointer_t neuron = &neuron_array[neuron_index]; bool spike = false; - target_ind = timer & 0x3ff; // repeats on a cycle of 1024 entries in array + target_ind = time & 0x3ff; // repeats on a cycle of 1024 entries in array // io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); // io_printf(IO_BUF, "Target: %k\n\n", @@ -343,8 +253,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } // Call functions to get the input values to be recorded - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; // Call functions to convert exc_input and inh_input to current 
input_type_convert_excitatory_input_to_current( @@ -355,87 +265,25 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); - if (neuron_index == 0){ - recorded_variable_values[V_RECORDING_INDEX] = voltage; +// if (neuron_index == 0){ + recorded_variable_values[V_RECORDING_INDEX] = voltage; // update neuron parameters - state_t result = neuron_model_state_update( + state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, external_bias, neuron, 0.0k); -// io_printf(IO_BUF, "Readout membrane pot: %k\n", voltage); - // determine if a spike should occur - // bool spike = threshold_type_is_above_threshold(result, threshold_type); - - // Finally, set global membrane potential to updated value - global_parameters->readout_V = result; - - } else if (neuron_index == 1) { // this is the excitatory error source - - recorded_variable_values[V_RECORDING_INDEX] = - global_parameters->target_V[target_ind]; - - // Update Poisson neuron rate based on updated V - REAL rate = (global_parameters->target_V[target_ind] - - global_parameters->readout_V); // calc difference to -// io_printf(IO_BUF, "New Rate: %k", rate); -// rate = rate * 10; - rate = rate * 1.0k; - if (rate > 0) { // readout is below target, so set rate = diff. 
- // This will cause potentiation of excitatory synapses, - // and depression of inhibitory synapses -// io_printf(IO_BUF, "\t\t\tTime: %u, logging error: %k\n", timer, rate); - -//// set_spike_source_rate(neuron, rate, -//// threshold_type); -//// -//// // record error in postsynaptic history for use in plasticity calcs -//// synapse_dynamics_process_post_synaptic_event( -//// timer, //time, -//// 0, // neuron_index -//// bitsk(rate<<4)); //weight); -// -// } else { // readout is above target, so set rate = zero -// set_spike_source_rate(neuron, 0, -// threshold_type); - } - -// // judge whether poisson neuron should have fired -// spike = timer_update_determine_poisson_spiked(neuron); - - } else if (neuron_index == 2){ - // Update Poisson neuron rate based on updated V - REAL rate = (global_parameters->target_V[target_ind] - - global_parameters->readout_V); // calc difference to -// io_printf(IO_BUF, "New Rate: %k", rate); - - recorded_variable_values[V_RECORDING_INDEX] = rate; -// rate = rate * 10; - - rate = rate * 1.0k; - if (rate < 0) { - // readout is above target, send spikes from inhibitory neuron with rate = -diff: -// // this will depress excitatory synapses, and potenitate inhibitory synapses -// set_spike_source_rate(neuron, -rate, -// threshold_type); -// -//// io_printf(IO_BUF, "\t\t\tTime: %u, logging error: %k\n", timer, rate); -// // record error in postsynaptic history for use in plasticity calcs -// synapse_dynamics_process_post_synaptic_event( -// timer, //time, -// 0, // neuron_index -// bitsk(rate<<4)); //weight); -// -// } else { // readout is below target, so set rate = 0; -// set_spike_source_rate(neuron, 0, -// threshold_type); - } - // judge whether poisson neuron should have fired -// spike = timer_update_determine_poisson_spiked(neuron); - - timer++; // update this here, as needs to be done once per iteration over all the neurons - - } + // Calculate error + REAL error = result - global_parameters->target_V[target_ind]; + + // Record 
readout + recorded_variable_values[V_RECORDING_INDEX] = result; + // Record target + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = + global_parameters->target_V[target_ind]; + // Record Error + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + error; diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index bc3f697de43..c6ed61cd24d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -53,7 +53,7 @@ typedef struct neuron_t { typedef struct global_neuron_params_t { // mars_kiss64_seed_t spike_source_seed; // array of 4 values // REAL ticks_per_second; - REAL readout_V; +// REAL readout_V; REAL target_V[1024]; } global_neuron_params_t; diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 3c86a46408b..7ce79ff4a63 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -66,7 +66,7 @@ def __init__( # DataType.UINT32, # MARS KISS seed # DataType.UINT32, # MARS KISS seed # DataType.S1615, # ticks_per_second - DataType.S1615 # global mem pot +# DataType.S1615 # global mem pot ] global_data_types.extend([DataType.S1615 for i in range(1024)]) @@ -201,7 +201,7 @@ def get_global_values(self, machine_time_step): # 3, # seed 3 # 4, # seed 4 # MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second - 0.0, # set to 0, as will be set in first timestep of model anyway (membrane potential) +# 0.0, # set to 0, as will be set in first timestep of model anyway (membrane potential) ] # target_data = [] From d448e65dc4c791da2610d0a26bcf74df12448ccf Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: 
Thu, 28 Nov 2019 11:27:40 +0000 Subject: [PATCH 022/123] Learning signal by payload --- .../neuron_impl_eprop_adaptive.h | 7 +- .../neuron_impl_sinusoid_readout.h | 10 ++- .../models/neuron_model_eprop_adaptive_impl.h | 1 + neural_modelling/src/neuron/neuron.c | 2 +- .../src/neuron/spike_processing.c | 38 ++++++++ .../models/neuron/builds/eprop_adaptive.py | 6 +- .../neuron_model_eprop_adaptive.py | 39 +++++++-- .../neuron_model_sinusoid_readout.py | 87 ++----------------- 8 files changed, 93 insertions(+), 97 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 57103f4efd8..6e47724e79f 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -49,6 +49,10 @@ shaping include #endif + +extern REAL learning_signal; + + //! Array of neuron states static neuron_pointer_t neuron_array; @@ -289,7 +293,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // global_parameters->core_target_rate; // neuron->syn_state[0].e_bar; // neuron->syn_state[0].el_a; - total_inh; +// total_inh; + learning_signal * neuron->w_fb; // update neuron parameters state_t result = neuron_model_state_update( diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 4b626078707..0440336470a 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -54,8 +54,12 @@ static synapse_param_t *neuron_synapse_shaping_params; static REAL next_spike_time = 0; extern uint32_t time; +extern key_t key; static uint32_t target_ind = 0; + + + static bool neuron_impl_initialise(uint32_t n_neurons) { // allocate DTCM for the global parameter 
details @@ -285,7 +289,11 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = error; - + // Send error (learning signal) as packet with payload + while (!spin1_send_mc_packet( + key | neuron_index, bitsk(error), 1 )) { + spin1_delay_us(1); + } // If spike occurs, communicate to relevant parts of model if (spike) { diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 98749a903ef..447d946fae2 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -81,6 +81,7 @@ typedef struct neuron_t { REAL scalar; REAL L; // learning signal + REAL w_fb; // feedback weight // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index 97d9d79550d..809b36beede 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -36,7 +36,7 @@ extern uint ticks; #define SPIKE_RECORDING_CHANNEL 0 //! The key to be used for this core (will be ORed with neuron ID) -static key_t key; +key_t key; //MADE NON STATIC!!! //! A checker that says if this model should be transmitting. If set to false //! by the data region, then this model should not have a key. 
diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 2f47bc1fb8a..6383ab13969 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -56,6 +56,7 @@ static uint32_t number_of_rewires = 0; static bool any_spike = false; /* PRIVATE FUNCTIONS - static for inlining */ +REAL learning_signal; static inline void do_dma_read( address_t row_address, size_t n_bytes_to_transfer) { @@ -242,6 +243,34 @@ static void dma_complete_callback(uint unused, uint tag) { setup_synaptic_dma_read(); } +static void multicast_packet_wpayload_received_callback(uint key, uint payload){ + + learning_signal = kbits(payload); + + // Print payload to test transmission of error + io_printf(IO_BUF, "payload: %k\n", learning_signal); + + // Assign learning signal to global memory + +// // If there was space to add spike to incoming spike queue +// if (in_spikes_add_spike(key)) { +// // If we're not already processing synaptic DMAs, +// // flag pipeline as busy and trigger a feed event +// if (!dma_busy) { +// log_debug("Sending user event for new spike"); +// if (spin1_trigger_user_event(0, 0)) { +// dma_busy = true; +// } else { +// log_debug("Could not trigger user event\n"); +// } +// } +// } else { +// io_printf(IO_BUF, "Could not add spike in mc_payload_received\n"); +// } + +} + + /* INTERFACE FUNCTIONS - cannot be static */ bool spike_processing_initialise( // EXPORTED @@ -282,6 +311,15 @@ bool spike_processing_initialise( // EXPORTED DMA_TAG_READ_SYNAPTIC_ROW, dma_complete_callback); spin1_callback_on(USER_EVENT, user_event_callback, user_event_priority); + + io_printf(IO_BUF, "About to register MCPL callback\n"); + + // Register MC_PACKET_RECEIVED_PAYLOAD + spin1_callback_on(MCPL_PACKET_RECEIVED, + multicast_packet_wpayload_received_callback, mc_packet_callback_priority); + + io_printf(IO_BUF, "Registered MCPL callback successfully\n"); + return true; } diff --git 
a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index ab53996462b..9126deee551 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -30,7 +30,7 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", "psi", "target_rate", "tau_err", "B", "small_b", - "l" + "l", "w_fb" }) def __init__( self, @@ -49,7 +49,7 @@ def __init__( B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, # Learning signal and weight update constants - l=0 + l=0, w_fb=0.5, ): # pylint: disable=too-many-arguments, too-many-locals @@ -61,7 +61,7 @@ def __init__( small_b_0, tau_a, beta, - target_rate, tau_err, l) + target_rate, tau_err, l, w_fb) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 43f77fb8421..c8414f631a8 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -45,6 +45,7 @@ SCALAR = "scalar" # Learning signal L = "learning_signal" +W_FB = "feedback_weight" UNITS = { V: 'mV', @@ -79,6 +80,7 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__z", "__a", "__psi", + # threshold params "__B", "__small_b", @@ -87,11 +89,14 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__beta", # "_adpt" "__scalar", + # reg params "__target_rate", "__tau_err", + # learning signal - "__l" + "__l", + "__w_fb" ] def __init__( @@ -104,16 +109,19 @@ def __init__( v_reset, tau_refrac, psi, + # threshold params B, small_b, small_b_0, tau_a, beta, + # regularisation params target_rate, tau_err, - l + l, + w_fb ): datatype_list = [ @@ -138,7 +146,8 @@ def __init__( DataType.UINT32, DataType.S1615, 
# Learning signal - DataType.S1615 # L + DataType.S1615, # L + DataType.S1615 # w_fb ] # Synapse states - always initialise to zero @@ -186,6 +195,7 @@ def __init__( # learning signal self.__l = l + self.__w_fb = w_fb @overrides(AbstractNeuronModel.get_n_cpu_cycles) @@ -206,18 +216,20 @@ def add_parameters(self, parameters): parameters[TAU_A] = self.__tau_a parameters[BETA] = self.__beta parameters[SCALAR] = self.__scalar + parameters[W_FB] = self.__w_fb @overrides(AbstractNeuronModel.add_state_variables) def add_state_variables(self, state_variables): state_variables[V] = self.__v_init state_variables[COUNT_REFRAC] = 0 state_variables[PSI] = self.__psi - state_variables[Z] = 0 - state_variables[A] = 0 - state_variables[L] = 0 - + state_variables[Z] = 0 # initalise to zero + state_variables[A] = 0 # initialise to zero + state_variables[BIG_B] = self.__B state_variables[SMALL_B] = self.__small_b + + state_variables[L] = self.__l @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -260,7 +272,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): float(-ts) / (1000.0 * x))) * ulfract), # ADPT parameters[SCALAR], - state_variables[L] + state_variables[L], + parameters[W_FB] ] # create synaptic state - init all state to zero @@ -298,7 +311,7 @@ def update_values(self, values, parameters, state_variables): (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l) = values # Not sure this will work with the new array of synapse!!! + l, __w_fb) = values # Not sure this will work with the new array of synapse!!! 
# Copy the changed data only state_variables[V] = v @@ -406,3 +419,11 @@ def beta(self): @beta.setter def beta(self, new_value): self.__beta = new_value + + @property + def w_fb(self): + return self.__w_fb# + + @w_fb.setter + def w_fb(self, new_value): + self.__w_fb = new_value diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 7ce79ff4a63..5702df3abfc 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -47,11 +47,6 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): "_i_offset", "_v_reset", "_tau_refrac", - "_mean_isi_ticks", - "_time_to_spike_ticks", - "_time_since_last_spike", - "_rate_at_last_setting", - "_rate_update_threshold", "_target_data" ] @@ -60,14 +55,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, target_data): - global_data_types=[ -# DataType.UINT32, # MARS KISS seed -# DataType.UINT32, # MARS KISS seed -# DataType.UINT32, # MARS KISS seed -# DataType.UINT32, # MARS KISS seed -# DataType.S1615, # ticks_per_second -# DataType.S1615 # global mem pot - ] + global_data_types=[] global_data_types.extend([DataType.S1615 for i in range(1024)]) @@ -81,12 +69,6 @@ def __init__( DataType.INT32, # count_refrac DataType.S1615, # v_reset DataType.INT32, # tau_refrac - #### Poisson Compartment Params #### -# DataType.S1615, # REAL mean_isi_ticks -# DataType.S1615, # REAL time_to_spike_ticks -# DataType.INT32, # int32_t time_since_last_spike s -# DataType.S1615, # REAL rate_at_last_setting; s -# DataType.S1615 # REAL rate_update_threshold; p ], global_data_types=global_data_types @@ -102,11 +84,6 @@ def __init__( self._i_offset = i_offset self._v_reset = v_reset self._tau_refrac = tau_refrac -# self._mean_isi_ticks = mean_isi_ticks -# self._time_to_spike_ticks = 
time_to_spike_ticks -# self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation -# self._rate_at_last_setting = 0 -# self._rate_update_threshold = 2 self._target_data = target_data @overrides(AbstractNeuronModel.get_n_cpu_cycles) @@ -122,24 +99,13 @@ def add_parameters(self, parameters): parameters[I_OFFSET] = self._i_offset parameters[V_RESET] = self._v_reset parameters[TAU_REFRAC] = self._tau_refrac -# parameters[SEED1] = 10065 -# parameters[SEED2] = 232 -# parameters[SEED3] = 3634 -# parameters[SEED4] = 4877 parameters[TARGET_DATA] = 0.0 - -# parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() -# parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold -# parameters[TARGET_DATA] = self._target_data + @overrides(AbstractNeuronModel.add_state_variables) def add_state_variables(self, state_variables): state_variables[V] = self._v_init state_variables[COUNT_REFRAC] = 0 -# state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks -# state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential -# state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike -# state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting @overrides(AbstractNeuronModel.get_units) @@ -164,11 +130,6 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): parameters[V_RESET], parameters[TAU_REFRAC].apply_operation( operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), -# state_variables[MEAN_ISI_TICKS], -# state_variables[TIME_TO_SPIKE_TICKS], -# state_variables[TIME_SINCE_LAST_SPIKE], -# state_variables[RATE_AT_LAST_SETTING], -# parameters[RATE_UPDATE_THRESHOLD] ] @overrides(AbstractNeuronModel.update_values) @@ -176,42 +137,19 @@ def update_values(self, values, parameters, state_variables): # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac, -# mean_isi_ticks, time_to_spike_ticks, 
time_since_last_spike, -# rate_at_last_setting, _rate_update_threshold -# _seed1, _seed2, _seed3, _seed4, _ticks_per_second - ) = values + _v_reset, _tau_refrac) = values # Copy the changed data only state_variables[V] = v -# state_variables[COUNT_REFRAC] = count_refrac -# state_variables[MEAN_ISI_TICKS] = mean_isi_ticks -# state_variables[TIME_TO_SPIKE_TICKS] = time_to_spike_ticks -# state_variable[TIME_SINCE_LAST_SPIKE] = time_since_last_spike -# state_variabels[RATE_AT_LAST_SETTING] = rate_at_last_setting + # Global params @inject_items({"machine_time_step": "MachineTimeStep"}) @overrides(AbstractNeuronModel.get_global_values, additional_arguments={'machine_time_step'}) def get_global_values(self, machine_time_step): - vals = [ -# 1, # seed 1 -# 2, # seed 2 -# 3, # seed 3 -# 4, # seed 4 -# MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second -# 0.0, # set to 0, as will be set in first timestep of model anyway (membrane potential) - ] + vals = [] -# target_data = [] -# -# for i in range(1024): -# target_data.append( -# # 4 -# 5 + 2 * numpy.sin(2 * i * 2* numpy.pi / 1024) \ -# + 5 * numpy.sin((4 * i * 2* numpy.pi / 1024)) -# ) vals.extend(self._target_data) return vals @@ -279,18 +217,3 @@ def tau_refrac(self): def tau_refrac(self, tau_refrac): self._tau_refrac = tau_refrac -# @property -# def mean_isi_ticks(self): -# return self._mean_isi_ticks -# -# @mean_isi_ticks.setter -# def mean_isi_ticks(self, new_mean_isi_ticks): -# self._mean_isi_ticks = new_mean_isi_ticks -# -# @property -# def time_to_spike_ticks(self): -# return self._time_to_spike_ticks -# -# @mean_isi_ticks.setter -# def time_to_spike_ticks(self, new_time_to_spike_ticks): -# self._time_to_spike_ticks = new_time_to_spike_ticks \ No newline at end of file From 8521b6e90d625fa8627bea74968124721b23ce58 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Thu, 28 Nov 2019 17:08:51 +0000 Subject: [PATCH 023/123] Add complete synaptic state updates --- .../neuron_impl_eprop_adaptive.h | 12 
+++--- .../models/neuron_model_eprop_adaptive_impl.c | 38 +++++++++++++------ .../models/neuron_model_eprop_adaptive_impl.h | 3 +- neural_modelling/src/neuron/synapses.c | 8 +++- .../models/neuron/builds/eprop_adaptive.py | 10 +++-- .../neuron_model_eprop_adaptive.py | 17 ++++++--- 6 files changed, 61 insertions(+), 27 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 6e47724e79f..841b40fa2c9 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -54,7 +54,7 @@ extern REAL learning_signal; //! Array of neuron states -static neuron_pointer_t neuron_array; +neuron_pointer_t neuron_array; //! Input states array static input_type_pointer_t input_type_array; @@ -292,9 +292,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // B_t; // neuron->B; // global_parameters->core_target_rate; // neuron->syn_state[0].e_bar; -// neuron->syn_state[0].el_a; + neuron->syn_state[0].el_a; // total_inh; - learning_signal * neuron->w_fb; +// learning_signal * neuron->w_fb; // update neuron parameters state_t result = neuron_model_state_update( @@ -323,10 +323,12 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - total_exc; +// neuron->syn_state[0].delta_w; +// neuron->syn_state[0].z_bar; +// total_exc; // z_t; // global_parameters->core_pop_rate; -// neuron->psi; + neuron->psi; // neuron->syn_state[0].z_bar; // // Record B diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 0ff822a1151..df0cb1e634d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ 
b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -19,6 +19,9 @@ #include +extern REAL learning_signal; +REAL local_eta; + // simple Leaky I&F ODE static inline void lif_neuron_closed_form( neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep, @@ -34,6 +37,9 @@ static inline void lif_neuron_closed_form( void neuron_model_set_global_neuron_params( global_neuron_params_pointer_t params) { use(params); + + local_eta = params->eta; + io_printf(IO_BUF, "local eta = %k", local_eta); // Does Nothing - no params } @@ -82,25 +88,32 @@ state_t neuron_model_state_update( (1.0k/neuron->b_0) * 0.3k * 1.0k * (1.0k - psi_temp2) : 0.0k; uint32_t total_synapses_per_neuron = 1; + + +// neuron->psi = neuron->psi << 10; + REAL rho = 0.998; + + neuron->L = learning_signal * neuron->w_fb; + + // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ // ****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** -// neuron->syn_state[syn_ind].z_bar_old = neuron->syn_state[syn_ind].z_bar; -// neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + neuron->syn_state[syn_ind].z_bar_old; // ToDo - + neuron->syn_state[syn_ind].z_bar = + neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + // reset input (can't have more than one spike per timestep + neuron->syn_state[syn_ind].z_bar_inp = 0; // ****************************************************************** // Update eligibility vector // ****************************************************************** - // updating z_bar is problematic, if spike could come and interrupt neuron update - // (you won't know whether spike arrived before or after 
update) - // (also need to reset if it was 1 - otherwise it will never be cleared) neuron->syn_state[syn_ind].el_a = (neuron->psi * neuron->syn_state[syn_ind].z_bar) + (rho - neuron->psi * neuron->beta) * @@ -110,16 +123,19 @@ state_t neuron_model_state_update( // ****************************************************************** // Update eligibility trace // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * neuron->syn_state[syn_ind].el_a); + REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - + neuron->beta * neuron->syn_state[syn_ind].el_a); -// neuron->syn_state[syn_ind].e_bar = temp_elig_trace; + neuron->syn_state[syn_ind].e_bar = + neuron->exp_TC * neuron->syn_state[syn_ind].e_bar + + (1 - neuron->exp_TC) * temp_elig_trace; // ****************************************************************** // Update cached total weight change // ****************************************************************** - // uint16_t this_dt_weight_change = -global_params->eta * neuron->learning_sig * neuron->syn_state[syn_ind].e_bar; - // neuron->syn_state[syn_ind].delta_w +=this_dt_weight_change; + uint16_t this_dt_weight_change = + -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + neuron->syn_state[syn_ind].delta_w = this_dt_weight_change; } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 447d946fae2..5d11e4ba677 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -26,7 +26,7 @@ typedef struct eprop_syn_state_t { uint32_t delta_w; // weight change to apply - REAL z_bar_old; + REAL z_bar_inp; REAL z_bar; // low-pass filtered spike train REAL el_a; // adaptive component of eligibility vector REAL e_bar; // low-pass 
filtered eligibility trace @@ -92,6 +92,7 @@ typedef struct global_neuron_params_t { REAL core_pop_rate; REAL core_target_rate; REAL rate_exp_TC; + REAL eta; // learning rate } global_neuron_params_t; diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 37e8b98f16b..315364fb6d4 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -23,6 +23,7 @@ #include #include #include +#include "models/neuron_model_eprop_adaptive_impl.h" //! if using profiler import profiler tags #ifdef PROFILER_ENABLED @@ -31,6 +32,7 @@ // Globals required for synapse benchmarking to work. uint32_t num_fixed_pre_synaptic_events = 0; +extern neuron_pointer_t neuron_array; // The number of neurons static uint32_t n_neurons; @@ -184,13 +186,15 @@ static inline void process_fixed_synapses( int32_t neuron_ind = synapse_row_sparse_index(synaptic_word, synapse_type_mask); + + // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, // and delay field to access correct synapse // neuron_pointer_t neuron = neuron_array[neuron_ind]->syn_state[syn_ind_from_delay].z_bar; io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u \n", neuron_ind, syn_ind_from_delay); - - + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; io_printf(IO_BUF, "signed w: %d \n", weight); diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 9126deee551..df6bd80cff4 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -30,7 +30,7 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", "psi", "target_rate", "tau_err", "B", "small_b", - "l", "w_fb" + "l", "w_fb", "eta" }) def __init__( self, @@ -49,7 +49,7 @@ def __init__( 
B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, # Learning signal and weight update constants - l=0, w_fb=0.5, + l=0, w_fb=0.5, eta=1.0 ): # pylint: disable=too-many-arguments, too-many-locals @@ -61,7 +61,11 @@ def __init__( small_b_0, tau_a, beta, - target_rate, tau_err, l, w_fb) + # Regularisation params + target_rate, tau_err, + # Learning signal params + l, w_fb, eta + ) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index c8414f631a8..83f68020aa3 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -96,7 +96,8 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): # learning signal "__l", - "__w_fb" + "__w_fb", + "__eta" ] def __init__( @@ -121,7 +122,8 @@ def __init__( target_rate, tau_err, l, - w_fb + w_fb, + eta ): datatype_list = [ @@ -164,7 +166,8 @@ def __init__( global_data_types = [ DataType.S1615, # core_pop_rate DataType.S1615, # core_target_rate - DataType.S1615 # rate_exp_TC + DataType.S1615, # rate_exp_TC + DataType.S1615 # eta (learning rate) ] super(NeuronModelEPropAdaptive, self).__init__(data_types=datatype_list, @@ -196,6 +199,8 @@ def __init__( # learning signal self.__l = l self.__w_fb = w_fb + + self.__eta = eta @overrides(AbstractNeuronModel.get_n_cpu_cycles) @@ -218,6 +223,7 @@ def add_parameters(self, parameters): parameters[SCALAR] = self.__scalar parameters[W_FB] = self.__w_fb + @overrides(AbstractNeuronModel.add_state_variables) def add_state_variables(self, state_variables): state_variables[V] = self.__v_init @@ -280,7 +286,7 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): eprop_syn_init = [0, 0, 0, - 1, + 0, 0] # extend to appropriate fan-in values.extend(eprop_syn_init * 
SYNAPSES_PER_NEURON) @@ -294,7 +300,8 @@ def get_global_values(self, ts): glob_vals = [ self.__target_rate, # initialise global pop rate to the target self.__target_rate, # set target rate - numpy.exp(-float(ts/1000)/self.__tau_err) + numpy.exp(-float(ts/1000)/self.__tau_err), + self.__eta # learning rate ] print("\n ") From 387f1d302b783e7a0ee9a2fc1a94b9c5b24d2fff Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Thu, 28 Nov 2019 17:20:38 +0000 Subject: [PATCH 024/123] begin sketching out weight update scheme --- .../synapse_dynamics_eprop_adaptive_impl.c | 59 +++++++++++++------ 1 file changed, 40 insertions(+), 19 deletions(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 043b7cae77d..005726b2326 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -31,6 +31,9 @@ #include #include + +extern neuron_pointer_t neuron_array; + static uint32_t synapse_type_index_bits; static uint32_t synapse_index_bits; static uint32_t synapse_index_mask; @@ -280,19 +283,19 @@ bool synapse_dynamics_process_plastic_synapses( num_plastic_pre_synaptic_events += plastic_synapse; - // Get event history from synaptic row - pre_event_history_t *event_history = - plastic_event_history(plastic_region_address); - - // Get last pre-synaptic event from event history - const uint32_t last_pre_time = event_history->prev_time; - const pre_trace_t last_pre_trace = event_history->prev_trace; - - // Update pre-synaptic trace - log_debug("Adding pre-synaptic event to trace at time:%u", time); - event_history->prev_time = time; - event_history->prev_trace = - timing_add_pre_spike(time, last_pre_time, last_pre_trace); +// // Get event history from synaptic row +// pre_event_history_t *event_history = +// 
plastic_event_history(plastic_region_address); +// +// // Get last pre-synaptic event from event history +// const uint32_t last_pre_time = event_history->prev_time; +// const pre_trace_t last_pre_trace = event_history->prev_trace; +// +// // Update pre-synaptic trace +// log_debug("Adding pre-synaptic event to trace at time:%u", time); +// event_history->prev_time = time; +// event_history->prev_trace = +// timing_add_pre_spike(time, last_pre_time, last_pre_trace); // Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { @@ -302,7 +305,12 @@ bool synapse_dynamics_process_plastic_synapses( // Extract control-word components // **NOTE** cunningly, control word is just the same as lower // 16-bits of 32-bit fixed synapse so same functions can be used - uint32_t delay_axonal = sparse_axonal_delay(control_word); +// uint32_t delay_axonal = sparse_axonal_delay(control_word); + + uint32_t delay = 1; + uint32_t syn_ind_from_delay = + synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); + uint32_t delay_dendritic = synapse_row_sparse_delay( control_word, synapse_type_index_bits); uint32_t type = synapse_row_sparse_type( @@ -316,11 +324,24 @@ bool synapse_dynamics_process_plastic_synapses( update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); - // Update the synapse state - final_state_t final_state = plasticity_update_synapse( - time, last_pre_time, last_pre_trace, event_history->prev_trace, - delay_dendritic, delay_axonal, current_state, - &post_event_history[index]); + // Access weight change from synaptic state in DTCM + neuron_pointer_t neuron = &neuron_array[ + synapse_row_sparse_index(synaptic_word, synapse_type_mask) + ]; + neuron->syn_state[syn_ind_from_delay].delta_w; + + +// // Update the synapse state +// final_state_t final_state = plasticity_update_synapse( +// time, last_pre_time, last_pre_trace, event_history->prev_trace, +// delay_dendritic, delay_axonal, current_state, +// 
&post_event_history[index]); + + + // Access and apply weight change from synaptic state array + // Use neuron id to index into neuron array, and delay to index into synapse array + + // Convert into ring buffer offset uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( From a86232ffb1f1dbb63a801df5f9ba8db4ad09ab59 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Fri, 29 Nov 2019 15:52:36 +0000 Subject: [PATCH 025/123] Change arithmetic to fit with prototype implementation of weight change --- .../neuron/implementations/neuron_impl_eprop_adaptive.h | 8 ++++---- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 6 ++++-- .../src/neuron/models/neuron_model_eprop_adaptive_impl.h | 2 +- .../neuron/neuron_models/neuron_model_eprop_adaptive.py | 2 +- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 841b40fa2c9..a3a2bca862b 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -291,8 +291,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = // B_t; // neuron->B; // global_parameters->core_target_rate; -// neuron->syn_state[0].e_bar; - neuron->syn_state[0].el_a; + neuron->syn_state[0].e_bar; +// neuron->syn_state[0].el_a; // total_inh; // learning_signal * neuron->w_fb; @@ -323,12 +323,12 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// neuron->syn_state[0].delta_w; + neuron->syn_state[0].delta_w; // neuron->syn_state[0].z_bar; // total_exc; // z_t; // global_parameters->core_pop_rate; - neuron->psi; +// neuron->psi; // neuron->syn_state[0].z_bar; // // Record B diff --git 
a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index df0cb1e634d..05c644941db 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -85,7 +85,9 @@ state_t neuron_model_state_update( REAL psi_temp1 = (neuron->V_membrane - neuron->B) * (1/neuron->b_0); REAL psi_temp2 = ((absk(psi_temp1))); neuron->psi = ((1.0k - psi_temp2) > 0.0k)? - (1.0k/neuron->b_0) * 0.3k * 1.0k * (1.0k - psi_temp2) : 0.0k; + (1.0k/neuron->b_0) * +// 0.3k * + (1.0k - psi_temp2) : 0.0k; uint32_t total_synapses_per_neuron = 1; @@ -133,7 +135,7 @@ state_t neuron_model_state_update( // ****************************************************************** // Update cached total weight change // ****************************************************************** - uint16_t this_dt_weight_change = + REAL this_dt_weight_change = -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; neuron->syn_state[syn_ind].delta_w = this_dt_weight_change; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 5d11e4ba677..a9ffc609c87 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -25,7 +25,7 @@ typedef struct eprop_syn_state_t { - uint32_t delta_w; // weight change to apply + REAL delta_w; // weight change to apply REAL z_bar_inp; REAL z_bar; // low-pass filtered spike train REAL el_a; // adaptive component of eligibility vector diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 83f68020aa3..facaf8cab36 100644 --- 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -154,7 +154,7 @@ def __init__( # Synapse states - always initialise to zero eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) - DataType.UINT32, # delta_w + DataType.S1615, # delta_w DataType.S1615, # z_bar_old DataType.S1615, # z_bar DataType.S1615, # ep_a From 76c811a6582e99da80889e49134eb25c8bdf1bb8 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Wed, 22 Jan 2020 12:35:27 +0000 Subject: [PATCH 026/123] fix error by payload test example --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 4 ++-- .../src/neuron/implementations/neuron_impl_sinusoid_readout.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index a3a2bca862b..1efcc5db9ec 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -291,10 +291,10 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = // B_t; // neuron->B; // global_parameters->core_target_rate; - neuron->syn_state[0].e_bar; +// neuron->syn_state[0].e_bar; // neuron->syn_state[0].el_a; // total_inh; -// learning_signal * neuron->w_fb; + learning_signal * neuron->w_fb; // update neuron parameters state_t result = neuron_model_state_update( diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 0440336470a..ead4d1bf7a8 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ 
-35,7 +35,7 @@ #endif //! Array of neuron states -static neuron_pointer_t neuron_array; +neuron_pointer_t neuron_array; //! Input states array static input_type_pointer_t input_type_array; From 0abc0bc23fe032cd5417467852e0ed622582b237 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Fri, 24 Jan 2020 16:37:41 +0000 Subject: [PATCH 027/123] Convert to delta synapses and add test --- .../neuron_impl_eprop_adaptive.h | 28 +++++----- .../models/neuron_model_eprop_adaptive_impl.c | 20 +++---- .../synapse_type_eprop_adaptive.h | 56 +++++++++++-------- neural_modelling/src/neuron/synapses.c | 18 ++++-- .../synapse_type_eprop_adaptive.py | 10 ++-- 5 files changed, 74 insertions(+), 58 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 1efcc5db9ec..520310b4b90 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -258,16 +258,16 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, input_t* inh_input_values = input_type_get_input_value( inh_value, input_type, NUM_INHIBITORY_RECEPTORS); - // Sum g_syn contributions from all receptors for recording - REAL total_exc = 0; - REAL total_inh = 0; - - for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { - total_exc += exc_input_values[i]; - } - for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { - total_inh += inh_input_values[i]; - } +// // Sum g_syn contributions from all receptors for recording +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { +// total_exc += exc_input_values[i]; +// } +// for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { +// total_inh += inh_input_values[i]; +// } // // Call functions to get the input values to be recorded // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; 
@@ -293,8 +293,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // global_parameters->core_target_rate; // neuron->syn_state[0].e_bar; // neuron->syn_state[0].el_a; -// total_inh; - learning_signal * neuron->w_fb; + exc_input_values[1]; // record recurrent input (signed) +// learning_signal * neuron->w_fb; // update neuron parameters state_t result = neuron_model_state_update( @@ -323,9 +323,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - neuron->syn_state[0].delta_w; +// neuron->syn_state[0].delta_w; // neuron->syn_state[0].z_bar; -// total_exc; + exc_input_values[0]; // record input input (signed) // z_t; // global_parameters->core_pop_rate; // neuron->psi; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 05c644941db..5739e0af664 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -52,18 +52,18 @@ state_t neuron_model_state_update( log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); - REAL total_exc = 0; - REAL total_inh = 0; - - for (int i=0; i < num_excitatory_inputs; i++) { - total_exc += exc_input[i]; - } - for (int i=0; i< num_inhibitory_inputs; i++) { - total_inh += inh_input[i]; - } +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// for (int i=0; i < num_excitatory_inputs; i++) { +// total_exc += exc_input[i]; +// } +// for (int i=0; i< num_inhibitory_inputs; i++) { +// total_inh += inh_input[i]; +// } // Get the input in nA input_t input_this_timestep = - exc_input[0] - inh_input[0] + external_bias + neuron->I_offset; + exc_input[0] + exc_input[1] + neuron->I_offset; lif_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep, 
B_t); diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h index f7d3647c80d..dcb81b4e820 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h @@ -67,18 +67,22 @@ typedef enum input_buffer_regions { static inline void synapse_types_shape_input( synapse_param_pointer_t parameter) { - parameter->input_buffer_excitatory_value = decay_s1615( - parameter->input_buffer_excitatory_value, - parameter->exc_decay); - parameter->input_buffer_excitatory2_value = decay_s1615( - parameter->input_buffer_excitatory2_value, - parameter->exc2_decay); - parameter->input_buffer_inhibitory_value = decay_s1615( - parameter->input_buffer_inhibitory_value, - parameter->inh_decay); - parameter->input_buffer_inhibitory2_value = decay_s1615( - parameter->input_buffer_inhibitory2_value, - parameter->inh2_decay); + parameter->input_buffer_excitatory_value = 0; +// decay_s1615( +// parameter->input_buffer_excitatory_value, +// parameter->exc_decay); + parameter->input_buffer_excitatory2_value = 0; +// decay_s1615( +// parameter->input_buffer_excitatory2_value, +// parameter->exc2_decay); + parameter->input_buffer_inhibitory_value = 0; +// decay_s1615( +// parameter->input_buffer_inhibitory_value, +// parameter->inh_decay); + parameter->input_buffer_inhibitory2_value = 0; +// decay_s1615( +// parameter->input_buffer_inhibitory2_value, +// parameter->inh2_decay); } //! 
\brief adds the inputs for a give timer period to a given neuron that is @@ -92,24 +96,28 @@ static inline void synapse_types_add_neuron_input( index_t synapse_type_index, synapse_param_pointer_t parameter, input_t input) { if (synapse_type_index == EXCITATORY_ONE) { - parameter->input_buffer_excitatory_value = - parameter->input_buffer_excitatory_value + - decay_s1615(input, parameter->exc_init); + parameter->input_buffer_excitatory_value += input; +// = +// parameter->input_buffer_excitatory_value + +// decay_s1615(input, parameter->exc_init); } else if (synapse_type_index == EXCITATORY_TWO) { - parameter->input_buffer_excitatory2_value = - parameter->input_buffer_excitatory2_value + - decay_s1615(input, parameter->exc2_init); + parameter->input_buffer_excitatory2_value += input; +// = +// parameter->input_buffer_excitatory2_value + +// decay_s1615(input, parameter->exc2_init); } else if (synapse_type_index == INHIBITORY_ONE) { - parameter->input_buffer_inhibitory_value = - parameter->input_buffer_inhibitory_value + - decay_s1615(input, parameter->inh_init); + parameter->input_buffer_inhibitory_value += input; +// = +// parameter->input_buffer_inhibitory_value + +// decay_s1615(input, parameter->inh_init); } else if (synapse_type_index == INHIBITORY_TWO) { - parameter->input_buffer_inhibitory2_value = - parameter->input_buffer_inhibitory2_value + - decay_s1615(input, parameter->inh2_init); + parameter->input_buffer_inhibitory2_value += input; +// = +// parameter->input_buffer_inhibitory2_value + +// decay_s1615(input, parameter->inh2_init); } } diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 315364fb6d4..0a7035279b8 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -34,6 +34,8 @@ uint32_t num_fixed_pre_synaptic_events = 0; extern neuron_pointer_t neuron_array; +uint32_t RECURRENT_SYNAPSE_OFFSET = 100; + // The number of neurons static uint32_t n_neurons; @@ 
-184,17 +186,23 @@ static inline void process_fixed_synapses( synaptic_word, synapse_type_index_mask); int32_t weight = synapse_row_sparse_weight(synaptic_word); - int32_t neuron_ind = synapse_row_sparse_index(synaptic_word, synapse_type_mask); + int32_t neuron_ind = synapse_row_sparse_index(synaptic_word, synapse_index_mask); + uint32_t type = synapse_row_sparse_type(synaptic_word, synapse_index_bits, synapse_type_mask); // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, - // and delay field to access correct synapse - // neuron_pointer_t neuron = neuron_array[neuron_ind]->syn_state[syn_ind_from_delay].z_bar; - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u \n", neuron_ind, syn_ind_from_delay); + if (type==1){ + // this is a recurrent synapse: add 100 to index to correct array location + syn_ind_from_delay =+ RECURRENT_SYNAPSE_OFFSET; + } + + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u \n", neuron_ind, syn_ind_from_delay, type); + neuron_pointer_t neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; + + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in !!!! 
io_printf(IO_BUF, "signed w: %d \n", weight); diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index 4682e57de68..050102d40d3 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -129,19 +129,19 @@ def get_n_synapse_types(self): @overrides(AbstractSynapseType.get_synapse_id_by_target) def get_synapse_id_by_target(self, target): - if target == "excitatory": + if target == "input_connections": return 0 - elif target == "exc_err": + elif target == "recurrent_connections": return 1 - elif target == "inhibitory": + elif target == "learning_signal": return 2 - elif target == "inh_err": + elif target == "unused": return 3 return None @overrides(AbstractSynapseType.get_synapse_targets) def get_synapse_targets(self): - return "excitatory", "exc_err", "inhibitory", "inh_err" + return "input_connections", "recurrent_connections", "learning_signal", "unused" @property def tau_syn_E(self): From 0efa07d7ca9630b446c4f77e86e5649ed3014768 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 2 Mar 2020 18:45:39 +0000 Subject: [PATCH 028/123] folded in eprop learning to sinusoid readout neuron, altered make file, created separate synapses.c for sinusoid to process neuron array separately, altered print outs, couple bug fixes --- neural_modelling/makefiles/neuron/Makefile | 1 + .../makefiles/neuron/neural_build.mk | 11 +- .../neuron/sinusoid_readout/Makefile | 1 + .../neuron_impl_eprop_adaptive.h | 8 +- .../neuron_impl_sinusoid_readout.h | 6 +- .../neuron_impl_store_recall_readout.h | 62 ++- .../models/neuron_model_eprop_adaptive_impl.c | 39 +- .../neuron_model_sinusoid_readout_impl.c | 70 +++ .../neuron_model_sinusoid_readout_impl.h | 18 + .../synapse_dynamics_eprop_adaptive_impl.c | 11 +- neural_modelling/src/neuron/sinusynapses.c | 516 ++++++++++++++++++ 
.../src/neuron/spike_processing.c | 2 +- neural_modelling/src/neuron/synapses.c | 5 +- .../models/neuron/builds/sinusoid_readout.py | 12 +- .../neuron_model_eprop_adaptive.py | 5 +- .../neuron_model_sinusoid_readout.py | 98 +++- 16 files changed, 795 insertions(+), 70 deletions(-) create mode 100644 neural_modelling/src/neuron/sinusynapses.c diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index 090fa3385be..b6b1aac2c59 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -14,6 +14,7 @@ # along with this program. If not, see . MODELS = eprop_adaptive \ + sinusoid_readout \ # IF_curr_exp \ IF_cond_exp \ IZK_curr_exp \ diff --git a/neural_modelling/makefiles/neuron/neural_build.mk b/neural_modelling/makefiles/neuron/neural_build.mk index 77140cacca2..6c3c5b72edc 100644 --- a/neural_modelling/makefiles/neuron/neural_build.mk +++ b/neural_modelling/makefiles/neuron/neural_build.mk @@ -46,6 +46,10 @@ ifndef SYNAPSE_DEBUG SYNAPSE_DEBUG = LOG_INFO endif +ifndef SINUSYNAPSES + SINUSYNAPSES = neuron/synapses.c +endif + ifndef PLASTIC_DEBUG PLASTIC_DEBUG = LOG_INFO endif @@ -198,7 +202,7 @@ OTHER_SOURCES_CONVERTED := $(call strip_source_dirs,$(OTHER_SOURCES)) # List all the sources relative to one of SOURCE_DIRS SOURCES = common/out_spikes.c \ neuron/c_main.c \ - neuron/synapses.c \ + $(SINUSYNAPSES) \ neuron/neuron.c \ neuron/spike_processing.c \ neuron/population_table/population_table_$(POPULATION_TABLE_IMPL)_impl.c \ @@ -217,6 +221,11 @@ $(BUILD_DIR)neuron/c_main.o: $(MODIFIED_DIR)neuron/c_main.c -@mkdir -p $(dir $@) $(SYNAPSE_TYPE_COMPILE) -o $@ $< +$(BUILD_DIR)neuron/sinusynapses.o: $(MODIFIED_DIR)neuron/sinusynapses.c + #sinusynapses.c + -@mkdir -p $(dir $@) + $(SYNAPSE_TYPE_COMPILE) -o $@ $< + $(BUILD_DIR)neuron/synapses.o: $(MODIFIED_DIR)neuron/synapses.c #synapses.c -@mkdir -p $(dir $@) diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile 
b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile index a33a904b6b8..3903bd476ba 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile +++ b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile @@ -3,6 +3,7 @@ APP = $(notdir $(CURDIR)) OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c +SINUSYNAPSES = neuron/sinusynapses.c include ../neural_build.mk diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 520310b4b90..770e9c52fc3 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -207,6 +207,10 @@ static void neuron_impl_load_neuron_parameters( * n_neurons; // scale initial value, too + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { @@ -323,9 +327,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// neuron->syn_state[0].delta_w; + neuron->syn_state[0].delta_w; // neuron->syn_state[0].z_bar; - exc_input_values[0]; // record input input (signed) +// exc_input_values[0]; // record input input (signed) // z_t; // global_parameters->core_pop_rate; // neuron->psi; diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index ead4d1bf7a8..8ba59aae32c 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ 
b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -182,7 +182,7 @@ static void neuron_impl_load_neuron_parameters( // io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); // io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); // io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); -// io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); + io_printf(IO_BUF, "eta: %k \n\n", global_parameters->eta); for (index_t n = 0; n < n_neurons; n++) { @@ -284,12 +284,14 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[V_RECORDING_INDEX] = result; // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - global_parameters->target_V[target_ind]; +// global_parameters->target_V[target_ind]; + neuron->syn_state[0].delta_w; // Record Error recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = error; // Send error (learning signal) as packet with payload + // ToDo can't I just alter the global variable here? 
while (!spin1_send_mc_packet( key | neuron_index, bitsk(error), 1 )) { spin1_delay_us(1); diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h index 0adec47e1e6..20037f875c6 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h @@ -233,7 +233,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Change broadcasted value and state with probability // State - 0: idle, 1: storing, 2:stored-idle, 3:recall - if (timer % 200 == 0){ + if (timer % 200 == 0 && neuron_index == 2){ //todo check this isn't changing for every neuron if (store_recall_state == STATE_RECALL || store_recall_state == STATE_STORING){ store_recall_state = (store_recall_state + 1) % STATE_SHIFT; } @@ -317,30 +317,12 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); - // If during recall calculate error - if (neuron_index == 2 && store_recall_state == STATE_RECALL){ - ticks_for_mean += 1; - // Softmax of the exc and inh inputs representing 1 and 0 respectively - // may need to scale to stop huge numbers going in the exp - global_parameters->mean_0 += global_parameters->readout_V_0; - global_parameters->mean_1 += global_parameters->readout_V_1; - accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); - accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); - accum softmax_0 = exp_0 / (exp_1 + exp_0); - accum softmax_1 = exp_1 / (exp_1 + exp_0); - // What to do if log(0)? 
- if (stored_value){ - global_parameters->cross_entropy = -logk(softmax_1); - } - else{ - global_parameters->cross_entropy = -logk(softmax_0); - } - } // Reset values after recall if (store_recall_state == STATE_IDLE){ ticks_for_mean = 0; global_parameters->mean_0 == 0; global_parameters->mean_1 == 0; + //todo check if readout_V_0/1 need resetting too } if (neuron_index == 0){ @@ -370,18 +352,38 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[V_RECORDING_INDEX] = stored_value; // Switched to always broadcasting error but with packet - if (store_recall_state == STATE_RECALL){ - // Broadcast error - } - - } else if (neuron_index == 3){ // this is the deprecated - - // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not - if (global_parameters->cross_entropy < -0.7){ - // it's incorrect so change doing what you're doing or suppress synapses? + if (store_recall_state == STATE_RECALL){ //todo ensure this neuron id is correct + ticks_for_mean += 1; //todo is it a running error like this over recall? + // Softmax of the exc and inh inputs representing 1 and 0 respectively + // may need to scale to stop huge numbers going in the exp + global_parameters->mean_0 += global_parameters->readout_V_0; + global_parameters->mean_1 += global_parameters->readout_V_1; + accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); + accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); + accum softmax_0 = exp_0 / (exp_1 + exp_0); + accum softmax_1 = exp_1 / (exp_1 + exp_0); + // What to do if log(0)? 
+ if (stored_value){ + global_parameters->cross_entropy = -logk(softmax_1); + } + else{ + global_parameters->cross_entropy = -logk(softmax_0); + } + while (!spin1_send_mc_packet( + key | neuron_index, bitsk(error), 1 )) { + spin1_delay_us(1); + } } - timer++; // update this here, as needs to be done once per iteration over all the neurons + timer++; } +// else if (neuron_index == 3){ // this is the deprecated +// +// // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not +// if (global_parameters->cross_entropy < -0.7){ +// // it's incorrect so change doing what you're doing or suppress synapses? +// } +// timer++; // update this here, as needs to be done once per iteration over all the neurons +// } // Shape the existing input according to the included rule synapse_types_shape_input(synapse_type); diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 5739e0af664..f021106ecc1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -39,7 +39,7 @@ void neuron_model_set_global_neuron_params( use(params); local_eta = params->eta; - io_printf(IO_BUF, "local eta = %k", local_eta); + io_printf(IO_BUF, "local eta = %k\n", local_eta); // Does Nothing - no params } @@ -65,8 +65,8 @@ state_t neuron_model_state_update( input_t input_this_timestep = exc_input[0] + exc_input[1] + neuron->I_offset; - lif_neuron_closed_form( - neuron, neuron->V_membrane, input_this_timestep, B_t); + lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep, B_t); // If outside of the refractory period if (neuron->refract_timer <= 0) { @@ -89,7 +89,7 @@ state_t neuron_model_state_update( // 0.3k * (1.0k - psi_temp2) : 0.0k; - uint32_t total_synapses_per_neuron = 1; + uint32_t total_synapses_per_neuron = 1; //todo 
should this be fixed // neuron->psi = neuron->psi << 10; @@ -102,7 +102,6 @@ state_t neuron_model_state_update( // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ - // ****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** @@ -110,6 +109,14 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update // reset input (can't have more than one spike per timestep + if (!syn_ind){ + io_printf(IO_BUF, "total synapses = %u\n" + "z_bar_inp = %k - z_bar = %k\n" + "L = %k = l * w_fb = %k * %k\n", + total_synapses_per_neuron, + neuron->syn_state[syn_ind].z_bar_inp, neuron->syn_state[syn_ind].z_bar, + neuron->L, learning_signal, neuron -> w_fb); + } neuron->syn_state[syn_ind].z_bar_inp = 0; @@ -137,7 +144,11 @@ state_t neuron_model_state_update( // ****************************************************************** REAL this_dt_weight_change = -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w = this_dt_weight_change; + neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; + + +// io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", +// local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); } @@ -162,13 +173,17 @@ void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { } void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { - log_debug("V reset = %11.4k mv", neuron->V_reset); - log_debug("V rest = %11.4k mv", neuron->V_rest); + io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); + io_printf(IO_BUF, "V rest = %11.4k mv\n", 
neuron->V_rest); + + io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); + io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); + + io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); - log_debug("I offset = %11.4k nA", neuron->I_offset); - log_debug("R membrane = %11.4k Mohm", neuron->R_membrane); + io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); - log_debug("exp(-ms/(RC)) = %11.4k [.]", neuron->exp_TC); + io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); - log_debug("T refract = %u timesteps", neuron->T_refract); + io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); } diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index 0cc2f55b075..24980655ba1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -2,6 +2,9 @@ #include +extern REAL learning_signal; +REAL local_eta; + // simple Leaky I&F ODE static inline void _lif_neuron_closed_form( neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { @@ -16,6 +19,9 @@ void neuron_model_set_global_neuron_params( global_neuron_params_pointer_t params) { use(params); + local_eta = params->eta; + io_printf(IO_BUF, "local eta = %k\n", local_eta); + // Does Nothing - no params } @@ -52,6 +58,64 @@ state_t neuron_model_state_update( // countdown refractory timer neuron->refract_timer -= 1; } + + uint32_t total_synapses_per_neuron = 1; //todo should this be fixed + + neuron->L = learning_signal * neuron->w_fb; + + // All operations now need doing once per eprop synapse + for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ + // ****************************************************************** + // Low-pass filter incoming spike train + // ****************************************************************** + 
neuron->syn_state[syn_ind].z_bar = + neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + // reset input (can't have more than one spike per timestep + if (!syn_ind){// || neuron->syn_state[syn_ind].z_bar_inp){ + io_printf(IO_BUF, "total synapses = %u - syn_ind = %u\n" + "z_bar_inp = %k - z_bar = %k\n" + "L = %k = l * w_fb = %k * %k\n" + , + total_synapses_per_neuron, + syn_ind, + neuron->syn_state[syn_ind].z_bar_inp, + neuron->syn_state[syn_ind].z_bar, + neuron->L, learning_signal, neuron -> w_fb + ); + } + neuron->syn_state[syn_ind].z_bar_inp = 0; + + + // ****************************************************************** + // Update eligibility vector + // ****************************************************************** +// neuron->syn_state[syn_ind].el_a = +// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + +// (rho - neuron->psi * neuron->beta) * +// neuron->syn_state[syn_ind].el_a; + + + // ****************************************************************** + // Update eligibility trace + // ****************************************************************** +// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +// neuron->beta * neuron->syn_state[syn_ind].el_a); +// +// neuron->syn_state[syn_ind].e_bar = +// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar +// + (1 - neuron->exp_TC) * temp_elig_trace; + + // ****************************************************************** + // Update cached total weight change + // ****************************************************************** + REAL this_dt_weight_change = +// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + -local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; + + } + return neuron->V_membrane; } @@ -82,6 +146,12 @@ void 
neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); + + io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); + + io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); + +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); // io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); // io_printf(IO_BUF, "time_to_spike_ticks = %k \n", // neuron->time_to_spike_ticks); diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index c6ed61cd24d..fa9684ca597 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -4,6 +4,17 @@ #include "neuron_model.h" #include "random.h" +#define SYNAPSES_PER_NEURON 250 + + +typedef struct eprop_syn_state_t { + REAL delta_w; // weight change to apply + REAL z_bar_inp; + REAL z_bar; // low-pass filtered spike train +// REAL el_a; // adaptive component of eligibility vector +// REAL e_bar; // low-pass filtered eligibility trace +}eprop_syn_state_t; + ///////////////////////////////////////////////////////////// // definition for LIF neuron parameters typedef struct neuron_t { @@ -33,6 +44,12 @@ typedef struct neuron_t { // refractory time of neuron [timesteps] int32_t T_refract; + REAL L; // learning signal + REAL w_fb; // feedback weight + + // array of synaptic states - peak fan-in of >250 for this case + eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; + // Poisson compartment params // REAL mean_isi_ticks; @@ -55,6 +72,7 @@ typedef struct global_neuron_params_t { // REAL ticks_per_second; // REAL readout_V; REAL target_V[1024]; + REAL eta; } global_neuron_params_t; #endif // _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ diff --git 
a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 005726b2326..f9497e28ac4 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -30,6 +30,7 @@ #include #include #include +#include "models/neuron_model_eprop_adaptive_impl.h" extern neuron_pointer_t neuron_array; @@ -328,9 +329,17 @@ bool synapse_dynamics_process_plastic_synapses( neuron_pointer_t neuron = &neuron_array[ synapse_row_sparse_index(synaptic_word, synapse_type_mask) ]; - neuron->syn_state[syn_ind_from_delay].delta_w; +// neuron->syn_state[syn_ind_from_delay].delta_w; + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", + synapse_row_sparse_index(synaptic_word, synapse_type_mask), + syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); + + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in !!!! 
+ + final_state = neuron->syn_state[syn_ind_from_delay].delta_w + current_state; + io_printf(IO_BUF, "current %u + d_weight %u = new %u", current_state, neuron->syn_state[syn_ind_from_delay].delta_w, final_state); // // Update the synapse state // final_state_t final_state = plasticity_update_synapse( // time, last_pre_time, last_pre_trace, event_history->prev_trace, diff --git a/neural_modelling/src/neuron/sinusynapses.c b/neural_modelling/src/neuron/sinusynapses.c new file mode 100644 index 00000000000..185186a0706 --- /dev/null +++ b/neural_modelling/src/neuron/sinusynapses.c @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "synapses.h" +#include "spike_processing.h" +#include "neuron.h" +#include "plasticity/synapse_dynamics.h" +#include +#include +#include +#include +//#include "models/neuron_model_eprop_adaptive_impl.h" +#include "models/neuron_model_sinusoid_readout_impl.h" + +//! if using profiler import profiler tags +#ifdef PROFILER_ENABLED + #include "profile_tags.h" +#endif + +// Globals required for synapse benchmarking to work. 
+uint32_t num_fixed_pre_synaptic_events = 0; +extern neuron_pointer_t neuron_array; + +uint32_t RECURRENT_SYNAPSE_OFFSET = 100; + +// The number of neurons +static uint32_t n_neurons; + +// The number of synapse types +static uint32_t n_synapse_types; + +// Ring buffers to handle delays between synapses and neurons +static weight_t *ring_buffers; + +// Amount to left shift the ring buffer by to make it an input +static uint32_t *ring_buffer_to_input_left_shifts; + +// Count of the number of times the ring buffers have saturated +static uint32_t saturation_count = 0; + +static uint32_t synapse_type_index_bits; +static uint32_t synapse_type_index_mask; +static uint32_t synapse_index_bits; +static uint32_t synapse_index_mask; +static uint32_t synapse_type_bits; +static uint32_t synapse_type_mask; + + +/* PRIVATE FUNCTIONS */ + +#if LOG_LEVEL >= LOG_DEBUG +static const char *get_type_char(uint32_t synapse_type) { + return neuron_get_synapse_type_char(synapse_type); +} +#endif // LOG_LEVEL >= LOG_DEBUG + +static inline void print_synaptic_row(synaptic_row_t synaptic_row) { +#if LOG_LEVEL >= LOG_DEBUG + log_debug("Synaptic row, at address %08x Num plastic words:%u\n", + (uint32_t) synaptic_row, synapse_row_plastic_size(synaptic_row)); + if (synaptic_row == NULL) { + return; + } + log_debug("----------------------------------------\n"); + + // Get details of fixed region + address_t fixed_region_address = synapse_row_fixed_region(synaptic_row); + address_t fixed_synapses = + synapse_row_fixed_weight_controls(fixed_region_address); + size_t n_fixed_synapses = + synapse_row_num_fixed_synapses(fixed_region_address); + log_debug("Fixed region %u fixed synapses (%u plastic control words):\n", + n_fixed_synapses, + synapse_row_num_plastic_controls(fixed_region_address)); + + for (uint32_t i = 0; i < n_fixed_synapses; i++) { + uint32_t synapse = fixed_synapses[i]; + uint32_t synapse_type = synapse_row_sparse_type( + synapse, synapse_index_bits, synapse_type_mask); + + 
log_debug("%08x [%3d: (w: %5u (=", + synapse, i, synapse_row_sparse_weight(synapse)); + synapses_print_weight(synapse_row_sparse_weight(synapse), + ring_buffer_to_input_left_shifts[synapse_type]); + log_debug( + "nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", + synapse_row_sparse_delay(synapse, synapse_type_index_bits), + get_type_char(synapse_type), + synapse_row_sparse_index(synapse, synapse_index_mask), + SYNAPSE_DELAY_MASK, synapse_type_index_bits); + } + + // If there's a plastic region + if (synapse_row_plastic_size(synaptic_row) > 0) { + log_debug("----------------------------------------\n"); + address_t plastic_region_address = + synapse_row_plastic_region(synaptic_row); + synapse_dynamics_print_plastic_synapses( + plastic_region_address, fixed_region_address, + ring_buffer_to_input_left_shifts); + } + + log_debug("----------------------------------------\n"); +#else + use(synaptic_row); +#endif // LOG_LEVEL >= LOG_DEBUG +} + +static inline void print_ring_buffers(uint32_t time) { +#if LOG_LEVEL >= LOG_DEBUG + io_printf(IO_BUF, "Ring Buffer at %u\n", time); + io_printf(IO_BUF, "----------------------------------------\n"); + for (uint32_t n = 0; n < n_neurons; n++) { + for (uint32_t t = 0; t < n_synapse_types; t++) { + const char *type_string = get_type_char(t); + bool empty = true; + for (uint32_t d = 0; d < (1 << SYNAPSE_DELAY_BITS); d++) { + empty = empty && (ring_buffers[ + synapses_get_ring_buffer_index(d + time, t, n, + synapse_type_index_bits, synapse_index_bits)] == 0); + } + if (!empty) { + io_printf(IO_BUF, "%3d(%s):", n, type_string); + for (uint32_t d = 0; d < (1 << SYNAPSE_DELAY_BITS); d++) { + log_debug(" "); + uint32_t ring_buffer_index = synapses_get_ring_buffer_index( + d + time, t, n, + synapse_type_index_bits, synapse_index_bits); + synapses_print_weight(ring_buffers[ring_buffer_index], + ring_buffer_to_input_left_shifts[t]); + } + io_printf(IO_BUF, "\n"); + } + } + } + io_printf(IO_BUF, "----------------------------------------\n"); +#else 
+ use(time); +#endif // LOG_LEVEL >= LOG_DEBUG +} + +static inline void print_inputs(void) { +#if LOG_LEVEL >= LOG_DEBUG + log_debug("Inputs\n"); + neuron_print_inputs(); +#endif // LOG_LEVEL >= LOG_DEBUG +} + + +// This is the "inner loop" of the neural simulation. +// Every spike event could cause up to 256 different weights to +// be put into the ring buffer. +static inline void process_fixed_synapses( + address_t fixed_region_address, uint32_t time) { + register uint32_t *synaptic_words = + synapse_row_fixed_weight_controls(fixed_region_address); + register uint32_t fixed_synapse = + synapse_row_num_fixed_synapses(fixed_region_address); + + num_fixed_pre_synaptic_events += fixed_synapse; + + for (; fixed_synapse > 0; fixed_synapse--) { + // Get the next 32 bit word from the synaptic_row + // (should auto increment pointer in single instruction) + uint32_t synaptic_word = *synaptic_words++; + + // Extract components from this word + uint32_t delay = 1; + uint32_t syn_ind_from_delay = + synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); + + uint32_t combined_synapse_neuron_index = synapse_row_sparse_type_index( + synaptic_word, synapse_type_index_mask); + int32_t weight = synapse_row_sparse_weight(synaptic_word); + + int32_t neuron_ind = synapse_row_sparse_index(synaptic_word, synapse_index_mask); + + uint32_t type = synapse_row_sparse_type(synaptic_word, synapse_index_bits, synapse_type_mask); + + // For low pass filter of incoming spike train on this synapse + // Use postsynaptic neuron index to access neuron struct, + + if (type==1){ + // this is a recurrent synapse: add 100 to index to correct array location + syn_ind_from_delay =+ RECURRENT_SYNAPSE_OFFSET; + } + + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); + + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // 
!!!! Check what units this is in !!!! + + io_printf(IO_BUF, "signed w: %d \n", weight); + + // Convert into ring buffer offset + uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + delay + time, combined_synapse_neuron_index, + synapse_type_index_bits); + + // Add weight to current ring buffer value + int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // switch to saturated arithmetic to avoid complicated saturation check, will it check saturation at both ends? + + // If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) + // **NOTE** 0x10000 can be expressed as an ARM literal, + // but 0xFFFF cannot. Therefore, we use (0x10000 - 1) + // to obtain this value +// uint32_t sat_test = accumulation & 0x10000; +// if (sat_test) { +// accumulation = sat_test - 1; +// saturation_count++; +// } + + // Store saturated value back in ring-buffer + ring_buffers[ring_buffer_index] = accumulation; + } +} + +//! private method for doing output debug data on the synapses +static inline void print_synapse_parameters(void) { +//! only if the models are compiled in debug mode will this method contain +//! said lines. 
+#if LOG_LEVEL >= LOG_DEBUG + // again neuron_synapse_shaping_params has moved to implementation + neuron_print_synapse_parameters(); +#endif // LOG_LEVEL >= LOG_DEBUG +} + +/* INTERFACE FUNCTIONS */ +bool synapses_initialise( + address_t synapse_params_address, address_t direct_matrix_address, + uint32_t n_neurons_value, uint32_t n_synapse_types_value, + uint32_t **ring_buffer_to_input_buffer_left_shifts, + address_t *direct_synapses_address) { + log_debug("synapses_initialise: starting"); + n_neurons = n_neurons_value; + n_synapse_types = n_synapse_types_value; + + // Set up ring buffer left shifts + ring_buffer_to_input_left_shifts = + spin1_malloc(n_synapse_types * sizeof(uint32_t)); + if (ring_buffer_to_input_left_shifts == NULL) { + log_error("Not enough memory to allocate ring buffer"); + return false; + } + spin1_memcpy( + ring_buffer_to_input_left_shifts, synapse_params_address, + n_synapse_types * sizeof(uint32_t)); + *ring_buffer_to_input_buffer_left_shifts = + ring_buffer_to_input_left_shifts; + + // Work out the positions of the direct and indirect synaptic matrices + // and copy the direct matrix to DTCM + uint32_t direct_matrix_size = direct_matrix_address[0]; + log_debug("Direct matrix malloc size is %d", direct_matrix_size); + + if (direct_matrix_size != 0) { + *direct_synapses_address = spin1_malloc(direct_matrix_size); + if (*direct_synapses_address == NULL) { + log_error("Not enough memory to allocate direct matrix"); + return false; + } + log_debug("Copying %u bytes of direct synapses to 0x%08x", + direct_matrix_size, *direct_synapses_address); + spin1_memcpy( + *direct_synapses_address, &direct_matrix_address[1], + direct_matrix_size); + } + + log_debug("synapses_initialise: completed successfully"); + print_synapse_parameters(); + + uint32_t n_neurons_power_2 = n_neurons; + uint32_t log_n_neurons = 1; + if (n_neurons != 1) { + if (!is_power_of_2(n_neurons)) { + n_neurons_power_2 = next_power_of_2(n_neurons); + } + log_n_neurons = 
ilog_2(n_neurons_power_2); + } + + uint32_t n_synapse_types_power_2 = n_synapse_types; + if (!is_power_of_2(n_synapse_types)) { + n_synapse_types_power_2 = next_power_of_2(n_synapse_types); + } + uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); + + uint32_t n_ring_buffer_bits = + log_n_neurons + log_n_synapse_types + 1; // SYNAPSE_DELAY_BITS; Fix at delays of 1 timestep, as this means we get memory back, and we don't need delays to prove the concept + uint32_t ring_buffer_size = 1 << (n_ring_buffer_bits); + + ring_buffers = spin1_malloc(ring_buffer_size * sizeof(weight_t)); + if (ring_buffers == NULL) { + log_error("Could not allocate %u entries for ring buffers", + ring_buffer_size); + } + for (uint32_t i = 0; i < ring_buffer_size; i++) { + ring_buffers[i] = 0; + } + + synapse_type_index_bits = log_n_neurons + log_n_synapse_types; + synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; + synapse_index_bits = log_n_neurons; + synapse_index_mask = (1 << synapse_index_bits) - 1; + synapse_type_bits = log_n_synapse_types; + synapse_type_mask = (1 << log_n_synapse_types) - 1; + return true; +} + +void synapses_do_timestep_update(timer_t time) { + print_ring_buffers(time); + + // Disable interrupts to stop DMAs interfering with the ring buffers + uint32_t state = spin1_irq_disable(); + + // Transfer the input from the ring buffers into the input buffers + for (uint32_t neuron_index = 0; neuron_index < n_neurons; + neuron_index++) { + // Loop through all synapse types + for (uint32_t synapse_type_index = 0; + synapse_type_index < n_synapse_types; synapse_type_index++) { + // Get index in the ring buffers for the current time slot for + // this synapse type and neuron + uint32_t ring_buffer_index = synapses_get_ring_buffer_index( + time, synapse_type_index, neuron_index, + synapse_type_index_bits, synapse_index_bits); + + // Convert ring-buffer entry to input and add on to correct + // input for this synapse type and neuron + neuron_add_inputs( + 
synapse_type_index, neuron_index, + synapses_convert_weight_to_input( + ring_buffers[ring_buffer_index], + ring_buffer_to_input_left_shifts[synapse_type_index])); + + // Clear ring buffer + ring_buffers[ring_buffer_index] = 0; + } + } + + print_inputs(); + + // Re-enable the interrupts + spin1_mode_restore(state); +} + +bool synapses_process_synaptic_row( + uint32_t time, synaptic_row_t row, bool write, uint32_t process_id) { + print_synaptic_row(row); + + // Get address of non-plastic region from row + address_t fixed_region_address = synapse_row_fixed_region(row); + io_printf(IO_BUF, "Processing Spike...\n"); + // **TODO** multiple optimised synaptic row formats + //if (plastic_tag(row) == 0) { + // If this row has a plastic region + if (synapse_row_plastic_size(row) > 0) { + // Get region's address + address_t plastic_region_address = synapse_row_plastic_region(row); + + // Process any plastic synapses + profiler_write_entry_disable_fiq( + PROFILER_ENTER | PROFILER_PROCESS_PLASTIC_SYNAPSES); + if (!synapse_dynamics_process_plastic_synapses(plastic_region_address, + fixed_region_address, ring_buffers, time)) { + return false; + } + profiler_write_entry_disable_fiq( + PROFILER_EXIT | PROFILER_PROCESS_PLASTIC_SYNAPSES); + + // Perform DMA write back + if (write) { + spike_processing_finish_write(process_id); + } + } + + // Process any fixed synapses + // **NOTE** this is done after initiating DMA in an attempt + // to hide cost of DMA behind this loop to improve the chance + // that the DMA controller is ready to read next synaptic row afterwards + process_fixed_synapses(fixed_region_address, time); + //} + return true; +} + +//! \brief returns the number of times the synapses have saturated their +//! weights. +//! \return the number of times the synapses have saturated. +uint32_t synapses_get_saturation_count(void) { + return saturation_count; +} + +//! \brief returns the counters for plastic and fixed pre synaptic events +//! 
based on (if the model was compiled with SYNAPSE_BENCHMARK parameter) or +//! returns 0 +//! \return the counter for plastic and fixed pre synaptic events or 0 +uint32_t synapses_get_pre_synaptic_events(void) { + return (num_fixed_pre_synaptic_events + + synapse_dynamics_get_plastic_pre_synaptic_events()); +} + +//! \brief Searches the synaptic row for the the connection with the +//! specified post-synaptic ID +//! \param[in] id: the (core-local) ID of the neuron to search for in the +//! synaptic row +//! \param[in] row: the core-local address of the synaptic row +//! \param[out] sp_data: the address of a struct through which to return +//! weight, delay information +//! \return bool: was the search successful? +bool find_static_neuron_with_id( + uint32_t id, address_t row, structural_plasticity_data_t *sp_data) { + address_t fixed_region = synapse_row_fixed_region(row); + int32_t fixed_synapse = synapse_row_num_fixed_synapses(fixed_region); + uint32_t *synaptic_words = + synapse_row_fixed_weight_controls(fixed_region); + + uint32_t weight, delay; + bool found = false; + + // Loop through plastic synapses + for (; fixed_synapse > 0; fixed_synapse--) { + // Get next control word (auto incrementing) + // Check if index is the one I'm looking for + uint32_t synaptic_word = *synaptic_words++; + weight = synapse_row_sparse_weight(synaptic_word); + delay = synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); + if (synapse_row_sparse_index(synaptic_word, synapse_index_mask) == id) { + found = true; + break; + } + } + + // Making assumptions explicit + assert(synapse_row_num_plastic_controls(fixed_region) == 0); + + if (found) { + sp_data->weight = weight; + sp_data->offset = + synapse_row_num_fixed_synapses(fixed_region) - fixed_synapse; + sp_data->delay = delay; + return true; + } else { + sp_data->weight = -1; + sp_data->offset = -1; + sp_data->delay = -1; + return false; + } +} + +//! 
\brief Remove the entry at the specified offset in the synaptic row +//! \param[in] offset: the offset in the row at which to remove the entry +//! \param[in] row: the core-local address of the synaptic row +//! \return bool: was the removal successful? +bool remove_static_neuron_at_offset(uint32_t offset, address_t row) { + address_t fixed_region = synapse_row_fixed_region(row); + int32_t fixed_synapse = synapse_row_num_fixed_synapses(fixed_region); + uint32_t *synaptic_words = + synapse_row_fixed_weight_controls(fixed_region); + + // Delete control word at offset (contains weight) + synaptic_words[offset] = synaptic_words[fixed_synapse - 1]; + + // Decrement FF + fixed_region[0]--; + return true; +} + +//! packing all of the information into the required static control word +static inline uint32_t fixed_synapse_convert( + uint32_t id, uint32_t weight, uint32_t delay, uint32_t type) { + uint32_t new_synapse = weight << (32 - SYNAPSE_WEIGHT_BITS); + new_synapse |= + (delay & ((1 << SYNAPSE_DELAY_BITS) - 1)) << + synapse_type_index_bits; + new_synapse |= + (type & ((1 << synapse_type_bits) - 1)) << synapse_index_bits; + new_synapse |= id & ((1 << synapse_type_index_bits) - 1); + return new_synapse; +} + +//! \brief Add a static entry in the synaptic row +//! \param[in] id: the (core-local) ID of the post-synaptic neuron to be added +//! \param[in] row: the core-local address of the synaptic row +//! \param[in] weight: the initial weight associated with the connection +//! \param[in] delay: the delay associated with the connection +//! \param[in] type: the type of the connection (e.g. inhibitory) +//! \return bool: was the addition successful? 
+bool add_static_neuron_with_id( + uint32_t id, address_t row, uint32_t weight, uint32_t delay, uint32_t type) { + address_t fixed_region = synapse_row_fixed_region(row); + int32_t fixed_synapse = synapse_row_num_fixed_synapses(fixed_region); + uint32_t *synaptic_words = + synapse_row_fixed_weight_controls(fixed_region); + uint32_t new_synapse = fixed_synapse_convert(id, weight, delay, type); + + // Add control word at offset + synaptic_words[fixed_synapse] = new_synapse; + + // Increment FF + fixed_region[0]++; + return true; +} diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 6383ab13969..9ac9b6f6aa2 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -248,7 +248,7 @@ static void multicast_packet_wpayload_received_callback(uint key, uint payload){ learning_signal = kbits(payload); // Print payload to test transmission of error - io_printf(IO_BUF, "payload: %k\n", learning_signal); +// io_printf(IO_BUF, "payload: %k\n", learning_signal); // Assign learning signal to global memory diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 0a7035279b8..5a34426b080 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -24,6 +24,7 @@ #include #include #include "models/neuron_model_eprop_adaptive_impl.h" +//#include "models/neuron_model_sinusoid_readout_impl.h" //! 
if using profiler import profiler tags #ifdef PROFILER_ENABLED @@ -198,10 +199,10 @@ static inline void process_fixed_synapses( syn_ind_from_delay =+ RECURRENT_SYNAPSE_OFFSET; } - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u \n", neuron_ind, syn_ind_from_delay, type); - neuron_pointer_t neuron = &neuron_array[neuron_ind]; + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in !!!! io_printf(IO_BUF, "signed w: %d \n", weight); diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index 99830cd095d..77a39e29a75 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -13,7 +13,8 @@ class SinusoidReadout(AbstractPyNNNeuronModelStandard): """ @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - "isyn_inh2"}) + "isyn_inh2", + "l", "w_fb", "eta"}) def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, @@ -22,11 +23,16 @@ def __init__( tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, # mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, - target_data =[]): + target_data =[], + + # Learning signal and weight update constants + l=0, w_fb=0.5, eta=1.0): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeakyIntegrateAndFireSinusoidReadout( - v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, target_data) + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, target_data, + # Learning signal params + l, w_fb, eta) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index facaf8cab36..402cbab4d7f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -319,11 +319,12 @@ def update_values(self, values, parameters, state_variables): _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, l, __w_fb) = values # Not sure this will work with the new array of synapse!!! + # todo check alignment on this # Copy the changed data only state_variables[V] = v state_variables[COUNT_REFRAC] = count_refrac - state_vairables[PSI] = psi + state_variables[PSI] = psi state_variables[BIG_B] = big_b state_variables[SMALL_B] = small_b @@ -429,7 +430,7 @@ def beta(self, new_value): @property def w_fb(self): - return self.__w_fb# + return self.__w_fb @w_fb.setter def w_fb(self, new_value): diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 5702df3abfc..b9d5fe1db73 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -4,6 +4,9 @@ from pacman.executor.injection_decorator import inject_items from .abstract_neuron_model import AbstractNeuronModel +# constants +SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state + MICROSECONDS_PER_SECOND = 1000000.0 MICROSECONDS_PER_MILLISECOND = 1000.0 @@ -26,6 +29,9 @@ # RATE_AT_LAST_SETTING = "rate_at_last_setting" # RATE_UPDATE_THRESHOLD = "rate_update_threshold" TARGET_DATA = "target_data" +# Learning signal +L = "learning_signal" +W_FB = "feedback_weight" UNITS = { V: 'mV', @@ -47,29 +53,54 @@ class 
NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): "_i_offset", "_v_reset", "_tau_refrac", - "_target_data" + "_target_data", + + # learning signal + "__l", + "__w_fb", + "__eta" ] def __init__( self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, # mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, - target_data): + target_data, + l, + w_fb, + eta): + + data_types = [ + DataType.S1615, # v + DataType.S1615, # v_rest + DataType.S1615, # r_membrane (= tau_m / cm) + DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.S1615, # i_offset + DataType.INT32, # count_refrac + DataType.S1615, # v_reset + DataType.INT32, # tau_refrac + # Learning signal + DataType.S1615, # L + DataType.S1615 # w_fb + ] + + # Synapse states - always initialise to zero + eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) + DataType.S1615, # delta_w + DataType.S1615, # z_bar_old + DataType.S1615, # z_bar + # DataType.S1615, # ep_a + # DataType.S1615, # e_bar + ] + # Extend to include fan-in for each neuron + data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) global_data_types=[] global_data_types.extend([DataType.S1615 for i in range(1024)]) + global_data_types.extend([DataType.S1615]) # eta (learning rate) super(NeuronModelLeakyIntegrateAndFireSinusoidReadout, self).__init__( - data_types= [ - DataType.S1615, # v - DataType.S1615, # v_rest - DataType.S1615, # r_membrane (= tau_m / cm) - DataType.S1615, # exp_tc (= e^(-ts / tau_m)) - DataType.S1615, # i_offset - DataType.INT32, # count_refrac - DataType.S1615, # v_reset - DataType.INT32, # tau_refrac - ], + data_types= data_types, global_data_types=global_data_types ) @@ -86,6 +117,12 @@ def __init__( self._tau_refrac = tau_refrac self._target_data = target_data + # learning signal + self.__l = l + self.__w_fb = w_fb + + self.__eta = eta + @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): # A bit of a guess @@ -100,6 +137,9 @@ def 
add_parameters(self, parameters): parameters[V_RESET] = self._v_reset parameters[TAU_REFRAC] = self._tau_refrac parameters[TARGET_DATA] = 0.0 + + #learning params + parameters[W_FB] = self.__w_fb @overrides(AbstractNeuronModel.add_state_variables) @@ -107,6 +147,9 @@ def add_state_variables(self, state_variables): state_variables[V] = self._v_init state_variables[COUNT_REFRAC] = 0 + #learning params + state_variables[L] = self.__l + @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -121,7 +164,7 @@ def has_variable(self, variable): def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data - return [state_variables[V], + values = [state_variables[V], parameters[V_REST], parameters[TAU_M] / parameters[CM], parameters[TAU_M].apply_operation( @@ -130,18 +173,36 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): parameters[V_RESET], parameters[TAU_REFRAC].apply_operation( operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + + state_variables[L], + parameters[W_FB] ] + # create synaptic state - init all state to zero + eprop_syn_init = [0, # delta w + 0, # z_bar_inp + 0]#, # z_bar + # 0, # el_a + # 0] # e_bar + # extend to appropriate fan-in + values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + + return values + @overrides(AbstractNeuronModel.update_values) def update_values(self, values, parameters, state_variables): # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac) = values + _v_reset, _tau_refrac, + l, __w_fb) = values # Not sure this will work with the new array of synapse!!! 
+ # todo check alignment on this # Copy the changed data only state_variables[V] = v + state_variables[L] = l + # Global params @inject_items({"machine_time_step": "MachineTimeStep"}) @@ -151,6 +212,7 @@ def get_global_values(self, machine_time_step): vals = [] vals.extend(self._target_data) + vals.extend([self.__eta]) return vals @property @@ -217,3 +279,11 @@ def tau_refrac(self): def tau_refrac(self, tau_refrac): self._tau_refrac = tau_refrac + @property + def w_fb(self): + return self.__w_fb + + @w_fb.setter + def w_fb(self, new_value): + self.__w_fb = new_value + From 18a7ebbe95fa1d893eb3429f16a14d2bd1670ddb Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Mon, 2 Mar 2020 20:20:17 +0000 Subject: [PATCH 029/123] Add plastic weight update --- .../Makefile | 28 +++ .../makefiles/neuron/neural_build.mk | 2 +- .../neuron_impl_eprop_adaptive.h | 7 +- .../models/neuron_model_eprop_adaptive_impl.c | 2 +- .../synapse_dynamics_eprop_adaptive_impl.c | 117 +++++++++---- .../timing_dependence/timing_eprop_impl.c | 35 ++++ .../timing_dependence/timing_eprop_impl.h | 160 ++++++++++++++++++ .../weight_dependence/weight_eprop_reg_impl.c | 65 +++++++ .../weight_dependence/weight_eprop_reg_impl.h | 112 ++++++++++++ neural_modelling/src/neuron/synapses.c | 22 +-- .../stdp/timing_dependence/__init__.py | 4 +- .../timing_dependence_eprop.py | 121 +++++++++++++ .../stdp/weight_dependence/__init__.py | 3 +- .../weight_dependence_eprop_reg.py | 91 ++++++++++ .../synapse_dynamics/synapse_dynamics_stdp.py | 4 +- .../pyNN/models/neuron/synaptic_manager.py | 6 + 16 files changed, 713 insertions(+), 66 deletions(-) create mode 100644 neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile create mode 100644 neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c create mode 100644 neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h create mode 100644 
neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c create mode 100644 neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h create mode 100644 spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py create mode 100644 spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py diff --git a/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile new file mode 100644 index 00000000000..f5e394f887d --- /dev/null +++ b/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile @@ -0,0 +1,28 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +APP = $(notdir $(CURDIR)) + +OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_eprop_adaptive_impl.c +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_eprop_adaptive.h + +SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c + +TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c +TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h +WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h + +include ../neural_build.mk \ No newline at end of file diff --git a/neural_modelling/makefiles/neuron/neural_build.mk b/neural_modelling/makefiles/neuron/neural_build.mk index 77140cacca2..fe5a9192a02 100644 --- a/neural_modelling/makefiles/neuron/neural_build.mk +++ b/neural_modelling/makefiles/neuron/neural_build.mk @@ -234,7 +234,7 @@ $(BUILD_DIR)neuron/population_table/population_table_binary_search_impl.o: $(MOD #STDP Build rules If and only if STDP used ifeq ($(STDP_ENABLED), 1) - STDP_INCLUDES:= -include $(SYNAPSE_TYPE_H) -include $(WEIGHT_DEPENDENCE_H) -include $(TIMING_DEPENDENCE_H) + STDP_INCLUDES:= -include $(WEIGHT_DEPENDENCE_H) -include $(TIMING_DEPENDENCE_H) STDP_COMPILE = $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -DSTDP_ENABLED=$(STDP_ENABLED) -DSYNGEN_ENABLED=$(SYNGEN_ENABLED) $(STDP_INCLUDES) $(SYNAPSE_DYNAMICS_O): $(SYNAPSE_DYNAMICS_C) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 520310b4b90..ea35ab2a13d 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -290,10 +290,11 @@ static bool neuron_impl_do_timestep_update(index_t 
neuron_index, // Record B recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = // B_t; // neuron->B; +// neuron->syn_state[0].z_bar; // global_parameters->core_target_rate; // neuron->syn_state[0].e_bar; // neuron->syn_state[0].el_a; - exc_input_values[1]; // record recurrent input (signed) + exc_input_values[0]; // record input input (signed) // learning_signal * neuron->w_fb; // update neuron parameters @@ -323,9 +324,9 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// neuron->syn_state[0].delta_w; + neuron->syn_state[0].delta_w; // neuron->syn_state[0].z_bar; - exc_input_values[0]; // record input input (signed) +// exc_input_values[0]; // record input input (signed) // z_t; // global_parameters->core_pop_rate; // neuron->psi; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 5739e0af664..bac02bef795 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -137,7 +137,7 @@ state_t neuron_model_state_update( // ****************************************************************** REAL this_dt_weight_change = -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w = this_dt_weight_change; + neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 005726b2326..104279f48d5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -32,6 +32,9 @@ #include +#include +#include + extern neuron_pointer_t neuron_array; 
static uint32_t synapse_type_index_bits; @@ -73,6 +76,9 @@ uint32_t plastic_saturation_count = 0; #define SYNAPSE_AXONAL_DELAY_MASK \ ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1) + +uint32_t RECURRENT_SYNAPSE_OFFSET = 100; + //--------------------------------------- // Structures //--------------------------------------- @@ -269,6 +275,34 @@ address_t synapse_dynamics_initialise( return weight_result; } + +static inline final_state_t eprop_plasticity_update(update_state_t current_state, + REAL delta_w){ + + // Test weight change + // delta_w = -0.1k; + + + // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? + int16_t delta_w_int = bitsk(delta_w) >> 1; // THIS NEEDS UPDATING TO APPROPRIATE SCALING +// int16_t delta_w_int = (int) delta_w; // >> 15; + + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d\n", + delta_w, delta_w_int); + + if (delta_w_int <= 0){ + current_state = weight_one_term_apply_depression(current_state, delta_w_int); + } else { + current_state = weight_one_term_apply_potentiation(current_state, delta_w_int); + } + + // Return final synaptic word and weight + return synapse_structure_get_final_state(current_state); +} + + + + bool synapse_dynamics_process_plastic_synapses( address_t plastic_region_address, address_t fixed_region_address, weight_t *ring_buffers, uint32_t time) { @@ -283,19 +317,9 @@ bool synapse_dynamics_process_plastic_synapses( num_plastic_pre_synaptic_events += plastic_synapse; -// // Get event history from synaptic row -// pre_event_history_t *event_history = -// plastic_event_history(plastic_region_address); -// -// // Get last pre-synaptic event from event history -// const uint32_t last_pre_time = event_history->prev_time; -// const pre_trace_t last_pre_trace = event_history->prev_trace; -// -// // Update pre-synaptic trace -// log_debug("Adding pre-synaptic event to trace at time:%u", time); -// event_history->prev_time = time; -// event_history->prev_trace = -// timing_add_pre_spike(time, last_pre_time, 
last_pre_trace); + // Could maybe have a single z_bar for the entire synaptic row and update it once here for all synaptic words? + + // Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { @@ -307,12 +331,12 @@ bool synapse_dynamics_process_plastic_synapses( // 16-bits of 32-bit fixed synapse so same functions can be used // uint32_t delay_axonal = sparse_axonal_delay(control_word); - uint32_t delay = 1; + uint32_t delay = 1.0k; uint32_t syn_ind_from_delay = - synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits); + synapse_row_sparse_delay(control_word, synapse_type_index_bits); - uint32_t delay_dendritic = synapse_row_sparse_delay( - control_word, synapse_type_index_bits); +// uint32_t delay_dendritic = synapse_row_sparse_delay( +// control_word, synapse_type_index_bits); uint32_t type = synapse_row_sparse_type( control_word, synapse_index_bits, synapse_type_mask); uint32_t index = @@ -320,16 +344,40 @@ bool synapse_dynamics_process_plastic_synapses( uint32_t type_index = synapse_row_sparse_type_index( control_word, synapse_type_index_mask); + + int32_t neuron_ind = synapse_row_sparse_index(control_word, synapse_index_mask); + + // For low pass filter of incoming spike train on this synapse + // Use postsynaptic neuron index to access neuron struct, + + if (type==1){ + // this is a recurrent synapse: add 100 to index to correct array location + syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; + } + + + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! 
+ + // Create update state from the plastic synaptic word update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); - // Access weight change from synaptic state in DTCM - neuron_pointer_t neuron = &neuron_array[ - synapse_row_sparse_index(synaptic_word, synapse_type_mask) - ]; - neuron->syn_state[syn_ind_from_delay].delta_w; + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", + neuron_ind, syn_ind_from_delay, type, + current_state.initial_weight, + neuron->syn_state[syn_ind_from_delay].delta_w); + + + // Perform weight update: + // Go through typical weight update process to clip to limits + final_state_t final_state = eprop_plasticity_update(current_state, + neuron->syn_state[syn_ind_from_delay].delta_w); + + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; // // Update the synapse state // final_state_t final_state = plasticity_update_synapse( @@ -338,28 +386,23 @@ bool synapse_dynamics_process_plastic_synapses( // &post_event_history[index]); - // Access and apply weight change from synaptic state array - // Use neuron id to index into neuron array, and delay to index into synapse array - - + // Add contribution to synaptic input // Convert into ring buffer offset uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( - delay_axonal + delay_dendritic + time, type_index, + // delay_axonal + delay_dendritic + + time, type_index, synapse_type_index_bits); - // Add weight to ring-buffer entry - // **NOTE** Dave suspects that this could be a - // potential location for overflow - - uint32_t accumulation = ring_buffers[ring_buffer_index] + + // Check for ring buffer saturation + int32_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); - uint32_t sat_test = accumulation & 0x10000; - if (sat_test) { - accumulation = sat_test - 1; - plastic_saturation_count++; - } +// 
uint32_t sat_test = accumulation & 0x10000; +// if (sat_test) { +// accumulation = sat_test - 1; +// plastic_saturation_count++; +// } ring_buffers[ring_buffer_index] = accumulation; diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c new file mode 100644 index 00000000000..34756b9afd9 --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "timing_eprop_impl.h" + +//--------------------------------------- +// Globals +//--------------------------------------- +// Exponential lookup-tables +int16_t tau_plus_lookup[TAU_PLUS_SIZE]; +int16_t tau_minus_lookup[TAU_MINUS_SIZE]; + +//--------------------------------------- +// Functions +//--------------------------------------- +address_t timing_initialise(address_t address) { + io_printf(IO_BUF, "timing_initialise: starting\n"); + io_printf(IO_BUF, "\t Nothing to be done\n"); + + return address; +} diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h new file mode 100644 index 00000000000..f7026b8d761 --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef _TIMING_PAIR_IMPL_H_ +#define _TIMING_PAIR_IMPL_H_ + +//--------------------------------------- +// Typedefines +//--------------------------------------- +typedef int16_t post_trace_t; +typedef int16_t pre_trace_t; + +#include +#include "timing.h" +#include + +// Include debug header for log_info etc +#include + +// Include generic plasticity maths functions +#include +#include + +//--------------------------------------- +// Macros +//--------------------------------------- +// Exponential decay lookup parameters +#define TAU_PLUS_TIME_SHIFT 0 +#define TAU_PLUS_SIZE 256 + +#define TAU_MINUS_TIME_SHIFT 0 +#define TAU_MINUS_SIZE 256 + +// Helper macros for looking up decays +#define DECAY_LOOKUP_TAU_PLUS(time) \ + maths_lut_exponential_decay( \ + time, TAU_PLUS_TIME_SHIFT, TAU_PLUS_SIZE, tau_plus_lookup) +#define DECAY_LOOKUP_TAU_MINUS(time) \ + maths_lut_exponential_decay( \ + time, TAU_MINUS_TIME_SHIFT, TAU_MINUS_SIZE, tau_minus_lookup) + +//--------------------------------------- +// Externals +//--------------------------------------- +extern int16_t tau_plus_lookup[TAU_PLUS_SIZE]; +extern int16_t tau_minus_lookup[TAU_MINUS_SIZE]; + +//--------------------------------------- +// Timing dependence inline functions +//--------------------------------------- +static inline post_trace_t timing_get_initial_post_trace(void) { + return 0; +} + +//--------------------------------------- +static inline post_trace_t timing_add_post_spike( + uint32_t time, uint32_t last_time, post_trace_t last_trace) { + // Get time since last spike + uint32_t delta_time = time - last_time; + + // Decay previous o1 and o2 traces + int32_t decayed_o1_trace = STDP_FIXED_MUL_16X16(last_trace, + DECAY_LOOKUP_TAU_MINUS(delta_time)); + + // Add energy caused by new spike to trace + // **NOTE** o2 trace is pre-multiplied by a3_plus + int32_t new_o1_trace = decayed_o1_trace + STDP_FIXED_POINT_ONE; + + log_debug("\tdelta_time=%d, o1=%d\n", delta_time, new_o1_trace); + + // Return 
new pre- synaptic event with decayed trace values with energy + // for new spike added + return (post_trace_t) new_o1_trace; +} + +//--------------------------------------- +static inline pre_trace_t timing_add_pre_spike( + uint32_t time, uint32_t last_time, pre_trace_t last_trace) { + // Get time since last spike + uint32_t delta_time = time - last_time; + + // Decay previous r1 and r2 traces + int32_t decayed_r1_trace = STDP_FIXED_MUL_16X16( + last_trace, DECAY_LOOKUP_TAU_PLUS(delta_time)); + + // Add energy caused by new spike to trace + int32_t new_r1_trace = decayed_r1_trace + STDP_FIXED_POINT_ONE; + + log_debug("\tdelta_time=%u, r1=%d\n", delta_time, new_r1_trace); + + // Return new pre-synaptic event with decayed trace values with energy + // for new spike added + return (pre_trace_t) new_r1_trace; +} + +//--------------------------------------- +static inline update_state_t timing_apply_pre_spike( + uint32_t time, pre_trace_t trace, uint32_t last_pre_time, + pre_trace_t last_pre_trace, uint32_t last_post_time, + post_trace_t last_post_trace, update_state_t previous_state) { + use(&trace); + use(last_pre_time); + use(&last_pre_trace); + + // Get time of event relative to last post-synaptic event + uint32_t time_since_last_post = time - last_post_time; + if (time_since_last_post > 0) { + int32_t decayed_o1 = STDP_FIXED_MUL_16X16( + last_post_trace, DECAY_LOOKUP_TAU_MINUS(time_since_last_post)); + + log_debug("\t\t\ttime_since_last_post_event=%u, decayed_o1=%d\n", + time_since_last_post, decayed_o1); + + // Apply depression to state (which is a weight_state) + return weight_one_term_apply_depression(previous_state, decayed_o1); + } else { + return previous_state; + } +} + +//--------------------------------------- +static inline update_state_t timing_apply_post_spike( + uint32_t time, post_trace_t trace, uint32_t last_pre_time, + pre_trace_t last_pre_trace, uint32_t last_post_time, + post_trace_t last_post_trace, update_state_t previous_state) { + use(&trace); 
+ use(last_post_time); + use(&last_post_trace); + + // Get time of event relative to last pre-synaptic event + uint32_t time_since_last_pre = time - last_pre_time; + if (time_since_last_pre > 0) { + int32_t decayed_r1 = STDP_FIXED_MUL_16X16( + last_pre_trace, DECAY_LOOKUP_TAU_PLUS(time_since_last_pre)); + + log_debug("\t\t\ttime_since_last_pre_event=%u, decayed_r1=%d\n", + time_since_last_pre, decayed_r1); + + // Apply potentiation to state (which is a weight_state) + return weight_one_term_apply_potentiation(previous_state, decayed_r1); + } else { + return previous_state; + } +} + +#endif // _TIMING_PAIR_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c new file mode 100644 index 00000000000..4a0ee8a1674 --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "weight_eprop_reg_impl.h" + +//--------------------------------------- +// Globals +//--------------------------------------- +// Global plasticity parameter data +plasticity_weight_region_data_t *plasticity_weight_region_data; + +//--------------------------------------- +// Functions +//--------------------------------------- +address_t weight_initialise( + address_t address, uint32_t n_synapse_types, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + use(ring_buffer_to_input_buffer_left_shifts); + + io_printf(IO_BUF, "weight_initialise: starting\n"); + io_printf(IO_BUF, "\teprop_reg weight dependence\n"); + + // Copy plasticity region data from address + // **NOTE** this seems somewhat safer than relying on sizeof + int32_t *plasticity_word = (int32_t *) address; + plasticity_weight_region_data = + spin1_malloc(sizeof(plasticity_weight_region_data_t) * n_synapse_types); + if (plasticity_weight_region_data == NULL) { + io_printf(IO_BUF, "Could not initialise weight region data\n"); + return NULL; + } + for (uint32_t s = 0; s < n_synapse_types; s++) { + plasticity_weight_region_data[s].min_weight = *plasticity_word++; + plasticity_weight_region_data[s].max_weight = *plasticity_word++; +// plasticity_weight_region_data[s].a2_plus = *plasticity_word++; +// plasticity_weight_region_data[s].a2_minus = *plasticity_word++; + + io_printf(IO_BUF, "\tSynapse type %u: Min weight:%d, Max weight:%d \n" +// "A2+:%d, A2-:%d" + , + s, plasticity_weight_region_data[s].min_weight, + plasticity_weight_region_data[s].max_weight +// plasticity_weight_region_data[s].a2_plus, +// plasticity_weight_region_data[s].a2_minus + ); + } + io_printf(IO_BUF, "weight_initialise: completed successfully\n"); + + // Return end address of region + return (address_t) plasticity_word; +} diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h new 
file mode 100644 index 00000000000..11d057119f4 --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ +#define _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ + +// Include generic plasticity maths functions +#include +#include +#include + +#include + +//--------------------------------------- +// Structures +//--------------------------------------- +typedef struct { + int32_t min_weight; + int32_t max_weight; + +// int32_t a2_plus; +// int32_t a2_minus; +} plasticity_weight_region_data_t; + +typedef struct { + int32_t initial_weight; + + int32_t a2_plus; + int32_t a2_minus; + + const plasticity_weight_region_data_t *weight_region; +} weight_state_t; + +#include "weight_one_term.h" + +//--------------------------------------- +// Externals +//--------------------------------------- +extern plasticity_weight_region_data_t *plasticity_weight_region_data; + +//--------------------------------------- +// STDP weight dependance functions +//--------------------------------------- +static inline weight_state_t weight_get_initial( + weight_t weight, index_t synapse_type) { + return (weight_state_t) { + .initial_weight = (int32_t) weight, + .a2_plus = 0, + .a2_minus = 0, + .weight_region = 
&plasticity_weight_region_data[synapse_type] + }; +} + +//--------------------------------------- +static inline weight_state_t weight_one_term_apply_depression( + weight_state_t state, int32_t a2_minus) { + io_printf(IO_BUF, "depressing: %d\n", a2_minus); + state.a2_minus += a2_minus; + return state; +} + +//--------------------------------------- +static inline weight_state_t weight_one_term_apply_potentiation( + weight_state_t state, int32_t a2_plus) { + + io_printf(IO_BUF, "potentiating: %d\n", a2_plus); + state.a2_plus += a2_plus; + return state; +} + +//--------------------------------------- +static inline weight_t weight_get_final(weight_state_t new_state) { + // Scale potentiation and depression + // **NOTE** A2+ and A2- are pre-scaled into weight format +// int32_t scaled_a2_plus = STDP_FIXED_MUL_16X16( +// new_state.a2_plus, new_state.weight_region->a2_plus); +// int32_t scaled_a2_minus = STDP_FIXED_MUL_16X16( +// new_state.a2_minus, new_state.weight_region->a2_minus); + + // Apply all terms to initial weight + int32_t new_weight = + new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; + + // Clamp new weight + new_weight = MIN(new_state.weight_region->max_weight, + MAX(new_weight, new_state.weight_region->min_weight)); + + io_printf(IO_BUF, "\told_weight:%d, a2+:%d, a2-:%d, " +// "scaled a2+:%d, scaled a2-:%d," + " new_weight:%d\n", + new_state.initial_weight, new_state.a2_plus, new_state.a2_minus, +// scaled_a2_plus, scaled_a2_minus, + new_weight); + + return (weight_t) new_weight; +} + +#endif // _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 0a7035279b8..1c3fdae470e 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -34,8 +34,6 @@ uint32_t num_fixed_pre_synaptic_events = 0; extern neuron_pointer_t neuron_array; -uint32_t RECURRENT_SYNAPSE_OFFSET = 100; - // The number of neurons static uint32_t 
n_neurons; @@ -186,25 +184,7 @@ static inline void process_fixed_synapses( synaptic_word, synapse_type_index_mask); int32_t weight = synapse_row_sparse_weight(synaptic_word); - int32_t neuron_ind = synapse_row_sparse_index(synaptic_word, synapse_index_mask); - - uint32_t type = synapse_row_sparse_type(synaptic_word, synapse_index_bits, synapse_type_mask); - - // For low pass filter of incoming spike train on this synapse - // Use postsynaptic neuron index to access neuron struct, - - if (type==1){ - // this is a recurrent synapse: add 100 to index to correct array location - syn_ind_from_delay =+ RECURRENT_SYNAPSE_OFFSET; - } - - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u \n", neuron_ind, syn_ind_from_delay, type); - - neuron_pointer_t neuron = &neuron_array[neuron_ind]; - - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in !!!! - - io_printf(IO_BUF, "signed w: %d \n", weight); + io_printf(IO_BUF, "static signed w: %d \n", weight); // Convert into ring buffer offset uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py index 3e5a3a30652..c479fd30cc2 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py @@ -21,9 +21,11 @@ from .timing_dependence_spike_nearest_pair import ( TimingDependenceSpikeNearestPair) from .timing_dependence_vogels_2011 import TimingDependenceVogels2011 +from .timing_dependence_eprop import TimingDependenceEprop __all__ = [ "AbstractTimingDependence", "TimingDependenceSpikePair", "TimingDependencePfisterSpikeTriplet", "TimingDependenceRecurrent", - "TimingDependenceSpikeNearestPair", "TimingDependenceVogels2011" + "TimingDependenceSpikeNearestPair", "TimingDependenceVogels2011", + 
"TimingDependenceEprop" ] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py new file mode 100644 index 00000000000..6e0376c6573 --- /dev/null +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py @@ -0,0 +1,121 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import logging +from spinn_utilities.overrides import overrides +from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( + plasticity_helpers) +from .abstract_timing_dependence import AbstractTimingDependence +from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( + SynapseStructureWeightOnly) + +logger = logging.getLogger(__name__) + +# LOOKUP_TAU_PLUS_SIZE = 256 +# LOOKUP_TAU_PLUS_SHIFT = 0 +# LOOKUP_TAU_MINUS_SIZE = 256 +# LOOKUP_TAU_MINUS_SHIFT = 0 + + +class TimingDependenceEprop(AbstractTimingDependence): + __slots__ = [ + "__synapse_structure", +# "__tau_minus", +# "__tau_minus_last_entry", +# "__tau_plus", +# "__tau_plus_last_entry" + ] + + def __init__(self): #, tau_plus=20.0, tau_minus=20.0): +# self.__tau_plus = tau_plus +# self.__tau_minus = tau_minus + + self.__synapse_structure = SynapseStructureWeightOnly() + +# # provenance data +# self.__tau_plus_last_entry = None +# self.__tau_minus_last_entry = None + +# @property +# def tau_plus(self): +# return self.__tau_plus +# +# @property +# def tau_minus(self): +# return self.__tau_minus + + @overrides(AbstractTimingDependence.is_same_as) + def is_same_as(self, timing_dependence): + if not isinstance(timing_dependence, TimingDependenceEprop): + return False + return (self.__tau_plus == timing_dependence.tau_plus and + self.__tau_minus == timing_dependence.tau_minus) + + @property + def vertex_executable_suffix(self): + return "eprop" + + @property + def pre_trace_n_bytes(self): + + # Pair rule requires no pre-synaptic trace when only the nearest + # Neighbours are considered and, a single 16-bit R1 trace + return 2 + + @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes) + def get_parameters_sdram_usage_in_bytes(self): + return 0 + + @property + def n_weight_terms(self): + return 1 + + @overrides(AbstractTimingDependence.write_parameters) + def write_parameters(self, spec, machine_time_step, weight_scales): + + # There are currently no parameters to 
write for this rule + pass + + # Check timestep is valid +# if machine_time_step != 1000: +# raise NotImplementedError( +# "STDP LUT generation currently only supports 1ms timesteps") + +# # Write lookup tables +# self.__tau_plus_last_entry = plasticity_helpers.write_exp_lut( +# spec, self.__tau_plus, LOOKUP_TAU_PLUS_SIZE, +# LOOKUP_TAU_PLUS_SHIFT) +# self.__tau_minus_last_entry = plasticity_helpers.write_exp_lut( +# spec, self.__tau_minus, LOOKUP_TAU_MINUS_SIZE, +# LOOKUP_TAU_MINUS_SHIFT) + + @property + def synaptic_structure(self): + return self.__synapse_structure + +# @overrides(AbstractTimingDependence.get_provenance_data) +# def get_provenance_data(self, pre_population_label, post_population_label): +# prov_data = list() +# prov_data.append(plasticity_helpers.get_lut_provenance( +# pre_population_label, post_population_label, "SpikePairRule", +# "tau_plus_last_entry", "tau_plus", self.__tau_plus_last_entry)) +# prov_data.append(plasticity_helpers.get_lut_provenance( +# pre_population_label, post_population_label, "SpikePairRule", +# "tau_minus_last_entry", "tau_minus", self.__tau_minus_last_entry)) +# return prov_data + + @overrides(AbstractTimingDependence.get_parameter_names) + def get_parameter_names(self): + return [] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py index 99616f7367b..59cc42dfb8b 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py @@ -18,7 +18,8 @@ from .weight_dependence_additive import WeightDependenceAdditive from .weight_dependence_multiplicative import WeightDependenceMultiplicative from .weight_dependence_additive_triplet import WeightDependenceAdditiveTriplet +from .weight_dependence_eprop_reg import WeightDependenceEpropReg __all__ = ["AbstractHasAPlusAMinus", "AbstractWeightDependence", 
"WeightDependenceAdditive", "WeightDependenceMultiplicative", - "WeightDependenceAdditiveTriplet"] + "WeightDependenceAdditiveTriplet", "WeightDependenceEpropReg"] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py new file mode 100644 index 00000000000..9ae02163b6a --- /dev/null +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py @@ -0,0 +1,91 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from spinn_utilities.overrides import overrides +from data_specification.enums import DataType +from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus +from .abstract_weight_dependence import AbstractWeightDependence + + +class WeightDependenceEpropReg( + AbstractHasAPlusAMinus, AbstractWeightDependence): + __slots__ = [ + "__w_max", + "__w_min"] + + def __init__(self, w_min=0.0, w_max=1.0): + super(WeightDependenceEpropReg, self).__init__() + self.__w_min = w_min + self.__w_max = w_max + + @property + def w_min(self): + return self.__w_min + + @property + def w_max(self): + return self.__w_max + + @overrides(AbstractWeightDependence.is_same_as) + def is_same_as(self, weight_dependence): + # pylint: disable=protected-access + if not isinstance(weight_dependence, WeightDependenceEpropReg): + return False + return ( + (self.__w_min == weight_dependence.w_min) and + (self.__w_max == weight_dependence.w_max) and + (self.A_plus == weight_dependence.A_plus) and + (self.A_minus == weight_dependence.A_minus)) + + @property + def vertex_executable_suffix(self): + return "reg" + + @overrides(AbstractWeightDependence.get_parameters_sdram_usage_in_bytes) + def get_parameters_sdram_usage_in_bytes( + self, n_synapse_types, n_weight_terms): + if n_weight_terms != 1: + raise NotImplementedError( + "Multiplicative weight dependence only supports single terms") + + return (2 # Number of 32-bit parameters + * 4) * n_synapse_types + + @overrides(AbstractWeightDependence.write_parameters) + def write_parameters( + self, spec, machine_time_step, weight_scales, n_weight_terms): + if n_weight_terms != 1: + raise NotImplementedError( + "Multiplicative weight dependence only supports single terms") + + # Loop through each synapse type's weight scale + for w in weight_scales: + spec.write_value( + data=int(round(self.__w_min * w)), data_type=DataType.INT32) + spec.write_value( + data=int(round(self.__w_max * w)), data_type=DataType.INT32) + +# spec.write_value( +# 
data=int(round(self.A_plus * w)), data_type=DataType.INT32) +# spec.write_value( +# data=int(round(self.A_minus * w)), data_type=DataType.INT32) + + @property + def weight_maximum(self): + return self.__w_max + + @overrides(AbstractWeightDependence.get_parameter_names) + def get_parameter_names(self): + return ['w_min', 'w_max'] #, 'A_plus', 'A_minus'] diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index e70ecbd8454..11e6d29a05b 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -236,7 +236,9 @@ def get_plastic_synaptic_data( plastic_plastic = numpy.zeros( len(connections) * n_half_words, dtype="uint16") plastic_plastic[half_word::n_half_words] = \ - numpy.rint(numpy.abs(connections["weight"])).astype("uint16") + numpy.rint(connections["weight"]).astype("uint16") + + # numpy.rint(numpy.abs(connections["weight"])).astype("uint16") # Convert the plastic data into groups of bytes per connection and # then into rows diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index f3eb28ebac4..1a0b3f1b906 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -526,11 +526,17 @@ def _get_ring_buffer_to_input_left_shifts( w + 1 if (2 ** w) <= a else w for w, a in zip(max_weight_powers, max_weights)) + # fix weight shift so we can scale eligibility trace calculations accordingly. 
+ max_weight_powers = (2 #if w >= 1 else w + for w in max_weight_powers) + # If we have synapse dynamics that uses signed weights, # Add another bit of shift to prevent overflows if weights_signed: max_weight_powers = (m + 1 for m in max_weight_powers) + + return list(max_weight_powers) @staticmethod From 7ddeadf248eafcf35e12ea9c655d6087127d600b Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Tue, 3 Mar 2020 09:38:10 +0000 Subject: [PATCH 030/123] Add regularisation functionality --- .../neuron_impl_eprop_adaptive.h | 2 +- .../neuron/plasticity/stdp/stdp_typedefs.h | 3 + .../synapse_dynamics_eprop_adaptive_impl.c | 144 +++++++++--------- .../synapse_structure/synapse_structure.h | 2 +- .../synapse_structure_weight_impl.h | 4 +- .../stdp/weight_dependence/weight.h | 2 +- .../weight_dependence/weight_eprop_reg_impl.c | 7 +- .../weight_dependence/weight_eprop_reg_impl.h | 42 +++-- .../weight_dependence_eprop_reg.py | 14 +- 9 files changed, 128 insertions(+), 92 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index ea35ab2a13d..a7ab30dadca 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -66,7 +66,7 @@ static additional_input_pointer_t additional_input_array; static threshold_type_pointer_t threshold_type_array; //! 
Global parameters for the neurons -static global_neuron_params_pointer_t global_parameters; +global_neuron_params_pointer_t global_parameters; // The synapse shaping parameters static synapse_param_t *neuron_synapse_shaping_params; diff --git a/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h b/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h index 4efb9391c77..58c87a4aff3 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h +++ b/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h @@ -28,4 +28,7 @@ // Helper macros for 16-bit fixed-point multiplication #define STDP_FIXED_MUL_16X16(a, b) maths_fixed_mul16(a, b, STDP_FIXED_POINT) + +#define PRINT_PLASTICITY 1 + #endif // _STDP_TYPEDEFS_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 104279f48d5..c4484e69034 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -36,6 +36,7 @@ #include extern neuron_pointer_t neuron_array; +extern global_neuron_params_pointer_t global_params; static uint32_t synapse_type_index_bits; static uint32_t synapse_index_bits; @@ -94,62 +95,62 @@ post_event_history_t *post_event_history; //--------------------------------------- // Synapse update loop //--------------------------------------- -static inline final_state_t plasticity_update_synapse( - uint32_t time, - const uint32_t last_pre_time, const pre_trace_t last_pre_trace, - const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, - const uint32_t delay_axonal, update_state_t current_state, - const post_event_history_t *post_event_history) { - // Apply axonal delay to time of last presynaptic spike - const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; - - // Get the post-synaptic window of 
events to be processed - const uint32_t window_begin_time = - (delayed_last_pre_time >= delay_dendritic) - ? (delayed_last_pre_time - delay_dendritic) : 0; - const uint32_t window_end_time = time + delay_axonal - delay_dendritic; - post_event_window_t post_window = post_events_get_window_delayed( - post_event_history, window_begin_time, window_end_time); - - log_debug("\tPerforming deferred synapse update at time:%u", time); - log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", - window_begin_time, window_end_time, post_window.prev_time, - post_window.num_events); - - // print_event_history(post_event_history); - // print_delayed_window_events(post_event_history, window_begin_time, - // window_end_time, delay_dendritic); - - // Process events in post-synaptic window - while (post_window.num_events > 0) { - const uint32_t delayed_post_time = - *post_window.next_time + delay_dendritic; - log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", - delayed_post_time); - - // Apply spike to state - current_state = timing_apply_post_spike( - delayed_post_time, *post_window.next_trace, delayed_last_pre_time, - last_pre_trace, post_window.prev_time, post_window.prev_trace, - current_state); - - // Go onto next event - post_window = post_events_next_delayed(post_window, delayed_post_time); - } - - const uint32_t delayed_pre_time = time + delay_axonal; - log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", - delayed_pre_time, post_window.prev_time); - - // Apply spike to state - // **NOTE** dendritic delay is subtracted - current_state = timing_apply_pre_spike( - delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, - post_window.prev_time, post_window.prev_trace, current_state); - - // Return final synaptic word and weight - return synapse_structure_get_final_state(current_state); -} +//static inline final_state_t plasticity_update_synapse( +// uint32_t time, +// const uint32_t last_pre_time, const 
pre_trace_t last_pre_trace, +// const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, +// const uint32_t delay_axonal, update_state_t current_state, +// const post_event_history_t *post_event_history) { +// // Apply axonal delay to time of last presynaptic spike +// const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; +// +// // Get the post-synaptic window of events to be processed +// const uint32_t window_begin_time = +// (delayed_last_pre_time >= delay_dendritic) +// ? (delayed_last_pre_time - delay_dendritic) : 0; +// const uint32_t window_end_time = time + delay_axonal - delay_dendritic; +// post_event_window_t post_window = post_events_get_window_delayed( +// post_event_history, window_begin_time, window_end_time); +// +// log_debug("\tPerforming deferred synapse update at time:%u", time); +// log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", +// window_begin_time, window_end_time, post_window.prev_time, +// post_window.num_events); +// +// // print_event_history(post_event_history); +// // print_delayed_window_events(post_event_history, window_begin_time, +// // window_end_time, delay_dendritic); +// +// // Process events in post-synaptic window +// while (post_window.num_events > 0) { +// const uint32_t delayed_post_time = +// *post_window.next_time + delay_dendritic; +// log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", +// delayed_post_time); +// +// // Apply spike to state +// current_state = timing_apply_post_spike( +// delayed_post_time, *post_window.next_trace, delayed_last_pre_time, +// last_pre_trace, post_window.prev_time, post_window.prev_trace, +// current_state); +// +// // Go onto next event +// post_window = post_events_next_delayed(post_window, delayed_post_time); +// } +// +// const uint32_t delayed_pre_time = time + delay_axonal; +// log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", +// delayed_pre_time, post_window.prev_time); +// +// // Apply 
spike to state +// // **NOTE** dendritic delay is subtracted +// current_state = timing_apply_pre_spike( +// delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, +// post_window.prev_time, post_window.prev_trace, current_state); +// +// // Return final synaptic word and weight +// return synapse_structure_get_final_state(current_state); +//} //--------------------------------------- // Synaptic row plastic-region implementation @@ -284,11 +285,14 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? - int16_t delta_w_int = bitsk(delta_w) >> 1; // THIS NEEDS UPDATING TO APPROPRIATE SCALING + int16_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d\n", - delta_w, delta_w_int); + + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d\n", + delta_w, delta_w_int); + } if (delta_w_int <= 0){ current_state = weight_one_term_apply_depression(current_state, delta_w_int); @@ -296,8 +300,13 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state current_state = weight_one_term_apply_potentiation(current_state, delta_w_int); } + + // Calculate regularisation error + REAL reg_error = global_params->core_target_rate - global_params->core_pop_rate; + + // Return final synaptic word and weight - return synapse_structure_get_final_state(current_state); + return synapse_structure_get_final_state(current_state, reg_error); } @@ -355,7 +364,6 @@ bool synapse_dynamics_process_plastic_synapses( syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } - neuron_pointer_t neuron = &neuron_array[neuron_ind]; neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! 
@@ -364,12 +372,12 @@ bool synapse_dynamics_process_plastic_synapses( update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); - - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", neuron_ind, syn_ind_from_delay, type, current_state.initial_weight, neuron->syn_state[syn_ind_from_delay].delta_w); - + } // Perform weight update: // Go through typical weight update process to clip to limits @@ -379,14 +387,6 @@ bool synapse_dynamics_process_plastic_synapses( // reset delta_w as weight change has now been applied neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; -// // Update the synapse state -// final_state_t final_state = plasticity_update_synapse( -// time, last_pre_time, last_pre_trace, event_history->prev_trace, -// delay_dendritic, delay_axonal, current_state, -// &post_event_history[index]); - - - // Add contribution to synaptic input // Convert into ring buffer offset uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure.h index c2049889d79..f3b38e323c6 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure.h @@ -24,7 +24,7 @@ static update_state_t synapse_structure_get_update_state( plastic_synapse_t synaptic_word, index_t synapse_type); static final_state_t synapse_structure_get_final_state( - update_state_t state); + update_state_t state, REAL reg_error); static weight_t synapse_structure_get_final_weight( final_state_t final_state); diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h 
b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h index 11f4ec3af61..ff778a48e76 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h @@ -41,8 +41,8 @@ static inline update_state_t synapse_structure_get_update_state( //--------------------------------------- static inline final_state_t synapse_structure_get_final_state( - update_state_t state) { - return weight_get_final(state); + update_state_t state, REAL reg_error) { + return weight_get_final(state, reg_error); } //--------------------------------------- diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h index b6f7d8818b3..6026a0e2a06 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h @@ -54,6 +54,6 @@ address_t weight_initialise( */ static weight_state_t weight_get_initial(weight_t weight, index_t synapse_type); -static weight_t weight_get_final(weight_state_t new_state); +static weight_t weight_get_final(weight_state_t new_state, REAL reg_error); #endif // _WEIGHT_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c index 4a0ee8a1674..e13daf601c5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c @@ -46,14 +46,17 @@ address_t weight_initialise( for (uint32_t s = 0; s < n_synapse_types; s++) { plasticity_weight_region_data[s].min_weight = *plasticity_word++; plasticity_weight_region_data[s].max_weight = *plasticity_word++; + 
plasticity_weight_region_data[s].reg_rate = kbits(*plasticity_word++); + // plasticity_weight_region_data[s].a2_plus = *plasticity_word++; // plasticity_weight_region_data[s].a2_minus = *plasticity_word++; - io_printf(IO_BUF, "\tSynapse type %u: Min weight:%d, Max weight:%d \n" + io_printf(IO_BUF, "\tSynapse type %u: Min weight:%d, Max weight:%d, reg_rate: %k \n" // "A2+:%d, A2-:%d" , s, plasticity_weight_region_data[s].min_weight, - plasticity_weight_region_data[s].max_weight + plasticity_weight_region_data[s].max_weight, + plasticity_weight_region_data[s].reg_rate // plasticity_weight_region_data[s].a2_plus, // plasticity_weight_region_data[s].a2_minus ); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 11d057119f4..4efbc91a520 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -31,7 +31,7 @@ typedef struct { int32_t min_weight; int32_t max_weight; - + REAL reg_rate; // int32_t a2_plus; // int32_t a2_minus; } plasticity_weight_region_data_t; @@ -68,7 +68,11 @@ static inline weight_state_t weight_get_initial( //--------------------------------------- static inline weight_state_t weight_one_term_apply_depression( weight_state_t state, int32_t a2_minus) { - io_printf(IO_BUF, "depressing: %d\n", a2_minus); + + + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "depressing: %d\n", a2_minus); + } state.a2_minus += a2_minus; return state; } @@ -76,14 +80,16 @@ static inline weight_state_t weight_one_term_apply_depression( //--------------------------------------- static inline weight_state_t weight_one_term_apply_potentiation( weight_state_t state, int32_t a2_plus) { - - io_printf(IO_BUF, "potentiating: %d\n", a2_plus); + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "potentiating: %d\n", 
a2_plus); + } state.a2_plus += a2_plus; return state; } //--------------------------------------- -static inline weight_t weight_get_final(weight_state_t new_state) { +static inline weight_t weight_get_final(weight_state_t new_state, + REAL reg_error) { // Scale potentiation and depression // **NOTE** A2+ and A2- are pre-scaled into weight format // int32_t scaled_a2_plus = STDP_FIXED_MUL_16X16( @@ -91,20 +97,36 @@ static inline weight_t weight_get_final(weight_state_t new_state) { // int32_t scaled_a2_minus = STDP_FIXED_MUL_16X16( // new_state.a2_minus, new_state.weight_region->a2_minus); - // Apply all terms to initial weight + // Apply eprop plasticity updates to initial weight int32_t new_weight = new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; - // Clamp new weight + // Calculate regularisation + if (new_state.weight_region->reg_rate > 0.0k) { // if reg rate is zero, regularisation is turned off + if (reg_error > 0.1k) { + // increase weight (core rate is below target) + new_weight = new_weight + + (new_weight * new_state.weight_region->reg_rate * reg_error); + + } else if (reg_error < -0.1k){ + // reduce weight (core rate is above target) + new_weight = new_weight + - (new_weight * new_state.weight_region->reg_rate * reg_error); + } + } + + // Clamp new weight to bounds new_weight = MIN(new_state.weight_region->max_weight, MAX(new_weight, new_state.weight_region->min_weight)); - io_printf(IO_BUF, "\told_weight:%d, a2+:%d, a2-:%d, " -// "scaled a2+:%d, scaled a2-:%d," + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "\told_weight:%d, a2+:%d, a2-:%d, " + // "scaled a2+:%d, scaled a2-:%d," " new_weight:%d\n", new_state.initial_weight, new_state.a2_plus, new_state.a2_minus, -// scaled_a2_plus, scaled_a2_minus, + // scaled_a2_plus, scaled_a2_minus, new_weight); + } return (weight_t) new_weight; } diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py index 9ae02163b6a..4326454b06d 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py @@ -23,12 +23,14 @@ class WeightDependenceEpropReg( AbstractHasAPlusAMinus, AbstractWeightDependence): __slots__ = [ "__w_max", - "__w_min"] + "__w_min", + "__reg_rate"] - def __init__(self, w_min=0.0, w_max=1.0): + def __init__(self, w_min=0.0, w_max=1.0, reg_rate=0.0): super(WeightDependenceEpropReg, self).__init__() self.__w_min = w_min self.__w_max = w_max + self.__reg_rate = reg_rate @property def w_min(self): @@ -37,6 +39,10 @@ def w_min(self): @property def w_max(self): return self.__w_max + + @property + def reg_rate(self): + return self.__reg_rate @overrides(AbstractWeightDependence.is_same_as) def is_same_as(self, weight_dependence): @@ -60,7 +66,7 @@ def get_parameters_sdram_usage_in_bytes( raise NotImplementedError( "Multiplicative weight dependence only supports single terms") - return (2 # Number of 32-bit parameters + return (3 # Number of 32-bit parameters * 4) * n_synapse_types @overrides(AbstractWeightDependence.write_parameters) @@ -82,6 +88,8 @@ def write_parameters( # spec.write_value( # data=int(round(self.A_minus * w)), data_type=DataType.INT32) + spec.write_value(self.__reg_rate, data_type=DataType.S1615) + @property def weight_maximum(self): return self.__w_max From a07ee44f9eeb8ccf52141e5e940ca28c88482492 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Wed, 4 Mar 2020 18:46:02 +0000 Subject: [PATCH 031/123] learning now in sinusoid, weight is shifted to make more sense, print statements adjusted --- neural_modelling/makefiles/neuron/Makefile | 2 + .../makefiles/neuron/neural_build.mk | 11 +- .../neuron/sinusoid_readout/Makefile | 10 +- .../neuron_impl_eprop_adaptive.h | 4 +- .../neuron_impl_sinusoid_readout.h | 
6 +- .../synapse_dynamics_eprop_adaptive_impl.c | 18 +- .../synapse_dynamics_sinusoid_readout_impl.c | 562 ++++++++++++++++++ 7 files changed, 592 insertions(+), 21 deletions(-) diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index b6b1aac2c59..4076269d5b3 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -14,7 +14,9 @@ # along with this program. If not, see . MODELS = eprop_adaptive \ + eprop_adaptive_stdp_mad_eprop_reg \ sinusoid_readout \ + sinusoid_readout_stdp_mad_eprop_reg \ # IF_curr_exp \ IF_cond_exp \ IZK_curr_exp \ diff --git a/neural_modelling/makefiles/neuron/neural_build.mk b/neural_modelling/makefiles/neuron/neural_build.mk index 0e46fe12b8a..fe5a9192a02 100644 --- a/neural_modelling/makefiles/neuron/neural_build.mk +++ b/neural_modelling/makefiles/neuron/neural_build.mk @@ -46,10 +46,6 @@ ifndef SYNAPSE_DEBUG SYNAPSE_DEBUG = LOG_INFO endif -ifndef SINUSYNAPSES - SINUSYNAPSES = neuron/synapses.c -endif - ifndef PLASTIC_DEBUG PLASTIC_DEBUG = LOG_INFO endif @@ -202,7 +198,7 @@ OTHER_SOURCES_CONVERTED := $(call strip_source_dirs,$(OTHER_SOURCES)) # List all the sources relative to one of SOURCE_DIRS SOURCES = common/out_spikes.c \ neuron/c_main.c \ - $(SINUSYNAPSES) \ + neuron/synapses.c \ neuron/neuron.c \ neuron/spike_processing.c \ neuron/population_table/population_table_$(POPULATION_TABLE_IMPL)_impl.c \ @@ -221,11 +217,6 @@ $(BUILD_DIR)neuron/c_main.o: $(MODIFIED_DIR)neuron/c_main.c -@mkdir -p $(dir $@) $(SYNAPSE_TYPE_COMPILE) -o $@ $< -$(BUILD_DIR)neuron/sinusynapses.o: $(MODIFIED_DIR)neuron/sinusynapses.c - #sinusynapses.c - -@mkdir -p $(dir $@) - $(SYNAPSE_TYPE_COMPILE) -o $@ $< - $(BUILD_DIR)neuron/synapses.o: $(MODIFIED_DIR)neuron/synapses.c #synapses.c -@mkdir -p $(dir $@) diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile index 
3903bd476ba..771ae1c7b58 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile +++ b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile @@ -2,8 +2,16 @@ APP = $(notdir $(CURDIR)) OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h + SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c -SINUSYNAPSES = neuron/sinusynapses.c +#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +#TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c +#TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h +#WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +#WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h include ../neural_build.mk + + diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index b94068aa8ef..fa0045cd657 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -297,8 +297,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // neuron->syn_state[0].z_bar; // global_parameters->core_target_rate; // neuron->syn_state[0].e_bar; -// neuron->syn_state[0].el_a; - exc_input_values[0]; // record input input (signed) + neuron->syn_state[0].el_a; +// exc_input_values[0]; // record input input (signed) // learning_signal * neuron->w_fb; // update neuron parameters diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 8ba59aae32c..e02dae987f2 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -55,6 +55,7 @@ static synapse_param_t *neuron_synapse_shaping_params; static REAL next_spike_time = 0; extern uint32_t time; extern key_t key; +extern REAL learning_signal; static uint32_t target_ind = 0; @@ -279,9 +280,12 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Calculate error REAL error = result - global_parameters->target_V[target_ind]; + learning_signal = error; // Record readout - recorded_variable_values[V_RECORDING_INDEX] = result; + recorded_variable_values[V_RECORDING_INDEX] = +// result; + neuron->syn_state[0].z_bar; // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index c4484e69034..437f0d33285 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -36,7 +36,7 @@ #include extern neuron_pointer_t neuron_array; -extern global_neuron_params_pointer_t global_params; +extern global_neuron_params_pointer_t global_parameters; static uint32_t synapse_type_index_bits; static uint32_t synapse_index_bits; @@ -285,24 +285,25 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? 
- int16_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING +// int16_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING + int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d\n", - delta_w, delta_w_int); + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d, 16b delta_w_int: %d, delta << 9: %d, delta << 12: %d, delta << 16: %d\n", + delta_w, delta_w_int, (int16_t)delta_w_int, (int16_t)delta_w_int << 9, (int16_t)delta_w_int << 12, (int16_t)delta_w_int << 16); } if (delta_w_int <= 0){ - current_state = weight_one_term_apply_depression(current_state, delta_w_int); + current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 9)); } else { - current_state = weight_one_term_apply_potentiation(current_state, delta_w_int); + current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 9)); } // Calculate regularisation error - REAL reg_error = global_params->core_target_rate - global_params->core_pop_rate; + REAL reg_error = global_parameters->core_target_rate - global_parameters->core_pop_rate; // Return final synaptic word and weight @@ -373,6 +374,9 @@ bool synapse_dynamics_process_plastic_synapses( synapse_structure_get_update_state(*plastic_words, type); if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", + neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", neuron_ind, syn_ind_from_delay, type, current_state.initial_weight, diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 
e69de29bb2d..4b882936532 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -0,0 +1,562 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +// Spinn_common includes +#include "static-assert.h" + +// sPyNNaker neural modelling includes +#include + +// Plasticity includes +#include "maths.h" +#include "post_events.h" + +#include "weight_dependence/weight.h" +#include "timing_dependence/timing.h" +#include +#include +#include + + +#include +//#include +#include + +extern neuron_pointer_t neuron_array; +//extern global_neuron_params_pointer_t global_parameters; + +static uint32_t synapse_type_index_bits; +static uint32_t synapse_index_bits; +static uint32_t synapse_index_mask; +static uint32_t synapse_type_index_mask; +static uint32_t synapse_delay_index_type_bits; +static uint32_t synapse_type_mask; + +uint32_t num_plastic_pre_synaptic_events = 0; +uint32_t plastic_saturation_count = 0; + +//--------------------------------------- +// Macros +//--------------------------------------- +// The plastic control words used by Morrison synapses store an axonal delay +// in the upper 3 bits. 
+// Assuming a maximum of 16 delay slots, this is all that is required as: +// +// 1) Dendritic + Axonal <= 15 +// 2) Dendritic >= Axonal +// +// Therefore: +// +// * Maximum value of dendritic delay is 15 (with axonal delay of 0) +// - It requires 4 bits +// * Maximum value of axonal delay is 7 (with dendritic delay of 8) +// - It requires 3 bits +// +// | Axonal delay | Dendritic delay | Type | Index | +// |---------------------------|--------------------|-------------------|--------------------| +// | SYNAPSE_AXONAL_DELAY_BITS | SYNAPSE_DELAY_BITS | SYNAPSE_TYPE_BITS | SYNAPSE_INDEX_BITS | +// | | | SYNAPSE_TYPE_INDEX_BITS | +// |---------------------------|--------------------|----------------------------------------| +#ifndef SYNAPSE_AXONAL_DELAY_BITS +#define SYNAPSE_AXONAL_DELAY_BITS 3 +#endif + +#define SYNAPSE_AXONAL_DELAY_MASK \ + ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1) + + +uint32_t RECURRENT_SYNAPSE_OFFSET = 100; + +//--------------------------------------- +// Structures +//--------------------------------------- +typedef struct { + pre_trace_t prev_trace; + uint32_t prev_time; +} pre_event_history_t; + +post_event_history_t *post_event_history; + +/* PRIVATE FUNCTIONS */ + +//--------------------------------------- +// Synapse update loop +//--------------------------------------- +//static inline final_state_t plasticity_update_synapse( +// uint32_t time, +// const uint32_t last_pre_time, const pre_trace_t last_pre_trace, +// const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, +// const uint32_t delay_axonal, update_state_t current_state, +// const post_event_history_t *post_event_history) { +// // Apply axonal delay to time of last presynaptic spike +// const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; +// +// // Get the post-synaptic window of events to be processed +// const uint32_t window_begin_time = +// (delayed_last_pre_time >= delay_dendritic) +// ? 
(delayed_last_pre_time - delay_dendritic) : 0; +// const uint32_t window_end_time = time + delay_axonal - delay_dendritic; +// post_event_window_t post_window = post_events_get_window_delayed( +// post_event_history, window_begin_time, window_end_time); +// +// log_debug("\tPerforming deferred synapse update at time:%u", time); +// log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", +// window_begin_time, window_end_time, post_window.prev_time, +// post_window.num_events); +// +// // print_event_history(post_event_history); +// // print_delayed_window_events(post_event_history, window_begin_time, +// // window_end_time, delay_dendritic); +// +// // Process events in post-synaptic window +// while (post_window.num_events > 0) { +// const uint32_t delayed_post_time = +// *post_window.next_time + delay_dendritic; +// log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", +// delayed_post_time); +// +// // Apply spike to state +// current_state = timing_apply_post_spike( +// delayed_post_time, *post_window.next_trace, delayed_last_pre_time, +// last_pre_trace, post_window.prev_time, post_window.prev_trace, +// current_state); +// +// // Go onto next event +// post_window = post_events_next_delayed(post_window, delayed_post_time); +// } +// +// const uint32_t delayed_pre_time = time + delay_axonal; +// log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", +// delayed_pre_time, post_window.prev_time); +// +// // Apply spike to state +// // **NOTE** dendritic delay is subtracted +// current_state = timing_apply_pre_spike( +// delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, +// post_window.prev_time, post_window.prev_trace, current_state); +// +// // Return final synaptic word and weight +// return synapse_structure_get_final_state(current_state); +//} + +//--------------------------------------- +// Synaptic row plastic-region implementation +//--------------------------------------- +static 
inline plastic_synapse_t* plastic_synapses( + address_t plastic_region_address) { + const uint32_t pre_event_history_size_words = + sizeof(pre_event_history_t) / sizeof(uint32_t); + static_assert( + pre_event_history_size_words * sizeof(uint32_t) == sizeof(pre_event_history_t), + "Size of pre_event_history_t structure should be a multiple" + " of 32-bit words"); + + return (plastic_synapse_t *) + &plastic_region_address[pre_event_history_size_words]; +} + +//--------------------------------------- +static inline pre_event_history_t *plastic_event_history( + address_t plastic_region_address) { + return (pre_event_history_t *) &plastic_region_address[0]; +} + +void synapse_dynamics_print_plastic_synapses( + address_t plastic_region_address, address_t fixed_region_address, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + use(plastic_region_address); + use(fixed_region_address); + use(ring_buffer_to_input_buffer_left_shifts); + +#if LOG_LEVEL >= LOG_DEBUG + // Extract separate arrays of weights (from plastic region), + // Control words (from fixed region) and number of plastic synapses + plastic_synapse_t *plastic_words = plastic_synapses(plastic_region_address); + const control_t *control_words = + synapse_row_plastic_controls(fixed_region_address); + size_t plastic_synapse = + synapse_row_num_plastic_controls(fixed_region_address); + + log_debug("Plastic region %u synapses\n", plastic_synapse); + + // Loop through plastic synapses + for (uint32_t i = 0; i < plastic_synapse; i++) { + // Get next control word (auto incrementing control word) + uint32_t control_word = *control_words++; + uint32_t synapse_type = synapse_row_sparse_type( + control_word, synapse_index_bits, synapse_type_mask); + + // Get weight + update_state_t update_state = synapse_structure_get_update_state( + *plastic_words++, synapse_type); + final_state_t final_state = synapse_structure_get_final_state( + update_state); + weight_t weight = synapse_structure_get_final_weight(final_state); + + 
log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); + synapses_print_weight( + weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); + log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", + synapse_row_sparse_delay(control_word, synapse_type_index_bits), + synapse_types_get_type_char(synapse_type), + synapse_row_sparse_index(control_word, synapse_index_mask), + SYNAPSE_DELAY_MASK, synapse_type_index_bits); + } +#endif // LOG_LEVEL >= LOG_DEBUG +} + +//--------------------------------------- +static inline index_t sparse_axonal_delay(uint32_t x) { +#if 1 + use(x); + return 0; +#else + return (x >> synapse_delay_index_type_bits) & SYNAPSE_AXONAL_DELAY_MASK; +#endif +} + +address_t synapse_dynamics_initialise( + address_t address, uint32_t n_neurons, uint32_t n_synapse_types, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + // Load timing dependence data + address_t weight_region_address = timing_initialise(address); + if (address == NULL) { + return NULL; + } + + // Load weight dependence data + address_t weight_result = weight_initialise( + weight_region_address, n_synapse_types, + ring_buffer_to_input_buffer_left_shifts); + if (weight_result == NULL) { + return NULL; + } + + post_event_history = post_events_init_buffers(n_neurons); + if (post_event_history == NULL) { + return NULL; + } + + uint32_t n_neurons_power_2 = n_neurons; + uint32_t log_n_neurons = 1; + if (n_neurons != 1) { + if (!is_power_of_2(n_neurons)) { + n_neurons_power_2 = next_power_of_2(n_neurons); + } + log_n_neurons = ilog_2(n_neurons_power_2); + } + + uint32_t n_synapse_types_power_2 = n_synapse_types; + if (!is_power_of_2(n_synapse_types)) { + n_synapse_types_power_2 = next_power_of_2(n_synapse_types); + } + uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); + + synapse_type_index_bits = log_n_neurons + log_n_synapse_types; + synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; + synapse_index_bits = log_n_neurons; + synapse_index_mask = (1 << 
synapse_index_bits) - 1; + synapse_delay_index_type_bits = + SYNAPSE_DELAY_BITS + synapse_type_index_bits; + synapse_type_mask = (1 << log_n_synapse_types) - 1; + + return weight_result; +} + + +static inline final_state_t eprop_plasticity_update(update_state_t current_state, + REAL delta_w){ + + // Test weight change + // delta_w = -0.1k; + + + // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? +// int32_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING + int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING +// int16_t delta_w_int = (int) delta_w; // >> 15; + + + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d, 16b delta_w_int: %d, delta << 9: %d, delta << 10: %d, delta << 11: %d\n", + delta_w, delta_w_int, (int16_t)delta_w_int, (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 10), (int16_t)(delta_w_int << 11)); + } + + if (delta_w_int <= 0){ + current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 9)); + } else { + current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 9)); + } + + + // Calculate regularisation error + REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; + + + // Return final synaptic word and weight + return synapse_structure_get_final_state(current_state, reg_error); +} + + + + +bool synapse_dynamics_process_plastic_synapses( + address_t plastic_region_address, address_t fixed_region_address, + weight_t *ring_buffers, uint32_t time) { + // Extract separate arrays of plastic synapses (from plastic region), + // Control words (from fixed region) and number of plastic synapses + plastic_synapse_t *plastic_words = + plastic_synapses(plastic_region_address); + const control_t *control_words = + synapse_row_plastic_controls(fixed_region_address); + size_t plastic_synapse = + 
synapse_row_num_plastic_controls(fixed_region_address); + + num_plastic_pre_synaptic_events += plastic_synapse; + + // Could maybe have a single z_bar for the entire synaptic row and update it once here for all synaptic words? + + + + // Loop through plastic synapses + for (; plastic_synapse > 0; plastic_synapse--) { + // Get next control word (auto incrementing) + uint32_t control_word = *control_words++; + + // Extract control-word components + // **NOTE** cunningly, control word is just the same as lower + // 16-bits of 32-bit fixed synapse so same functions can be used +// uint32_t delay_axonal = sparse_axonal_delay(control_word); + + uint32_t delay = 1.0k; + uint32_t syn_ind_from_delay = + synapse_row_sparse_delay(control_word, synapse_type_index_bits); + +// uint32_t delay_dendritic = synapse_row_sparse_delay( +// control_word, synapse_type_index_bits); + uint32_t type = synapse_row_sparse_type( + control_word, synapse_index_bits, synapse_type_mask); + uint32_t index = + synapse_row_sparse_index(control_word, synapse_index_mask); + uint32_t type_index = synapse_row_sparse_type_index( + control_word, synapse_type_index_mask); + + + int32_t neuron_ind = synapse_row_sparse_index(control_word, synapse_index_mask); + + // For low pass filter of incoming spike train on this synapse + // Use postsynaptic neuron index to access neuron struct, + + if (type==1){ + // this is a recurrent synapse: add 100 to index to correct array location + syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; + } + + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! 
+ + + // Create update state from the plastic synaptic word + update_state_t current_state = + synapse_structure_get_update_state(*plastic_words, type); + + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", + neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); + + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", + neuron_ind, syn_ind_from_delay, type, + current_state.initial_weight, + neuron->syn_state[syn_ind_from_delay].delta_w); + } + + // Perform weight update: + // Go through typical weight update process to clip to limits + final_state_t final_state = eprop_plasticity_update(current_state, + neuron->syn_state[syn_ind_from_delay].delta_w); + + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // Add contribution to synaptic input + // Convert into ring buffer offset + uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + // delay_axonal + delay_dendritic + + time, type_index, + synapse_type_index_bits); + + // Check for ring buffer saturation + int32_t accumulation = ring_buffers[ring_buffer_index] + + synapse_structure_get_final_weight(final_state); + +// uint32_t sat_test = accumulation & 0x10000; +// if (sat_test) { +// accumulation = sat_test - 1; +// plastic_saturation_count++; +// } + + ring_buffers[ring_buffer_index] = accumulation; + + // Write back updated synaptic word to plastic region + *plastic_words++ = + synapse_structure_get_final_synaptic_word(final_state); + } + return true; +} + +void synapse_dynamics_process_post_synaptic_event( + uint32_t time, index_t neuron_index) { + log_debug("Adding post-synaptic event to trace at time:%u", time); + + // Add post-event + post_event_history_t *history = &post_event_history[neuron_index]; + const uint32_t last_post_time = history->times[history->count_minus_one]; + const post_trace_t 
last_post_trace = + history->traces[history->count_minus_one]; + post_events_add(time, history, + timing_add_post_spike(time, last_post_time, last_post_trace)); +} + +input_t synapse_dynamics_get_intrinsic_bias( + uint32_t time, index_t neuron_index) { + use(time); + use(neuron_index); + return 0.0k; +} + +uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { + return num_plastic_pre_synaptic_events; +} + +uint32_t synapse_dynamics_get_plastic_saturation_count(void) { + return plastic_saturation_count; +} + +#if SYNGEN_ENABLED == 1 + +//! \brief Searches the synaptic row for the the connection with the +//! specified post-synaptic ID +//! \param[in] id: the (core-local) ID of the neuron to search for in the +//! synaptic row +//! \param[in] row: the core-local address of the synaptic row +//! \param[out] sp_data: the address of a struct through which to return +//! weight, delay information +//! \return bool: was the search successful? +bool find_plastic_neuron_with_id( + uint32_t id, address_t row, structural_plasticity_data_t *sp_data) { + address_t fixed_region = synapse_row_fixed_region(row); + address_t plastic_region_address = synapse_row_plastic_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(plastic_region_address); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + plastic_synapse_t weight; + uint32_t delay; + + // Loop through plastic synapses + for (; plastic_synapse > 0; plastic_synapse--) { + // Get next control word (auto incrementing) + weight = *plastic_words++; + uint32_t control_word = *control_words++; + + // Check if index is the one I'm looking for + delay = synapse_row_sparse_delay(control_word, synapse_type_index_bits); + if (synapse_row_sparse_index(control_word, synapse_index_mask) == id) { + sp_data->weight = weight; + sp_data->offset = + synapse_row_num_plastic_controls(fixed_region) + - plastic_synapse; + 
sp_data->delay = delay; + return true; + } + } + + sp_data->weight = -1; + sp_data->offset = -1; + sp_data->delay = -1; + return false; +} + +//! \brief Remove the entry at the specified offset in the synaptic row +//! \param[in] offset: the offset in the row at which to remove the entry +//! \param[in] row: the core-local address of the synaptic row +//! \return bool: was the removal successful? +bool remove_plastic_neuron_at_offset(uint32_t offset, address_t row) { + address_t fixed_region = synapse_row_fixed_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(synapse_row_plastic_region(row)); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + + // Delete weight at offset + plastic_words[offset] = plastic_words[plastic_synapse - 1]; + plastic_words[plastic_synapse - 1] = 0; + + // Delete control word at offset + control_words[offset] = control_words[plastic_synapse - 1]; + control_words[plastic_synapse - 1] = 0; + + // Decrement FP + fixed_region[1]--; + + return true; +} + +//! ensuring the weight is of the correct type and size +static inline plastic_synapse_t weight_conversion(uint32_t weight) { + return (plastic_synapse_t) (0xFFFF & weight); +} + +//! packing all of the information into the required plastic control word +static inline control_t control_conversion( + uint32_t id, uint32_t delay, uint32_t type) { + control_t new_control = + (delay & ((1 << SYNAPSE_DELAY_BITS) - 1)) << synapse_type_index_bits; + new_control |= (type & ((1 << synapse_type_index_bits) - 1)) << synapse_index_bits; + new_control |= id & ((1 << synapse_index_bits) - 1); + return new_control; +} + +//! \brief Add a plastic entry in the synaptic row +//! \param[in] id: the (core-local) ID of the post-synaptic neuron to be added +//! \param[in] row: the core-local address of the synaptic row +//! \param[in] weight: the initial weight associated with the connection +//! 
\param[in] delay: the delay associated with the connection +//! \param[in] type: the type of the connection (e.g. inhibitory) +//! \return bool: was the addition successful? +bool add_plastic_neuron_with_id(uint32_t id, address_t row, + uint32_t weight, uint32_t delay, uint32_t type) { + plastic_synapse_t new_weight = weight_conversion(weight); + control_t new_control = control_conversion(id, delay, type); + + address_t fixed_region = synapse_row_fixed_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(synapse_row_plastic_region(row)); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + + // Add weight at offset + plastic_words[plastic_synapse] = new_weight; + + // Add control word at offset + control_words[plastic_synapse] = new_control; + + // Increment FP + fixed_region[1]++; + return true; +} +#endif From 2b38adca6680ca3526d45b1f242ef8729d23195e Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 10 Mar 2020 10:44:55 +0000 Subject: [PATCH 032/123] Updated sinusoid learning and print management --- .../neuron_impl_sinusoid_readout.h | 82 +++++++++++-------- .../neuron_model_sinusoid_readout_impl.c | 37 +++++---- .../synapse_dynamics_sinusoid_readout_impl.c | 39 +++++---- .../src/neuron/spike_processing.c | 6 +- .../synapse_type_eprop_adaptive.h | 3 + neural_modelling/src/neuron/synapses.c | 5 +- 6 files changed, 102 insertions(+), 70 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index e02dae987f2..43f3a2a2fa7 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -247,15 +247,15 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, inh_value, input_type, NUM_INHIBITORY_RECEPTORS); // Sum 
g_syn contributions from all receptors for recording - REAL total_exc = 0; - REAL total_inh = 0; - - for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ - total_exc += exc_input_values[i]; - } - for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ - total_inh += inh_input_values[i]; - } +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ +// total_exc += exc_input_values[i]; +// } +// for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ +// total_inh += inh_input_values[i]; +// } // Call functions to get the input values to be recorded // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; @@ -270,36 +270,48 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); -// if (neuron_index == 0){ recorded_variable_values[V_RECORDING_INDEX] = voltage; - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, 0.0k); - - // Calculate error - REAL error = result - global_parameters->target_V[target_ind]; - learning_signal = error; - - // Record readout - recorded_variable_values[V_RECORDING_INDEX] = -// result; - neuron->syn_state[0].z_bar; + if (neuron_index == 0){ + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, 0.0k); + + // Calculate error + REAL error = result - global_parameters->target_V[target_ind]; + learning_signal = error; + // Record Error + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + error; +// neuron->syn_state[3].delta_w; + + // Record readout + recorded_variable_values[V_RECORDING_INDEX] = + result; + // neuron->syn_state[0].z_bar; + + // Send error (learning signal) as packet with 
payload + // ToDo can't I just alter the global variable here? + while (!spin1_send_mc_packet( + key | neuron_index, bitsk(error), 1 )) { + spin1_delay_us(1); + } + } + else{ + // Record 'Error' + recorded_variable_values[V_RECORDING_INDEX] = +// neuron->syn_state[0].z_bar; + global_parameters->target_V[target_ind]; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + - global_parameters->target_V[target_ind]; + } // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; - neuron->syn_state[0].delta_w; - // Record Error - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - error; - - // Send error (learning signal) as packet with payload - // ToDo can't I just alter the global variable here? - while (!spin1_send_mc_packet( - key | neuron_index, bitsk(error), 1 )) { - spin1_delay_us(1); - } +// neuron->syn_state[0].delta_w; + exc_input_values[0]; + // If spike occurs, communicate to relevant parts of model if (spike) { diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index 24980655ba1..5e62b30c26d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -2,6 +2,7 @@ #include +extern uint32_t time; extern REAL learning_signal; REAL local_eta; @@ -49,7 +50,7 @@ state_t neuron_model_state_update( // } // Get the input in nA input_t input_this_timestep = - total_exc - total_inh + external_bias + neuron->I_offset; + exc_input[0] + exc_input[1] + neuron->I_offset; _lif_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep); @@ -59,7 +60,7 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - uint32_t total_synapses_per_neuron = 1; //todo should this be fixed + uint32_t total_synapses_per_neuron = 2; //todo should this be fixed? 
neuron->L = learning_signal * neuron->w_fb; @@ -71,20 +72,6 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - // reset input (can't have more than one spike per timestep - if (!syn_ind){// || neuron->syn_state[syn_ind].z_bar_inp){ - io_printf(IO_BUF, "total synapses = %u - syn_ind = %u\n" - "z_bar_inp = %k - z_bar = %k\n" - "L = %k = l * w_fb = %k * %k\n" - , - total_synapses_per_neuron, - syn_ind, - neuron->syn_state[syn_ind].z_bar_inp, - neuron->syn_state[syn_ind].z_bar, - neuron->L, learning_signal, neuron -> w_fb - ); - } - neuron->syn_state[syn_ind].z_bar_inp = 0; // ****************************************************************** @@ -112,7 +99,25 @@ state_t neuron_model_state_update( REAL this_dt_weight_change = // -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; -local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; +// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +// "L = %k = %k * %k = l * w_fb\n" +// "this dw = %k \t tot dw %k\n" +// , +// total_synapses_per_neuron, +// syn_ind, +// neuron->syn_state[syn_ind].z_bar_inp, +// neuron->syn_state[syn_ind].z_bar, +// time, +// neuron->L, learning_signal, neuron -> w_fb, +// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +// ); +// } + // reset input (can't have more than one spike per timestep + neuron->syn_state[syn_ind].z_bar_inp = 0; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 
4b882936532..9b486e1d0f4 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -288,21 +288,30 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? // int32_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING + int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; + if (delta_w){ + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", + delta_w, delta_w_int, (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11)); + io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", + delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); + } - if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d, 16b delta_w_int: %d, delta << 9: %d, delta << 10: %d, delta << 11: %d\n", - delta_w, delta_w_int, (int16_t)delta_w_int, (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 10), (int16_t)(delta_w_int << 11)); - } - - if (delta_w_int <= 0){ - current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 9)); - } else { - current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 9)); + if (delta_w_int < 0){ + current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); + } else { + current_state = 
weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + } + } + else { +// if (PRINT_PLASTICITY){ +// io_printf(IO_BUF, "delta_w: %k\n", delta_w); +// } + current_state = current_state; } - // Calculate regularisation error REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; @@ -375,13 +384,13 @@ bool synapse_dynamics_process_plastic_synapses( synapse_structure_get_update_state(*plastic_words, type); if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", - neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); +// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", +// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k, time: %u\n", neuron_ind, syn_ind_from_delay, type, current_state.initial_weight, - neuron->syn_state[syn_ind_from_delay].delta_w); + neuron->syn_state[syn_ind_from_delay].delta_w, time); } // Perform weight update: @@ -400,7 +409,7 @@ bool synapse_dynamics_process_plastic_synapses( synapse_type_index_bits); // Check for ring buffer saturation - int32_t accumulation = ring_buffers[ring_buffer_index] + + int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); // uint32_t sat_test = accumulation & 0x10000; diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 9ac9b6f6aa2..f49d5fffd37 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -168,8 +168,8 @@ static inline void setup_synaptic_dma_write(uint32_t dma_buffer_index) { static void multicast_packet_received_callback(uint key, uint 
payload) { use(payload); any_spike = true; - io_printf(IO_BUF, "mc packet received \n"); - log_debug("Received spike %x at %d, DMA Busy = %d", key, time, dma_busy); +// io_printf(IO_BUF, "mc packet received \n"); +// io_printf(IO_BUF, "Received spike %x at %d, DMA Busy = %d", key, time, dma_busy); // If there was space to add spike to incoming spike queue if (in_spikes_add_spike(key)) { @@ -204,7 +204,7 @@ static void dma_complete_callback(uint unused, uint tag) { log_debug("DMA transfer complete at time %u with tag %u", time, tag); - io_printf(IO_BUF, "Entering DMA Complete...\n"); +// io_printf(IO_BUF, "Entering DMA Complete...\n"); log_info("Entering DMA Complete...\n"); // Get pointer to current buffer diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h index dcb81b4e820..b7f102a4dd8 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h @@ -95,6 +95,9 @@ static inline void synapse_types_shape_input( static inline void synapse_types_add_neuron_input( index_t synapse_type_index, synapse_param_pointer_t parameter, input_t input) { +// if (input){ +// io_printf(IO_BUF, "index = %u, %d \t input = %u, %d\t%u\n", synapse_type_index, synapse_type_index, input, input, input>>3); +// } if (synapse_type_index == EXCITATORY_ONE) { parameter->input_buffer_excitatory_value += input; // = diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 1c3fdae470e..0648eabdd2c 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -320,6 +320,9 @@ void synapses_do_timestep_update(timer_t time) { // Convert ring-buffer entry to input and add on to correct // input for this synapse type and neuron +// if (ring_buffers[ring_buffer_index]){ +// io_printf(IO_BUF, "weight: %u, shift: %u\n", 
ring_buffers[ring_buffer_index], ring_buffer_to_input_left_shifts[synapse_type_index]); +// } neuron_add_inputs( synapse_type_index, neuron_index, synapses_convert_weight_to_input( @@ -343,7 +346,7 @@ bool synapses_process_synaptic_row( // Get address of non-plastic region from row address_t fixed_region_address = synapse_row_fixed_region(row); - io_printf(IO_BUF, "Processing Spike...\n"); +// io_printf(IO_BUF, "Processing Spike...\n"); // **TODO** multiple optimised synaptic row formats //if (plastic_tag(row) == 0) { // If this row has a plastic region From 23a926f0d1a94e683a7138e824098402993f9440 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Wed, 11 Mar 2020 15:57:34 +0000 Subject: [PATCH 033/123] delays now passed correctly, changes to prints --- .../neuron_impl_sinusoid_readout.h | 5 +++-- .../neuron_model_sinusoid_readout_impl.c | 2 +- .../synapse_dynamics_eprop_adaptive_impl.c | 4 ++-- .../synapse_dynamics_sinusoid_readout_impl.c | 19 ++++++++++++------- .../src/neuron/spike_processing.c | 2 +- .../synapse_dynamics/synapse_dynamics_stdp.py | 2 +- 6 files changed, 20 insertions(+), 14 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 43f3a2a2fa7..e1bb889363b 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -285,6 +285,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = error; // neuron->syn_state[3].delta_w; +// neuron->syn_state[0].z_bar; // Record readout recorded_variable_values[V_RECORDING_INDEX] = @@ -309,8 +310,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; -// 
neuron->syn_state[0].delta_w; - exc_input_values[0]; + neuron->syn_state[neuron_index].delta_w; +// exc_input_values[0]; // If spike occurs, communicate to relevant parts of model diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index 5e62b30c26d..caf05005a35 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -60,7 +60,7 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - uint32_t total_synapses_per_neuron = 2; //todo should this be fixed? + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? neuron->L = learning_signal * neuron->w_fb; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 437f0d33285..dd3552e172c 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -296,9 +296,9 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } if (delta_w_int <= 0){ - current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 9)); + current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); } else { - current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 9)); + current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 9b486e1d0f4..9be72bebca4 100644 --- 
a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -288,15 +288,19 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? // int32_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING +// int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; if (delta_w){ if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", - delta_w, delta_w_int, (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11)); - io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", - delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" +// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" + "\n", + delta_w, delta_w_int +// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) + ); +// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", +// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); } if (delta_w_int < 0){ @@ -387,8 +391,9 @@ bool 
synapse_dynamics_process_plastic_synapses( // io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", // neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k, time: %u\n", - neuron_ind, syn_ind_from_delay, type, + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, idx_bits: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", + neuron_ind, syn_ind_from_delay, + synapse_type_index_bits, type, current_state.initial_weight, neuron->syn_state[syn_ind_from_delay].delta_w, time); } diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index f49d5fffd37..660b9eeefaf 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -193,7 +193,7 @@ static void user_event_callback(uint unused0, uint unused1) { use(unused0); use(unused1); - io_printf(IO_BUF, "user callback triggered \n"); +// io_printf(IO_BUF, "user callback triggered \n"); setup_synaptic_dma_read(); } diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 11e6d29a05b..636801c7796 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -211,7 +211,7 @@ def get_plastic_synaptic_data( # Get the fixed data fixed_plastic = ( - ((dendritic_delays.astype("uint16") & 0xF) << + ((dendritic_delays.astype("uint16") & 0xFF) << (n_neuron_id_bits + n_synapse_type_bits)) | ((axonal_delays.astype("uint16") & 0xF) << (4 + n_neuron_id_bits + n_synapse_type_bits)) | From b325dfe0eca6fec123b02c67667edaadc89b5e19 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 13 Mar 2020 15:03:12 +0000 Subject: [PATCH 034/123] fix to allow extraction of signed 
weights, removed timing dependence check as no longer relevant and causing crashes --- .../neuron/synapse_dynamics/synapse_dynamics_stdp.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 636801c7796..78714de5001 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -131,8 +131,8 @@ def is_same_as(self, synapse_dynamics): if not isinstance(synapse_dynamics, SynapseDynamicsSTDP): return False return ( - self.__timing_dependence.is_same_as( - synapse_dynamics.timing_dependence) and + # self.__timing_dependence.is_same_as( + # synapse_dynamics.timing_dependence) and self.__weight_dependence.is_same_as( synapse_dynamics.weight_dependence) and (self.__dendritic_delay_fraction == @@ -313,7 +313,7 @@ def read_plastic_synaptic_data( n_half_words = synapse_structure.get_n_half_words_per_connection() half_word = synapse_structure.get_weight_half_word() pp_half_words = numpy.concatenate([ - pp[:size * n_half_words * 2].view("uint16")[ + pp[:size * n_half_words * 2].view("int16")[ half_word::n_half_words] for pp, size in zip(pp_without_headers, fp_size)]) @@ -325,7 +325,7 @@ def read_plastic_synaptic_data( (data_fixed & neuron_id_mask) + post_vertex_slice.lo_atom) connections["weight"] = pp_half_words connections["delay"] = (data_fixed >> ( - n_neuron_id_bits + n_synapse_type_bits)) & 0xF + n_neuron_id_bits + n_synapse_type_bits)) & 0xFF connections["delay"][connections["delay"] == 0] = 16 return connections From 5e9da368210b1f7f340ac1ee79af3b317012d348 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 13 Mar 2020 16:40:11 +0000 Subject: [PATCH 035/123] updates to reg firing, rest committed in case home --- .../neuron_impl_eprop_adaptive.h | 23 ++++++---- 
.../models/neuron_model_eprop_adaptive_impl.c | 29 ++++++++----- .../neuron_model_sinusoid_readout_impl.c | 10 ++--- .../neuron/plasticity/stdp/stdp_typedefs.h | 2 +- .../synapse_dynamics_eprop_adaptive_impl.c | 43 ++++++++++++------- .../synapse_dynamics_sinusoid_readout_impl.c | 5 +-- .../weight_dependence/weight_eprop_reg_impl.h | 30 ++++++------- .../models/neuron/builds/eprop_adaptive.py | 2 + 8 files changed, 84 insertions(+), 60 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index fa0045cd657..0499d6533eb 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -292,15 +292,20 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// B_t; // neuron->B; -// neuron->syn_state[0].z_bar; -// global_parameters->core_target_rate; -// neuron->syn_state[0].e_bar; - neuron->syn_state[0].el_a; -// exc_input_values[0]; // record input input (signed) -// learning_signal * neuron->w_fb; - + if (neuron_index == 1){ + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate; + } + else{ + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + // B_t; // neuron->B; + neuron->L; + // neuron->syn_state[0].z_bar; + // global_parameters->core_target_rate; + // neuron->syn_state[0].e_bar; + // neuron->syn_state[neuron_index].el_a; + // exc_input_values[0]; // record input input (signed) + // learning_signal * neuron->w_fb; + } // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 
f021106ecc1..c892c149191 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -89,7 +89,7 @@ state_t neuron_model_state_update( // 0.3k * (1.0k - psi_temp2) : 0.0k; - uint32_t total_synapses_per_neuron = 1; //todo should this be fixed + uint32_t total_synapses_per_neuron = 10; //todo should this be fixed // neuron->psi = neuron->psi << 10; @@ -108,16 +108,6 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - // reset input (can't have more than one spike per timestep - if (!syn_ind){ - io_printf(IO_BUF, "total synapses = %u\n" - "z_bar_inp = %k - z_bar = %k\n" - "L = %k = l * w_fb = %k * %k\n", - total_synapses_per_neuron, - neuron->syn_state[syn_ind].z_bar_inp, neuron->syn_state[syn_ind].z_bar, - neuron->L, learning_signal, neuron -> w_fb); - } - neuron->syn_state[syn_ind].z_bar_inp = 0; // ****************************************************************** @@ -146,6 +136,23 @@ state_t neuron_model_state_update( -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; +// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +// "L = %k = %k * %k = l * w_fb\n" +// "this dw = %k \t tot dw %k\n" +// , +// total_synapses_per_neuron, +// syn_ind, +// neuron->syn_state[syn_ind].z_bar_inp, +// neuron->syn_state[syn_ind].z_bar, +// time, +// neuron->L, learning_signal, neuron -> w_fb, +// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +// ); +// } + // reset input (can't have more than one spike per timestep + 
neuron->syn_state[syn_ind].z_bar_inp = 0; // io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", // local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index caf05005a35..e5ed361eb85 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -37,11 +37,11 @@ state_t neuron_model_state_update( // If outside of the refractory period if (neuron->refract_timer <= 0) { - REAL total_exc = 0; - REAL total_inh = 0; - - total_exc += exc_input[0]; - total_inh += inh_input[0]; +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// total_exc += exc_input[0]; +// total_inh += inh_input[0]; // for (int i=0; i < num_excitatory_inputs; i++){ // total_exc += exc_input[i]; // } diff --git a/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h b/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h index 58c87a4aff3..eb0e9ad6de3 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h +++ b/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h @@ -29,6 +29,6 @@ #define STDP_FIXED_MUL_16X16(a, b) maths_fixed_mul16(a, b, STDP_FIXED_POINT) -#define PRINT_PLASTICITY 1 +#define PRINT_PLASTICITY 0 #endif // _STDP_TYPEDEFS_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index dd3552e172c..4df50266e57 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -290,20 +290,35 @@ static inline final_state_t eprop_plasticity_update(update_state_t 
current_state // int16_t delta_w_int = (int) delta_w; // >> 15; - if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d, 16b delta_w_int: %d, delta << 9: %d, delta << 12: %d, delta << 16: %d\n", - delta_w, delta_w_int, (int16_t)delta_w_int, (int16_t)delta_w_int << 9, (int16_t)delta_w_int << 12, (int16_t)delta_w_int << 16); - } + if (delta_w){ + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" +// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" + "\n", + delta_w, delta_w_int +// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) + ); +// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", +// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); + } - if (delta_w_int <= 0){ - current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); - } else { - current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + if (delta_w_int < 0){ + current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); + } else { + current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + } + } + else { +// if (PRINT_PLASTICITY){ +// io_printf(IO_BUF, "delta_w: %k\n", delta_w); +// } + current_state = current_state; } // Calculate regularisation error REAL reg_error = global_parameters->core_target_rate - global_parameters->core_pop_rate; + io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); // Return final synaptic word and weight @@ -329,8 +344,6 @@ bool synapse_dynamics_process_plastic_synapses( // Could maybe have a single z_bar for the entire synaptic row and update 
it once here for all synaptic words? - - // Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { // Get next control word (auto incrementing) @@ -374,13 +387,13 @@ bool synapse_dynamics_process_plastic_synapses( synapse_structure_get_update_state(*plastic_words, type); if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", - neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); +// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", +// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u init w (plas): %d, summed_dw: %k\n", + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", neuron_ind, syn_ind_from_delay, type, current_state.initial_weight, - neuron->syn_state[syn_ind_from_delay].delta_w); + neuron->syn_state[syn_ind_from_delay].delta_w, time); } // Perform weight update: @@ -399,7 +412,7 @@ bool synapse_dynamics_process_plastic_synapses( synapse_type_index_bits); // Check for ring buffer saturation - int32_t accumulation = ring_buffers[ring_buffer_index] + + int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); // uint32_t sat_test = accumulation & 0x10000; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 9be72bebca4..a0cfa729334 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -391,9 +391,8 @@ bool synapse_dynamics_process_plastic_synapses( // io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", // neuron_ind, syn_ind_from_delay, 
type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, idx_bits: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", - neuron_ind, syn_ind_from_delay, - synapse_type_index_bits, type, + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", + neuron_ind, syn_ind_from_delay, type, current_state.initial_weight, neuron->syn_state[syn_ind_from_delay].delta_w, time); } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 4efbc91a520..3aa7d721d6b 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -100,35 +100,33 @@ static inline weight_t weight_get_final(weight_state_t new_state, // Apply eprop plasticity updates to initial weight int32_t new_weight = new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; + int32_t reg_weight = new_weight; + int32_t reg_change = 0; // Calculate regularisation - if (new_state.weight_region->reg_rate > 0.0k) { // if reg rate is zero, regularisation is turned off - if (reg_error > 0.1k) { - // increase weight (core rate is below target) - new_weight = new_weight - + (new_weight * new_state.weight_region->reg_rate * reg_error); - - } else if (reg_error < -0.1k){ - // reduce weight (core rate is above target) - new_weight = new_weight - - (new_weight * new_state.weight_region->reg_rate * reg_error); + if (new_state.weight_region->reg_rate > 0.0k){ // if reg rate is zero, regularisation is turned off + reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; + if (new_weight > 0){ + reg_weight = new_weight + reg_change; + } else if (new_weight < 0){ + reg_weight = new_weight - reg_change; } } - + io_printf(IO_BUF, 
"\tbefore minmax reg_w:%d, reg_shift:%d, /8:%d", reg_weight, reg_change, reg_change/8); // Clamp new weight to bounds - new_weight = MIN(new_state.weight_region->max_weight, - MAX(new_weight, new_state.weight_region->min_weight)); + reg_weight = MIN(new_state.weight_region->max_weight, + MAX(reg_weight, new_state.weight_region->min_weight)); if (PRINT_PLASTICITY){ io_printf(IO_BUF, "\told_weight:%d, a2+:%d, a2-:%d, " // "scaled a2+:%d, scaled a2-:%d," - " new_weight:%d\n", + " new_weight:%d, reg_weight:%d, reg_l_rate:%k, reg_error:%k\n", new_state.initial_weight, new_state.a2_plus, new_state.a2_minus, // scaled_a2_plus, scaled_a2_minus, - new_weight); + new_weight, reg_weight, new_state.weight_region->reg_rate, reg_error); } - return (weight_t) new_weight; + return (weight_t) reg_weight; } #endif // _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index df6bd80cff4..12ef882d44d 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -79,3 +79,5 @@ def __init__( model_name="eprop_adaptive", binary="eprop_adaptive.aplx", neuron_model=neuron_model, input_type=input_type, synapse_type=synapse_type, threshold_type=threshold_type) + + From fdc7cdfbbd0b381f1bc9f2159e6edfed76d02d54 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 17 Mar 2020 10:00:59 +0000 Subject: [PATCH 036/123] Fixed neurons per core, updated regularisation --- .../neuron/implementations/neuron_impl_eprop_adaptive.h | 5 ++++- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 2 +- .../stdp/synapse_dynamics_eprop_adaptive_impl.c | 9 ++++++--- .../stdp/weight_dependence/weight_eprop_reg_impl.h | 6 ++++-- spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py | 4 ++++ 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 0499d6533eb..506ce3e07f5 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -51,6 +51,7 @@ extern REAL learning_signal; +uint32_t neurons_in_pop = 1; //! Array of neuron states @@ -154,6 +155,8 @@ static void neuron_impl_load_neuron_parameters( log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); + neurons_in_pop = n_neurons; + if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); spin1_memcpy(global_parameters, &address[next], @@ -293,7 +296,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B if (neuron_index == 1){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; } else{ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index c892c149191..ab47a27a666 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -89,7 +89,7 @@ state_t neuron_model_state_update( // 0.3k * (1.0k - psi_temp2) : 0.0k; - uint32_t total_synapses_per_neuron = 10; //todo should this be fixed + uint32_t total_synapses_per_neuron = 20; //todo should this be fixed // neuron->psi = neuron->psi << 10; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 4df50266e57..74221713576 100644 --- 
a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -48,6 +48,8 @@ static uint32_t synapse_type_mask; uint32_t num_plastic_pre_synaptic_events = 0; uint32_t plastic_saturation_count = 0; +uint32_t neurons_in_partition = 1; + //--------------------------------------- // Macros //--------------------------------------- @@ -237,6 +239,8 @@ address_t synapse_dynamics_initialise( return NULL; } + neurons_in_partition = n_neurons; + // Load weight dependence data address_t weight_result = weight_initialise( weight_region_address, n_synapse_types, @@ -315,10 +319,9 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state current_state = current_state; } - // Calculate regularisation error - REAL reg_error = global_parameters->core_target_rate - global_parameters->core_pop_rate; - io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); + REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / neurons_in_partition; +// io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); // Return final synaptic word and weight diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 3aa7d721d6b..2d71b3c1890 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -104,7 +104,7 @@ static inline weight_t weight_get_final(weight_state_t new_state, int32_t reg_change = 0; // Calculate regularisation - if (new_state.weight_region->reg_rate > 0.0k){ // if 
reg rate is zero, regularisation is turned off + if (new_state.weight_region->reg_rate > 0.0k){// && (reg_error > 0.1k || reg_error < -0.1k)){ // if reg rate is zero or error small, regularisation is turned off reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; if (new_weight > 0){ reg_weight = new_weight + reg_change; @@ -112,7 +112,9 @@ static inline weight_t weight_get_final(weight_state_t new_state, reg_weight = new_weight - reg_change; } } - io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, /8:%d", reg_weight, reg_change, reg_change/8); + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, /8:%d", reg_weight, reg_change, reg_change/8); + } // Clamp new weight to bounds reg_weight = MIN(new_state.weight_region->max_weight, MAX(reg_weight, new_state.weight_region->min_weight)); diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 12ef882d44d..e3c79e79a27 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -80,4 +80,8 @@ def __init__( neuron_model=neuron_model, input_type=input_type, synapse_type=synapse_type, threshold_type=threshold_type) + @classmethod + def get_max_atoms_per_core(cls): + return 8 + From 6a8295eb6c9d8162341cec5ff51ba93c96b7d89f Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 17 Mar 2020 14:19:58 +0000 Subject: [PATCH 037/123] removed an annoying print --- neural_modelling/src/neuron/spike_processing.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 660b9eeefaf..136b26c6351 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -205,7 +205,7 @@ static void dma_complete_callback(uint unused, uint tag) { log_debug("DMA transfer complete at time %u 
with tag %u", time, tag); // io_printf(IO_BUF, "Entering DMA Complete...\n"); - log_info("Entering DMA Complete...\n"); +// log_info("Entering DMA Complete...\n"); // Get pointer to current buffer uint32_t current_buffer_index = buffer_being_read; From c57e81a477af2866849020617a0ed992a7d8384d Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 19 Mar 2020 16:06:13 +0000 Subject: [PATCH 038/123] more synapses per eprop neuron, regularised to within a boundary --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 2 +- .../plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index ab47a27a666..0475498c870 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -89,7 +89,7 @@ state_t neuron_model_state_update( // 0.3k * (1.0k - psi_temp2) : 0.0k; - uint32_t total_synapses_per_neuron = 20; //todo should this be fixed + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed // neuron->psi = neuron->psi << 10; diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 2d71b3c1890..ea12d3931d2 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -104,7 +104,7 @@ static inline weight_t weight_get_final(weight_state_t new_state, int32_t reg_change = 0; // Calculate regularisation - if (new_state.weight_region->reg_rate > 0.0k){// && (reg_error > 0.1k || reg_error < -0.1k)){ // if reg rate is zero or error small, regularisation is turned off + if 
(new_state.weight_region->reg_rate > 0.0k && (reg_error > 1.k || reg_error < -1.k)){ // if reg rate is zero or error small, regularisation is turned off reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; if (new_weight > 0){ reg_weight = new_weight + reg_change; From 1f60cd71f2766d66ee8cf439f7b251db414c2c30 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 19 Mar 2020 16:20:46 +0000 Subject: [PATCH 039/123] reduced error margin --- .../plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index ea12d3931d2..67e2ea6eb74 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -104,7 +104,7 @@ static inline weight_t weight_get_final(weight_state_t new_state, int32_t reg_change = 0; // Calculate regularisation - if (new_state.weight_region->reg_rate > 0.0k && (reg_error > 1.k || reg_error < -1.k)){ // if reg rate is zero or error small, regularisation is turned off + if (new_state.weight_region->reg_rate > 0.0k && (reg_error > 0.2k || reg_error < -0.2k)){ // if reg rate is zero or error small, regularisation is turned off reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; if (new_weight > 0){ reg_weight = new_weight + reg_change; From 1b4bc1fb3a08ffedfdc69f81ad3c8b29180de7c6 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Mon, 23 Mar 2020 19:41:25 +0000 Subject: [PATCH 040/123] Add comments to n_neurons parameter in neuron implementation --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 506ce3e07f5..264e2d18751 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -51,7 +51,7 @@ extern REAL learning_signal; -uint32_t neurons_in_pop = 1; +uint32_t neurons_in_pop; //! Array of neuron states @@ -155,7 +155,7 @@ static void neuron_impl_load_neuron_parameters( log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); - neurons_in_pop = n_neurons; + neurons_in_pop = n_neurons; // get number of neurons running on this core for use during execution if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); @@ -296,7 +296,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B if (neuron_index == 1){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate } else{ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = From e37842c926a8faac98391e1dbd0eb359a5d89324 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Mon, 23 Mar 2020 19:50:04 +0000 Subject: [PATCH 041/123] Be careful setting the size of the e-prop synapse array --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 0475498c870..1e0b2e2dd4a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -89,7 +89,7 @@ state_t neuron_model_state_update( // 0.3k * 
(1.0k - psi_temp2) : 0.0k; - uint32_t total_synapses_per_neuron = 100; //todo should this be fixed + uint32_t total_synapses_per_neuron = 100; // This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. This should make it possible to do a pause and resume cycle and have reliable unloading of data. // neuron->psi = neuron->psi << 10; From 84237f18ba3aabbdb6ff49a9d55d06051c8002d0 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Mon, 23 Mar 2020 19:55:35 +0000 Subject: [PATCH 042/123] warning about having same parameter but different name, also this needs swapping to an inverse multiply for performance --- .../plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 74221713576..1a5a9879654 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -48,7 +48,7 @@ static uint32_t synapse_type_mask; uint32_t num_plastic_pre_synaptic_events = 0; uint32_t plastic_saturation_count = 0; -uint32_t neurons_in_partition = 1; +uint32_t syn_dynamics_neurons_in_partition; //--------------------------------------- // Macros @@ -239,7 +239,7 @@ address_t synapse_dynamics_initialise( return NULL; } - neurons_in_partition = n_neurons; + syn_dynamics_neurons_in_partition = n_neurons; // Load weight dependence data address_t weight_result = weight_initialise( @@ -320,7 +320,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } // Calculate regularisation error - REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / neurons_in_partition; + REAL reg_error = 
(global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); From aaf2e561d741234de865d38a243522f9a66b8c81 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Apr 2020 11:43:47 +0100 Subject: [PATCH 043/123] Fix pause-and-resume for eprop neuron model --- .../neuron_impl_eprop_adaptive.h | 27 ++++++-- .../models/neuron_model_eprop_adaptive_impl.c | 18 ++++- .../neuron_model_eprop_adaptive.py | 69 ++++++++++++++----- 3 files changed, 88 insertions(+), 26 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 264e2d18751..6ebc7629b72 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -72,6 +72,9 @@ global_neuron_params_pointer_t global_parameters; // The synapse shaping parameters static synapse_param_t *neuron_synapse_shaping_params; +// Bool to regularise on the first run +static bool initial_regularise = true; + static bool neuron_impl_initialise(uint32_t n_neurons) { // allocate DTCM for the global parameter details if (sizeof(global_neuron_params_t)) { @@ -204,14 +207,19 @@ static void neuron_impl_load_neuron_parameters( // ********************************************** // ******** for eprop regularisation ************ // ********************************************** - global_parameters->core_target_rate = global_parameters->core_target_rate - * n_neurons; // scales target rate depending on number of neurons - global_parameters->core_pop_rate = global_parameters->core_pop_rate - * n_neurons; // scale initial value, too + if 
(initial_regularise) { + global_parameters->core_target_rate = global_parameters->core_target_rate + * n_neurons; // scales target rate depending on number of neurons + global_parameters->core_pop_rate = global_parameters->core_pop_rate + * n_neurons; // scale initial value, too + initial_regularise = false; + } for (index_t n = 0; n < n_neurons; n++) { neuron_model_print_parameters(&neuron_array[n]); + log_debug("Neuron id %u", n); + neuron_model_print_state_variables(&neuron_array[n]); } #if LOG_LEVEL >= LOG_DEBUG @@ -403,6 +411,14 @@ static void neuron_impl_store_neuron_parameters( next += n_words_needed(n_neurons * sizeof(neuron_t)); } + log_info("****** STORING ******"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + log_debug("Neuron id %u", n); + neuron_model_print_state_variables(&neuron_array[n]); + } + log_info("****** STORING COMPLETE ******"); + if (sizeof(input_type_t)) { log_debug("writing input type parameters"); spin1_memcpy(&address[next], input_type_array, @@ -430,6 +446,9 @@ static void neuron_impl_store_neuron_parameters( n_neurons * sizeof(additional_input_t)); next += n_words_needed(n_neurons * sizeof(additional_input_t)); } + + log_info("global_parameters, core_target_rate, core_pop_rate %k %k", + global_parameters->core_target_rate, global_parameters->core_pop_rate); } #if LOG_LEVEL >= LOG_DEBUG diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 1e0b2e2dd4a..c2688d79ab4 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -89,7 +89,10 @@ state_t neuron_model_state_update( // 0.3k * (1.0k - psi_temp2) : 0.0k; - uint32_t total_synapses_per_neuron = 100; // This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. 
This should make it possible to do a pause and resume cycle and have reliable unloading of data. + // This parameter is OK to update, as the actual size of the array is set in the + // header file, which matches the Python code. This should make it possible to do + // a pause and resume cycle and have reliable unloading of data. + uint32_t total_synapses_per_neuron = 100; // neuron->psi = neuron->psi << 10; @@ -133,8 +136,8 @@ state_t neuron_model_state_update( // Update cached total weight change // ****************************************************************** REAL this_dt_weight_change = - -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; + local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ // io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " @@ -177,6 +180,15 @@ state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { log_debug("V membrane = %11.4k mv", neuron->V_membrane); + log_debug("learning = %k ", neuron->L); + + log_debug("Printing synapse state values:"); + for (uint32_t syn_ind=0; syn_ind < 100; syn_ind++){ + log_debug("synapse number %u delta_w, z_bar, z_bar_inp, e_bar, el_a %11.4k %11.4k %11.4k %11.4k %11.4k", + syn_ind, neuron->syn_state[syn_ind].delta_w, + neuron->syn_state[syn_ind].z_bar, neuron->syn_state[syn_ind].z_bar_inp, + neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].el_a); + } } void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 402cbab4d7f..3d72596277f 100644 --- 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -47,6 +47,12 @@ L = "learning_signal" W_FB = "feedback_weight" +DELTA_W = "delta_w" +Z_BAR_OLD = "z_bar_old" +Z_BAR = "z_bar" +EP_A = "ep_a" +E_BAR = "e_bar" + UNITS = { V: 'mV', V_REST: 'mV', @@ -80,7 +86,7 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__z", "__a", "__psi", - + # threshold params "__B", "__small_b", @@ -89,11 +95,11 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__beta", # "_adpt" "__scalar", - + # reg params "__target_rate", "__tau_err", - + # learning signal "__l", "__w_fb", @@ -110,14 +116,14 @@ def __init__( v_reset, tau_refrac, psi, - + # threshold params B, small_b, small_b_0, tau_a, beta, - + # regularisation params target_rate, tau_err, @@ -199,7 +205,7 @@ def __init__( # learning signal self.__l = l self.__w_fb = w_fb - + self.__eta = eta @@ -231,11 +237,18 @@ def add_state_variables(self, state_variables): state_variables[PSI] = self.__psi state_variables[Z] = 0 # initalise to zero state_variables[A] = 0 # initialise to zero - + state_variables[BIG_B] = self.__B state_variables[SMALL_B] = self.__small_b - - state_variables[L] = self.__l + + state_variables[L] = self.__l + + for n in range(SYNAPSES_PER_NEURON): + state_variables[DELTA_W+str(n)] = 0 + state_variables[Z_BAR_OLD+str(n)] = 0 + state_variables[Z_BAR+str(n)] = 0 + state_variables[EP_A+str(n)] = 0 + state_variables[E_BAR+str(n)] = 0 @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -283,13 +296,14 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): ] # create synaptic state - init all state to zero - eprop_syn_init = [0, - 0, - 0, - 0, - 0] - # extend to appropriate fan-in - values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + for n in range(SYNAPSES_PER_NEURON): + eprop_syn_init = [state_variables[DELTA_W+str(n)], + state_variables[Z_BAR_OLD+str(n)], + 
state_variables[Z_BAR+str(n)], + state_variables[EP_A+str(n)], + state_variables[E_BAR+str(n)]] + # extend to appropriate fan-in + values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) return values @@ -314,11 +328,21 @@ def get_global_values(self, ts): @overrides(AbstractNeuronModel.update_values) def update_values(self, values, parameters, state_variables): + delta_w = [0] * SYNAPSES_PER_NEURON + z_bar_old = [0] * SYNAPSES_PER_NEURON + z_bar = [0] * SYNAPSES_PER_NEURON + ep_a = [0] * SYNAPSES_PER_NEURON + e_bar = [0] * SYNAPSES_PER_NEURON # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb) = values # Not sure this will work with the new array of synapse!!! + l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar) = values + + # Not sure this will work with the new array of synapse!!! + # (Note that this function is only called if you do e.g. run(), set(), + # run() i.e. 
it's not used by auto-pause and resume, so this is + # untested) # todo check alignment on this # Copy the changed data only @@ -331,6 +355,13 @@ def update_values(self, values, parameters, state_variables): state_variables[L] = l + for n in range(SYNAPSES_PER_NEURON): + state_variables[DELTA_W+str(n)] = delta_w[n] + state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] + state_variables[Z_BAR+str(n)] = z_bar[n] + state_variables[EP_A+str(n)] = ep_a[n] + state_variables[E_BAR+str(n)] = e_bar[n] + @property def v_init(self): @@ -427,11 +458,11 @@ def beta(self): @beta.setter def beta(self, new_value): self.__beta = new_value - + @property def w_fb(self): return self.__w_fb - + @w_fb.setter def w_fb(self, new_value): self.__w_fb = new_value From c46b527e16014dcd015019ab304ac6ff8dc51641 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 7 Apr 2020 14:34:09 +0100 Subject: [PATCH 044/123] change log_info's to log_debug --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 6ebc7629b72..9c3e39d9c26 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -411,13 +411,13 @@ static void neuron_impl_store_neuron_parameters( next += n_words_needed(n_neurons * sizeof(neuron_t)); } - log_info("****** STORING ******"); + log_debug("****** STORING ******"); for (index_t n = 0; n < n_neurons; n++) { neuron_model_print_parameters(&neuron_array[n]); log_debug("Neuron id %u", n); neuron_model_print_state_variables(&neuron_array[n]); } - log_info("****** STORING COMPLETE ******"); + log_debug("****** STORING COMPLETE ******"); if (sizeof(input_type_t)) { log_debug("writing input type parameters"); @@ -447,7 +447,7 @@ static void 
neuron_impl_store_neuron_parameters( next += n_words_needed(n_neurons * sizeof(additional_input_t)); } - log_info("global_parameters, core_target_rate, core_pop_rate %k %k", + log_debug("global_parameters, core_target_rate, core_pop_rate %k %k", global_parameters->core_target_rate, global_parameters->core_pop_rate); } From 74b38e798d1f796e230fa77a3b135c076eef5df7 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Wed, 8 Apr 2020 17:32:45 +0100 Subject: [PATCH 045/123] Add comment for compiler fix: += => -= --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index c2688d79ab4..063e4b1050e 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -137,7 +137,7 @@ state_t neuron_model_state_update( // ****************************************************************** REAL this_dt_weight_change = local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) // if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ // io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " From 29aa11b5bdcc18bd602dfe1bc77077785e77f626 Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Wed, 8 Apr 2020 17:38:01 +0100 Subject: [PATCH 046/123] updated total_synapses_per_neuron comment --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c 
b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 063e4b1050e..d2735edbd8d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -90,8 +90,9 @@ state_t neuron_model_state_update( (1.0k - psi_temp2) : 0.0k; // This parameter is OK to update, as the actual size of the array is set in the - // header file, which matches the Python code. This should make it possible to do - // a pause and resume cycle and have reliable unloading of data. + // header file, which matches the Python code and aligns memory alocations. + // The value here can be reduced to limit the number of synapse state updates + // required by the neuron uint32_t total_synapses_per_neuron = 100; From 4caf284ae2e29909de214901952cd21ee6f13f90 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 24 Apr 2020 10:16:03 +0100 Subject: [PATCH 047/123] Small changes to regularisation, merged with new branch, left right task not included --- .../neuron_impl_eprop_adaptive.h | 33 +++++-- .../models/neuron_model_eprop_adaptive_impl.c | 85 ++++++++++++++++++- .../neuron_model_sinusoid_readout_impl.c | 8 +- .../synapse_dynamics_eprop_adaptive_impl.c | 12 +-- .../weight_dependence/weight_eprop_reg_impl.h | 20 +++-- .../neuron_model_eprop_adaptive.py | 69 ++++++++++----- .../neuron_model_sinusoid_readout.py | 2 +- 7 files changed, 179 insertions(+), 50 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 506ce3e07f5..9c3e39d9c26 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -51,7 +51,7 @@ extern REAL learning_signal; -uint32_t neurons_in_pop = 1; +uint32_t neurons_in_pop; //! 
Array of neuron states @@ -72,6 +72,9 @@ global_neuron_params_pointer_t global_parameters; // The synapse shaping parameters static synapse_param_t *neuron_synapse_shaping_params; +// Bool to regularise on the first run +static bool initial_regularise = true; + static bool neuron_impl_initialise(uint32_t n_neurons) { // allocate DTCM for the global parameter details if (sizeof(global_neuron_params_t)) { @@ -155,7 +158,7 @@ static void neuron_impl_load_neuron_parameters( log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); - neurons_in_pop = n_neurons; + neurons_in_pop = n_neurons; // get number of neurons running on this core for use during execution if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); @@ -204,14 +207,19 @@ static void neuron_impl_load_neuron_parameters( // ********************************************** // ******** for eprop regularisation ************ // ********************************************** - global_parameters->core_target_rate = global_parameters->core_target_rate - * n_neurons; // scales target rate depending on number of neurons - global_parameters->core_pop_rate = global_parameters->core_pop_rate - * n_neurons; // scale initial value, too + if (initial_regularise) { + global_parameters->core_target_rate = global_parameters->core_target_rate + * n_neurons; // scales target rate depending on number of neurons + global_parameters->core_pop_rate = global_parameters->core_pop_rate + * n_neurons; // scale initial value, too + initial_regularise = false; + } for (index_t n = 0; n < n_neurons; n++) { neuron_model_print_parameters(&neuron_array[n]); + log_debug("Neuron id %u", n); + neuron_model_print_state_variables(&neuron_array[n]); } #if LOG_LEVEL >= LOG_DEBUG @@ -296,7 +304,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B if (neuron_index == 1){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / 
neurons_in_pop; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate } else{ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = @@ -403,6 +411,14 @@ static void neuron_impl_store_neuron_parameters( next += n_words_needed(n_neurons * sizeof(neuron_t)); } + log_debug("****** STORING ******"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + log_debug("Neuron id %u", n); + neuron_model_print_state_variables(&neuron_array[n]); + } + log_debug("****** STORING COMPLETE ******"); + if (sizeof(input_type_t)) { log_debug("writing input type parameters"); spin1_memcpy(&address[next], input_type_array, @@ -430,6 +446,9 @@ static void neuron_impl_store_neuron_parameters( n_neurons * sizeof(additional_input_t)); next += n_words_needed(n_neurons * sizeof(additional_input_t)); } + + log_debug("global_parameters, core_target_rate, core_pop_rate %k %k", + global_parameters->core_target_rate, global_parameters->core_pop_rate); } #if LOG_LEVEL >= LOG_DEBUG diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 0475498c870..aad791bf42e 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -40,6 +40,9 @@ void neuron_model_set_global_neuron_params( local_eta = params->eta; io_printf(IO_BUF, "local eta = %k\n", local_eta); + io_printf(IO_BUF, "core_pop_rate = %k\n", params->core_pop_rate); + io_printf(IO_BUF, "core_target_rate = %k\n", params->core_target_rate); + io_printf(IO_BUF, "rate_exp_TC = %k\n\n", params->rate_exp_TC); // Does Nothing - no params } @@ -89,7 +92,10 @@ state_t neuron_model_state_update( // 0.3k * (1.0k - psi_temp2) : 0.0k; - uint32_t 
total_synapses_per_neuron = 100; //todo should this be fixed +// This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. This should make it possible to do a pause and resume cycle and have reliable unloading of data. + uint32_t total_input_synapses_per_neuron = 200; //todo should this be fixed + uint32_t total_recurrent_synapses_per_neuron = 0; //todo should this be fixed + uint32_t recurrent_offset = 100; // neuron->psi = neuron->psi << 10; @@ -101,13 +107,15 @@ state_t neuron_model_state_update( // All operations now need doing once per eprop synapse - for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ + for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ // ****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + (1 - neuron->exp_TC) * +// + + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** @@ -154,6 +162,66 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; +// io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", +// local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); + + } + + + // All operations now need doing once per eprop synapse + for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ + // 
****************************************************************** + // Low-pass filter incoming spike train + // ****************************************************************** + neuron->syn_state[syn_ind].z_bar = + neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + + // ****************************************************************** + // Update eligibility vector + // ****************************************************************** + neuron->syn_state[syn_ind].el_a = + (neuron->psi * neuron->syn_state[syn_ind].z_bar) + + (rho - neuron->psi * neuron->beta) * + neuron->syn_state[syn_ind].el_a; + + + // ****************************************************************** + // Update eligibility trace + // ****************************************************************** + REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - + neuron->beta * neuron->syn_state[syn_ind].el_a); + + neuron->syn_state[syn_ind].e_bar = + neuron->exp_TC * neuron->syn_state[syn_ind].e_bar + + (1 - neuron->exp_TC) * temp_elig_trace; + + // ****************************************************************** + // Update cached total weight change + // ****************************************************************** + REAL this_dt_weight_change = + local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) + +// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +// "L = %k = %k * %k = l * w_fb\n" +// "this dw = %k \t tot dw %k\n" +// , +// total_synapses_per_neuron, +// syn_ind, +// 
neuron->syn_state[syn_ind].z_bar_inp, +// neuron->syn_state[syn_ind].z_bar, +// time, +// neuron->L, learning_signal, neuron -> w_fb, +// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +// ); +// } + // reset input (can't have more than one spike per timestep + neuron->syn_state[syn_ind].z_bar_inp = 0; + // io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", // local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); @@ -177,6 +245,15 @@ state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { log_debug("V membrane = %11.4k mv", neuron->V_membrane); + log_debug("learning = %k ", neuron->L); + + log_debug("Printing synapse state values:"); + for (uint32_t syn_ind=0; syn_ind < 100; syn_ind++){ + log_debug("synapse number %u delta_w, z_bar, z_bar_inp, e_bar, el_a %11.4k %11.4k %11.4k %11.4k %11.4k", + syn_ind, neuron->syn_state[syn_ind].delta_w, + neuron->syn_state[syn_ind].z_bar, neuron->syn_state[syn_ind].z_bar_inp, + neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].el_a); + } } void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { @@ -192,5 +269,5 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); - io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); + io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); } diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index e5ed361eb85..5195f62e46c 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -60,7 +60,7 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - uint32_t total_synapses_per_neuron = 
100; //todo should this be fixed? + uint32_t total_synapses_per_neuron = 200; //todo should this be fixed? neuron->L = learning_signal * neuron->w_fb; @@ -71,7 +71,9 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + (1 - neuron->exp_TC) * +// + + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** @@ -154,7 +156,7 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); - io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); + io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); // io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); // io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 74221713576..667161100e2 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -48,7 +48,7 @@ static uint32_t synapse_type_mask; uint32_t num_plastic_pre_synaptic_events = 0; uint32_t plastic_saturation_count = 0; -uint32_t neurons_in_partition = 1; +uint32_t syn_dynamics_neurons_in_partition; //--------------------------------------- // Macros @@ -239,7 +239,7 @@ address_t synapse_dynamics_initialise( return NULL; } - neurons_in_partition = n_neurons; + syn_dynamics_neurons_in_partition = n_neurons; // Load weight dependence data 
address_t weight_result = weight_initialise( @@ -287,12 +287,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Test weight change // delta_w = -0.1k; - - // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? -// int16_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING -// int16_t delta_w_int = (int) delta_w; // >> 15; - if (delta_w){ if (PRINT_PLASTICITY){ @@ -320,7 +315,8 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } // Calculate regularisation error - REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / neurons_in_partition; + REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike +// REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 67e2ea6eb74..e867308c216 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -102,18 +102,22 @@ static inline weight_t weight_get_final(weight_state_t new_state, new_state.initial_weight + new_state.a2_plus + 
new_state.a2_minus; int32_t reg_weight = new_weight; int32_t reg_change = 0; + REAL reg_boundary = 1k; // Calculate regularisation - if (new_state.weight_region->reg_rate > 0.0k && (reg_error > 0.2k || reg_error < -0.2k)){ // if reg rate is zero or error small, regularisation is turned off - reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; - if (new_weight > 0){ - reg_weight = new_weight + reg_change; - } else if (new_weight < 0){ - reg_weight = new_weight - reg_change; - } + if (new_state.weight_region->reg_rate > 0.0k && (reg_error > reg_boundary || reg_error < -reg_boundary)){ // if reg rate is zero or error small, regularisation is turned off +// reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; +// if (reg_error > 0){ +// reg_error -= reg_boundary; +// } else if (reg_error < 0){ +// reg_error += reg_boundary; +// } + reg_change = new_state.weight_region->max_weight * new_state.weight_region->reg_rate * reg_error; + reg_weight = new_weight + reg_change; +// io_printf(IO_BUF, "\tw:%d + reg_shift:%d = reg_w:%d -- err:%k\n", new_weight, reg_change, reg_weight, reg_error); } if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, /8:%d", reg_weight, reg_change, reg_change/8); + io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, max:%d", reg_weight, reg_change, new_state.weight_region->max_weight); } // Clamp new weight to bounds reg_weight = MIN(new_state.weight_region->max_weight, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 402cbab4d7f..3d72596277f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -47,6 +47,12 @@ L = "learning_signal" W_FB = "feedback_weight" +DELTA_W = "delta_w" +Z_BAR_OLD = "z_bar_old" +Z_BAR = "z_bar" +EP_A = "ep_a" +E_BAR 
= "e_bar" + UNITS = { V: 'mV', V_REST: 'mV', @@ -80,7 +86,7 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__z", "__a", "__psi", - + # threshold params "__B", "__small_b", @@ -89,11 +95,11 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__beta", # "_adpt" "__scalar", - + # reg params "__target_rate", "__tau_err", - + # learning signal "__l", "__w_fb", @@ -110,14 +116,14 @@ def __init__( v_reset, tau_refrac, psi, - + # threshold params B, small_b, small_b_0, tau_a, beta, - + # regularisation params target_rate, tau_err, @@ -199,7 +205,7 @@ def __init__( # learning signal self.__l = l self.__w_fb = w_fb - + self.__eta = eta @@ -231,11 +237,18 @@ def add_state_variables(self, state_variables): state_variables[PSI] = self.__psi state_variables[Z] = 0 # initalise to zero state_variables[A] = 0 # initialise to zero - + state_variables[BIG_B] = self.__B state_variables[SMALL_B] = self.__small_b - - state_variables[L] = self.__l + + state_variables[L] = self.__l + + for n in range(SYNAPSES_PER_NEURON): + state_variables[DELTA_W+str(n)] = 0 + state_variables[Z_BAR_OLD+str(n)] = 0 + state_variables[Z_BAR+str(n)] = 0 + state_variables[EP_A+str(n)] = 0 + state_variables[E_BAR+str(n)] = 0 @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -283,13 +296,14 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): ] # create synaptic state - init all state to zero - eprop_syn_init = [0, - 0, - 0, - 0, - 0] - # extend to appropriate fan-in - values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + for n in range(SYNAPSES_PER_NEURON): + eprop_syn_init = [state_variables[DELTA_W+str(n)], + state_variables[Z_BAR_OLD+str(n)], + state_variables[Z_BAR+str(n)], + state_variables[EP_A+str(n)], + state_variables[E_BAR+str(n)]] + # extend to appropriate fan-in + values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) return values @@ -314,11 +328,21 @@ def get_global_values(self, ts): @overrides(AbstractNeuronModel.update_values) def 
update_values(self, values, parameters, state_variables): + delta_w = [0] * SYNAPSES_PER_NEURON + z_bar_old = [0] * SYNAPSES_PER_NEURON + z_bar = [0] * SYNAPSES_PER_NEURON + ep_a = [0] * SYNAPSES_PER_NEURON + e_bar = [0] * SYNAPSES_PER_NEURON # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb) = values # Not sure this will work with the new array of synapse!!! + l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar) = values + + # Not sure this will work with the new array of synapse!!! + # (Note that this function is only called if you do e.g. run(), set(), + # run() i.e. it's not used by auto-pause and resume, so this is + # untested) # todo check alignment on this # Copy the changed data only @@ -331,6 +355,13 @@ def update_values(self, values, parameters, state_variables): state_variables[L] = l + for n in range(SYNAPSES_PER_NEURON): + state_variables[DELTA_W+str(n)] = delta_w[n] + state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] + state_variables[Z_BAR+str(n)] = z_bar[n] + state_variables[EP_A+str(n)] = ep_a[n] + state_variables[E_BAR+str(n)] = e_bar[n] + @property def v_init(self): @@ -427,11 +458,11 @@ def beta(self): @beta.setter def beta(self, new_value): self.__beta = new_value - + @property def w_fb(self): return self.__w_fb - + @w_fb.setter def w_fb(self, new_value): self.__w_fb = new_value diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index b9d5fe1db73..2124c4e2d5a 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -100,7 +100,7 @@ def __init__( super(NeuronModelLeakyIntegrateAndFireSinusoidReadout, self).__init__( - data_types= data_types, + data_types=data_types, 
global_data_types=global_data_types ) From 03f1ad2a058796adb4c63f54491a66ec26a11547 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 24 Apr 2020 10:17:27 +0100 Subject: [PATCH 048/123] Basic version of left right, not currently working --- neural_modelling/makefiles/neuron/Makefile | 1 + .../Makefile | 17 + .../neuron_impl_left_right_readout.h | 586 ++++++++++++++++++ .../neuron_model_left_right_readout_impl.c | 183 ++++++ .../neuron_model_left_right_readout_impl.h | 95 +++ ...synapse_dynamics_left_right_readout_impl.c | 575 +++++++++++++++++ .../pyNN/models/neuron/builds/__init__.py | 3 +- .../neuron/builds/left_right_readout.py | 53 ++ .../models/neuron/neuron_models/__init__.py | 4 +- .../neuron_model_left_right_readout.py | 378 +++++++++++ 10 files changed, 1893 insertions(+), 2 deletions(-) create mode 100644 neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile create mode 100644 neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h create mode 100644 neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c create mode 100644 neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h create mode 100644 neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c create mode 100644 spynnaker/pyNN/models/neuron/builds/left_right_readout.py create mode 100644 spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index 4076269d5b3..c771579e2e3 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -17,6 +17,7 @@ MODELS = eprop_adaptive \ eprop_adaptive_stdp_mad_eprop_reg \ sinusoid_readout \ sinusoid_readout_stdp_mad_eprop_reg \ + left_right_readout_stdp_mad_eprop_reg \ # IF_curr_exp \ IF_cond_exp \ IZK_curr_exp \ diff --git 
a/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile new file mode 100644 index 00000000000..6417929a30a --- /dev/null +++ b/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile @@ -0,0 +1,17 @@ +APP = $(notdir $(CURDIR)) + +OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_left_right_readout_impl.c +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_left_right_readout.h + +#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c +SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c + +TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c +TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h +WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h + +include ../neural_build.mk + + diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h new file mode 100644 index 00000000000..e8eaccfa3bc --- /dev/null +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -0,0 +1,586 @@ +#ifndef _NEURON_IMPL_LEFT_RIGHT_READOUT_H_ +#define _NEURON_IMPL_LEFT_RIGHT_READOUT_H_ + +#include "neuron_impl.h" + +// Includes for model parts used in this implementation +#include +#include +#include +#include +#include + +// Further includes +#include +#include +#include +#include +#include +#include + +#define V_RECORDING_INDEX 0 +#define GSYN_EXCITATORY_RECORDING_INDEX 1 +#define 
GSYN_INHIBITORY_RECORDING_INDEX 2 + +#ifndef NUM_EXCITATORY_RECEPTORS +#define NUM_EXCITATORY_RECEPTORS 1 +#error NUM_EXCITATORY_RECEPTORS was undefined. It should be defined by a synapse\ + shaping include +#endif + +#ifndef NUM_INHIBITORY_RECEPTORS +#define NUM_INHIBITORY_RECEPTORS 1 +#error NUM_INHIBITORY_RECEPTORS was undefined. It should be defined by a synapse\ + shaping include +#endif + +//! Array of neuron states +neuron_pointer_t neuron_array; + +//! Input states array +static input_type_pointer_t input_type_array; + +//! Additional input array +static additional_input_pointer_t additional_input_array; + +//! Threshold states array +static threshold_type_pointer_t threshold_type_array; + +//! Global parameters for the neurons +static global_neuron_params_pointer_t global_parameters; + +// The synapse shaping parameters +static synapse_param_t *neuron_synapse_shaping_params; + +static REAL next_spike_time = 0; +extern uint32_t time; +extern key_t key; +extern REAL learning_signal; +static uint32_t target_ind = 0; + +// Left right parameters +typedef enum +{ + STATE_CUE, + STATE_WAITING, + STATE_PROMPT, +} current_state_t; + +current_state_t current_state = 0; +uint32_t current_time = 0; +uint32_t current_cue = 0; +uint32_t total_cues = 7; +uint32_t current_cue_direction = 2; // 0 = left, 1 = right +uint32_t accumulative_direction = 0; // if > total_cues / 2 = right +uint32_t wait_between_cues = 50; // ms +uint32_t duration_of_cue = 100; // ms +uint32_t wait_before_result = 1000; // ms but should be a range between 500-1500 +uint32_t prompt_duration = 150; //ms +uint32_t ticks_for_mean = 0; +bool completed_broadcast = true; + + +static bool neuron_impl_initialise(uint32_t n_neurons) { + + // allocate DTCM for the global parameter details + if (sizeof(global_neuron_params_t) > 0) { + global_parameters = (global_neuron_params_t *) spin1_malloc( + sizeof(global_neuron_params_t)); + if (global_parameters == NULL) { + log_error("Unable to allocate global neuron 
parameters" + "- Out of DTCM"); + return false; + } + } + + // Allocate DTCM for neuron array + if (sizeof(neuron_t) != 0) { + neuron_array = (neuron_t *) spin1_malloc(n_neurons * sizeof(neuron_t)); + if (neuron_array == NULL) { + log_error("Unable to allocate neuron array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for input type array and copy block of data + if (sizeof(input_type_t) != 0) { + input_type_array = (input_type_t *) spin1_malloc( + n_neurons * sizeof(input_type_t)); + if (input_type_array == NULL) { + log_error("Unable to allocate input type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for additional input array and copy block of data + if (sizeof(additional_input_t) != 0) { + additional_input_array = (additional_input_pointer_t) spin1_malloc( + n_neurons * sizeof(additional_input_t)); + if (additional_input_array == NULL) { + log_error("Unable to allocate additional input array" + " - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for threshold type array and copy block of data + if (sizeof(threshold_type_t) != 0) { + threshold_type_array = (threshold_type_t *) spin1_malloc( + n_neurons * sizeof(threshold_type_t)); + if (threshold_type_array == NULL) { + log_error("Unable to allocate threshold type array - Out of DTCM"); + return false; + } + } + + // Allocate DTCM for synapse shaping parameters + if (sizeof(synapse_param_t) != 0) { + neuron_synapse_shaping_params = (synapse_param_t *) spin1_malloc( + n_neurons * sizeof(synapse_param_t)); + if (neuron_synapse_shaping_params == NULL) { + log_error("Unable to allocate synapse parameters array" + " - Out of DTCM"); + return false; + } + } + + // Seed the random input + validate_mars_kiss64_seed(global_parameters->kiss_seed); + + // Initialise pointers to Neuron parameters in STDP code +// synapse_dynamics_set_neuron_array(neuron_array); + log_info("set pointer to neuron array in stdp code"); + + return true; +} + +static void neuron_impl_add_inputs( 
+ index_t synapse_type_index, index_t neuron_index, + input_t weights_this_timestep) { + // simple wrapper to synapse type input function + synapse_param_pointer_t parameters = + &(neuron_synapse_shaping_params[neuron_index]); + synapse_types_add_neuron_input(synapse_type_index, + parameters, weights_this_timestep); +} + +static void neuron_impl_load_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("reading parameters, next is %u, n_neurons is %u ", + next, n_neurons); + + //log_debug("writing neuron global parameters"); + spin1_memcpy(global_parameters, &address[next], + sizeof(global_neuron_params_t)); + next += (sizeof(global_neuron_params_t) + 3) / 4; + + log_debug("reading neuron local parameters"); + spin1_memcpy(neuron_array, &address[next], n_neurons * sizeof(neuron_t)); + next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + + log_debug("reading input type parameters"); + spin1_memcpy(input_type_array, &address[next], + n_neurons * sizeof(input_type_t)); + next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + + log_debug("reading threshold type parameters"); + spin1_memcpy(threshold_type_array, &address[next], + n_neurons * sizeof(threshold_type_t)); + next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + + log_debug("reading synapse parameters"); + spin1_memcpy(neuron_synapse_shaping_params, &address[next], + n_neurons * sizeof(synapse_param_t)); + next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + + log_debug("reading additional input type parameters"); + spin1_memcpy(additional_input_array, &address[next], + n_neurons * sizeof(additional_input_t)); + next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + + neuron_model_set_global_neuron_params(global_parameters); + + io_printf(IO_BUF, "\nPrinting global params\n"); + io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); + io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); + io_printf(IO_BUF, "seed 3: %u \n", 
global_parameters->kiss_seed[2]); + io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); + io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); +// io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); + io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); + io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); + io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); + io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); + io_printf(IO_BUF, "poisson key: %k \n\n", global_parameters->p_key); + io_printf(IO_BUF, "poisson pop size: %k \n\n", global_parameters->p_pop_size); + + + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + +// io_printf(IO_BUF, "size of global params: %u", +// sizeof(global_neuron_params_t)); + + + + #if LOG_LEVEL >= LOG_DEBUG + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + } + log_debug("-------------------------------------\n"); + //} + #endif // LOG_LEVEL >= LOG_DEBUG +} + + + + +static bool neuron_impl_do_timestep_update(index_t neuron_index, + input_t external_bias, state_t *recorded_variable_values) { + + // Get the neuron itself + neuron_pointer_t neuron = &neuron_array[neuron_index]; + bool spike = false; + +// current_time = time & 0x3ff; // repeats on a cycle of 1024 entries in array + +// io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); +// io_printf(IO_BUF, "Target: %k\n\n", +// global_parameters->target_V[target_ind]); + + // Get the input_type parameters and voltage for this neuron + input_type_pointer_t input_type = &input_type_array[neuron_index]; + + // Get threshold and additional input parameters for this neuron + threshold_type_pointer_t threshold_type = + &threshold_type_array[neuron_index]; + additional_input_pointer_t additional_input = + 
&additional_input_array[neuron_index]; + synapse_param_pointer_t synapse_type = + &neuron_synapse_shaping_params[neuron_index]; + + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + + + // Get the exc and inh values from the synapses + input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); + input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); + + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_value, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_value, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ +// total_exc += exc_input_values[i]; +// } +// for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ +// total_inh += inh_input_values[i]; +// } + + // Call functions to get the input values to be recorded +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + + // Call functions to convert exc_input and inh_input to current + input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); + + external_bias += additional_input_get_input_value_as_current( + additional_input, voltage); + + if (current_cue == 0 && completed_broadcast){ // reset start of new test + completed_broadcast = false; + current_time = time; + current_state = STATE_CUE; + accumulative_direction = 0; + // error params + ticks_for_mean = 0; + global_parameters->mean_0 == 0k; + global_parameters->mean_1 == 0k; + } + + if (current_state == STATE_CUE){ + if (neuron_index == 2){ // this is the error source + 
recorded_variable_values[V_RECORDING_INDEX] = accumulative_direction; + } + if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ + // do nothing? + } + else{ + // pick broadcast if just entered + if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ + // pick new value and broadcast + REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 + if (random_value < 0.5k){ + current_cue_direction = 0; + } + else{ + current_cue_direction = 1; + } + accumulative_direction += current_cue_direction; + REAL payload; + payload = global_parameters->rate_on; + for (int j = current_cue_direction*global_parameters->p_pop_size; + j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + } + } + // turn off and reset if finsihed + else if ((time - current_time) % (wait_between_cues + duration_of_cue) == (wait_between_cues + duration_of_cue) - 1){ + current_cue += 1; + REAL payload; + payload = global_parameters->rate_off; + for (int j = current_cue_direction*global_parameters->p_pop_size; + j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + } + if (current_cue >= total_cues){ + current_state = (current_state + 1) % 3; + } + } + } + } + else if (current_state == STATE_WAITING){ + // waiting for prompt, all things ok + if (current_cue >= total_cues){ + current_time = time; + current_cue = 0; + } + if ((time - current_time) >= wait_before_result){ + current_state = (current_state + 1) % 3; + } + } + else if (current_state == STATE_PROMPT){ + if (!ticks_for_mean){ + current_time = time; + // send packets to the variable poissons with the updated states + for (int i = 0; i < 4; i++){ + REAL payload; + payload = global_parameters->rate_on; 
+ for (int j = 2*global_parameters->p_pop_size; + j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + } + } + } + if (neuron_index == 0){ + recorded_variable_values[V_RECORDING_INDEX] = voltage; + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, -50k); + // Finally, set global membrane potential to updated value + global_parameters->readout_V_0 = result; + + } else if (neuron_index == 1){ + recorded_variable_values[V_RECORDING_INDEX] = voltage; + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, -50k); + + // Finally, set global membrane potential to updated value + global_parameters->readout_V_1 = result; + + } else if (neuron_index == 2){ // this is the error source + + recorded_variable_values[V_RECORDING_INDEX] = accumulative_direction; + // Switched to always broadcasting error but with packet + ticks_for_mean += 1; //todo is it a running error like this over recall? + // Softmax of the exc and inh inputs representing 1 and 0 respectively + // may need to scale to stop huge numbers going in the exp + global_parameters->mean_0 += global_parameters->readout_V_0; + global_parameters->mean_1 += global_parameters->readout_V_1; + // divide -> 1/x + accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); + accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); + accum softmax_0 = exp_0 / (exp_1 + exp_0); + accum softmax_1 = exp_1 / (exp_1 + exp_0); + // What to do if log(0)? 
+ if (accumulative_direction > total_cues >> 1){ + global_parameters->cross_entropy = -logk(softmax_1); + } + else{ + global_parameters->cross_entropy = -logk(softmax_0); + } + while (!spin1_send_mc_packet( + key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { + spin1_delay_us(1); + } + } +// if ((time - current_time) >= wait_before_result){ +// current_state = 0; +// completed_broadcast = true; +// for (int i = 0; i < 4; i++){ +// REAL payload; +// payload = global_parameters->rate_off; +// for (int j = 2*global_parameters->p_pop_size; +// j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ +// spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); +// } +// } +// } + } +// recorded_variable_values[V_RECORDING_INDEX] = voltage; +// if (neuron_index == 0){ +// // update neuron parameters +// state_t result = neuron_model_state_update( +// NUM_EXCITATORY_RECEPTORS, exc_input_values, +// NUM_INHIBITORY_RECEPTORS, inh_input_values, +// external_bias, neuron, 0.0k); +// +// // Calculate error +// REAL error = result - global_parameters->target_V[target_ind]; +// learning_signal = error; +// // Record Error +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// error; +//// neuron->syn_state[3].delta_w; +//// neuron->syn_state[0].z_bar; +// +// // Record readout +// recorded_variable_values[V_RECORDING_INDEX] = +// result; +// // neuron->syn_state[0].z_bar; +// +// // Send error (learning signal) as packet with payload +//// while (!spin1_send_mc_packet( +//// key | neuron_index, bitsk(error), 1 )) { +//// spin1_delay_us(1); +//// } +// } +// else{ +// // Record 'Error' +// recorded_variable_values[V_RECORDING_INDEX] = +// neuron->syn_state[0].z_bar; +//// global_parameters->target_V[target_ind]; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// - global_parameters->target_V[target_ind]; +// } +// // Record target +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 
+//// global_parameters->target_V[target_ind]; +// neuron->syn_state[neuron_index].delta_w; +//// exc_input_values[0]; + + + // If spike occurs, communicate to relevant parts of model + if (spike) { + // Call relevant model-based functions + // Tell the neuron model +// neuron_model_has_spiked(neuron); + + // Tell the additional input + additional_input_has_spiked(additional_input); + } + + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); + + #if LOG_LEVEL >= LOG_DEBUG + neuron_model_print_state_variables(neuron); + #endif // LOG_LEVEL >= LOG_DEBUG + + // Return the boolean to the model timestep update + return spike; +} + + + + + +//! \brief stores neuron parameter back into sdram +//! \param[in] address: the address in sdram to start the store +static void neuron_impl_store_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("writing parameters"); + + //log_debug("writing neuron global parameters"); + spin1_memcpy(&address[next], global_parameters, + sizeof(global_neuron_params_t)); + next += (sizeof(global_neuron_params_t) + 3) / 4; + + log_debug("writing neuron local parameters"); + spin1_memcpy(&address[next], neuron_array, + n_neurons * sizeof(neuron_t)); + next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + + log_debug("writing input type parameters"); + spin1_memcpy(&address[next], input_type_array, + n_neurons * sizeof(input_type_t)); + next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + + log_debug("writing threshold type parameters"); + spin1_memcpy(&address[next], threshold_type_array, + n_neurons * sizeof(threshold_type_t)); + next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + + log_debug("writing synapse parameters"); + spin1_memcpy(&address[next], neuron_synapse_shaping_params, + n_neurons * sizeof(synapse_param_t)); + next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + + log_debug("writing additional input type parameters"); + 
spin1_memcpy(&address[next], additional_input_array, + n_neurons * sizeof(additional_input_t)); + next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; +} + +#if LOG_LEVEL >= LOG_DEBUG +void neuron_impl_print_inputs(uint32_t n_neurons) { + bool empty = true; + for (index_t i = 0; i < n_neurons; i++) { + empty = empty + && (bitsk(synapse_types_get_excitatory_input( + &(neuron_synapse_shaping_params[i])) + - synapse_types_get_inhibitory_input( + &(neuron_synapse_shaping_params[i]))) == 0); + } + + if (!empty) { + log_debug("-------------------------------------\n"); + + for (index_t i = 0; i < n_neurons; i++) { + input_t input = + synapse_types_get_excitatory_input( + &(neuron_synapse_shaping_params[i])) + - synapse_types_get_inhibitory_input( + &(neuron_synapse_shaping_params[i])); + if (bitsk(input) != 0) { + log_debug("%3u: %12.6k (= ", i, input); + synapse_types_print_input( + &(neuron_synapse_shaping_params[i])); + log_debug(")\n"); + } + } + log_debug("-------------------------------------\n"); + } +} + +void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + synapse_types_print_parameters(&(neuron_synapse_shaping_params[n])); + } + log_debug("-------------------------------------\n"); +} + +const char *neuron_impl_get_synapse_type_char(uint32_t synapse_type) { + return synapse_types_get_type_char(synapse_type); +} +#endif // LOG_LEVEL >= LOG_DEBUG + +#endif // _NEURON_IMPL_LEFT_RIGHT_READOUT_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c new file mode 100644 index 00000000000..4039efffea3 --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -0,0 +1,183 @@ +#include "neuron_model_left_right_readout_impl.h" + +#include + +extern uint32_t time; +extern REAL learning_signal; +REAL 
local_eta; + +// simple Leaky I&F ODE +static inline void _lif_neuron_closed_form( + neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { + + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + + // update membrane voltage + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +} + +void neuron_model_set_global_neuron_params( + global_neuron_params_pointer_t params) { + use(params); + + local_eta = params->eta; + + io_printf(IO_BUF, "local eta = %k\n", local_eta); + io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); + io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); + io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); + io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); + io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); + io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); + io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); + io_printf(IO_BUF, "p_key = %u\n", params->p_key); + io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); + io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); + io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +// io_printf(IO_BUF, "local eta = %k\n", params->); + + // Does Nothing - no params +} + +state_t neuron_model_state_update( + uint16_t num_excitatory_inputs, input_t* exc_input, + uint16_t num_inhibitory_inputs, input_t* inh_input, + input_t external_bias, neuron_pointer_t neuron, REAL dummy) { + + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); + log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); + use(dummy); + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// total_exc += exc_input[0]; +// total_inh += inh_input[0]; +// for (int i=0; i < num_excitatory_inputs; i++){ +// total_exc += exc_input[i]; +// } +// for (int i=0; i< num_inhibitory_inputs; i++){ +// total_inh += 
inh_input[i]; +// } + // Get the input in nA + input_t input_this_timestep = + exc_input[0] + exc_input[1] + neuron->I_offset; + + _lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep); + } else { + + // countdown refractory timer + neuron->refract_timer -= 1; + } + + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? + + neuron->L = learning_signal * neuron->w_fb; + + // All operations now need doing once per eprop synapse + for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ + // ****************************************************************** + // Low-pass filter incoming spike train + // ****************************************************************** + neuron->syn_state[syn_ind].z_bar = + neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + + // ****************************************************************** + // Update eligibility vector + // ****************************************************************** +// neuron->syn_state[syn_ind].el_a = +// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + +// (rho - neuron->psi * neuron->beta) * +// neuron->syn_state[syn_ind].el_a; + + + // ****************************************************************** + // Update eligibility trace + // ****************************************************************** +// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +// neuron->beta * neuron->syn_state[syn_ind].el_a); +// +// neuron->syn_state[syn_ind].e_bar = +// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar +// + (1 - neuron->exp_TC) * temp_elig_trace; + + // ****************************************************************** + // Update cached total weight change + // ****************************************************************** + REAL this_dt_weight_change = +// -local_eta 
* neuron->L * neuron->syn_state[syn_ind].e_bar; + -local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + + neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; +// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +// "L = %k = %k * %k = l * w_fb\n" +// "this dw = %k \t tot dw %k\n" +// , +// total_synapses_per_neuron, +// syn_ind, +// neuron->syn_state[syn_ind].z_bar_inp, +// neuron->syn_state[syn_ind].z_bar, +// time, +// neuron->L, learning_signal, neuron -> w_fb, +// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +// ); +// } + // reset input (can't have more than one spike per timestep + neuron->syn_state[syn_ind].z_bar_inp = 0; + + } + + return neuron->V_membrane; +} + +void neuron_model_has_spiked(neuron_pointer_t neuron) { + + // reset membrane voltage + neuron->V_membrane = neuron->V_reset; + + // reset refractory timer + neuron->refract_timer = neuron->T_refract; +} + +state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { + return neuron->V_membrane; +} + +void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { + log_debug("V membrane = %11.4k mv", neuron->V_membrane); +} + +void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { + io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); + io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); + + io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); + io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); + + io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); + + io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); + + io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); + + io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); + +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// 
io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); +// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", +// neuron->time_to_spike_ticks); + +// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); +// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +} diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h new file mode 100644 index 00000000000..cb22e535f3b --- /dev/null +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -0,0 +1,95 @@ +#ifndef _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ +#define _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ + +#include "neuron_model.h" +#include "random.h" + +#define SYNAPSES_PER_NEURON 250 + + +typedef struct eprop_syn_state_t { + REAL delta_w; // weight change to apply + REAL z_bar_inp; + REAL z_bar; // low-pass filtered spike train +// REAL el_a; // adaptive component of eligibility vector +// REAL e_bar; // low-pass filtered eligibility trace +}eprop_syn_state_t; + +///////////////////////////////////////////////////////////// +// definition for LIF neuron parameters +typedef struct neuron_t { + // membrane voltage [mV] + REAL V_membrane; + + // membrane resting voltage [mV] + REAL V_rest; + + // membrane resistance [MOhm] + REAL R_membrane; + + // 'fixed' computation parameter - time constant multiplier for + // closed-form solution + // exp(-(machine time step in ms)/(R * C)) [.] 
+ REAL exp_TC; + + // offset current [nA] + REAL I_offset; + + // countdown to end of next refractory period [timesteps] + int32_t refract_timer; + + // post-spike reset membrane voltage [mV] + REAL V_reset; + + // refractory time of neuron [timesteps] + int32_t T_refract; + + + // Poisson compartment params +// REAL mean_isi_ticks; +// REAL time_to_spike_ticks; +// +// int32_t time_since_last_spike; +// REAL rate_at_last_setting; +// REAL rate_update_threshold; + + REAL L; // learning signal + REAL w_fb; // feedback weight + + // array of synaptic states - peak fan-in of >250 for this case + eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; + + + // Poisson compartment params +// REAL mean_isi_ticks; +// REAL time_to_spike_ticks; +// +// int32_t time_since_last_spike; +// REAL rate_at_last_setting; +// REAL rate_update_threshold; + + +// // Should be in global params +// mars_kiss64_seed_t spike_source_seed; // array of 4 values +//// UFRACT seconds_per_tick; +// REAL ticks_per_second; + +} neuron_t; + +typedef struct global_neuron_params_t { + mars_kiss64_seed_t kiss_seed; // array of 4 values + REAL ticks_per_second; + REAL readout_V_0; + REAL readout_V_1; +// REAL prob_command; + REAL rate_on; + REAL rate_off; + REAL mean_0; + REAL mean_1; + REAL cross_entropy; + uint32_t p_key; + uint32_t p_pop_size; + REAL eta; +} global_neuron_params_t; + +#endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c new file mode 100644 index 00000000000..e322d9b1b7c --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -0,0 +1,575 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published 
by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +// Spinn_common includes +#include "static-assert.h" + +// sPyNNaker neural modelling includes +#include + +// Plasticity includes +#include "maths.h" +#include "post_events.h" + +#include "weight_dependence/weight.h" +#include "timing_dependence/timing.h" +#include +#include +#include + + +#include +//#include +#include + +extern neuron_pointer_t neuron_array; +//extern global_neuron_params_pointer_t global_parameters; + +static uint32_t synapse_type_index_bits; +static uint32_t synapse_index_bits; +static uint32_t synapse_index_mask; +static uint32_t synapse_type_index_mask; +static uint32_t synapse_delay_index_type_bits; +static uint32_t synapse_type_mask; + +uint32_t num_plastic_pre_synaptic_events = 0; +uint32_t plastic_saturation_count = 0; + +//--------------------------------------- +// Macros +//--------------------------------------- +// The plastic control words used by Morrison synapses store an axonal delay +// in the upper 3 bits. 
+// Assuming a maximum of 16 delay slots, this is all that is required as: +// +// 1) Dendritic + Axonal <= 15 +// 2) Dendritic >= Axonal +// +// Therefore: +// +// * Maximum value of dendritic delay is 15 (with axonal delay of 0) +// - It requires 4 bits +// * Maximum value of axonal delay is 7 (with dendritic delay of 8) +// - It requires 3 bits +// +// | Axonal delay | Dendritic delay | Type | Index | +// |---------------------------|--------------------|-------------------|--------------------| +// | SYNAPSE_AXONAL_DELAY_BITS | SYNAPSE_DELAY_BITS | SYNAPSE_TYPE_BITS | SYNAPSE_INDEX_BITS | +// | | | SYNAPSE_TYPE_INDEX_BITS | +// |---------------------------|--------------------|----------------------------------------| +#ifndef SYNAPSE_AXONAL_DELAY_BITS +#define SYNAPSE_AXONAL_DELAY_BITS 3 +#endif + +#define SYNAPSE_AXONAL_DELAY_MASK \ + ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1) + + +uint32_t RECURRENT_SYNAPSE_OFFSET = 100; + +//--------------------------------------- +// Structures +//--------------------------------------- +typedef struct { + pre_trace_t prev_trace; + uint32_t prev_time; +} pre_event_history_t; + +post_event_history_t *post_event_history; + +/* PRIVATE FUNCTIONS */ + +//--------------------------------------- +// Synapse update loop +//--------------------------------------- +//static inline final_state_t plasticity_update_synapse( +// uint32_t time, +// const uint32_t last_pre_time, const pre_trace_t last_pre_trace, +// const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, +// const uint32_t delay_axonal, update_state_t current_state, +// const post_event_history_t *post_event_history) { +// // Apply axonal delay to time of last presynaptic spike +// const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; +// +// // Get the post-synaptic window of events to be processed +// const uint32_t window_begin_time = +// (delayed_last_pre_time >= delay_dendritic) +// ? 
(delayed_last_pre_time - delay_dendritic) : 0; +// const uint32_t window_end_time = time + delay_axonal - delay_dendritic; +// post_event_window_t post_window = post_events_get_window_delayed( +// post_event_history, window_begin_time, window_end_time); +// +// log_debug("\tPerforming deferred synapse update at time:%u", time); +// log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", +// window_begin_time, window_end_time, post_window.prev_time, +// post_window.num_events); +// +// // print_event_history(post_event_history); +// // print_delayed_window_events(post_event_history, window_begin_time, +// // window_end_time, delay_dendritic); +// +// // Process events in post-synaptic window +// while (post_window.num_events > 0) { +// const uint32_t delayed_post_time = +// *post_window.next_time + delay_dendritic; +// log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", +// delayed_post_time); +// +// // Apply spike to state +// current_state = timing_apply_post_spike( +// delayed_post_time, *post_window.next_trace, delayed_last_pre_time, +// last_pre_trace, post_window.prev_time, post_window.prev_trace, +// current_state); +// +// // Go onto next event +// post_window = post_events_next_delayed(post_window, delayed_post_time); +// } +// +// const uint32_t delayed_pre_time = time + delay_axonal; +// log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", +// delayed_pre_time, post_window.prev_time); +// +// // Apply spike to state +// // **NOTE** dendritic delay is subtracted +// current_state = timing_apply_pre_spike( +// delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, +// post_window.prev_time, post_window.prev_trace, current_state); +// +// // Return final synaptic word and weight +// return synapse_structure_get_final_state(current_state); +//} + +//--------------------------------------- +// Synaptic row plastic-region implementation +//--------------------------------------- +static 
inline plastic_synapse_t* plastic_synapses( + address_t plastic_region_address) { + const uint32_t pre_event_history_size_words = + sizeof(pre_event_history_t) / sizeof(uint32_t); + static_assert( + pre_event_history_size_words * sizeof(uint32_t) == sizeof(pre_event_history_t), + "Size of pre_event_history_t structure should be a multiple" + " of 32-bit words"); + + return (plastic_synapse_t *) + &plastic_region_address[pre_event_history_size_words]; +} + +//--------------------------------------- +static inline pre_event_history_t *plastic_event_history( + address_t plastic_region_address) { + return (pre_event_history_t *) &plastic_region_address[0]; +} + +void synapse_dynamics_print_plastic_synapses( + address_t plastic_region_address, address_t fixed_region_address, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + use(plastic_region_address); + use(fixed_region_address); + use(ring_buffer_to_input_buffer_left_shifts); + +#if LOG_LEVEL >= LOG_DEBUG + // Extract separate arrays of weights (from plastic region), + // Control words (from fixed region) and number of plastic synapses + plastic_synapse_t *plastic_words = plastic_synapses(plastic_region_address); + const control_t *control_words = + synapse_row_plastic_controls(fixed_region_address); + size_t plastic_synapse = + synapse_row_num_plastic_controls(fixed_region_address); + + log_debug("Plastic region %u synapses\n", plastic_synapse); + + // Loop through plastic synapses + for (uint32_t i = 0; i < plastic_synapse; i++) { + // Get next control word (auto incrementing control word) + uint32_t control_word = *control_words++; + uint32_t synapse_type = synapse_row_sparse_type( + control_word, synapse_index_bits, synapse_type_mask); + + // Get weight + update_state_t update_state = synapse_structure_get_update_state( + *plastic_words++, synapse_type); + final_state_t final_state = synapse_structure_get_final_state( + update_state); + weight_t weight = synapse_structure_get_final_weight(final_state); + + 
log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); + synapses_print_weight( + weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); + log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", + synapse_row_sparse_delay(control_word, synapse_type_index_bits), + synapse_types_get_type_char(synapse_type), + synapse_row_sparse_index(control_word, synapse_index_mask), + SYNAPSE_DELAY_MASK, synapse_type_index_bits); + } +#endif // LOG_LEVEL >= LOG_DEBUG +} + +//--------------------------------------- +static inline index_t sparse_axonal_delay(uint32_t x) { +#if 1 + use(x); + return 0; +#else + return (x >> synapse_delay_index_type_bits) & SYNAPSE_AXONAL_DELAY_MASK; +#endif +} + +address_t synapse_dynamics_initialise( + address_t address, uint32_t n_neurons, uint32_t n_synapse_types, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + // Load timing dependence data + address_t weight_region_address = timing_initialise(address); + if (address == NULL) { + return NULL; + } + + // Load weight dependence data + address_t weight_result = weight_initialise( + weight_region_address, n_synapse_types, + ring_buffer_to_input_buffer_left_shifts); + if (weight_result == NULL) { + return NULL; + } + + post_event_history = post_events_init_buffers(n_neurons); + if (post_event_history == NULL) { + return NULL; + } + + uint32_t n_neurons_power_2 = n_neurons; + uint32_t log_n_neurons = 1; + if (n_neurons != 1) { + if (!is_power_of_2(n_neurons)) { + n_neurons_power_2 = next_power_of_2(n_neurons); + } + log_n_neurons = ilog_2(n_neurons_power_2); + } + + uint32_t n_synapse_types_power_2 = n_synapse_types; + if (!is_power_of_2(n_synapse_types)) { + n_synapse_types_power_2 = next_power_of_2(n_synapse_types); + } + uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); + + synapse_type_index_bits = log_n_neurons + log_n_synapse_types; + synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; + synapse_index_bits = log_n_neurons; + synapse_index_mask = (1 << 
synapse_index_bits) - 1; + synapse_delay_index_type_bits = + SYNAPSE_DELAY_BITS + synapse_type_index_bits; + synapse_type_mask = (1 << log_n_synapse_types) - 1; + + return weight_result; +} + + +static inline final_state_t eprop_plasticity_update(update_state_t current_state, + REAL delta_w){ + + // Test weight change + // delta_w = -0.1k; + + + // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? +// int32_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING + int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING +// int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING +// int16_t delta_w_int = (int) delta_w; // >> 15; + + if (delta_w){ + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" +// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" + "\n", + delta_w, delta_w_int +// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) + ); +// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", +// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); + } + + if (delta_w_int < 0){ + current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); + } else { + current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + } + } + else { +// if (PRINT_PLASTICITY){ +// io_printf(IO_BUF, "delta_w: %k\n", delta_w); +// } + current_state = current_state; + } + + // Calculate regularisation error + REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; + + + // Return final synaptic word and weight + return synapse_structure_get_final_state(current_state, 
reg_error); +} + + + + +bool synapse_dynamics_process_plastic_synapses( + address_t plastic_region_address, address_t fixed_region_address, + weight_t *ring_buffers, uint32_t time) { + // Extract separate arrays of plastic synapses (from plastic region), + // Control words (from fixed region) and number of plastic synapses + plastic_synapse_t *plastic_words = + plastic_synapses(plastic_region_address); + const control_t *control_words = + synapse_row_plastic_controls(fixed_region_address); + size_t plastic_synapse = + synapse_row_num_plastic_controls(fixed_region_address); + + num_plastic_pre_synaptic_events += plastic_synapse; + + // Could maybe have a single z_bar for the entire synaptic row and update it once here for all synaptic words? + + + + // Loop through plastic synapses + for (; plastic_synapse > 0; plastic_synapse--) { + // Get next control word (auto incrementing) + uint32_t control_word = *control_words++; + + // Extract control-word components + // **NOTE** cunningly, control word is just the same as lower + // 16-bits of 32-bit fixed synapse so same functions can be used +// uint32_t delay_axonal = sparse_axonal_delay(control_word); + + uint32_t delay = 1.0k; + uint32_t syn_ind_from_delay = + synapse_row_sparse_delay(control_word, synapse_type_index_bits); + +// uint32_t delay_dendritic = synapse_row_sparse_delay( +// control_word, synapse_type_index_bits); + uint32_t type = synapse_row_sparse_type( + control_word, synapse_index_bits, synapse_type_mask); + uint32_t index = + synapse_row_sparse_index(control_word, synapse_index_mask); + uint32_t type_index = synapse_row_sparse_type_index( + control_word, synapse_type_index_mask); + + + int32_t neuron_ind = synapse_row_sparse_index(control_word, synapse_index_mask); + + // For low pass filter of incoming spike train on this synapse + // Use postsynaptic neuron index to access neuron struct, + + if (type==1){ + // this is a recurrent synapse: add 100 to index to correct array location + 
syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; + } + + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! + + + // Create update state from the plastic synaptic word + update_state_t current_state = + synapse_structure_get_update_state(*plastic_words, type); + + if (PRINT_PLASTICITY){ +// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", +// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); + + io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", + neuron_ind, syn_ind_from_delay, type, + current_state.initial_weight, + neuron->syn_state[syn_ind_from_delay].delta_w, time); + } + + // Perform weight update: + // Go through typical weight update process to clip to limits + final_state_t final_state = eprop_plasticity_update(current_state, + neuron->syn_state[syn_ind_from_delay].delta_w); + + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // Add contribution to synaptic input + // Convert into ring buffer offset + uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + // delay_axonal + delay_dendritic + + time, type_index, + synapse_type_index_bits); + + // Check for ring buffer saturation + int16_t accumulation = ring_buffers[ring_buffer_index] + + synapse_structure_get_final_weight(final_state); + +// uint32_t sat_test = accumulation & 0x10000; +// if (sat_test) { +// accumulation = sat_test - 1; +// plastic_saturation_count++; +// } + + ring_buffers[ring_buffer_index] = accumulation; + + // Write back updated synaptic word to plastic region + *plastic_words++ = + synapse_structure_get_final_synaptic_word(final_state); + } + return true; +} + +void synapse_dynamics_process_post_synaptic_event( + uint32_t time, index_t neuron_index) { + 
log_debug("Adding post-synaptic event to trace at time:%u", time); + + // Add post-event + post_event_history_t *history = &post_event_history[neuron_index]; + const uint32_t last_post_time = history->times[history->count_minus_one]; + const post_trace_t last_post_trace = + history->traces[history->count_minus_one]; + post_events_add(time, history, + timing_add_post_spike(time, last_post_time, last_post_trace)); +} + +input_t synapse_dynamics_get_intrinsic_bias( + uint32_t time, index_t neuron_index) { + use(time); + use(neuron_index); + return 0.0k; +} + +uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { + return num_plastic_pre_synaptic_events; +} + +uint32_t synapse_dynamics_get_plastic_saturation_count(void) { + return plastic_saturation_count; +} + +#if SYNGEN_ENABLED == 1 + +//! \brief Searches the synaptic row for the the connection with the +//! specified post-synaptic ID +//! \param[in] id: the (core-local) ID of the neuron to search for in the +//! synaptic row +//! \param[in] row: the core-local address of the synaptic row +//! \param[out] sp_data: the address of a struct through which to return +//! weight, delay information +//! \return bool: was the search successful? 
+bool find_plastic_neuron_with_id( + uint32_t id, address_t row, structural_plasticity_data_t *sp_data) { + address_t fixed_region = synapse_row_fixed_region(row); + address_t plastic_region_address = synapse_row_plastic_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(plastic_region_address); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + plastic_synapse_t weight; + uint32_t delay; + + // Loop through plastic synapses + for (; plastic_synapse > 0; plastic_synapse--) { + // Get next control word (auto incrementing) + weight = *plastic_words++; + uint32_t control_word = *control_words++; + + // Check if index is the one I'm looking for + delay = synapse_row_sparse_delay(control_word, synapse_type_index_bits); + if (synapse_row_sparse_index(control_word, synapse_index_mask) == id) { + sp_data->weight = weight; + sp_data->offset = + synapse_row_num_plastic_controls(fixed_region) + - plastic_synapse; + sp_data->delay = delay; + return true; + } + } + + sp_data->weight = -1; + sp_data->offset = -1; + sp_data->delay = -1; + return false; +} + +//! \brief Remove the entry at the specified offset in the synaptic row +//! \param[in] offset: the offset in the row at which to remove the entry +//! \param[in] row: the core-local address of the synaptic row +//! \return bool: was the removal successful? 
+bool remove_plastic_neuron_at_offset(uint32_t offset, address_t row) { + address_t fixed_region = synapse_row_fixed_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(synapse_row_plastic_region(row)); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + + // Delete weight at offset + plastic_words[offset] = plastic_words[plastic_synapse - 1]; + plastic_words[plastic_synapse - 1] = 0; + + // Delete control word at offset + control_words[offset] = control_words[plastic_synapse - 1]; + control_words[plastic_synapse - 1] = 0; + + // Decrement FP + fixed_region[1]--; + + return true; +} + +//! ensuring the weight is of the correct type and size +static inline plastic_synapse_t weight_conversion(uint32_t weight) { + return (plastic_synapse_t) (0xFFFF & weight); +} + +//! packing all of the information into the required plastic control word +static inline control_t control_conversion( + uint32_t id, uint32_t delay, uint32_t type) { + control_t new_control = + (delay & ((1 << SYNAPSE_DELAY_BITS) - 1)) << synapse_type_index_bits; + new_control |= (type & ((1 << synapse_type_index_bits) - 1)) << synapse_index_bits; + new_control |= id & ((1 << synapse_index_bits) - 1); + return new_control; +} + +//! \brief Add a plastic entry in the synaptic row +//! \param[in] id: the (core-local) ID of the post-synaptic neuron to be added +//! \param[in] row: the core-local address of the synaptic row +//! \param[in] weight: the initial weight associated with the connection +//! \param[in] delay: the delay associated with the connection +//! \param[in] type: the type of the connection (e.g. inhibitory) +//! \return bool: was the addition successful? 
+bool add_plastic_neuron_with_id(uint32_t id, address_t row, + uint32_t weight, uint32_t delay, uint32_t type) { + plastic_synapse_t new_weight = weight_conversion(weight); + control_t new_control = control_conversion(id, delay, type); + + address_t fixed_region = synapse_row_fixed_region(row); + plastic_synapse_t *plastic_words = + plastic_synapses(synapse_row_plastic_region(row)); + control_t *control_words = synapse_row_plastic_controls(fixed_region); + int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + + // Add weight at offset + plastic_words[plastic_synapse] = new_weight; + + // Add control word at offset + control_words[plastic_synapse] = new_control; + + // Increment FP + fixed_region[1]++; + return true; +} +#endif diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index aa6f31179b8..865c9a4e7a4 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -30,10 +30,11 @@ from .eprop_adaptive import EPropAdaptive from .store_recall_readout import StoreRecallReadout from .sinusoid_readout import SinusoidReadout +from .left_right_readout import LeftRightReadout __all__ = ["EIFConductanceAlphaPopulation", "HHCondExp", "IFCondAlpha", "IFCondExpBase", "IFCurrAlpha", "IFCurrDualExpBase", "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", "IzkCurrExpBase", "IFCondExpStoc", "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", - "EPropAdaptive", "StoreRecallReadout", "SinusoidReadout"] + "EPropAdaptive", "StoreRecallReadout", "SinusoidReadout", "LeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py new file mode 100644 index 00000000000..791020408eb --- /dev/null +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -0,0 +1,53 @@ +from spynnaker.pyNN.models.neuron import 
AbstractPyNNNeuronModelStandard +from spynnaker.pyNN.models.defaults import default_initial_values +from spynnaker.pyNN.models.neuron.neuron_models import (NeuronModelLeftRightReadout) +from spynnaker.pyNN.models.neuron.synapse_types import ( + SynapseTypeEPropAdaptive) +from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent +from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeStatic + + +class LeftRightReadout(AbstractPyNNNeuronModelStandard): + """ + """ + + @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", + "isyn_inh2", + "l", "w_fb", "eta"}) + def __init__( + self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, + v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, + + isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, + tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, +# mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, + + rate_on=10, rate_off=10, poisson_pop_size=20, + + # Learning signal and weight update constants + l=0, w_fb=0.5, eta=1.0): + + # pylint: disable=too-many-arguments, too-many-locals + neuron_model = NeuronModelLeftRightReadout( + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, #target_data, + # Learning signal params + # l, + + # mean_isi_ticks, time_to_spike_ticks, + # rate_update_threshold, + # prob_command, + rate_on, rate_off, poisson_pop_size, w_fb, eta) + + synapse_type = SynapseTypeEPropAdaptive( + tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) + + input_type = InputTypeCurrent() + + threshold_type = ThresholdTypeStatic(v_thresh) + + super(LeftRightReadout, self).__init__( + model_name="left_right_readout", + binary="left_right_readout.aplx", + neuron_model=neuron_model, input_type=input_type, + synapse_type=synapse_type, threshold_type=threshold_type) \ No newline at end of file diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py 
b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index 0e6e25d8ff1..3cf65457808 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -20,7 +20,9 @@ from .neuron_model_eprop_adaptive import NeuronModelEPropAdaptive from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout from .neuron_model_sinusoid_readout import NeuronModelLeakyIntegrateAndFireSinusoidReadout +from .neuron_model_left_right_readout import NeuronModelLeftRightReadout __all__ = ["AbstractNeuronModel", "NeuronModelIzh", "NeuronModelLeakyIntegrateAndFire", "NeuronModelEPropAdaptive", - "NeuronModelStoreRecallReadout", "NeuronModelLeakyIntegrateAndFireSinusoidReadout"] + "NeuronModelStoreRecallReadout", "NeuronModelLeakyIntegrateAndFireSinusoidReadout", + "NeuronModelLeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py new file mode 100644 index 00000000000..9d6ca3ad687 --- /dev/null +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -0,0 +1,378 @@ +import numpy +from spinn_utilities.overrides import overrides +from data_specification.enums import DataType +from pacman.executor.injection_decorator import inject_items +from .abstract_neuron_model import AbstractNeuronModel + +# from pacman.model.graphs.application.application_vertex import ApplicationVertex +from spinn_front_end_common.abstract_models.abstract_provides_n_keys_for_partition import AbstractProvidesNKeysForPartition +from spinn_front_end_common.abstract_models.abstract_generates_data_specification import AbstractGeneratesDataSpecification + +SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state +MICROSECONDS_PER_SECOND = 1000000.0 +MICROSECONDS_PER_MILLISECOND = 1000.0 +V = "v" +V_REST = "v_rest" +TAU_M = "tau_m" +CM = "cm" +I_OFFSET = "i_offset" +V_RESET = 
"v_reset" +TAU_REFRAC = "tau_refrac" +COUNT_REFRAC = "count_refrac" +MEAN_ISI_TICKS = "mean_isi_ticks" +TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" +SEED1 = "seed1" +SEED2 = "seed2" +SEED3 = "seed3" +SEED4 = "seed4" +TICKS_PER_SECOND = "ticks_per_second" +TIME_SINCE_LAST_SPIKE = "time_since_last_spike" +RATE_AT_LAST_SETTING = "rate_at_last_setting" +# RATE_UPDATE_THRESHOLD = "rate_update_threshold" +# PROB_COMMAND = "prob_command" +RATE_ON = "rate_on" +RATE_OFF = "rate_off" + +UNITS = { + V: 'mV', + V_REST: 'mV', + TAU_M: 'ms', + CM: 'nF', + I_OFFSET: 'nA', + V_RESET: 'mV', + TAU_REFRAC: 'ms' +} + + +class NeuronModelLeftRightReadout(AbstractNeuronModel, AbstractProvidesNKeysForPartition, AbstractGeneratesDataSpecification): + __slots__ = [ + "_v_init", + "_v_rest", + "_tau_m", + "_cm", + "_i_offset", + "_v_reset", + "_tau_refrac", + # "_mean_isi_ticks", + # "_time_to_spike_ticks", + # "_time_since_last_spike", + # "_rate_at_last_setting", + # "_rate_update_threshold", + # "_prob_command", + "_rate_off", + "_rate_on", + "_w_fb", + "_eta", + "_mean_l", + "_mean_r", + "_cross_entropy", + "_poisson_key", + "_poisson_pop_size", + "_n_keys_in_target" + ] + + def __init__( + self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, + # mean_isi_ticks, time_to_spike_ticks, + # rate_update_threshold, + # prob_command, + rate_on, rate_off, poisson_pop_size, w_fb, eta): + + global_data_types = [ + DataType.UINT32, # MARS KISS seed + DataType.UINT32, # MARS KISS seed + DataType.UINT32, # MARS KISS seed + DataType.UINT32, # MARS KISS seed + DataType.S1615, # ticks_per_second + DataType.S1615, # global mem pot + DataType.S1615, # global mem pot 2 + DataType.S1615, # rate on + DataType.S1615, # rate off + DataType.S1615, # mean left activation + DataType.S1615, # mean right activation + DataType.S1615, # cross entropy + DataType.UINT32, # poisson key + DataType.UINT32, # poisson pop size + DataType.S1615, # eta + ] + data_types = [ + DataType.S1615, # v + DataType.S1615, # 
v_rest + DataType.S1615, # r_membrane (= tau_m / cm) + DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.S1615, # i_offset + DataType.INT32, # count_refrac + DataType.S1615, # v_reset + DataType.INT32, # tau_refrac + #### Poisson Compartment Params #### + # DataType.S1615, # REAL mean_isi_ticks + # DataType.S1615, # REAL time_to_spike_ticks + # DataType.INT32, # int32_t time_since_last_spike s + # DataType.S1615, # REAL rate_at_last_setting; s + # DataType.S1615 # REAL rate_update_threshold; p + DataType.S1615, # learning signal + DataType.S1615 # w_fb + ] + + # Synapse states - always initialise to zero + eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) + DataType.S1615, # delta_w + DataType.S1615, # z_bar_old + DataType.S1615, # z_bar + # DataType.S1615, # ep_a + # DataType.S1615, # e_bar + ] + # Extend to include fan-in for each neuron + data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) + + super(NeuronModelLeftRightReadout, self).__init__( + data_types=data_types, + + global_data_types=global_data_types + ) + + if v_init is None: + v_init = v_rest + + self._v_init = v_init + self._v_rest = v_rest + self._tau_m = tau_m + self._cm = cm + self._i_offset = i_offset + self._v_reset = v_reset + self._tau_refrac = tau_refrac + # self._mean_isi_ticks = mean_isi_ticks + # self._time_to_spike_ticks = time_to_spike_ticks + # self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation + # self._rate_at_last_setting = 0 + # self._rate_update_threshold = 2 + # self._prob_command = prob_command + self._rate_off = rate_off + self._rate_on = rate_on + self._mean_l = 0.0 + self._mean_r = 0.0 + self._cross_entropy = 0.0 + self._poisson_key = None + self._poisson_pop_size = poisson_pop_size + self._w_fb = w_fb + self._eta = eta + + self._n_keys_in_target = poisson_pop_size * 4 + + def set_poisson_key(self, p_key): + self._poisson_key = p_key + + # 
@overrides(AbstractProvidesNKeysForPartition.get_n_keys_for_partition) + # def get_n_keys_for_partition(self, partition, graph_mapper): + # return self._n_keys_in_target + # + # @inject_items({"routing_info": "MemoryRoutingInfos"}) + # @overrides(AbstractGeneratesDataSpecification.generate_data_specification, additional_arguments={"routing_info"}) + # def generate_data_specification(self, spec, placement, routing_info): + # key = routing_info.get_first_key_from_pre_vertex(placement.vertex, "CONTROL") + # self._poisson_key = key + + @overrides(AbstractNeuronModel.get_n_cpu_cycles) + def get_n_cpu_cycles(self, n_neurons): + # A bit of a guess + return 100 * n_neurons + + @overrides(AbstractNeuronModel.add_parameters) + def add_parameters(self, parameters): + parameters[V_REST] = self._v_rest + parameters[TAU_M] = self._tau_m + parameters[CM] = self._cm + parameters[I_OFFSET] = self._i_offset + parameters[V_RESET] = self._v_reset + parameters[TAU_REFRAC] = self._tau_refrac + parameters[SEED1] = 10065 + parameters[SEED2] = 232 + parameters[SEED3] = 3634 + parameters[SEED4] = 4877 + + # parameters[PROB_COMMAND] = self._prob_command + parameters[RATE_ON] = self._rate_on + parameters[RATE_OFF] = self._rate_off + + parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() + # parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold +# parameters[TARGET_DATA] = self._target_data + + @overrides(AbstractNeuronModel.add_state_variables) + def add_state_variables(self, state_variables): + state_variables[V] = self._v_init + state_variables[COUNT_REFRAC] = 0 + # state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks + # state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential + # state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike + # state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting + + + @overrides(AbstractNeuronModel.get_units) + def get_units(self, variable): + return 
UNITS[variable] + + @overrides(AbstractNeuronModel.has_variable) + def has_variable(self, variable): + return variable in UNITS + + @inject_items({"ts": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + def get_values(self, parameters, state_variables, vertex_slice, ts): + + # Add the rest of the data + return [state_variables[V], + parameters[V_REST], + parameters[TAU_M] / parameters[CM], + parameters[TAU_M].apply_operation( + operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), + parameters[I_OFFSET], state_variables[COUNT_REFRAC], + parameters[V_RESET], + parameters[TAU_REFRAC].apply_operation( + operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + # state_variables[MEAN_ISI_TICKS], + # state_variables[TIME_TO_SPIKE_TICKS], + # state_variables[TIME_SINCE_LAST_SPIKE], + # state_variables[RATE_AT_LAST_SETTING], + # parameters[RATE_UPDATE_THRESHOLD] + ] + + @overrides(AbstractNeuronModel.update_values) + def update_values(self, values, parameters, state_variables): + + # Read the data + (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, + _v_reset, _tau_refrac, + mean_isi_ticks, time_to_spike_ticks, time_since_last_spike, + rate_at_last_setting, #_rate_update_threshold +# _seed1, _seed2, _seed3, _seed4, _ticks_per_second + ) = values + + # Copy the changed data only + state_variables[V] = v + state_variables[COUNT_REFRAC] = count_refrac + state_variables[MEAN_ISI_TICKS] = mean_isi_ticks + state_variables[TIME_TO_SPIKE_TICKS] = time_to_spike_ticks + state_variables[TIME_SINCE_LAST_SPIKE] = time_since_last_spike + state_variables[RATE_AT_LAST_SETTING] = rate_at_last_setting + + # Global params + @inject_items({"machine_time_step": "MachineTimeStep"}) + @overrides(AbstractNeuronModel.get_global_values, + additional_arguments={'machine_time_step'}) + def get_global_values(self, machine_time_step): + vals = [ + 1, # seed 1 + 2, # seed 2 + 3, # seed 3 + 4, # seed 4 + MICROSECONDS_PER_SECOND / 
float(machine_time_step), # ticks_per_second + 0.0, # set to 0, as will be set in first timestep of model anyway + 0.0, # set to 0, as will be set in first timestep of model anyway + self._rate_on, + self._rate_off, + self._mean_l, + self._mean_r, + self._cross_entropy, + self._poisson_key, + self._poisson_pop_size, + self._eta + ] + + return vals + + @property + def prob_command(self): + return self._prob_command + + # @prob_command.setter + # def prob_command(self, prob_command): + # self._prob_command = prob_command + + @property + def rate_on(self): + return self._rate_on + + @rate_on.setter + def rate_on(self, rate_on): + self._rate_on = rate_on + + @property + def rate_off(self): + return self._rate_off + + @rate_on.setter + def rate_on(self, rate_off): + self._rate_off = rate_off + + @property + def v_init(self): + return self._v + + @v_init.setter + def v_init(self, v_init): + self._v = v_init + + @property + def v_rest(self): + return self._v_rest + + @v_rest.setter + def v_rest(self, v_rest): + self._v_rest = v_rest + + @property + def tau_m(self): + return self._tau_m + + @tau_m.setter + def tau_m(self, tau_m): + self._tau_m = tau_m + + @property + def cm(self): + return self._cm + + @cm.setter + def cm(self, cm): + self._cm = cm + + @property + def i_offset(self): + return self._i_offset + + @i_offset.setter + def i_offset(self, i_offset): + self._i_offset = i_offset + + @property + def v_reset(self): + return self._v_reset + + @v_reset.setter + def v_reset(self, v_reset): + self._v_reset = v_reset + + @property + def tau_refrac(self): + return self._tau_refrac + + @tau_refrac.setter + def tau_refrac(self, tau_refrac): + self._tau_refrac = tau_refrac + + # @property + # def mean_isi_ticks(self): + # return self._mean_isi_ticks + # + # @mean_isi_ticks.setter + # def mean_isi_ticks(self, new_mean_isi_ticks): + # self._mean_isi_ticks = new_mean_isi_ticks + # + # @property + # def time_to_spike_ticks(self): + # return self._time_to_spike_ticks + # + # 
@mean_isi_ticks.setter + # def time_to_spike_ticks(self, new_time_to_spike_ticks): + # self._time_to_spike_ticks = new_time_to_spike_ticks \ No newline at end of file From 5220c26d16998002ef77753c8ae2e54f8aebd4c5 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Wed, 29 Apr 2020 14:33:51 +0100 Subject: [PATCH 049/123] Poisson key collected and given to model, runs but poisson not updating rate --- .../neuron_impl_left_right_readout.h | 12 +++++++---- .../neuron/abstract_population_vertex.py | 6 ++++++ .../implementations/neuron_impl_standard.py | 5 +++++ .../neuron_model_left_right_readout.py | 20 +++++++------------ 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index e8eaccfa3bc..637c3c5af07 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -211,8 +211,8 @@ static void neuron_impl_load_neuron_parameters( io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); - io_printf(IO_BUF, "poisson key: %k \n\n", global_parameters->p_key); - io_printf(IO_BUF, "poisson pop size: %k \n\n", global_parameters->p_pop_size); + io_printf(IO_BUF, "poisson key: %u \n\n", global_parameters->p_key); + io_printf(IO_BUF, "poisson pop size: %u \n\n", global_parameters->p_pop_size); for (index_t n = 0; n < n_neurons; n++) { @@ -309,7 +309,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, global_parameters->mean_0 == 0k; global_parameters->mean_1 == 0k; } - + io_printf(IO_BUF, "current_state = %u, current_cue = %u, time = %u\n", current_state, current_cue, time); + // if (current_state == STATE_CUE){ if (neuron_index == 2){ // this is the 
error source recorded_variable_values[V_RECORDING_INDEX] = accumulative_direction; @@ -331,16 +332,18 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, accumulative_direction += current_cue_direction; REAL payload; payload = global_parameters->rate_on; + io_printf(IO_BUF, "poisson setting 1\n"); for (int j = current_cue_direction*global_parameters->p_pop_size; j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); } } - // turn off and reset if finsihed + // turn off and reset if finished else if ((time - current_time) % (wait_between_cues + duration_of_cue) == (wait_between_cues + duration_of_cue) - 1){ current_cue += 1; REAL payload; payload = global_parameters->rate_off; + io_printf(IO_BUF, "poisson setting 2\n"); for (int j = current_cue_direction*global_parameters->p_pop_size; j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); @@ -368,6 +371,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, for (int i = 0; i < 4; i++){ REAL payload; payload = global_parameters->rate_on; + io_printf(IO_BUF, "poisson setting 3\n"); for (int j = 2*global_parameters->p_pop_size; j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index c84f7b1243c..82d3a057941 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -50,6 +50,8 @@ from .synaptic_manager import SynapticManager from .population_machine_vertex import PopulationMachineVertex +from spynnaker.pyNN.models.neuron.neuron_models import 
(NeuronModelLeftRightReadout) + logger = logging.getLogger(__name__) # TODO: Make sure these values are correct (particularly CPU cycles) @@ -499,6 +501,10 @@ def generate_data_specification( key = routing_info.get_first_key_from_pre_vertex( vertex, constants.SPIKE_PARTITION_ID) + if isinstance(self.__pynn_model._model.neuron_model, NeuronModelLeftRightReadout): + poisson_key = routing_info.get_first_key_from_pre_vertex(placement.vertex, "CONTROL") + self.__pynn_model._model.neuron_model.set_poisson_key(poisson_key) + # Get the poisson key p_key = routing_info.get_first_key_from_pre_vertex( vertex, constants.LIVE_POISSON_CONTROL_PARTITION_ID) diff --git a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py index 3c0344ad826..d950a0f194e 100644 --- a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py +++ b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py @@ -59,6 +59,11 @@ def __init__( if self.__additional_input_type is not None: self.__components.append(self.__additional_input_type) + @property + # @overrides(AbstractNeuronImpl.neuron_model) + def neuron_model(self): + return self.__neuron_model + @property @overrides(AbstractNeuronImpl.model_name) def model_name(self): diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 9d6ca3ad687..a9c73ee1235 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -5,8 +5,10 @@ from .abstract_neuron_model import AbstractNeuronModel # from pacman.model.graphs.application.application_vertex import ApplicationVertex -from spinn_front_end_common.abstract_models.abstract_provides_n_keys_for_partition import AbstractProvidesNKeysForPartition -from 
spinn_front_end_common.abstract_models.abstract_generates_data_specification import AbstractGeneratesDataSpecification +# from spinn_front_end_common.abstract_models.abstract_provides_n_keys_for_partition import AbstractProvidesNKeysForPartition +# from spynnaker.pyNN.models.neuron import AbstractPopulationVertex +# from spynnaker.pyNN.models.neuron.implementations import NeuronImplStandard + SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state MICROSECONDS_PER_SECOND = 1000000.0 @@ -32,6 +34,7 @@ # PROB_COMMAND = "prob_command" RATE_ON = "rate_on" RATE_OFF = "rate_off" +POISSON_POP_SIZE = 'poisson_pop_size' UNITS = { V: 'mV', @@ -44,7 +47,7 @@ } -class NeuronModelLeftRightReadout(AbstractNeuronModel, AbstractProvidesNKeysForPartition, AbstractGeneratesDataSpecification): +class NeuronModelLeftRightReadout(AbstractNeuronModel): __slots__ = [ "_v_init", "_v_rest", @@ -162,16 +165,6 @@ def __init__( def set_poisson_key(self, p_key): self._poisson_key = p_key - # @overrides(AbstractProvidesNKeysForPartition.get_n_keys_for_partition) - # def get_n_keys_for_partition(self, partition, graph_mapper): - # return self._n_keys_in_target - # - # @inject_items({"routing_info": "MemoryRoutingInfos"}) - # @overrides(AbstractGeneratesDataSpecification.generate_data_specification, additional_arguments={"routing_info"}) - # def generate_data_specification(self, spec, placement, routing_info): - # key = routing_info.get_first_key_from_pre_vertex(placement.vertex, "CONTROL") - # self._poisson_key = key - @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): # A bit of a guess @@ -195,6 +188,7 @@ def add_parameters(self, parameters): parameters[RATE_OFF] = self._rate_off parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() + parameters[POISSON_POP_SIZE] = self._poisson_pop_size # parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold # parameters[TARGET_DATA] = self._target_data From 0c7202387110a9d5447e67c129b0846d84666e97 Mon 
Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 30 Apr 2020 17:11:36 +0100 Subject: [PATCH 050/123] Poisson rates updated properly, operation not ok --- .../neuron_impl_eprop_adaptive.h | 8 +- .../neuron_impl_left_right_readout.h | 152 ++++++++++-------- .../models/neuron_model_eprop_adaptive_impl.c | 2 +- .../models/neuron_model_eprop_adaptive_impl.h | 4 +- .../neuron/builds/left_right_readout.py | 2 +- 5 files changed, 95 insertions(+), 73 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 9c3e39d9c26..1ac74d18236 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -303,13 +303,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B - if (neuron_index == 1){ + if (neuron_index == 0){ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate } else{ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - // B_t; // neuron->B; - neuron->L; + B_t; // neuron->B; +// neuron->L; // neuron->syn_state[0].z_bar; // global_parameters->core_target_rate; // neuron->syn_state[0].e_bar; @@ -364,9 +364,11 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // If spike occurs, communicate to relevant parts of model if (spike) { +// io_printf(IO_BUF, "neuron %u spiked with beta = %k, B_t = %k\n", neuron_index, neuron->beta, neuron->B); // Call relevant model-based functions // Tell the neuron model neuron_model_has_spiked(neuron); +// io_printf(IO_BUF, "neuron %u thresholded beta = %k, B_t = %k\n", neuron_index, neuron->beta, neuron->B); // Tell the additional input additional_input_has_spiked(additional_input); diff --git 
a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 637c3c5af07..b533423f36c 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -68,7 +68,7 @@ typedef enum current_state_t current_state = 0; uint32_t current_time = 0; -uint32_t current_cue = 0; +uint32_t cue_number = 0; uint32_t total_cues = 7; uint32_t current_cue_direction = 2; // 0 = left, 1 = right uint32_t accumulative_direction = 0; // if > total_cues / 2 = right @@ -299,87 +299,99 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); - if (current_cue == 0 && completed_broadcast){ // reset start of new test + recorded_variable_values[V_RECORDING_INDEX] = voltage; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->cross_entropy; + if (neuron_index == 2){ + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = accumulative_direction; + } + else { + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; + } +// io_printf(IO_BUF, "state = %u - %u\n", current_state, time); + if (cue_number == 0 && completed_broadcast){ // reset start of new test + io_printf(IO_BUF, "Resetting\n"); completed_broadcast = false; current_time = time; current_state = STATE_CUE; accumulative_direction = 0; // error params ticks_for_mean = 0; - global_parameters->mean_0 == 0k; - global_parameters->mean_1 == 0k; + global_parameters->cross_entropy = 0.k; + global_parameters->mean_0 == 0.k; + global_parameters->mean_1 == 0.k; } - io_printf(IO_BUF, "current_state = %u, current_cue = %u, time = %u\n", current_state, current_cue, time); - // +// io_printf(IO_BUF, "current_state = %u, cue_number = %u, direction = %u, time = %u\n", current_state, cue_number, 
current_cue_direction, time); + // In this state the environment is giving the left/right cues to the agent if (current_state == STATE_CUE){ - if (neuron_index == 2){ // this is the error source - recorded_variable_values[V_RECORDING_INDEX] = accumulative_direction; - } - if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ - // do nothing? - } - else{ - // pick broadcast if just entered - if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ - // pick new value and broadcast - REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 - if (random_value < 0.5k){ - current_cue_direction = 0; - } - else{ - current_cue_direction = 1; - } - accumulative_direction += current_cue_direction; - REAL payload; - payload = global_parameters->rate_on; - io_printf(IO_BUF, "poisson setting 1\n"); - for (int j = current_cue_direction*global_parameters->p_pop_size; - j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); - } + if (neuron_index == 0){ + // if it's current in the waiting time between cues do nothing + if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ + // do nothing? 
} - // turn off and reset if finished - else if ((time - current_time) % (wait_between_cues + duration_of_cue) == (wait_between_cues + duration_of_cue) - 1){ - current_cue += 1; - REAL payload; - payload = global_parameters->rate_off; - io_printf(IO_BUF, "poisson setting 2\n"); - for (int j = current_cue_direction*global_parameters->p_pop_size; - j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + // begin sending left/right cue + else{ + // pick broadcast if just entered + if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ + // pick new value and broadcast + REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 + if (random_value < 0.5k){ + current_cue_direction = 0; + } + else{ + current_cue_direction = 1; + } + accumulative_direction += current_cue_direction; + REAL payload; + payload = global_parameters->rate_on; +// io_printf(IO_BUF, "poisson setting 1, direction = %u\n", current_cue_direction); + for (int j = current_cue_direction*global_parameters->p_pop_size; + j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); + } } - if (current_cue >= total_cues){ - current_state = (current_state + 1) % 3; + // turn off and reset if finished + else if ((time - current_time) % (wait_between_cues + duration_of_cue) == (wait_between_cues + duration_of_cue) - 1){ + cue_number += 1; + REAL payload; + payload = global_parameters->rate_off; +// io_printf(IO_BUF, "poisson setting 2, direction = %u\n", current_cue_direction); + for (int j = current_cue_direction*global_parameters->p_pop_size; + j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, 
bitsk(payload), WITH_PAYLOAD); + } + if (cue_number >= total_cues){ + current_state = (current_state + 1) % 3; + } } } } } else if (current_state == STATE_WAITING){ // waiting for prompt, all things ok - if (current_cue >= total_cues){ + if (cue_number >= total_cues){ current_time = time; - current_cue = 0; + cue_number = 0; } if ((time - current_time) >= wait_before_result){ current_state = (current_state + 1) % 3; } } else if (current_state == STATE_PROMPT){ - if (!ticks_for_mean){ +// io_printf(IO_BUF, "ticks_for_mean = %u, n_idx = %u\n", ticks_for_mean, neuron_index); + if (!ticks_for_mean && neuron_index == 1){ current_time = time; // send packets to the variable poissons with the updated states for (int i = 0; i < 4; i++){ REAL payload; payload = global_parameters->rate_on; - io_printf(IO_BUF, "poisson setting 3\n"); +// io_printf(IO_BUF, "poisson setting 3, turning on prompt\n"); for (int j = 2*global_parameters->p_pop_size; j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); } } } if (neuron_index == 0){ - recorded_variable_values[V_RECORDING_INDEX] = voltage; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, @@ -389,7 +401,6 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, global_parameters->readout_V_0 = result; } else if (neuron_index == 1){ - recorded_variable_values[V_RECORDING_INDEX] = voltage; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, @@ -400,19 +411,23 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, global_parameters->readout_V_1 = result; } else if (neuron_index == 2){ // this is the error source - - recorded_variable_values[V_RECORDING_INDEX] = accumulative_direction; // Switched to always 
broadcasting error but with packet - ticks_for_mean += 1; //todo is it a running error like this over recall? + ticks_for_mean += 1; //todo is it a running error like this over prompt? + io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); + io_printf(IO_BUF, "ticks %u - accum %k - ", ticks_for_mean, (accum)ticks_for_mean); // Softmax of the exc and inh inputs representing 1 and 0 respectively // may need to scale to stop huge numbers going in the exp + io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); global_parameters->mean_0 += global_parameters->readout_V_0; global_parameters->mean_1 += global_parameters->readout_V_1; - // divide -> 1/x - accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); - accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); + // divide -> * 1/x + io_printf(IO_BUF, " umm "); + accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); + accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); + io_printf(IO_BUF, "or here - "); accum softmax_0 = exp_0 / (exp_1 + exp_0); accum softmax_1 = exp_1 / (exp_1 + exp_0); + io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); // What to do if log(0)? 
if (accumulative_direction > total_cues >> 1){ global_parameters->cross_entropy = -logk(softmax_1); @@ -420,23 +435,26 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, else{ global_parameters->cross_entropy = -logk(softmax_0); } +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = +// io_printf(IO_BUF, "broadcasting error\n"); while (!spin1_send_mc_packet( key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { spin1_delay_us(1); } } -// if ((time - current_time) >= wait_before_result){ -// current_state = 0; -// completed_broadcast = true; -// for (int i = 0; i < 4; i++){ -// REAL payload; -// payload = global_parameters->rate_off; -// for (int j = 2*global_parameters->p_pop_size; -// j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ -// spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); -// } -// } -// } + if ((time - current_time) >= prompt_duration && neuron_index == 0){ +// io_printf(IO_BUF, "poisson setting 4, turning off prompt\n"); + current_state = 0; + completed_broadcast = true; + for (int i = 0; i < 4; i++){ + REAL payload; + payload = global_parameters->rate_off; + for (int j = 2*global_parameters->p_pop_size; + j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); + } + } + } } // recorded_variable_values[V_RECORDING_INDEX] = voltage; // if (neuron_index == 0){ diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index aad791bf42e..9e2bb03c2a2 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -168,7 +168,7 @@ state_t neuron_model_state_update( } - // All operations now need doing once per eprop synapse + // All operations now need doing once per recurrent 
eprop synapse for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ // ****************************************************************** // Low-pass filter incoming spike train diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index a9ffc609c87..8115e605c9a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -116,10 +116,12 @@ static inline void threshold_type_update_threshold(state_t z, // + decay_s1615(1000k, threshold_type->adpt) // fold scaling into decay to increase precision // * z; // stored on neuron // +// io_printf(IO_BUF, "before B = %k, temp1 = %k, temp2 = %k, b = %k, b_0 = %k, beta = %k", +// threshold_type->B, temp1, temp2, threshold_type->b, threshold_type->b_0, threshold_type->beta); // Update large B threshold_type->B = threshold_type->b_0 + threshold_type->beta*threshold_type->b; - +// io_printf(IO_BUF, "\nafter B = %k\n", threshold_type->B); } diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index 791020408eb..9842596ab4a 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -22,7 +22,7 @@ def __init__( tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, # mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, - rate_on=10, rate_off=10, poisson_pop_size=20, + rate_on=50, rate_off=0, poisson_pop_size=20, # Learning signal and weight update constants l=0, w_fb=0.5, eta=1.0): From 67a4802c003751cf5597ebd638c7e8e95a208010 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Wed, 20 May 2020 17:43:49 +0100 Subject: [PATCH 051/123] different recording, rho changed but not calculated on the 
fly, rates changed, updated left right variable and parameter loading in python and a little in sinusoid --- .../neuron_impl_eprop_adaptive.h | 39 ++-- .../neuron_impl_left_right_readout.h | 204 ++++++++++-------- .../models/neuron_model_eprop_adaptive_impl.c | 10 +- .../neuron_model_left_right_readout_impl.c | 9 +- .../neuron/builds/left_right_readout.py | 4 +- .../neuron_model_left_right_readout.py | 53 +++-- .../neuron_model_sinusoid_readout.py | 31 +-- 7 files changed, 201 insertions(+), 149 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 1ac74d18236..77a514b69f2 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -303,20 +303,31 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Record B - if (neuron_index == 0){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate +// if (neuron_index == 0){ +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate +//// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; // divide by neurons on core to get average per neuron contribution to core pop rate +// } +// else{ +// +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// B_t; // neuron->B; +//// neuron->L; +// // neuron->syn_state[0].z_bar; +// // global_parameters->core_target_rate; +// // neuron->syn_state[0].e_bar; +// // neuron->syn_state[neuron_index].el_a; +// // exc_input_values[0]; // record input input (signed) +// // learning_signal * neuron->w_fb; +// } + 
if(neuron_index > 3){ + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15].el_a; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15].delta_w; } else{ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - B_t; // neuron->B; -// neuron->L; - // neuron->syn_state[0].z_bar; - // global_parameters->core_target_rate; - // neuron->syn_state[0].e_bar; - // neuron->syn_state[neuron_index].el_a; - // exc_input_values[0]; // record input input (signed) - // learning_signal * neuron->w_fb; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0].el_a; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; } +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, @@ -343,13 +354,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[V_RECORDING_INDEX] = voltage; // result; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - neuron->syn_state[0].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = +// neuron->syn_state[0].delta_w; // neuron->syn_state[0].z_bar; // exc_input_values[0]; // record input input (signed) // z_t; // global_parameters->core_pop_rate; -// neuron->psi; +// neuron->B; // neuron->syn_state[0].z_bar; // // Record B diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index b533423f36c..0e709b1065c 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -69,14 +69,18 @@ typedef enum current_state_t current_state = 0; uint32_t current_time = 0; uint32_t cue_number 
= 0; -uint32_t total_cues = 7; +uint32_t total_cues = 1; uint32_t current_cue_direction = 2; // 0 = left, 1 = right uint32_t accumulative_direction = 0; // if > total_cues / 2 = right uint32_t wait_between_cues = 50; // ms uint32_t duration_of_cue = 100; // ms uint32_t wait_before_result = 1000; // ms but should be a range between 500-1500 uint32_t prompt_duration = 150; //ms -uint32_t ticks_for_mean = 0; +//uint32_t ticks_for_mean = 0; +bool start_prompt = false; +accum softmax_0 = 0k; +accum softmax_1 = 0k; +//REAL payload; bool completed_broadcast = true; @@ -299,37 +303,60 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, external_bias += additional_input_get_input_value_as_current( additional_input, voltage); - recorded_variable_values[V_RECORDING_INDEX] = voltage; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->cross_entropy; - if (neuron_index == 2){ - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = accumulative_direction; - } - else { - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; + if (neuron_index == 0){ + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, -50k); + // Finally, set global membrane potential to updated value + global_parameters->readout_V_0 = result; + + } else if (neuron_index == 1){ + // update neuron parameters + learning_signal *= -1.k; + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron, -50k); + learning_signal *= -1.k; + // Finally, set global membrane potential to updated value + global_parameters->readout_V_1 = result; } +// if (neuron_index == 0){ +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_0; +// } +// else if (neuron_index == 1){ +// 
recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_1; +// } // io_printf(IO_BUF, "state = %u - %u\n", current_state, time); if (cue_number == 0 && completed_broadcast){ // reset start of new test - io_printf(IO_BUF, "Resetting\n"); +// io_printf(IO_BUF, "Resetting\n"); completed_broadcast = false; current_time = time; current_state = STATE_CUE; accumulative_direction = 0; // error params - ticks_for_mean = 0; global_parameters->cross_entropy = 0.k; - global_parameters->mean_0 == 0.k; - global_parameters->mean_1 == 0.k; + global_parameters->mean_0 = 0.k; + global_parameters->mean_1 = 0.k; + softmax_0 = 0k; + softmax_1 = 0k; + while (!spin1_send_mc_packet( + key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { + spin1_delay_us(1); + } } // io_printf(IO_BUF, "current_state = %u, cue_number = %u, direction = %u, time = %u\n", current_state, cue_number, current_cue_direction, time); // In this state the environment is giving the left/right cues to the agent if (current_state == STATE_CUE){ if (neuron_index == 0){ // if it's current in the waiting time between cues do nothing - if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ - // do nothing? - } +// if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ +// do nothing? 
+// } // begin sending left/right cue - else{ + if ((time - current_time) % (wait_between_cues + duration_of_cue) >= wait_between_cues){ // pick broadcast if just entered if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ // pick new value and broadcast @@ -374,11 +401,12 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } if ((time - current_time) >= wait_before_result){ current_state = (current_state + 1) % 3; + start_prompt = true; } } else if (current_state == STATE_PROMPT){ // io_printf(IO_BUF, "ticks_for_mean = %u, n_idx = %u\n", ticks_for_mean, neuron_index); - if (!ticks_for_mean && neuron_index == 1){ + if (start_prompt && neuron_index == 1){ current_time = time; // send packets to the variable poissons with the updated states for (int i = 0; i < 4; i++){ @@ -391,56 +419,59 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } } } - if (neuron_index == 0){ - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, -50k); - // Finally, set global membrane potential to updated value - global_parameters->readout_V_0 = result; - - } else if (neuron_index == 1){ - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, -50k); - - // Finally, set global membrane potential to updated value - global_parameters->readout_V_1 = result; - - } else if (neuron_index == 2){ // this is the error source + if (neuron_index == 2){ // this is the error source // Switched to always broadcasting error but with packet - ticks_for_mean += 1; //todo is it a running error like this over prompt? 
- io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); - io_printf(IO_BUF, "ticks %u - accum %k - ", ticks_for_mean, (accum)ticks_for_mean); +// ticks_for_mean += 1; //todo is it a running error like this over prompt? + start_prompt = false; +// io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); +// io_printf(IO_BUF, "ticks %u - accum %k - ", ticks_for_mean, (accum)ticks_for_mean); // Softmax of the exc and inh inputs representing 1 and 0 respectively // may need to scale to stop huge numbers going in the exp - io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); - global_parameters->mean_0 += global_parameters->readout_V_0; - global_parameters->mean_1 += global_parameters->readout_V_1; +// io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); +// global_parameters->mean_0 += global_parameters->readout_V_0; +// global_parameters->mean_1 += global_parameters->readout_V_1; // divide -> * 1/x - io_printf(IO_BUF, " umm "); - accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); - accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); - io_printf(IO_BUF, "or here - "); - accum softmax_0 = exp_0 / (exp_1 + exp_0); - accum softmax_1 = exp_1 / (exp_1 + exp_0); - io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); +// io_printf(IO_BUF, " umm "); +// accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); +// accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); + accum exp_0 = expk(global_parameters->readout_V_0 * 0.1k); + accum exp_1 = expk(global_parameters->readout_V_1 * 0.1k); +// io_printf(IO_BUF, "or here - "); + if (exp_0 == 0k && exp_1 == 0k){ + if (global_parameters->readout_V_0 > global_parameters->readout_V_1){ + 
softmax_0 = 10k; + softmax_1 = 0k; + } + else{ + softmax_0 = 0k; + softmax_1 = 10k; + } + } + else{ +// accum denominator = 1.k / (exp_1 + exp_0); + softmax_0 = exp_0 / (exp_1 + exp_0); + softmax_1 = exp_1 / (exp_1 + exp_0); + } +// io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); // What to do if log(0)? if (accumulative_direction > total_cues >> 1){ global_parameters->cross_entropy = -logk(softmax_1); + learning_signal = softmax_0; } else{ global_parameters->cross_entropy = -logk(softmax_0); + learning_signal = softmax_0 - 1.k; } -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// io_printf(IO_BUF, "broadcasting error\n"); while (!spin1_send_mc_packet( - key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { + key | neuron_index, bitsk(learning_signal), 1 )) { spin1_delay_us(1); } +// if(learning_signal){ +// io_printf(IO_BUF, "learning signal before cast = %k\n", learning_signal); +// } +// learning_signal = global_parameters->cross_entropy; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = +// io_printf(IO_BUF, "broadcasting error\n"); } if ((time - current_time) >= prompt_duration && neuron_index == 0){ // io_printf(IO_BUF, "poisson setting 4, turning off prompt\n"); @@ -456,48 +487,33 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } } } -// recorded_variable_values[V_RECORDING_INDEX] = voltage; -// if (neuron_index == 0){ -// // update neuron parameters -// state_t result = neuron_model_state_update( -// NUM_EXCITATORY_RECEPTORS, exc_input_values, -// NUM_INHIBITORY_RECEPTORS, inh_input_values, -// external_bias, neuron, 0.0k); -// -// // Calculate error -// REAL error = result - global_parameters->target_V[target_ind]; -// learning_signal = error; -// // Record Error -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// error; -//// neuron->syn_state[3].delta_w; -//// 
neuron->syn_state[0].z_bar; -// -// // Record readout -// recorded_variable_values[V_RECORDING_INDEX] = -// result; -// // neuron->syn_state[0].z_bar; -// -// // Send error (learning signal) as packet with payload -//// while (!spin1_send_mc_packet( -//// key | neuron_index, bitsk(error), 1 )) { -//// spin1_delay_us(1); -//// } + +// learning_signal = global_parameters->cross_entropy; + + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->cross_entropy; + recorded_variable_values[V_RECORDING_INDEX] = voltage; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = ; +// if (neuron_index == 2){ +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = accumulative_direction; // } -// else{ -// // Record 'Error' -// recorded_variable_values[V_RECORDING_INDEX] = -// neuron->syn_state[0].z_bar; -//// global_parameters->target_V[target_ind]; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// - global_parameters->target_V[target_ind]; +// else { +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; // } -// // Record target -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -//// global_parameters->target_V[target_ind]; -// neuron->syn_state[neuron_index].delta_w; -//// exc_input_values[0]; - + if (neuron_index == 2){ +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; +// recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].delta_w; + } + else if (neuron_index == 1){ +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].z_bar; +// recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].delta_w; + } + else{ +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[1].z_bar; +// 
recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[1].delta_w; + } // If spike occurs, communicate to relevant parts of model if (spike) { diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 9e2bb03c2a2..cb8b2acb056 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -93,14 +93,14 @@ state_t neuron_model_state_update( (1.0k - psi_temp2) : 0.0k; // This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. This should make it possible to do a pause and resume cycle and have reliable unloading of data. - uint32_t total_input_synapses_per_neuron = 200; //todo should this be fixed - uint32_t total_recurrent_synapses_per_neuron = 0; //todo should this be fixed + uint32_t total_input_synapses_per_neuron = 100; //todo should this be fixed + uint32_t total_recurrent_synapses_per_neuron = 100; //todo should this be fixed uint32_t recurrent_offset = 100; // neuron->psi = neuron->psi << 10; - REAL rho = 0.998; + REAL rho = expk(-1.k / 1000.k); // adpt neuron->L = learning_signal * neuron->w_fb; @@ -270,4 +270,8 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); + + io_printf(IO_BUF, "e_to_dt_on_tau_a = %k n/a\n\n", neuron->e_to_dt_on_tau_a); + + io_printf(IO_BUF, "adpt = %k n/a\n\n", neuron->adpt); } diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 4039efffea3..ef1c801ca7a 100644 --- 
a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -75,8 +75,12 @@ state_t neuron_model_state_update( uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? - neuron->L = learning_signal * neuron->w_fb; +// if(learning_signal){ +// io_printf(IO_BUF, "learning signal = %k\n", learning_signal); +// } + neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); +// REAL tau_decay = expk(-1.k / 1500.k); // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ // ****************************************************************** @@ -84,7 +88,7 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** @@ -109,6 +113,7 @@ state_t neuron_model_state_update( // ****************************************************************** // Update cached total weight change // ****************************************************************** + REAL this_dt_weight_change = // -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; -local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index 9842596ab4a..3c05a559744 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ 
-22,7 +22,7 @@ def __init__( tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, # mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, - rate_on=50, rate_off=0, poisson_pop_size=20, + rate_on=40, rate_off=0, poisson_pop_size=10, # Learning signal and weight update constants l=0, w_fb=0.5, eta=1.0): @@ -36,7 +36,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, w_fb, eta) + rate_on, rate_off, poisson_pop_size, l, w_fb, eta) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index a9c73ee1235..49d7e7c292e 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -21,6 +21,10 @@ V_RESET = "v_reset" TAU_REFRAC = "tau_refrac" COUNT_REFRAC = "count_refrac" +# Learning signal +L = "learning_signal" +W_FB = "feedback_weight" + MEAN_ISI_TICKS = "mean_isi_ticks" TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" SEED1 = "seed1" @@ -64,6 +68,7 @@ class NeuronModelLeftRightReadout(AbstractNeuronModel): # "_prob_command", "_rate_off", "_rate_on", + "_l", "_w_fb", "_eta", "_mean_l", @@ -79,7 +84,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, w_fb, eta): + rate_on, rate_off, poisson_pop_size, l, w_fb, eta): global_data_types = [ DataType.UINT32, # MARS KISS seed @@ -157,6 +162,7 @@ def __init__( self._cross_entropy = 0.0 self._poisson_key = None self._poisson_pop_size = poisson_pop_size + self._l = l self._w_fb = w_fb self._eta = eta @@ -178,6 +184,8 @@ def add_parameters(self, parameters): parameters[I_OFFSET] = self._i_offset parameters[V_RESET] = 
self._v_reset parameters[TAU_REFRAC] = self._tau_refrac + parameters[L] = self._l + parameters[W_FB] = self._w_fb parameters[SEED1] = 10065 parameters[SEED2] = 232 parameters[SEED3] = 3634 @@ -196,6 +204,9 @@ def add_parameters(self, parameters): def add_state_variables(self, state_variables): state_variables[V] = self._v_init state_variables[COUNT_REFRAC] = 0 + + #learning params + state_variables[L] = self._l # state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks # state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential # state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike @@ -215,7 +226,7 @@ def has_variable(self, variable): def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data - return [state_variables[V], + values = [state_variables[V], parameters[V_REST], parameters[TAU_M] / parameters[CM], parameters[TAU_M].apply_operation( @@ -224,31 +235,35 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): parameters[V_RESET], parameters[TAU_REFRAC].apply_operation( operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), - # state_variables[MEAN_ISI_TICKS], - # state_variables[TIME_TO_SPIKE_TICKS], - # state_variables[TIME_SINCE_LAST_SPIKE], - # state_variables[RATE_AT_LAST_SETTING], - # parameters[RATE_UPDATE_THRESHOLD] + + state_variables[L], + parameters[W_FB] ] + # create synaptic state - init all state to zero + eprop_syn_init = [0, # delta w + 0, # z_bar_inp + 0]#, # z_bar + # 0, # el_a + # 0] # e_bar + # extend to appropriate fan-in + values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + + return values + @overrides(AbstractNeuronModel.update_values) def update_values(self, values, parameters, state_variables): # Read the data - (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac, - mean_isi_ticks, time_to_spike_ticks, time_since_last_spike, - rate_at_last_setting, #_rate_update_threshold -# 
_seed1, _seed2, _seed3, _seed4, _ticks_per_second - ) = values + (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, + _v_reset, _tau_refrac, + _l, _w_fb) = values # Not sure this will work with the new array of synapse!!! + # todo check alignment on this # Copy the changed data only - state_variables[V] = v - state_variables[COUNT_REFRAC] = count_refrac - state_variables[MEAN_ISI_TICKS] = mean_isi_ticks - state_variables[TIME_TO_SPIKE_TICKS] = time_to_spike_ticks - state_variables[TIME_SINCE_LAST_SPIKE] = time_since_last_spike - state_variables[RATE_AT_LAST_SETTING] = rate_at_last_setting + state_variables[V] = _v + + state_variables[L] = _l # Global params @inject_items({"machine_time_step": "MachineTimeStep"}) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 2124c4e2d5a..852de2e97b8 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -46,6 +46,7 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): __slots__ = [ + "_v", "_v_init", "_v_rest", "_tau_m", @@ -56,9 +57,9 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): "_target_data", # learning signal - "__l", - "__w_fb", - "__eta" + "_l", + "_w_fb", + "_eta" ] def __init__( @@ -118,10 +119,10 @@ def __init__( self._target_data = target_data # learning signal - self.__l = l - self.__w_fb = w_fb + self._l = l + self._w_fb = w_fb - self.__eta = eta + self._eta = eta @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): @@ -139,7 +140,7 @@ def add_parameters(self, parameters): parameters[TARGET_DATA] = 0.0 #learning params - parameters[W_FB] = self.__w_fb + parameters[W_FB] = self._w_fb @overrides(AbstractNeuronModel.add_state_variables) @@ -148,7 +149,7 @@ def 
add_state_variables(self, state_variables): state_variables[COUNT_REFRAC] = 0 #learning params - state_variables[L] = self.__l + state_variables[L] = self._l @overrides(AbstractNeuronModel.get_units) @@ -193,15 +194,15 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): def update_values(self, values, parameters, state_variables): # Read the data - (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, + (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, _v_reset, _tau_refrac, - l, __w_fb) = values # Not sure this will work with the new array of synapse!!! + _l, _w_fb) = values # Not sure this will work with the new array of synapse!!! # todo check alignment on this # Copy the changed data only - state_variables[V] = v + state_variables[V] = _v - state_variables[L] = l + state_variables[L] = _l # Global params @@ -212,7 +213,7 @@ def get_global_values(self, machine_time_step): vals = [] vals.extend(self._target_data) - vals.extend([self.__eta]) + vals.extend([self._eta]) return vals @property @@ -281,9 +282,9 @@ def tau_refrac(self, tau_refrac): @property def w_fb(self): - return self.__w_fb + return self._w_fb @w_fb.setter def w_fb(self, new_value): - self.__w_fb = new_value + self._w_fb = new_value From c70c68bb3a17bd13dc2df9f2738f54ae8023b7e4 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 21 May 2020 16:40:11 +0100 Subject: [PATCH 052/123] rho correctly calculated for each neuron and used --- .../models/neuron_model_eprop_adaptive_impl.c | 4 +++- .../models/neuron_model_eprop_adaptive_impl.h | 1 + .../neuron_model_eprop_adaptive.py | 20 ++++++++++++++++--- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index cb8b2acb056..3298cfe84b3 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ 
b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -100,7 +100,9 @@ state_t neuron_model_state_update( // neuron->psi = neuron->psi << 10; - REAL rho = expk(-1.k / 1000.k); // adpt + REAL rho = neuron->rho;//expk(-1.k / 1500.k); // adpt +// REAL rho_2 = (accum)decay_s1615(1000.k, neuron->adpt); +// io_printf(IO_BUF, "1:%k, 2:%k, 3:%k\n", rho, rho_2, neuron->rho); neuron->L = learning_signal * neuron->w_fb; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 8115e605c9a..b75eea84ce1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -82,6 +82,7 @@ typedef struct neuron_t { REAL L; // learning signal REAL w_fb; // feedback weight + REAL rho; // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 3d72596277f..d1052db1972 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -46,6 +46,7 @@ # Learning signal L = "learning_signal" W_FB = "feedback_weight" +RHO = 'rho' DELTA_W = "delta_w" Z_BAR_OLD = "z_bar_old" @@ -103,6 +104,7 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): # learning signal "__l", "__w_fb", + "__rho", "__eta" ] @@ -155,7 +157,8 @@ def __init__( DataType.S1615, # Learning signal DataType.S1615, # L - DataType.S1615 # w_fb + DataType.S1615, # w_fb + DataType.S1615 # rho ] # Synapse states - always initialise to zero @@ -205,6 +208,7 @@ def __init__( # learning signal self.__l = l self.__w_fb = w_fb + self.__rho = numpy.exp(-1. 
/ tau_a) self.__eta = eta @@ -228,6 +232,7 @@ def add_parameters(self, parameters): parameters[BETA] = self.__beta parameters[SCALAR] = self.__scalar parameters[W_FB] = self.__w_fb + parameters[RHO] = self.__rho @overrides(AbstractNeuronModel.add_state_variables) @@ -292,7 +297,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): parameters[SCALAR], state_variables[L], - parameters[W_FB] + parameters[W_FB], + parameters[RHO] ] # create synaptic state - init all state to zero @@ -337,7 +343,7 @@ def update_values(self, values, parameters, state_variables): (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar) = values + l, __w_fb, rho, delta_w, z_bar_old, z_bar, ep_a, e_bar) = values # Not sure this will work with the new array of synapse!!! # (Note that this function is only called if you do e.g. run(), set(), @@ -466,3 +472,11 @@ def w_fb(self): @w_fb.setter def w_fb(self, new_value): self.__w_fb = new_value + + @property + def rho(self): + return self.__w_fb + + @rho.setter + def rho(self, new_value): + self.__rho = new_value From 08e877fd75ef13587b4bdb9b6e4021b9a041cace Mon Sep 17 00:00:00 2001 From: oliverrhodes Date: Tue, 26 May 2020 13:45:03 +0100 Subject: [PATCH 053/123] Add functionality to apply weight updates after an elapsed period of time --- .../models/neuron_model_eprop_adaptive_impl.c | 10 ++++- .../models/neuron_model_eprop_adaptive_impl.h | 1 + .../neuron_model_sinusoid_readout_impl.c | 6 +++ .../neuron_model_sinusoid_readout_impl.h | 1 + .../synapse_dynamics_eprop_adaptive_impl.c | 37 ++++++++++++++++--- .../synapse_dynamics_sinusoid_readout_impl.c | 34 ++++++++++++++--- .../neuron_model_eprop_adaptive.py | 11 +++++- .../neuron_model_sinusoid_readout.py | 10 +++-- 8 files changed, 93 insertions(+), 17 deletions(-) diff --git 
a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index d2735edbd8d..cc7ffe85d6a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -111,7 +111,9 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + +// (1 - neuron->exp_TC) * + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** @@ -158,6 +160,12 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; + + // decrease timestep counter preventing rapid updates + if (neuron->syn_state[syn_ind].update_ready > 0){ + neuron->syn_state[syn_ind].update_ready -= 1; + } + // io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", // local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index a9ffc609c87..85450ce63f4 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -30,6 +30,7 @@ typedef struct eprop_syn_state_t { REAL z_bar; // low-pass filtered spike train REAL el_a; // adaptive component of eligibility vector REAL e_bar; // low-pass filtered eligibility trace + uint32_t 
update_ready; // counter to enable batch update (i.e. don't perform on every spike). }eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index e5ed361eb85..ab20a9c4150 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -119,6 +119,12 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; + + // decrease timestep counter preventing rapid updates + if (neuron->syn_state[syn_ind].update_ready > 0){ + neuron->syn_state[syn_ind].update_ready -= 1; + } + } return neuron->V_membrane; diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index fa9684ca597..11ed38f80ae 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -13,6 +13,7 @@ typedef struct eprop_syn_state_t { REAL z_bar; // low-pass filtered spike train // REAL el_a; // adaptive component of eligibility vector // REAL e_bar; // low-pass filtered eligibility trace + uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). 
}eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 1a5a9879654..e292ab2dcb5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -290,7 +290,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? // int16_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING + int32_t delta_w_int = (int32_t) roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; @@ -324,6 +324,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); + // Return final synaptic word and weight return synapse_structure_get_final_state(current_state, reg_error); } @@ -399,13 +400,37 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w, time); } - // Perform weight update: - // Go through typical weight update process to clip to limits - final_state_t final_state = eprop_plasticity_update(current_state, + + // Perform weight update: only if batch time has elapsed + final_state_t final_state; + if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + // enough time has elapsed - perform weight update + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready=0\n"); + } + + // Go through typical weight update process to clip to limits + 
final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); - // reset delta_w as weight change has now been applied - neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // reset update_ready counter based on pattern cycle time + neuron->syn_state[syn_ind_from_delay].update_ready += 10240; + + } else { + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready: %u - no update performed\n", + neuron->syn_state[syn_ind_from_delay].update_ready); + } + // don't update weight - get update state based on state cached in SDRAM + // assume reg rate is zero to avoid + + final_state = synapse_structure_get_final_state(current_state, 0); + // Don't reset delta_w -> keep this accumulating and apply weight change in future + } + // Add contribution to synaptic input // Convert into ring buffer offset diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index a0cfa729334..7dce10edf4d 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -397,13 +397,37 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w, time); } - // Perform weight update: - // Go through typical weight update process to clip to limits - final_state_t final_state = eprop_plasticity_update(current_state, + // Perform weight update: only if batch time has elapsed + final_state_t final_state; + + if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + + // enough time has elapsed - perform weight update + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready=0\n"); + } + + // Go through typical weight update process to clip to limits + 
final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); - // reset delta_w as weight change has now been applied - neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // reset update_ready counter based on pattern cycle time + neuron->syn_state[syn_ind_from_delay].update_ready += 1024; + + } else { + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready: %u - no update performed\n", + neuron->syn_state[syn_ind_from_delay].update_ready); + } + // don't update weight - get update state based on state cached in SDRAM + // assume reg rate is zero to avoid + + final_state = synapse_structure_get_final_state(current_state, 0); + // Don't reset delta_w -> keep this accumulating and apply weight change in future + } // Add contribution to synaptic input // Convert into ring buffer offset diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 3d72596277f..f4565ae6cbe 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -52,6 +52,7 @@ Z_BAR = "z_bar" EP_A = "ep_a" E_BAR = "e_bar" +UPDATE_READY = "update_ready" UNITS = { V: 'mV', @@ -165,6 +166,7 @@ def __init__( DataType.S1615, # z_bar DataType.S1615, # ep_a DataType.S1615, # e_bar + DataType.UINT32 # update_ready ] # Extend to include fan-in for each neuron datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -249,6 +251,7 @@ def add_state_variables(self, state_variables): state_variables[Z_BAR+str(n)] = 0 state_variables[EP_A+str(n)] = 0 state_variables[E_BAR+str(n)] = 0 + state_variables[UPDATE_READY+str(n)] = 1024 @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -301,7 +304,9 @@ def 
get_values(self, parameters, state_variables, vertex_slice, ts): state_variables[Z_BAR_OLD+str(n)], state_variables[Z_BAR+str(n)], state_variables[EP_A+str(n)], - state_variables[E_BAR+str(n)]] + state_variables[E_BAR+str(n)], + state_variables[UPDATE_READY+str(n)] + ] # extend to appropriate fan-in values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) @@ -333,11 +338,12 @@ def update_values(self, values, parameters, state_variables): z_bar = [0] * SYNAPSES_PER_NEURON ep_a = [0] * SYNAPSES_PER_NEURON e_bar = [0] * SYNAPSES_PER_NEURON + update_ready = [0] * SYNAPSES_PER_NEURON # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar) = values + l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! # (Note that this function is only called if you do e.g. 
run(), set(), @@ -361,6 +367,7 @@ def update_values(self, values, parameters, state_variables): state_variables[Z_BAR+str(n)] = z_bar[n] state_variables[EP_A+str(n)] = ep_a[n] state_variables[E_BAR+str(n)] = e_bar[n] + state_variables[UPDATE_READY] = update_ready[n] @property diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index b9d5fe1db73..47b5f4ac4e8 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -90,6 +90,7 @@ def __init__( DataType.S1615, # z_bar # DataType.S1615, # ep_a # DataType.S1615, # e_bar + DataType.UINT32 # update_ready ] # Extend to include fan-in for each neuron data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -179,13 +180,16 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): ] # create synaptic state - init all state to zero - eprop_syn_init = [0, # delta w + for n in range(SYNAPSES_PER_NEURON): + eprop_syn_init = [0, # delta w 0, # z_bar_inp - 0]#, # z_bar + 0,#, # z_bar # 0, # el_a # 0] # e_bar + 0, #int(numpy.random.rand()*1024) # update_ready + ] # extend to appropriate fan-in - values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) return values From 5023019c73f9e726a4c90ad9a28cc83956a59194 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 28 May 2020 16:50:04 +0100 Subject: [PATCH 054/123] updates to firing reg making it averaged over time, updates are staggered accumulated over 10 tests, updates to prints, removed unecessary rho calc and import, --- .../neuron_impl_eprop_adaptive.h | 44 ++++++++---- .../neuron_impl_left_right_readout.h | 20 +++++- .../models/neuron_model_eprop_adaptive_impl.c | 68 ++++++++++++++++--- .../models/neuron_model_eprop_adaptive_impl.h | 2 +- .../neuron_model_left_right_readout_impl.c | 
9 +++ .../neuron_model_left_right_readout_impl.h | 1 + .../neuron_model_sinusoid_readout_impl.h | 1 + .../synapse_dynamics_eprop_adaptive_impl.c | 36 ++++++++-- ...synapse_dynamics_left_right_readout_impl.c | 34 ++++++++-- .../synapse_dynamics_sinusoid_readout_impl.c | 34 ++++++++-- .../neuron_model_eprop_adaptive.py | 27 +++----- .../neuron_model_left_right_readout.py | 46 +++++++++---- .../neuron_model_sinusoid_readout.py | 5 +- 13 files changed, 252 insertions(+), 75 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 77a514b69f2..6cf08957583 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -49,10 +49,10 @@ shaping include #endif - +extern uint32_t time; extern REAL learning_signal; -uint32_t neurons_in_pop; - +//uint32_t neurons_in_pop; +uint32_t syn_dynamics_neurons_in_partition; //! 
Array of neuron states neuron_pointer_t neuron_array; @@ -158,7 +158,7 @@ static void neuron_impl_load_neuron_parameters( log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); - neurons_in_pop = n_neurons; // get number of neurons running on this core for use during execution + syn_dynamics_neurons_in_partition = n_neurons; // get number of neurons running on this core for use during execution if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); @@ -208,10 +208,10 @@ static void neuron_impl_load_neuron_parameters( // ******** for eprop regularisation ************ // ********************************************** if (initial_regularise) { - global_parameters->core_target_rate = global_parameters->core_target_rate - * n_neurons; // scales target rate depending on number of neurons - global_parameters->core_pop_rate = global_parameters->core_pop_rate - * n_neurons; // scale initial value, too + global_parameters->core_target_rate = global_parameters->core_target_rate; +// * n_neurons; // scales target rate depending on number of neurons + global_parameters->core_pop_rate = 0.k;//global_parameters->core_pop_rate; +// * n_neurons; // scale initial value, too initial_regularise = false; } @@ -235,11 +235,11 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, input_t external_bias, state_t *recorded_variable_values) { - if (neuron_index == 0) { - // Decay global rate trace (only done once per core per timestep) - global_parameters->core_pop_rate = global_parameters->core_pop_rate - * global_parameters->rate_exp_TC; - } +// if (neuron_index == 0) { +// // Decay global rate trace (only done once per core per timestep) +// global_parameters->core_pop_rate = global_parameters->core_pop_rate +// * global_parameters->rate_exp_TC; +// } // Get the neuron itself @@ -320,11 +320,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // // learning_signal * neuron->w_fb; // } if(neuron_index > 
3){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15].el_a; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15].el_a; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15].delta_w; } +// else if (neuron_index == 0){ +// } else{ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0].el_a; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0].el_a; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; } // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; @@ -334,6 +336,18 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, NUM_INHIBITORY_RECEPTORS, inh_input_values, external_bias, neuron, B_t); + REAL accum_time = (accum)(time%13000) * 0.001; + if (!accum_time){ + accum_time += 1.k; + } + REAL reg_learning_signal = (global_parameters->core_pop_rate +// / ((accum)(time%1300) +// / (1.225k + / (accum_time + * (accum)syn_dynamics_neurons_in_partition)) + - global_parameters->core_target_rate; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; + diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 0e709b1065c..7db707911df 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -58,6 +58,10 @@ extern key_t key; extern REAL learning_signal; static uint32_t target_ind = 0; +// recording prams +uint32_t is_it_right = 0; +//uint32_t choice = 0; + // Left right parameters typedef enum { @@ -338,6 +342,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, accumulative_direction = 0; // error params 
global_parameters->cross_entropy = 0.k; + learning_signal = 0.k; global_parameters->mean_0 = 0.k; global_parameters->mean_1 = 0.k; softmax_0 = 0k; @@ -457,11 +462,19 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, if (accumulative_direction > total_cues >> 1){ global_parameters->cross_entropy = -logk(softmax_1); learning_signal = softmax_0; + is_it_right = 1; } else{ global_parameters->cross_entropy = -logk(softmax_0); learning_signal = softmax_0 - 1.k; + is_it_right = 0; } +// if (softmax_0 > 0.5){ +// choice = 0; +// } +// else{ +// choice = 1; +// } while (!spin1_send_mc_packet( key | neuron_index, bitsk(learning_signal), 1 )) { spin1_delay_us(1); @@ -490,7 +503,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // learning_signal = global_parameters->cross_entropy; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->cross_entropy; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = learning_signal; recorded_variable_values[V_RECORDING_INDEX] = voltage; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = ; // if (neuron_index == 2){ @@ -503,16 +516,19 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = is_it_right; } else if (neuron_index == 1){ // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } else{ // 
recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[1].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[1].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } // If spike occurs, communicate to relevant parts of model diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 3298cfe84b3..423e7f0770f 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -19,8 +19,12 @@ #include +REAL new_learning_signal; extern REAL learning_signal; REAL local_eta; +extern uint32_t time; +extern global_neuron_params_pointer_t global_parameters; +extern uint32_t syn_dynamics_neurons_in_partition; // simple Leaky I&F ODE static inline void lif_neuron_closed_form( @@ -100,13 +104,43 @@ state_t neuron_model_state_update( // neuron->psi = neuron->psi << 10; - REAL rho = neuron->rho;//expk(-1.k / 1500.k); // adpt -// REAL rho_2 = (accum)decay_s1615(1000.k, neuron->adpt); -// io_printf(IO_BUF, "1:%k, 2:%k, 3:%k\n", rho, rho_2, neuron->rho); - - - neuron->L = learning_signal * neuron->w_fb; +// REAL rho = neuron->rho;//expk(-1.k / 1500.k); // adpt + REAL rho = (accum)decay_s1615(1.k, neuron->e_to_dt_on_tau_a); +// REAL rho_3 = (accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); +// io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); + REAL accum_time = (accum)(time%13000) * 0.001; + if (!accum_time){ + accum_time += 1.k; + } +// io_printf(IO_BUF, "time = %u, mod = %u, accum = %k, /s:%k, rate:%k, accum t:%k\n", time, time%1300, (accum)(time%1300), +// (accum)(time%1300) * 0.001k, 
(accum)(time%1300) * 0.001k * (accum)syn_dynamics_neurons_in_partition, +// accum_time); + +// REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; + REAL reg_learning_signal = (global_parameters->core_pop_rate // make it work for different ts +// / ((accum)(time%1300) +// / (1.225k // 00000!!!!! + / (accum_time + * (accum)syn_dynamics_neurons_in_partition)) + - global_parameters->core_target_rate; +// io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); + if (time % 13000 == 12999 && global_parameters->core_pop_rate){ //hardcoded time of reset + new_learning_signal = 0.k; + io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\n", time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate); + global_parameters->core_pop_rate = 0.k; +// REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) +// / (accum)syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; +// io_printf(IO_BUF, "2 %u, rate at reset:%k, L:%k, rate:%k\n", time, reg_learning_signal, learning_signal, global_parameters->core_pop_rate); + } +// neuron->L = learning_signal * neuron->w_fb; +// if (learning_signal != 0.k && new_learning_signal != learning_signal){ + if (new_learning_signal != learning_signal){// && time%1300 > 1100){ +// io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); + learning_signal += reg_learning_signal;// * 0.1; + new_learning_signal = learning_signal; + } + neuron->L = learning_signal; // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ @@ -115,8 +149,8 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * 
neuron->exp_TC - + (1 - neuron->exp_TC) * -// + +// + (1 - neuron->exp_TC) * + + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update @@ -164,6 +198,15 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; + // decrease timestep counter preventing rapid updates + if (neuron->syn_state[syn_ind].update_ready > 0){ +// io_printf(IO_BUF, "ff reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); + neuron->syn_state[syn_ind].update_ready -= 1; + } +// else{ +// io_printf(IO_BUF, "ff not reducing %u\n", syn_ind); +// } + // io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", // local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); @@ -224,6 +267,15 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; + // decrease timestep counter preventing rapid updates + if (neuron->syn_state[syn_ind].update_ready > 0){ +// io_printf(IO_BUF, "recducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); + neuron->syn_state[syn_ind].update_ready -= 1; + } +// else{ +// io_printf(IO_BUF, "not recducing %u\n", syn_ind); +// } + // io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", // local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index b75eea84ce1..451ea9664ba 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -30,6 +30,7 @@ typedef struct eprop_syn_state_t { REAL 
z_bar; // low-pass filtered spike train REAL el_a; // adaptive component of eligibility vector REAL e_bar; // low-pass filtered eligibility trace + uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). }eprop_syn_state_t; ///////////////////////////////////////////////////////////// @@ -82,7 +83,6 @@ typedef struct neuron_t { REAL L; // learning signal REAL w_fb; // feedback weight - REAL rho; // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index ef1c801ca7a..2b7ad22ef72 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -137,6 +137,15 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; + // decrease timestep counter preventing rapid updates + if (neuron->syn_state[syn_ind].update_ready > 0){ +// io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); + neuron->syn_state[syn_ind].update_ready -= 1; + } +// else{ +// io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); +// } + } return neuron->V_membrane; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index cb22e535f3b..7a0a000dc66 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -13,6 +13,7 @@ typedef struct eprop_syn_state_t { REAL z_bar; // low-pass filtered spike train // REAL el_a; // adaptive component of eligibility vector // REAL e_bar; // low-pass filtered 
eligibility trace + uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). }eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index fa9684ca597..11ed38f80ae 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -13,6 +13,7 @@ typedef struct eprop_syn_state_t { REAL z_bar; // low-pass filtered spike train // REAL el_a; // adaptive component of eligibility vector // REAL e_bar; // low-pass filtered eligibility trace + uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). }eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 667161100e2..242892bc373 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -315,7 +315,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } // Calculate regularisation error - REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike + REAL reg_error = 0.k;//(global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 
0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); @@ -395,13 +395,37 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w, time); } - // Perform weight update: - // Go through typical weight update process to clip to limits - final_state_t final_state = eprop_plasticity_update(current_state, + + // Perform weight update: only if batch time has elapsed + final_state_t final_state; + if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + // enough time has elapsed - perform weight update + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready=0\n"); + } + + // Go through typical weight update process to clip to limits + final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); - // reset delta_w as weight change has now been applied - neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // reset update_ready counter based on pattern cycle time + neuron->syn_state[syn_ind_from_delay].update_ready += 13000; + + } else { + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready: %u/%u - no update performed\n", + neuron->syn_state[syn_ind_from_delay].update_ready, syn_ind_from_delay); + } + // don't update weight - get update state based on state cached in SDRAM + // assume reg rate is zero to avoid + + final_state = synapse_structure_get_final_state(current_state, 0); + // Don't reset delta_w -> keep this accumulating and apply weight change in future + } + // Add contribution to synaptic input // Convert into ring buffer offset diff --git 
a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index e322d9b1b7c..a41dfcf3cca 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -397,13 +397,37 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w, time); } - // Perform weight update: - // Go through typical weight update process to clip to limits - final_state_t final_state = eprop_plasticity_update(current_state, + // Perform weight update: only if batch time has elapsed + final_state_t final_state; + + if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + + // enough time has elapsed - perform weight update + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready=0\n"); + } + + // Go through typical weight update process to clip to limits + final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); - // reset delta_w as weight change has now been applied - neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // reset update_ready counter based on pattern cycle time + neuron->syn_state[syn_ind_from_delay].update_ready += 13000; + + } else { + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready: %u - no update performed\n", + neuron->syn_state[syn_ind_from_delay].update_ready); + } + // don't update weight - get update state based on state cached in SDRAM + // assume reg rate is zero to avoid + + final_state = synapse_structure_get_final_state(current_state, 0); + // Don't reset delta_w -> keep this accumulating and apply weight change in future + } // Add contribution to synaptic input // Convert into ring buffer offset diff 
--git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index a0cfa729334..7dce10edf4d 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -397,13 +397,37 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w, time); } - // Perform weight update: - // Go through typical weight update process to clip to limits - final_state_t final_state = eprop_plasticity_update(current_state, + // Perform weight update: only if batch time has elapsed + final_state_t final_state; + + if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + + // enough time has elapsed - perform weight update + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready=0\n"); + } + + // Go through typical weight update process to clip to limits + final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); - // reset delta_w as weight change has now been applied - neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + // reset delta_w as weight change has now been applied + neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; + + // reset update_ready counter based on pattern cycle time + neuron->syn_state[syn_ind_from_delay].update_ready += 1024; + + } else { + if (PRINT_PLASTICITY){ + io_printf(IO_BUF, "update_ready: %u - no update performed\n", + neuron->syn_state[syn_ind_from_delay].update_ready); + } + // don't update weight - get update state based on state cached in SDRAM + // assume reg rate is zero to avoid + + final_state = synapse_structure_get_final_state(current_state, 0); + // Don't reset delta_w -> keep this accumulating and apply weight change in future + } // Add contribution to synaptic input // Convert into ring buffer offset diff 
--git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index d1052db1972..f956ec0174a 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -46,13 +46,13 @@ # Learning signal L = "learning_signal" W_FB = "feedback_weight" -RHO = 'rho' DELTA_W = "delta_w" Z_BAR_OLD = "z_bar_old" Z_BAR = "z_bar" EP_A = "ep_a" E_BAR = "e_bar" +UPDATE_READY = "update_ready" UNITS = { V: 'mV', @@ -104,7 +104,6 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): # learning signal "__l", "__w_fb", - "__rho", "__eta" ] @@ -158,7 +157,6 @@ def __init__( # Learning signal DataType.S1615, # L DataType.S1615, # w_fb - DataType.S1615 # rho ] # Synapse states - always initialise to zero @@ -168,6 +166,7 @@ def __init__( DataType.S1615, # z_bar DataType.S1615, # ep_a DataType.S1615, # e_bar + DataType.UINT32 # update_ready ] # Extend to include fan-in for each neuron datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -208,7 +207,6 @@ def __init__( # learning signal self.__l = l self.__w_fb = w_fb - self.__rho = numpy.exp(-1. 
/ tau_a) self.__eta = eta @@ -232,7 +230,6 @@ def add_parameters(self, parameters): parameters[BETA] = self.__beta parameters[SCALAR] = self.__scalar parameters[W_FB] = self.__w_fb - parameters[RHO] = self.__rho @overrides(AbstractNeuronModel.add_state_variables) @@ -254,6 +251,7 @@ def add_state_variables(self, state_variables): state_variables[Z_BAR+str(n)] = 0 state_variables[EP_A+str(n)] = 0 state_variables[E_BAR+str(n)] = 0 + state_variables[UPDATE_READY+str(n)] = 13000 @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -297,8 +295,7 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): parameters[SCALAR], state_variables[L], - parameters[W_FB], - parameters[RHO] + parameters[W_FB] ] # create synaptic state - init all state to zero @@ -307,7 +304,9 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): state_variables[Z_BAR_OLD+str(n)], state_variables[Z_BAR+str(n)], state_variables[EP_A+str(n)], - state_variables[E_BAR+str(n)]] + state_variables[E_BAR+str(n)], + state_variables[UPDATE_READY+str(n)] + ] # extend to appropriate fan-in values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) @@ -339,11 +338,12 @@ def update_values(self, values, parameters, state_variables): z_bar = [0] * SYNAPSES_PER_NEURON ep_a = [0] * SYNAPSES_PER_NEURON e_bar = [0] * SYNAPSES_PER_NEURON + update_ready = [0] * SYNAPSES_PER_NEURON # Read the data (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb, rho, delta_w, z_bar_old, z_bar, ep_a, e_bar) = values + l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! # (Note that this function is only called if you do e.g. 
run(), set(), @@ -367,6 +367,7 @@ def update_values(self, values, parameters, state_variables): state_variables[Z_BAR+str(n)] = z_bar[n] state_variables[EP_A+str(n)] = ep_a[n] state_variables[E_BAR+str(n)] = e_bar[n] + state_variables[UPDATE_READY] = update_ready[n] @property @@ -472,11 +473,3 @@ def w_fb(self): @w_fb.setter def w_fb(self, new_value): self.__w_fb = new_value - - @property - def rho(self): - return self.__w_fb - - @rho.setter - def rho(self, new_value): - self.__rho = new_value diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 49d7e7c292e..7b31fc094a2 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -4,12 +4,7 @@ from pacman.executor.injection_decorator import inject_items from .abstract_neuron_model import AbstractNeuronModel -# from pacman.model.graphs.application.application_vertex import ApplicationVertex -# from spinn_front_end_common.abstract_models.abstract_provides_n_keys_for_partition import AbstractProvidesNKeysForPartition -# from spynnaker.pyNN.models.neuron import AbstractPopulationVertex -# from spynnaker.pyNN.models.neuron.implementations import NeuronImplStandard - - +# constants SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state MICROSECONDS_PER_SECOND = 1000000.0 MICROSECONDS_PER_MILLISECOND = 1000.0 @@ -40,6 +35,13 @@ RATE_OFF = "rate_off" POISSON_POP_SIZE = 'poisson_pop_size' +DELTA_W = "delta_w" +Z_BAR_OLD = "z_bar_old" +Z_BAR = "z_bar" +# EP_A = "ep_a" +# E_BAR = "e_bar" +UPDATE_READY = "update_ready" + UNITS = { V: 'mV', V_REST: 'mV', @@ -112,13 +114,8 @@ def __init__( DataType.INT32, # count_refrac DataType.S1615, # v_reset DataType.INT32, # tau_refrac - #### Poisson Compartment Params #### - # DataType.S1615, # REAL mean_isi_ticks - # DataType.S1615, # REAL 
time_to_spike_ticks - # DataType.INT32, # int32_t time_since_last_spike s - # DataType.S1615, # REAL rate_at_last_setting; s - # DataType.S1615 # REAL rate_update_threshold; p - DataType.S1615, # learning signal + # Learning signal + DataType.S1615, # L DataType.S1615 # w_fb ] @@ -129,6 +126,7 @@ def __init__( DataType.S1615, # z_bar # DataType.S1615, # ep_a # DataType.S1615, # e_bar + DataType.UINT32 # update_ready ] # Extend to include fan-in for each neuron data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -212,6 +210,14 @@ def add_state_variables(self, state_variables): # state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike # state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting + for n in range(SYNAPSES_PER_NEURON): + state_variables[DELTA_W+str(n)] = 0 + state_variables[Z_BAR_OLD+str(n)] = 0 + state_variables[Z_BAR+str(n)] = 0 + # state_variables[EP_A+str(n)] = 0 + # state_variables[E_BAR+str(n)] = 0 + state_variables[UPDATE_READY+str(n)] = 13000 + @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): @@ -243,9 +249,11 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): # create synaptic state - init all state to zero eprop_syn_init = [0, # delta w 0, # z_bar_inp - 0]#, # z_bar + 0,#, # z_bar # 0, # el_a # 0] # e_bar + 13000, #int(numpy.random.rand()*1024) # update_ready + ] # extend to appropriate fan-in values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) @@ -257,7 +265,7 @@ def update_values(self, values, parameters, state_variables): # Read the data (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, _v_reset, _tau_refrac, - _l, _w_fb) = values # Not sure this will work with the new array of synapse!!! + _l, _w_fb, delta_w, z_bar_old, z_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! 
# todo check alignment on this # Copy the changed data only @@ -265,6 +273,14 @@ def update_values(self, values, parameters, state_variables): state_variables[L] = _l + for n in range(SYNAPSES_PER_NEURON): + state_variables[DELTA_W+str(n)] = delta_w[n] + state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] + state_variables[Z_BAR+str(n)] = z_bar[n] + # state_variables[EP_A+str(n)] = ep_a[n] + # state_variables[E_BAR+str(n)] = e_bar[n] + state_variables[UPDATE_READY] = update_ready[n] + # Global params @inject_items({"machine_time_step": "MachineTimeStep"}) @overrides(AbstractNeuronModel.get_global_values, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 852de2e97b8..945216c5f0a 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -91,6 +91,7 @@ def __init__( DataType.S1615, # z_bar # DataType.S1615, # ep_a # DataType.S1615, # e_bar + DataType.UINT32 # update_ready ] # Extend to include fan-in for each neuron data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -182,9 +183,11 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): # create synaptic state - init all state to zero eprop_syn_init = [0, # delta w 0, # z_bar_inp - 0]#, # z_bar + 0,#, # z_bar # 0, # el_a # 0] # e_bar + 0, #int(numpy.random.rand()*1024) # update_ready + ] # extend to appropriate fan-in values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) From 731af5d3514197fe2b27657e5ef49344b600224d Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 1 Jun 2020 19:32:49 +0100 Subject: [PATCH 055/123] changes to recording, left right always alternated, reg error not included if small enough, learning signal not included after waiting an --- .../neuron_impl_eprop_adaptive.h | 18 +++++++------- .../neuron_impl_left_right_readout.h | 17 ++++++------- 
.../models/neuron_model_eprop_adaptive_impl.c | 24 ++++++++++++++----- 3 files changed, 36 insertions(+), 23 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 6cf08957583..dd480278ddb 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -320,13 +320,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // // learning_signal * neuron->w_fb; // } if(neuron_index > 3){ -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15].el_a; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15].el_a; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15].delta_w; } // else if (neuron_index == 0){ // } else{ -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0].el_a; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0].el_a; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; } // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; @@ -340,13 +340,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, if (!accum_time){ accum_time += 1.k; } - REAL reg_learning_signal = (global_parameters->core_pop_rate -// / ((accum)(time%1300) -// / (1.225k - / (accum_time - * (accum)syn_dynamics_neurons_in_partition)) - - global_parameters->core_target_rate; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; +// REAL reg_learning_signal = (global_parameters->core_pop_rate +//// / ((accum)(time%1300) +//// / (1.225k +// / (accum_time +// * (accum)syn_dynamics_neurons_in_partition)) +// - global_parameters->core_target_rate; +// 
recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 7db707911df..ba024a2cc7a 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -365,13 +365,14 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // pick broadcast if just entered if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ // pick new value and broadcast - REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 - if (random_value < 0.5k){ - current_cue_direction = 0; - } - else{ - current_cue_direction = 1; - } +// REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 +// if (random_value < 0.5k){ +// current_cue_direction = 0; +// } +// else{ +// current_cue_direction = 1; +// } + current_cue_direction = (current_cue_direction + 1) % 2; accumulative_direction += current_cue_direction; REAL payload; payload = global_parameters->rate_on; @@ -512,7 +513,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // else { // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; // } - if (neuron_index == 2){ + if (neuron_index == 2){ //this neuron does nothing // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].delta_w; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 
423e7f0770f..50b8a7c66d2 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -19,6 +19,7 @@ #include +bool printed_value = false; REAL new_learning_signal; extern REAL learning_signal; REAL local_eta; @@ -125,22 +126,33 @@ state_t neuron_model_state_update( * (accum)syn_dynamics_neurons_in_partition)) - global_parameters->core_target_rate; // io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); - if (time % 13000 == 12999 && global_parameters->core_pop_rate){ //hardcoded time of reset - new_learning_signal = 0.k; + if (time % 13000 == 12999 & !printed_value){ //hardcoded time of reset io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\n", time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate); - global_parameters->core_pop_rate = 0.k; +// global_parameters->core_pop_rate = 0.k; // REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) // / (accum)syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; // io_printf(IO_BUF, "2 %u, rate at reset:%k, L:%k, rate:%k\n", time, reg_learning_signal, learning_signal, global_parameters->core_pop_rate); + printed_value = true; + } + if (time % 13000 == 0){ + new_learning_signal = 0.k; + global_parameters->core_pop_rate = 0.k; + printed_value = false; } // neuron->L = learning_signal * neuron->w_fb; // if (learning_signal != 0.k && new_learning_signal != learning_signal){ - if (new_learning_signal != learning_signal){// && time%1300 > 1100){ + if (new_learning_signal != learning_signal && abs(reg_learning_signal) > 0.5){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); - learning_signal += reg_learning_signal;// * 0.1; + learning_signal += reg_learning_signal * 0.1; new_learning_signal = 
learning_signal; } - neuron->L = learning_signal; +// neuron->L = learning_signal; + if (time % 13000 > 1300){ + neuron->L = new_learning_signal; + } + else{ + neuron->L = 0.k; + } // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ From 2f256b98e1812c80e218a957407a5a2dc7a85b15 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 2 Jun 2020 13:24:46 +0100 Subject: [PATCH 056/123] removed abs and switched to accum to be sure --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 50b8a7c66d2..fb73f9a68d4 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -110,7 +110,7 @@ state_t neuron_model_state_update( // REAL rho_3 = (accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); // io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); - REAL accum_time = (accum)(time%13000) * 0.001; + REAL accum_time = (accum)(time%13000) * 0.001k; if (!accum_time){ accum_time += 1.k; } @@ -141,9 +141,9 @@ state_t neuron_model_state_update( } // neuron->L = learning_signal * neuron->w_fb; // if (learning_signal != 0.k && new_learning_signal != learning_signal){ - if (new_learning_signal != learning_signal && abs(reg_learning_signal) > 0.5){// && time%1300 > 1100){ + if (new_learning_signal != learning_signal && (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); - learning_signal += reg_learning_signal * 0.1; + learning_signal += reg_learning_signal * 0.1k; 
new_learning_signal = learning_signal; } // neuron->L = learning_signal; From ae86f7bda66b2cc933b39634f28b37dcafc29e07 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 2 Jun 2020 13:49:13 +0100 Subject: [PATCH 057/123] hot fix --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index fb73f9a68d4..d0fd5bf6159 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -141,7 +141,7 @@ state_t neuron_model_state_update( } // neuron->L = learning_signal * neuron->w_fb; // if (learning_signal != 0.k && new_learning_signal != learning_signal){ - if (new_learning_signal != learning_signal && (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){// && time%1300 > 1100){ + if (new_learning_signal != learning_signal && (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k)){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); learning_signal += reg_learning_signal * 0.1k; new_learning_signal = learning_signal; From 5a11a6a5fea0a545730a43d2cadc2d84b9ab36cd Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 2 Jun 2020 17:10:48 +0100 Subject: [PATCH 058/123] reset the synapse params between tests, window size a single variable except for synapse dynamics, changes to recording --- .../neuron_impl_eprop_adaptive.h | 8 ++++---- .../models/neuron_model_eprop_adaptive_impl.c | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 
dd480278ddb..253ade1bf3b 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -320,14 +320,14 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // // learning_signal * neuron->w_fb; // } if(neuron_index > 3){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15].el_a; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15].delta_w; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].el_a; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].delta_w; } // else if (neuron_index == 0){ // } else{ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0].el_a; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; } // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; // update neuron parameters diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index d0fd5bf6159..f4c2013cf77 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -26,6 +26,7 @@ REAL local_eta; extern uint32_t time; extern global_neuron_params_pointer_t global_parameters; extern uint32_t syn_dynamics_neurons_in_partition; +uint32_t window_size = 13000; // simple Leaky I&F ODE static inline void lif_neuron_closed_form( @@ -110,7 +111,7 @@ state_t neuron_model_state_update( // REAL rho_3 = 
(accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); // io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); - REAL accum_time = (accum)(time%13000) * 0.001k; + REAL accum_time = (accum)(time%window_size) * 0.001k; if (!accum_time){ accum_time += 1.k; } @@ -126,7 +127,7 @@ state_t neuron_model_state_update( * (accum)syn_dynamics_neurons_in_partition)) - global_parameters->core_target_rate; // io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); - if (time % 13000 == 12999 & !printed_value){ //hardcoded time of reset + if (time % window_size == window_size - 1 & !printed_value){ //hardcoded time of reset io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\n", time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate); // global_parameters->core_pop_rate = 0.k; // REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) @@ -134,7 +135,7 @@ state_t neuron_model_state_update( // io_printf(IO_BUF, "2 %u, rate at reset:%k, L:%k, rate:%k\n", time, reg_learning_signal, learning_signal, global_parameters->core_pop_rate); printed_value = true; } - if (time % 13000 == 0){ + if (time % window_size == 0){ new_learning_signal = 0.k; global_parameters->core_pop_rate = 0.k; printed_value = false; @@ -147,15 +148,21 @@ state_t neuron_model_state_update( new_learning_signal = learning_signal; } // neuron->L = learning_signal; - if (time % 13000 > 1300){ + if (time % window_size > 1300){ neuron->L = new_learning_signal; } else{ - neuron->L = 0.k; + neuron->L = learning_signal;//0.k; } // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ + if (time % 1300 == 0){ + neuron->syn_state[syn_ind].z_bar_inp = 0.k; + neuron->syn_state[syn_ind].z_bar = 0.k; + neuron->syn_state[syn_ind].el_a = 0.k; + neuron->syn_state[syn_ind].e_bar = 0.k; + } // 
****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** From 4faf879195a13d017e4ef50507552f34b9f93db4 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 9 Jun 2020 13:08:30 +0100 Subject: [PATCH 059/123] added voltage regularisation, neuron/synapse dynamics reset between runs, merged master, fixed bug causing 1 time step to be skipped --- .../neuron_impl_eprop_adaptive.h | 5 +- .../neuron_impl_left_right_readout.h | 34 ++++++------ .../models/neuron_model_eprop_adaptive_impl.c | 52 +++++++++++++++---- .../neuron_model_left_right_readout_impl.c | 13 +++++ .../neuron_model_sinusoid_readout_impl.c | 6 +++ 5 files changed, 84 insertions(+), 26 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 253ade1bf3b..8b8ff08653e 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -321,15 +321,16 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // } if(neuron_index > 3){ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].el_a; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].delta_w; } // else if (neuron_index == 0){ // } else{ recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; } // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = 
neuron->syn_state[neuron_index].el_a; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index ba024a2cc7a..f41f902987b 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -335,6 +335,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // } // io_printf(IO_BUF, "state = %u - %u\n", current_state, time); if (cue_number == 0 && completed_broadcast){ // reset start of new test + io_printf(IO_BUF, "time entering reset %u\n", time); // io_printf(IO_BUF, "Resetting\n"); completed_broadcast = false; current_time = time; @@ -355,6 +356,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // io_printf(IO_BUF, "current_state = %u, cue_number = %u, direction = %u, time = %u\n", current_state, cue_number, current_cue_direction, time); // In this state the environment is giving the left/right cues to the agent if (current_state == STATE_CUE){ +// io_printf(IO_BUF, "time entering cue %u\n", time); if (neuron_index == 0){ // if it's current in the waiting time between cues do nothing // if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ @@ -382,24 +384,25 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); } } - // turn off and reset if finished - else if ((time - current_time) % (wait_between_cues + duration_of_cue) == (wait_between_cues + duration_of_cue) - 1){ - cue_number += 1; - REAL payload; - payload = global_parameters->rate_off; + } + // turn off and reset if finished + else if 
((time - current_time) % (wait_between_cues + duration_of_cue) == 0 && (time - current_time) > 0){//(wait_between_cues + duration_of_cue) - 1){ + cue_number += 1; + REAL payload; + payload = global_parameters->rate_off; // io_printf(IO_BUF, "poisson setting 2, direction = %u\n", current_cue_direction); - for (int j = current_cue_direction*global_parameters->p_pop_size; - j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); - } - if (cue_number >= total_cues){ - current_state = (current_state + 1) % 3; - } + for (int j = current_cue_direction*global_parameters->p_pop_size; + j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ + spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); + } + if (cue_number >= total_cues){ + current_state = (current_state + 1) % 3; } } } } else if (current_state == STATE_WAITING){ +// io_printf(IO_BUF, "time entering wait %u\n", time); // waiting for prompt, all things ok if (cue_number >= total_cues){ current_time = time; @@ -411,7 +414,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } } else if (current_state == STATE_PROMPT){ -// io_printf(IO_BUF, "ticks_for_mean = %u, n_idx = %u\n", ticks_for_mean, neuron_index); +// io_printf(IO_BUF, "time entering prompt %u\n", time); if (start_prompt && neuron_index == 1){ current_time = time; // send packets to the variable poissons with the updated states @@ -488,6 +491,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // io_printf(IO_BUF, "broadcasting error\n"); } if ((time - current_time) >= prompt_duration && neuron_index == 0){ +// io_printf(IO_BUF, "time entering end of test %u\n", time); // io_printf(IO_BUF, "poisson setting 4, turning off prompt\n"); current_state = 0; completed_broadcast = true; @@ -522,13 +526,13 @@ static bool 
neuron_impl_do_timestep_update(index_t neuron_index, else if (neuron_index == 1){ // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].delta_w; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } else{ // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[1].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index f4c2013cf77..63307ae68eb 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -20,6 +20,7 @@ #include bool printed_value = false; +REAL v_mem_error; REAL new_learning_signal; extern REAL learning_signal; REAL local_eta; @@ -119,6 +120,19 @@ state_t neuron_model_state_update( // (accum)(time%1300) * 0.001k, (accum)(time%1300) * 0.001k * (accum)syn_dynamics_neurons_in_partition, // accum_time); + if (neuron->V_membrane > neuron->B){ + v_mem_error = neuron->V_membrane - neuron->B; +// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); + } + else if (neuron->V_membrane < -neuron->B){ + v_mem_error = neuron->V_membrane + neuron->B; +// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); + } + else{ + 
v_mem_error = 0.k; + } + learning_signal += v_mem_error; + // REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; REAL reg_learning_signal = (global_parameters->core_pop_rate // make it work for different ts // / ((accum)(time%1300) @@ -128,7 +142,9 @@ state_t neuron_model_state_update( - global_parameters->core_target_rate; // io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); if (time % window_size == window_size - 1 & !printed_value){ //hardcoded time of reset - io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\n", time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate); + io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\tL:%k, v_mem:%k\n", + time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate, + learning_signal-v_mem_error, v_mem_error); // global_parameters->core_pop_rate = 0.k; // REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) // / (accum)syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; @@ -136,25 +152,37 @@ state_t neuron_model_state_update( printed_value = true; } if (time % window_size == 0){ - new_learning_signal = 0.k; +// new_learning_signal = 0.k; global_parameters->core_pop_rate = 0.k; printed_value = false; } // neuron->L = learning_signal * neuron->w_fb; + learning_signal *= neuron->w_fb; // if (learning_signal != 0.k && new_learning_signal != learning_signal){ - if (new_learning_signal != learning_signal && (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k)){// && time%1300 > 1100){ +// if (new_learning_signal != learning_signal){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); - learning_signal += reg_learning_signal * 0.1k; - new_learning_signal = learning_signal; 
- } +// if (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){ + new_learning_signal = learning_signal + (reg_learning_signal * 0.1k); +// } +// new_learning_signal = learning_signal; +// } // neuron->L = learning_signal; if (time % window_size > 1300){ neuron->L = new_learning_signal; } else{ - neuron->L = learning_signal;//0.k; + neuron->L = learning_signal; } +// if (time % 99 == 0){ +// io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); +// } + if (time % 1300 == 0){ +// io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); + neuron->B = 10.k; + neuron->b = 0.k; +// io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); + } // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ if (time % 1300 == 0){ @@ -196,8 +224,8 @@ state_t neuron_model_state_update( // Update cached total weight change // ****************************************************************** REAL this_dt_weight_change = - -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; + local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) // if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ // io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " @@ -234,6 +262,12 @@ state_t neuron_model_state_update( // All operations now need doing once per recurrent eprop synapse for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ + if (time % 1300 == 0){ + neuron->syn_state[syn_ind].z_bar_inp = 0.k; + neuron->syn_state[syn_ind].z_bar = 0.k; + neuron->syn_state[syn_ind].el_a = 0.k; + neuron->syn_state[syn_ind].e_bar = 
0.k; + } // ****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 2b7ad22ef72..26f3e8d584f 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -5,6 +5,7 @@ extern uint32_t time; extern REAL learning_signal; REAL local_eta; +REAL v_mem_error; // simple Leaky I&F ODE static inline void _lif_neuron_closed_form( @@ -78,6 +79,18 @@ state_t neuron_model_state_update( // if(learning_signal){ // io_printf(IO_BUF, "learning signal = %k\n", learning_signal); // } +// if (neuron->V_membrane > 10.k){ +// v_mem_error = neuron->V_membrane - 10.k; +//// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); +// } +// else if (neuron->V_membrane < -10.k){ +// v_mem_error = neuron->V_membrane + 10.k; +//// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); +// } +// else{ +// v_mem_error = 0.k; +// } +// learning_signal += v_mem_error * 0.1; neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); // REAL tau_decay = expk(-1.k / 1500.k); diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index 5195f62e46c..bce4f8d6a01 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -121,6 +121,12 @@ state_t neuron_model_state_update( // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; + + // decrease timestep counter preventing rapid updates + if 
(neuron->syn_state[syn_ind].update_ready > 0){ + neuron->syn_state[syn_ind].update_ready -= 1; + } + } return neuron->V_membrane; From dd2a207c506f7184d9075416960a9a28b53964c3 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 11 Jun 2020 14:20:40 +0100 Subject: [PATCH 060/123] window size is variable, reset now includes membrane and refract timer --- .../models/neuron_model_eprop_adaptive_impl.c | 16 ++++++---- .../models/neuron_model_eprop_adaptive_impl.h | 1 + .../neuron_model_left_right_readout_impl.c | 2 ++ .../neuron_model_left_right_readout_impl.h | 1 + .../synapse_dynamics_eprop_adaptive_impl.c | 4 +-- ...synapse_dynamics_left_right_readout_impl.c | 4 +-- .../models/neuron/builds/eprop_adaptive.py | 6 ++-- .../neuron/builds/left_right_readout.py | 4 +-- .../neuron_model_eprop_adaptive.py | 24 +++++++++++---- .../neuron_model_left_right_readout.py | 30 ++++++++++++++++--- 10 files changed, 68 insertions(+), 24 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 63307ae68eb..195e671c288 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -27,7 +27,7 @@ REAL local_eta; extern uint32_t time; extern global_neuron_params_pointer_t global_parameters; extern uint32_t syn_dynamics_neurons_in_partition; -uint32_t window_size = 13000; +//uint32_t window_size = 13000; // simple Leaky I&F ODE static inline void lif_neuron_closed_form( @@ -112,7 +112,7 @@ state_t neuron_model_state_update( // REAL rho_3 = (accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); // io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); - REAL accum_time = (accum)(time%window_size) * 0.001k; + REAL accum_time = (accum)(time%neuron->window_size) * 0.001k; if (!accum_time){ accum_time += 1.k; } @@ -141,7 +141,7 @@ state_t 
neuron_model_state_update( * (accum)syn_dynamics_neurons_in_partition)) - global_parameters->core_target_rate; // io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); - if (time % window_size == window_size - 1 & !printed_value){ //hardcoded time of reset + if (time % neuron->window_size == neuron->window_size - 1 & !printed_value){ //hardcoded time of reset io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\tL:%k, v_mem:%k\n", time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate, learning_signal-v_mem_error, v_mem_error); @@ -151,7 +151,7 @@ state_t neuron_model_state_update( // io_printf(IO_BUF, "2 %u, rate at reset:%k, L:%k, rate:%k\n", time, reg_learning_signal, learning_signal, global_parameters->core_pop_rate); printed_value = true; } - if (time % window_size == 0){ + if (time % neuron->window_size == 0){ // new_learning_signal = 0.k; global_parameters->core_pop_rate = 0.k; printed_value = false; @@ -162,12 +162,12 @@ state_t neuron_model_state_update( // if (new_learning_signal != learning_signal){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); // if (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){ - new_learning_signal = learning_signal + (reg_learning_signal * 0.1k); + new_learning_signal = learning_signal + (reg_learning_signal);// * 0.1k); // } // new_learning_signal = learning_signal; // } // neuron->L = learning_signal; - if (time % window_size > 1300){ + if (time % neuron->window_size > 1300 * 2){ neuron->L = new_learning_signal; } else{ @@ -181,6 +181,8 @@ state_t neuron_model_state_update( // io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); neuron->B = 10.k; neuron->b = 0.k; + neuron->V_membrane = neuron->V_rest; + neuron->refract_timer = 0; // io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); } // All operations now need 
doing once per eprop synapse @@ -378,6 +380,8 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); + io_printf(IO_BUF, "window size = %u ts\n\n", neuron->window_size); + io_printf(IO_BUF, "e_to_dt_on_tau_a = %k n/a\n\n", neuron->e_to_dt_on_tau_a); io_printf(IO_BUF, "adpt = %k n/a\n\n", neuron->adpt); diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 451ea9664ba..7d6ee517aa1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -83,6 +83,7 @@ typedef struct neuron_t { REAL L; // learning signal REAL w_fb; // feedback weight + uint32_t window_size; // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 26f3e8d584f..491cadcfc61 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -196,6 +196,8 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); + io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->window_size); + // io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); // io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); // io_printf(IO_BUF, "time_to_spike_ticks = %k \n", diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 7a0a000dc66..e963799f083 100644 --- 
a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -56,6 +56,7 @@ typedef struct neuron_t { REAL L; // learning signal REAL w_fb; // feedback weight + uint32_t window_size; // array of synaptic states - peak fan-in of >250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 242892bc373..d62d0da81e5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -398,7 +398,7 @@ bool synapse_dynamics_process_plastic_synapses( // Perform weight update: only if batch time has elapsed final_state_t final_state; - if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + if (neuron->syn_state[syn_ind_from_delay].update_ready <= 0){ // enough time has elapsed - perform weight update if (PRINT_PLASTICITY){ io_printf(IO_BUF, "update_ready=0\n"); @@ -412,7 +412,7 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; // reset update_ready counter based on pattern cycle time - neuron->syn_state[syn_ind_from_delay].update_ready += 13000; + neuron->syn_state[syn_ind_from_delay].update_ready += neuron->window_size; } else { if (PRINT_PLASTICITY){ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index a41dfcf3cca..a68f9148304 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -400,7 +400,7 @@ bool 
synapse_dynamics_process_plastic_synapses( // Perform weight update: only if batch time has elapsed final_state_t final_state; - if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + if (neuron->syn_state[syn_ind_from_delay].update_ready <= 0){ // enough time has elapsed - perform weight update if (PRINT_PLASTICITY){ @@ -415,7 +415,7 @@ bool synapse_dynamics_process_plastic_synapses( neuron->syn_state[syn_ind_from_delay].delta_w = 0.0k; // reset update_ready counter based on pattern cycle time - neuron->syn_state[syn_ind_from_delay].update_ready += 13000; + neuron->syn_state[syn_ind_from_delay].update_ready += neuron->window_size; } else { if (PRINT_PLASTICITY){ diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index e3c79e79a27..da783920f19 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -30,7 +30,7 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", "psi", "target_rate", "tau_err", "B", "small_b", - "l", "w_fb", "eta" + "l", "w_fb", "eta", "window_size" }) def __init__( self, @@ -49,7 +49,7 @@ def __init__( B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0 + l=0, w_fb=0.5, eta=1.0, window_size=13000 ): # pylint: disable=too-many-arguments, too-many-locals @@ -64,7 +64,7 @@ def __init__( # Regularisation params target_rate, tau_err, # Learning signal params - l, w_fb, eta + l, w_fb, eta, window_size ) synapse_type = SynapseTypeEPropAdaptive( diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index 3c05a559744..51453a096e0 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -25,7 +25,7 @@ def __init__( rate_on=40, 
rate_off=0, poisson_pop_size=10, # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0): + l=0, w_fb=0.5, eta=1.0, window_size=13000): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeftRightReadout( @@ -36,7 +36,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta) + rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index f956ec0174a..034af05c2de 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -46,6 +46,7 @@ # Learning signal L = "learning_signal" W_FB = "feedback_weight" +WINDOW_SIZE = "window_size" DELTA_W = "delta_w" Z_BAR_OLD = "z_bar_old" @@ -104,7 +105,8 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): # learning signal "__l", "__w_fb", - "__eta" + "__eta", + "__window_size" ] def __init__( @@ -130,7 +132,8 @@ def __init__( tau_err, l, w_fb, - eta + eta, + window_size ): datatype_list = [ @@ -157,6 +160,7 @@ def __init__( # Learning signal DataType.S1615, # L DataType.S1615, # w_fb + DataType.UINT32 # window_size ] # Synapse states - always initialise to zero @@ -207,8 +211,8 @@ def __init__( # learning signal self.__l = l self.__w_fb = w_fb - self.__eta = eta + self.__window_size = window_size @overrides(AbstractNeuronModel.get_n_cpu_cycles) @@ -230,6 +234,7 @@ def add_parameters(self, parameters): parameters[BETA] = self.__beta parameters[SCALAR] = self.__scalar parameters[W_FB] = self.__w_fb + parameters[WINDOW_SIZE] = self.__window_size @overrides(AbstractNeuronModel.add_state_variables) @@ -295,7 +300,8 @@ def 
get_values(self, parameters, state_variables, vertex_slice, ts): parameters[SCALAR], state_variables[L], - parameters[W_FB] + parameters[W_FB], + parameters[WINDOW_SIZE] ] # create synaptic state - init all state to zero @@ -343,7 +349,7 @@ def update_values(self, values, parameters, state_variables): (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb, delta_w, z_bar_old, z_bar, ep_a, e_bar, update_ready) = values + l, __w_fb, window_size, delta_w, z_bar_old, z_bar, ep_a, e_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! # (Note that this function is only called if you do e.g. run(), set(), @@ -473,3 +479,11 @@ def w_fb(self): @w_fb.setter def w_fb(self, new_value): self.__w_fb = new_value + + @property + def window_size(self): + return self.__window_size + + @window_size.setter + def window_size(self, new_value): + self.__window_size = new_value diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 7b31fc094a2..66c3d694c17 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -19,6 +19,7 @@ # Learning signal L = "learning_signal" W_FB = "feedback_weight" +WINDOW_SIZE = "window_size" MEAN_ISI_TICKS = "mean_isi_ticks" TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" @@ -72,6 +73,7 @@ class NeuronModelLeftRightReadout(AbstractNeuronModel): "_rate_on", "_l", "_w_fb", + "_window_size", "_eta", "_mean_l", "_mean_r", @@ -86,7 +88,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta): + rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size): global_data_types = [ 
DataType.UINT32, # MARS KISS seed @@ -116,7 +118,8 @@ def __init__( DataType.INT32, # tau_refrac # Learning signal DataType.S1615, # L - DataType.S1615 # w_fb + DataType.S1615, # w_fb + DataType.UINT32 # window_size ] # Synapse states - always initialise to zero @@ -163,6 +166,7 @@ def __init__( self._l = l self._w_fb = w_fb self._eta = eta + self._window_size = window_size self._n_keys_in_target = poisson_pop_size * 4 @@ -184,6 +188,7 @@ def add_parameters(self, parameters): parameters[TAU_REFRAC] = self._tau_refrac parameters[L] = self._l parameters[W_FB] = self._w_fb + parameters[WINDOW_SIZE] = self._window_size parameters[SEED1] = 10065 parameters[SEED2] = 232 parameters[SEED3] = 3634 @@ -243,7 +248,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), state_variables[L], - parameters[W_FB] + parameters[W_FB], + parameters[WINDOW_SIZE] ] # create synaptic state - init all state to zero @@ -265,7 +271,7 @@ def update_values(self, values, parameters, state_variables): # Read the data (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, _v_reset, _tau_refrac, - _l, _w_fb, delta_w, z_bar_old, z_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! + _l, _w_fb, window_size, delta_w, z_bar_old, z_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! 
# todo check alignment on this # Copy the changed data only @@ -386,6 +392,22 @@ def tau_refrac(self): def tau_refrac(self, tau_refrac): self._tau_refrac = tau_refrac + @property + def w_fb(self): + return self._w_fb + + @w_fb.setter + def w_fb(self, new_value): + self._w_fb = new_value + + @property + def window_size(self): + return self._window_size + + @window_size.setter + def window_size(self, new_value): + self._window_size = new_value + # @property # def mean_isi_ticks(self): # return self._mean_isi_ticks From 9cf2d986266305617499ffe7f52361f5723f95ab Mon Sep 17 00:00:00 2001 From: such-a-git Date: Wed, 17 Jun 2020 16:01:29 +0100 Subject: [PATCH 061/123] saturation check and fix added, removed reg for testing, update ready synchronised, window size bug --- .../neuron_impl_left_right_readout.h | 10 +++++--- .../models/neuron_model_eprop_adaptive_impl.c | 25 ++++++++++--------- .../models/neuron_model_eprop_adaptive_impl.h | 2 +- .../neuron_model_left_right_readout_impl.c | 10 +++++--- .../neuron_model_left_right_readout_impl.h | 2 +- ...synapse_dynamics_left_right_readout_impl.c | 17 +++++++++++-- .../neuron_model_eprop_adaptive.py | 4 +-- .../neuron_model_left_right_readout.py | 6 ++--- 8 files changed, 47 insertions(+), 29 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index f41f902987b..244172f4604 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -308,6 +308,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, additional_input, voltage); if (neuron_index == 0){ +// io_printf(IO_BUF, "n0 - "); // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, @@ -317,13 +318,14 @@ static bool neuron_impl_do_timestep_update(index_t 
neuron_index, global_parameters->readout_V_0 = result; } else if (neuron_index == 1){ +// io_printf(IO_BUF, "n1 - "); // update neuron parameters - learning_signal *= -1.k; +// learning_signal *= -1.k; state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, external_bias, neuron, -50k); - learning_signal *= -1.k; +// learning_signal *= -1.k; // Finally, set global membrane potential to updated value global_parameters->readout_V_1 = result; } @@ -335,7 +337,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // } // io_printf(IO_BUF, "state = %u - %u\n", current_state, time); if (cue_number == 0 && completed_broadcast){ // reset start of new test - io_printf(IO_BUF, "time entering reset %u\n", time); +// io_printf(IO_BUF, "time entering reset %u\n", time); // io_printf(IO_BUF, "Resetting\n"); completed_broadcast = false; current_time = time; @@ -508,7 +510,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // learning_signal = global_parameters->cross_entropy; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = learning_signal; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = learning_signal;//exc_input_values[0];//neuron->syn_state[1].update_ready;// recorded_variable_values[V_RECORDING_INDEX] = voltage; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = ; // if (neuron_index == 2){ diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 195e671c288..84657d38b86 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -131,7 +131,7 @@ state_t neuron_model_state_update( else{ v_mem_error = 0.k; } - learning_signal += v_mem_error; +// learning_signal += v_mem_error; // REAL reg_error = 
(global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; REAL reg_learning_signal = (global_parameters->core_pop_rate // make it work for different ts @@ -162,7 +162,7 @@ state_t neuron_model_state_update( // if (new_learning_signal != learning_signal){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); // if (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){ - new_learning_signal = learning_signal + (reg_learning_signal);// * 0.1k); + new_learning_signal = learning_signal;// + (reg_learning_signal);// * 0.1k); // } // new_learning_signal = learning_signal; // } @@ -185,6 +185,7 @@ state_t neuron_model_state_update( neuron->refract_timer = 0; // io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); } +// io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ if (time % 1300 == 0){ @@ -248,10 +249,10 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar_inp = 0; // decrease timestep counter preventing rapid updates - if (neuron->syn_state[syn_ind].update_ready > 0){ +// if (neuron->syn_state[syn_ind].update_ready > 0){ // io_printf(IO_BUF, "ff reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); - neuron->syn_state[syn_ind].update_ready -= 1; - } + neuron->syn_state[syn_ind].update_ready -= 1; +// } // else{ // io_printf(IO_BUF, "ff not reducing %u\n", syn_ind); // } @@ -323,10 +324,10 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar_inp = 0; // decrease timestep counter preventing rapid updates - if (neuron->syn_state[syn_ind].update_ready > 0){ +// if (neuron->syn_state[syn_ind].update_ready > 0){ // io_printf(IO_BUF, "recducing %u -- 
update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); - neuron->syn_state[syn_ind].update_ready -= 1; - } + neuron->syn_state[syn_ind].update_ready -= 1; +// } // else{ // io_printf(IO_BUF, "not recducing %u\n", syn_ind); // } @@ -366,7 +367,7 @@ void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { } void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { - io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); + io_printf(IO_BUF, "V reset = %11.4k mv\n\n", neuron->V_reset); io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); @@ -380,9 +381,9 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); - io_printf(IO_BUF, "window size = %u ts\n\n", neuron->window_size); + io_printf(IO_BUF, "window size = %u ts\n", neuron->window_size); - io_printf(IO_BUF, "e_to_dt_on_tau_a = %k n/a\n\n", neuron->e_to_dt_on_tau_a); + io_printf(IO_BUF, "beta = %k n/a\n", neuron->beta); - io_printf(IO_BUF, "adpt = %k n/a\n\n", neuron->adpt); + io_printf(IO_BUF, "adpt = %k n/a\n", neuron->adpt); } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 7d6ee517aa1..76eec9ce4e2 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -30,7 +30,7 @@ typedef struct eprop_syn_state_t { REAL z_bar; // low-pass filtered spike train REAL el_a; // adaptive component of eligibility vector REAL e_bar; // low-pass filtered eligibility trace - uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). + int32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). 
}eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 491cadcfc61..7b1cf84cd60 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -47,6 +47,8 @@ state_t neuron_model_state_update( log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); +// io_printf(IO_BUF, "Exc 1: %12.6k, Exc 2: %12.6k - ", exc_input[0], exc_input[1]); +// io_printf(IO_BUF, "Inh 1: %12.6k, Inh 2: %12.6k - %u\n", inh_input[0], inh_input[1], time); use(dummy); // If outside of the refractory period @@ -151,10 +153,10 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar_inp = 0; // decrease timestep counter preventing rapid updates - if (neuron->syn_state[syn_ind].update_ready > 0){ +// if (neuron->syn_state[syn_ind].update_ready > 0){ // io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); - neuron->syn_state[syn_ind].update_ready -= 1; - } + neuron->syn_state[syn_ind].update_ready -= 1; +// } // else{ // io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); // } @@ -196,7 +198,7 @@ void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); - io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->window_size); + io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); // io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); // io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h 
b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index e963799f083..49273c2e783 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -13,7 +13,7 @@ typedef struct eprop_syn_state_t { REAL z_bar; // low-pass filtered spike train // REAL el_a; // adaptive component of eligibility vector // REAL e_bar; // low-pass filtered eligibility trace - uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). + int32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). }eprop_syn_state_t; ///////////////////////////////////////////////////////////// diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index a68f9148304..cda1990ad46 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -439,10 +439,23 @@ bool synapse_dynamics_process_plastic_synapses( // Check for ring buffer saturation int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); +// io_printf(IO_BUF, "d acc:%d, rb:%d, syn:%d\n", accumulation, ring_buffers[ring_buffer_index], synapse_structure_get_final_weight(final_state)); +// io_printf(IO_BUF, "u acc:%u, rb:%u, syn:%u\n", accumulation, ring_buffers[ring_buffer_index], synapse_structure_get_final_weight(final_state)); +// io_printf(IO_BUF, "k acc:%k, rb:%k, syn:%k\n", accumulation, ring_buffers[ring_buffer_index], synapse_structure_get_final_weight(final_state)); + // overflow check + if (accumulation < ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) + && ring_buffers[ring_buffer_index] > 0 && 
synapse_structure_get_final_weight(final_state) > 0){ + accumulation = ring_buffers[ring_buffer_index]; + } + // underflow check + if (accumulation > ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) + && ring_buffers[ring_buffer_index] < 0 && synapse_structure_get_final_weight(final_state) < 0){ + accumulation = ring_buffers[ring_buffer_index]; + } -// uint32_t sat_test = accumulation & 0x10000; +// uint32_t sat_test = accumulation & 0x20000; // if (sat_test) { -// accumulation = sat_test - 1; +// accumulation = 0x10000 - 1; // plastic_saturation_count++; // } diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 034af05c2de..d5ff8c342b5 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -170,7 +170,7 @@ def __init__( DataType.S1615, # z_bar DataType.S1615, # ep_a DataType.S1615, # e_bar - DataType.UINT32 # update_ready + DataType.INT32 # update_ready ] # Extend to include fan-in for each neuron datatype_list.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -256,7 +256,7 @@ def add_state_variables(self, state_variables): state_variables[Z_BAR+str(n)] = 0 state_variables[EP_A+str(n)] = 0 state_variables[E_BAR+str(n)] = 0 - state_variables[UPDATE_READY+str(n)] = 13000 + state_variables[UPDATE_READY+str(n)] = self.__window_size @overrides(AbstractNeuronModel.get_units) def get_units(self, variable): diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 66c3d694c17..aff71e13ca6 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -129,7 +129,7 @@ def __init__( 
DataType.S1615, # z_bar # DataType.S1615, # ep_a # DataType.S1615, # e_bar - DataType.UINT32 # update_ready + DataType.INT32 # update_ready ] # Extend to include fan-in for each neuron data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) @@ -221,7 +221,7 @@ def add_state_variables(self, state_variables): state_variables[Z_BAR+str(n)] = 0 # state_variables[EP_A+str(n)] = 0 # state_variables[E_BAR+str(n)] = 0 - state_variables[UPDATE_READY+str(n)] = 13000 + state_variables[UPDATE_READY+str(n)] = self._window_size @overrides(AbstractNeuronModel.get_units) @@ -258,7 +258,7 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): 0,#, # z_bar # 0, # el_a # 0] # e_bar - 13000, #int(numpy.random.rand()*1024) # update_ready + self._window_size, #int(numpy.random.rand()*1024) # update_ready ] # extend to appropriate fan-in values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) From f690e7a0cc51160bce34c5042035a6f4a142e74b Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 26 Jun 2020 19:00:18 +0100 Subject: [PATCH 062/123] update to learning signal calculation --- neural_modelling/src/core_pop_rate | 113 ++++++++++++++++++ .../models/neuron_model_eprop_adaptive_impl.c | 9 +- 2 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 neural_modelling/src/core_pop_rate diff --git a/neural_modelling/src/core_pop_rate b/neural_modelling/src/core_pop_rate new file mode 100644 index 00000000000..e50435b2880 --- /dev/null +++ b/neural_modelling/src/core_pop_rate @@ -0,0 +1,113 @@ +neuron/implementations/neuron_impl_left_right_readout.h:152: validate_mars_kiss64_seed(global_parameters->kiss_seed); +neuron/implementations/neuron_impl_left_right_readout.h:208: io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); +neuron/implementations/neuron_impl_left_right_readout.h:209: io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); +neuron/implementations/neuron_impl_left_right_readout.h:210: io_printf(IO_BUF, "seed 3: %u \n", 
global_parameters->kiss_seed[2]); +neuron/implementations/neuron_impl_left_right_readout.h:211: io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); +neuron/implementations/neuron_impl_left_right_readout.h:212: io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); +neuron/implementations/neuron_impl_left_right_readout.h:213:// io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); +neuron/implementations/neuron_impl_left_right_readout.h:214: io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); +neuron/implementations/neuron_impl_left_right_readout.h:215: io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); +neuron/implementations/neuron_impl_left_right_readout.h:216: io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); +neuron/implementations/neuron_impl_left_right_readout.h:217: io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); +neuron/implementations/neuron_impl_left_right_readout.h:218: io_printf(IO_BUF, "poisson key: %u \n\n", global_parameters->p_key); +neuron/implementations/neuron_impl_left_right_readout.h:219: io_printf(IO_BUF, "poisson pop size: %u \n\n", global_parameters->p_pop_size); +neuron/implementations/neuron_impl_left_right_readout.h:255:// global_parameters->target_V[target_ind]); +neuron/implementations/neuron_impl_left_right_readout.h:313: global_parameters->readout_V_0 = result; +neuron/implementations/neuron_impl_left_right_readout.h:324: global_parameters->readout_V_1 = result; +neuron/implementations/neuron_impl_left_right_readout.h:327:// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_0; +neuron/implementations/neuron_impl_left_right_readout.h:330:// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_1; +neuron/implementations/neuron_impl_left_right_readout.h:340: global_parameters->cross_entropy = 0.k; 
+neuron/implementations/neuron_impl_left_right_readout.h:341: global_parameters->mean_0 = 0.k; +neuron/implementations/neuron_impl_left_right_readout.h:342: global_parameters->mean_1 = 0.k; +neuron/implementations/neuron_impl_left_right_readout.h:346: key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { +neuron/implementations/neuron_impl_left_right_readout.h:363: REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 +neuron/implementations/neuron_impl_left_right_readout.h:372: payload = global_parameters->rate_on; +neuron/implementations/neuron_impl_left_right_readout.h:374: for (int j = current_cue_direction*global_parameters->p_pop_size; +neuron/implementations/neuron_impl_left_right_readout.h:375: j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ +neuron/implementations/neuron_impl_left_right_readout.h:376: spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); +neuron/implementations/neuron_impl_left_right_readout.h:383: payload = global_parameters->rate_off; +neuron/implementations/neuron_impl_left_right_readout.h:385: for (int j = current_cue_direction*global_parameters->p_pop_size; +neuron/implementations/neuron_impl_left_right_readout.h:386: j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ +neuron/implementations/neuron_impl_left_right_readout.h:387: spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); +neuron/implementations/neuron_impl_left_right_readout.h:414: payload = global_parameters->rate_on; +neuron/implementations/neuron_impl_left_right_readout.h:416: for (int j = 2*global_parameters->p_pop_size; +neuron/implementations/neuron_impl_left_right_readout.h:417: j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ +neuron/implementations/neuron_impl_left_right_readout.h:418: spin1_send_mc_packet(global_parameters->p_key | 
j, bitsk(payload), WITH_PAYLOAD); +neuron/implementations/neuron_impl_left_right_readout.h:426:// io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); +neuron/implementations/neuron_impl_left_right_readout.h:430:// io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); +neuron/implementations/neuron_impl_left_right_readout.h:431:// global_parameters->mean_0 += global_parameters->readout_V_0; +neuron/implementations/neuron_impl_left_right_readout.h:432:// global_parameters->mean_1 += global_parameters->readout_V_1; +neuron/implementations/neuron_impl_left_right_readout.h:435:// accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); +neuron/implementations/neuron_impl_left_right_readout.h:436:// accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); +neuron/implementations/neuron_impl_left_right_readout.h:437: accum exp_0 = expk(global_parameters->readout_V_0 * 0.1k); +neuron/implementations/neuron_impl_left_right_readout.h:438: accum exp_1 = expk(global_parameters->readout_V_1 * 0.1k); +neuron/implementations/neuron_impl_left_right_readout.h:441: if (global_parameters->readout_V_0 > global_parameters->readout_V_1){ +neuron/implementations/neuron_impl_left_right_readout.h:455:// io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); +neuron/implementations/neuron_impl_left_right_readout.h:458: global_parameters->cross_entropy = -logk(softmax_1); +neuron/implementations/neuron_impl_left_right_readout.h:462: global_parameters->cross_entropy = -logk(softmax_0); +neuron/implementations/neuron_impl_left_right_readout.h:472:// learning_signal = global_parameters->cross_entropy; +neuron/implementations/neuron_impl_left_right_readout.h:482: payload = global_parameters->rate_off; +neuron/implementations/neuron_impl_left_right_readout.h:483: for (int j = 
2*global_parameters->p_pop_size; +neuron/implementations/neuron_impl_left_right_readout.h:484: j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ +neuron/implementations/neuron_impl_left_right_readout.h:485: spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); +neuron/implementations/neuron_impl_left_right_readout.h:491:// learning_signal = global_parameters->cross_entropy; +neuron/implementations/neuron_impl_left_right_readout.h:493: recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->cross_entropy; +neuron/implementations/neuron_impl_store_recall_readout.h:139: validate_mars_kiss64_seed(global_parameters->kiss_seed); +neuron/implementations/neuron_impl_store_recall_readout.h:195: io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); +neuron/implementations/neuron_impl_store_recall_readout.h:196: io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); +neuron/implementations/neuron_impl_store_recall_readout.h:197: io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); +neuron/implementations/neuron_impl_store_recall_readout.h:198: io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); +neuron/implementations/neuron_impl_store_recall_readout.h:199: io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); +neuron/implementations/neuron_impl_store_recall_readout.h:200: io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); +neuron/implementations/neuron_impl_store_recall_readout.h:201: io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); +neuron/implementations/neuron_impl_store_recall_readout.h:202: io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); +neuron/implementations/neuron_impl_store_recall_readout.h:203: io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); +neuron/implementations/neuron_impl_store_recall_readout.h:204: io_printf(IO_BUF, 
"mean 1: %k \n\n", global_parameters->mean_1); +neuron/implementations/neuron_impl_store_recall_readout.h:205: io_printf(IO_BUF, "poisson key: %k \n\n", global_parameters->p_key); +neuron/implementations/neuron_impl_store_recall_readout.h:206: io_printf(IO_BUF, "poisson pop size: %k \n\n", global_parameters->p_pop_size); +neuron/implementations/neuron_impl_store_recall_readout.h:241: REAL random_number = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); +neuron/implementations/neuron_impl_store_recall_readout.h:242: if (random_number < global_parameters->prob_command){ +neuron/implementations/neuron_impl_store_recall_readout.h:246: REAL switch_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); +neuron/implementations/neuron_impl_store_recall_readout.h:259: payload = global_parameters->rate_on; +neuron/implementations/neuron_impl_store_recall_readout.h:262: payload = global_parameters->rate_off; +neuron/implementations/neuron_impl_store_recall_readout.h:264: for (int j = i*global_parameters->p_pop_size; +neuron/implementations/neuron_impl_store_recall_readout.h:265: j < i*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ +neuron/implementations/neuron_impl_store_recall_readout.h:266: spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); +neuron/implementations/neuron_impl_store_recall_readout.h:323: global_parameters->mean_0 == 0; +neuron/implementations/neuron_impl_store_recall_readout.h:324: global_parameters->mean_1 == 0; +neuron/implementations/neuron_impl_store_recall_readout.h:336: global_parameters->readout_V_0 = result; +neuron/implementations/neuron_impl_store_recall_readout.h:347: global_parameters->readout_V_1 = result; +neuron/implementations/neuron_impl_store_recall_readout.h:359: global_parameters->mean_0 += global_parameters->readout_V_0; +neuron/implementations/neuron_impl_store_recall_readout.h:360: global_parameters->mean_1 += 
global_parameters->readout_V_1; +neuron/implementations/neuron_impl_store_recall_readout.h:361: accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); +neuron/implementations/neuron_impl_store_recall_readout.h:362: accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); +neuron/implementations/neuron_impl_store_recall_readout.h:367: global_parameters->cross_entropy = -logk(softmax_1); +neuron/implementations/neuron_impl_store_recall_readout.h:370: global_parameters->cross_entropy = -logk(softmax_0); +neuron/implementations/neuron_impl_store_recall_readout.h:382:// if (global_parameters->cross_entropy < -0.7){ +neuron/implementations/neuron_impl_eprop_adaptive.h:211: global_parameters->core_target_rate = global_parameters->core_target_rate +neuron/implementations/neuron_impl_eprop_adaptive.h:213: global_parameters->core_pop_rate = global_parameters->core_pop_rate +neuron/implementations/neuron_impl_eprop_adaptive.h:240: global_parameters->core_pop_rate = global_parameters->core_pop_rate +neuron/implementations/neuron_impl_eprop_adaptive.h:241: * global_parameters->rate_exp_TC; +neuron/implementations/neuron_impl_eprop_adaptive.h:290:// global_parameters->core_pop_rate; +neuron/implementations/neuron_impl_eprop_adaptive.h:307:// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate +neuron/implementations/neuron_impl_eprop_adaptive.h:316:// // global_parameters->core_target_rate; +neuron/implementations/neuron_impl_eprop_adaptive.h:362:// global_parameters->core_pop_rate; +neuron/implementations/neuron_impl_eprop_adaptive.h:369://// global_parameters->core_target_rate; +neuron/implementations/neuron_impl_eprop_adaptive.h:388: global_parameters->core_pop_rate += 1.0k; +neuron/implementations/neuron_impl_eprop_adaptive.h:464: global_parameters->core_target_rate, global_parameters->core_pop_rate); 
+neuron/implementations/neuron_impl_sinusoid_readout.h:182:// io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); +neuron/implementations/neuron_impl_sinusoid_readout.h:183:// io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); +neuron/implementations/neuron_impl_sinusoid_readout.h:184:// io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); +neuron/implementations/neuron_impl_sinusoid_readout.h:185:// io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); +neuron/implementations/neuron_impl_sinusoid_readout.h:186: io_printf(IO_BUF, "eta: %k \n\n", global_parameters->eta); +neuron/implementations/neuron_impl_sinusoid_readout.h:222:// global_parameters->target_V[target_ind]); +neuron/implementations/neuron_impl_sinusoid_readout.h:282: REAL error = result - global_parameters->target_V[target_ind]; +neuron/implementations/neuron_impl_sinusoid_readout.h:306: global_parameters->target_V[target_ind]; +neuron/implementations/neuron_impl_sinusoid_readout.h:308: - global_parameters->target_V[target_ind]; +neuron/implementations/neuron_impl_sinusoid_readout.h:312:// global_parameters->target_V[target_ind]; +neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c:320: REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; +neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c:318: REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike +neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c:319:// REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike 
+neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c:320:// io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); +neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c:320: REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; +neuron/models/neuron_model_eprop_adaptive_impl.c:109: REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 84657d38b86..73aa7982c88 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -157,22 +157,23 @@ state_t neuron_model_state_update( printed_value = false; } // neuron->L = learning_signal * neuron->w_fb; - learning_signal *= neuron->w_fb; +// learning_signal *= neuron->w_fb; // if (learning_signal != 0.k && new_learning_signal != learning_signal){ // if (new_learning_signal != learning_signal){// && time%1300 > 1100){ // io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); // if (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){ - new_learning_signal = learning_signal;// + (reg_learning_signal);// * 0.1k); + new_learning_signal = (learning_signal * neuron->w_fb) + v_mem_error; // } // new_learning_signal = learning_signal; // } // neuron->L = learning_signal; if (time % neuron->window_size > 1300 * 2){ - neuron->L = new_learning_signal; + neuron->L = new_learning_signal + (reg_learning_signal);// * 0.1k); } else{ - neuron->L = learning_signal; + neuron->L = new_learning_signal; } +// neuron->L = learning_signal * neuron->w_fb; // turns of 
all reg // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); From c4c0892ab023812f28c4eec7aa99ed2adf57c660 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Thu, 16 Jul 2020 15:01:09 +0100 Subject: [PATCH 063/123] exp un-scaled, reset works if spike received at t=0, io_prints removed to save buffer overflow on repeats, bad version of over/underflow check on ring buffer, --- .../neuron_impl_eprop_adaptive.h | 44 ++++++++++--- .../neuron_impl_left_right_readout.h | 21 ++++--- .../models/neuron_model_eprop_adaptive_impl.c | 63 +++++++++++-------- .../neuron_model_left_right_readout_impl.c | 32 +++++----- .../synapse_dynamics_eprop_adaptive_impl.c | 31 +++++++-- ...synapse_dynamics_left_right_readout_impl.c | 10 +-- 6 files changed, 134 insertions(+), 67 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 8b8ff08653e..611aa624623 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -319,18 +319,48 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // // exc_input_values[0]; // record input input (signed) // // learning_signal * neuron->w_fb; // } - if(neuron_index > 3){ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].el_a; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[15+neuron_index].delta_w; +// if(neuron_index % 2 == 0){ +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; +//// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; +//// 
recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; +// } +//// else if (neuron_index == 0){ +//// } +// else{ +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; +//// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; +//// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; +// } + if(neuron_index == 0){ +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } // else if (neuron_index == 0){ // } + else if(neuron_index == 1){ +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; + } + else if(neuron_index == 2){ +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; + } else{ - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; -// 
recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; } -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->B; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 244172f4604..5bc2073f056 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -445,17 +445,17 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // io_printf(IO_BUF, " umm "); // accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); // accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); - accum exp_0 = expk(global_parameters->readout_V_0 * 0.1k); - accum exp_1 = expk(global_parameters->readout_V_1 * 0.1k); + accum exp_0 = expk(global_parameters->readout_V_0);// * 0.1k); + accum exp_1 = expk(global_parameters->readout_V_1);// * 0.1k); // io_printf(IO_BUF, "or here - "); if (exp_0 == 0k && exp_1 == 0k){ if (global_parameters->readout_V_0 > global_parameters->readout_V_1){ 
- softmax_0 = 10k; + softmax_0 = 1k; softmax_1 = 0k; } else{ softmax_0 = 0k; - softmax_1 = 10k; + softmax_1 = 1k; } } else{ @@ -475,11 +475,14 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, learning_signal = softmax_0 - 1.k; is_it_right = 0; } -// if (softmax_0 > 0.5){ -// choice = 0; +// if (learning_signal > 0.5){ +// learning_signal = 1k; +// } +// else if (learning_signal < -0.5){ +// learning_signal = -1k; // } // else{ -// choice = 1; +// learning_signal = 0k; // } while (!spin1_send_mc_packet( key | neuron_index, bitsk(learning_signal), 1 )) { @@ -526,13 +529,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = is_it_right; } else if (neuron_index == 1){ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[55].z_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].delta_w; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } else{ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[1].z_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 73aa7982c88..0d445221a55 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -96,12 +96,16 @@ 
state_t neuron_model_state_update( REAL psi_temp2 = ((absk(psi_temp1))); neuron->psi = ((1.0k - psi_temp2) > 0.0k)? (1.0k/neuron->b_0) * -// 0.3k * + 0.3k * //todo why is this commented? (1.0k - psi_temp2) : 0.0k; +// if (neuron->refract_timer){ +// neuron->psi = 0.0k; +// } + neuron->psi *= neuron->A; // This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. This should make it possible to do a pause and resume cycle and have reliable unloading of data. - uint32_t total_input_synapses_per_neuron = 100; //todo should this be fixed - uint32_t total_recurrent_synapses_per_neuron = 100; //todo should this be fixed + uint32_t total_input_synapses_per_neuron = 40; //todo should this be fixed? + uint32_t total_recurrent_synapses_per_neuron = 20; //todo should this be fixed? uint32_t recurrent_offset = 100; @@ -173,23 +177,24 @@ state_t neuron_model_state_update( else{ neuron->L = new_learning_signal; } -// neuron->L = learning_signal * neuron->w_fb; // turns of all reg + neuron->L = learning_signal * neuron->w_fb; // turns of all reg // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // } - if (time % 1300 == 0){ + if (time % 1300 == 0 || time % 1300 == 1){ // io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); - neuron->B = 10.k; + neuron->B = neuron->b_0; neuron->b = 0.k; neuron->V_membrane = neuron->V_rest; neuron->refract_timer = 0; + neuron->z = 0.k; // io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); } // io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ - if (time % 1300 == 0){ + if (time % 1300 == 0, time % 1300 == 1){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 0.k; neuron->syn_state[syn_ind].el_a = 
0.k; @@ -212,6 +217,7 @@ state_t neuron_model_state_update( (neuron->psi * neuron->syn_state[syn_ind].z_bar) + (rho - neuron->psi * neuron->beta) * neuron->syn_state[syn_ind].el_a; +// (rho) * neuron->syn_state[syn_ind].el_a; // ****************************************************************** @@ -219,6 +225,7 @@ state_t neuron_model_state_update( // ****************************************************************** REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - neuron->beta * neuron->syn_state[syn_ind].el_a); +// 0); neuron->syn_state[syn_ind].e_bar = neuron->exp_TC * neuron->syn_state[syn_ind].e_bar @@ -287,6 +294,7 @@ state_t neuron_model_state_update( (neuron->psi * neuron->syn_state[syn_ind].z_bar) + (rho - neuron->psi * neuron->beta) * neuron->syn_state[syn_ind].el_a; +// (rho) * neuron->syn_state[syn_ind].el_a; // ****************************************************************** @@ -294,6 +302,7 @@ state_t neuron_model_state_update( // ****************************************************************** REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - neuron->beta * neuron->syn_state[syn_ind].el_a); +// 0); neuron->syn_state[syn_ind].e_bar = neuron->exp_TC * neuron->syn_state[syn_ind].e_bar @@ -344,7 +353,7 @@ state_t neuron_model_state_update( void neuron_model_has_spiked(neuron_pointer_t neuron) { // reset z to zero neuron->z = 0; - +// neuron->V_membrane = neuron->V_rest; // Set refractory timer neuron->refract_timer = neuron->T_refract - 1; neuron->A = 0; @@ -368,23 +377,23 @@ void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { } void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { - io_printf(IO_BUF, "V reset = %11.4k mv\n\n", neuron->V_reset); - io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); - - io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); - io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); - - io_printf(IO_BUF, 
"exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); - - io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); - - io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); - - io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); - - io_printf(IO_BUF, "window size = %u ts\n", neuron->window_size); - - io_printf(IO_BUF, "beta = %k n/a\n", neuron->beta); - - io_printf(IO_BUF, "adpt = %k n/a\n", neuron->adpt); +// io_printf(IO_BUF, "V reset = %11.4k mv\n\n", neuron->V_reset); +// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); +// +// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); +// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); +// +// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); +// +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// +// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); +// +// io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); +// +// io_printf(IO_BUF, "window size = %u ts\n", neuron->window_size); +// +// io_printf(IO_BUF, "beta = %k n/a\n", neuron->beta); +// +// io_printf(IO_BUF, "adpt = %k n/a\n", neuron->adpt); } diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 7b1cf84cd60..251e48d7749 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -76,7 +76,7 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? + uint32_t total_synapses_per_neuron = 20; //todo should this be fixed? 
// if(learning_signal){ // io_printf(IO_BUF, "learning signal = %k\n", learning_signal); @@ -184,21 +184,21 @@ void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { } void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { - io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); - io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); - - io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); - io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); - - io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); - - io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); - - io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); - - io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); - - io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); +// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); +// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); +// +// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); +// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); +// +// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); +// +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// +// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); +// +// io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); +// +// io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); // io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); // io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index d62d0da81e5..08f2ea49860 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ 
b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -377,14 +377,26 @@ bool synapse_dynamics_process_plastic_synapses( syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } - neuron_pointer_t neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! - - // Create update state from the plastic synaptic word update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024k; // !!!! Check what units this is in - same as weight? !!!! + +// io_printf(IO_BUF, "initial_weight: d%d, k%k, u%u - ", current_state.initial_weight, current_state.initial_weight, current_state.initial_weight); +// if (current_state.initial_weight > 0){ +// io_printf(IO_BUF, "+ve\n"); +// } +// else if(current_state.initial_weight < 0){ +// io_printf(IO_BUF, "-ve\n"); +// neuron->syn_state[syn_ind_from_delay].z_bar_inp *= -1k; +// } +// else{ +// io_printf(IO_BUF, "0\n"); +// } + + if (PRINT_PLASTICITY){ // io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", // neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); @@ -444,6 +456,17 @@ bool synapse_dynamics_process_plastic_synapses( // plastic_saturation_count++; // } + // overflow check + if (accumulation < ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) + && ring_buffers[ring_buffer_index] > 0 && synapse_structure_get_final_weight(final_state) > 0){ + accumulation = ring_buffers[ring_buffer_index]; + } + // underflow check + if (accumulation > ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) + && ring_buffers[ring_buffer_index] < 0 && synapse_structure_get_final_weight(final_state) < 0){ + accumulation = ring_buffers[ring_buffer_index]; + } + 
ring_buffers[ring_buffer_index] = accumulation; // Write back updated synaptic word to plastic region diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index cda1990ad46..e66278504ce 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -379,14 +379,14 @@ bool synapse_dynamics_process_plastic_synapses( syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } - neuron_pointer_t neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! - - // Create update state from the plastic synaptic word update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); + neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! 
+ + if (PRINT_PLASTICITY){ // io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", // neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); @@ -446,11 +446,13 @@ bool synapse_dynamics_process_plastic_synapses( if (accumulation < ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) && ring_buffers[ring_buffer_index] > 0 && synapse_structure_get_final_weight(final_state) > 0){ accumulation = ring_buffers[ring_buffer_index]; + plastic_saturation_count++; } // underflow check if (accumulation > ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) && ring_buffers[ring_buffer_index] < 0 && synapse_structure_get_final_weight(final_state) < 0){ accumulation = ring_buffers[ring_buffer_index]; + plastic_saturation_count++; } // uint32_t sat_test = accumulation & 0x20000; From ec335d08b74865317a74fcf452b7ba3bb89086bb Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 22 Jul 2020 16:47:06 +0100 Subject: [PATCH 064/123] remove some print statements --- .../neuron_impl_left_right_readout.h | 26 +++++++++---------- .../neuron_model_left_right_readout_impl.c | 24 ++++++++--------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 5bc2073f056..80112856152 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -208,19 +208,19 @@ static void neuron_impl_load_neuron_parameters( neuron_model_set_global_neuron_params(global_parameters); - io_printf(IO_BUF, "\nPrinting global params\n"); - io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); - io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); - io_printf(IO_BUF, "seed 3: %u \n", 
global_parameters->kiss_seed[2]); - io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); - io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); -// io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); - io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); - io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); - io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); - io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); - io_printf(IO_BUF, "poisson key: %u \n\n", global_parameters->p_key); - io_printf(IO_BUF, "poisson pop size: %u \n\n", global_parameters->p_pop_size); +// io_printf(IO_BUF, "\nPrinting global params\n"); +// io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); +// io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); +// io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); +// io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); +// io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); +//// io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); +// io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); +// io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); +// io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); +// io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); +// io_printf(IO_BUF, "poisson key: %u \n\n", global_parameters->p_key); +// io_printf(IO_BUF, "poisson pop size: %u \n\n", global_parameters->p_pop_size); for (index_t n = 0; n < n_neurons; n++) { diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 251e48d7749..46f2e5944e3 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ 
b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -23,18 +23,18 @@ void neuron_model_set_global_neuron_params( local_eta = params->eta; - io_printf(IO_BUF, "local eta = %k\n", local_eta); - io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); - io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); - io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); - io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); - io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); - io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); - io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); - io_printf(IO_BUF, "p_key = %u\n", params->p_key); - io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); - io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); - io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +// io_printf(IO_BUF, "local eta = %k\n", local_eta); +// io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); +// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +// io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); +// io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); +// io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); +// io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); +// io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); +// io_printf(IO_BUF, "p_key = %u\n", params->p_key); +// io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); +// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); // io_printf(IO_BUF, "local eta = %k\n", params->); // Does Nothing - no params From da6d0f3d516b31afbbe136e8ac87a6b55b5ab9bd Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 25 Sep 2020 15:35:15 +0100 Subject: [PATCH 065/123] number of cues now an input variable for incremental learning --- .../implementations/neuron_impl_left_right_readout.h | 7 +++---- 
.../models/neuron_model_left_right_readout_impl.h | 1 + .../pyNN/models/neuron/builds/left_right_readout.py | 4 ++-- .../neuron_models/neuron_model_left_right_readout.py | 10 +++++++--- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 80112856152..cb4928c88c6 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -73,7 +73,6 @@ typedef enum current_state_t current_state = 0; uint32_t current_time = 0; uint32_t cue_number = 0; -uint32_t total_cues = 1; uint32_t current_cue_direction = 2; // 0 = left, 1 = right uint32_t accumulative_direction = 0; // if > total_cues / 2 = right uint32_t wait_between_cues = 50; // ms @@ -397,7 +396,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); } - if (cue_number >= total_cues){ + if (cue_number >= global_parameters->number_of_cues){ current_state = (current_state + 1) % 3; } } @@ -406,7 +405,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, else if (current_state == STATE_WAITING){ // io_printf(IO_BUF, "time entering wait %u\n", time); // waiting for prompt, all things ok - if (cue_number >= total_cues){ + if (cue_number >= global_parameters->number_of_cues){ current_time = time; cue_number = 0; } @@ -465,7 +464,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } // io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); // What to do if log(0)? 
- if (accumulative_direction > total_cues >> 1){ + if (accumulative_direction > global_parameters->number_of_cues >> 1){ global_parameters->cross_entropy = -logk(softmax_1); learning_signal = softmax_0; is_it_right = 1; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 49273c2e783..7c966732fd4 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -92,6 +92,7 @@ typedef struct global_neuron_params_t { uint32_t p_key; uint32_t p_pop_size; REAL eta; + uint32_t number_of_cues; } global_neuron_params_t; #endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index 51453a096e0..ad0bc0e55c1 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -25,7 +25,7 @@ def __init__( rate_on=40, rate_off=0, poisson_pop_size=10, # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0, window_size=13000): + l=0, w_fb=0.5, eta=1.0, window_size=13000, number_of_cues=1): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeftRightReadout( @@ -36,7 +36,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size) + rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, number_of_cues) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index aff71e13ca6..57040469623 100644 --- 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -80,7 +80,8 @@ class NeuronModelLeftRightReadout(AbstractNeuronModel): "_cross_entropy", "_poisson_key", "_poisson_pop_size", - "_n_keys_in_target" + "_n_keys_in_target", + "_number_of_cues" ] def __init__( @@ -88,7 +89,7 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size): + rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, number_of_cues): global_data_types = [ DataType.UINT32, # MARS KISS seed @@ -106,6 +107,7 @@ def __init__( DataType.UINT32, # poisson key DataType.UINT32, # poisson pop size DataType.S1615, # eta + DataType.UINT32, # number of cues ] data_types = [ DataType.S1615, # v @@ -167,6 +169,7 @@ def __init__( self._w_fb = w_fb self._eta = eta self._window_size = window_size + self._number_of_cues = number_of_cues self._n_keys_in_target = poisson_pop_size * 4 @@ -307,7 +310,8 @@ def get_global_values(self, machine_time_step): self._cross_entropy, self._poisson_key, self._poisson_pop_size, - self._eta + self._eta, + self._number_of_cues ] return vals From 1b7a51057d3fc5bd3786fe37f0086befb8ba0c66 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 25 Sep 2020 16:30:12 +0100 Subject: [PATCH 066/123] added #of cues to default init values --- spynnaker/pyNN/models/neuron/builds/left_right_readout.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index ad0bc0e55c1..af4f5d73c7d 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -13,7 +13,7 @@ class LeftRightReadout(AbstractPyNNNeuronModelStandard): @default_initial_values({"v", "isyn_exc", "isyn_exc2", 
"isyn_inh", "isyn_inh2", - "l", "w_fb", "eta"}) + "l", "w_fb", "eta", "number_of_cues"}) def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, From 8f220ae852b4c897da72c1bfa375d57cb5f1d2eb Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 29 Sep 2020 10:23:17 +0100 Subject: [PATCH 067/123] vmem used in learning --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 0d445221a55..46896983ded 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -177,7 +177,8 @@ state_t neuron_model_state_update( else{ neuron->L = new_learning_signal; } - neuron->L = learning_signal * neuron->w_fb; // turns of all reg +// neuron->L = learning_signal * neuron->w_fb; // turns of all reg + neuron->L = new_learning_signal; // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); From 071baca86aab98a1333265777f8eb81781134fe6 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 29 Sep 2020 16:27:31 +0100 Subject: [PATCH 068/123] number of cues added to eprop for membrane resetting, changes to recording --- neural_modelling/makefiles/neuron/Makefile | 4 +-- .../neuron_impl_eprop_adaptive.h | 10 ++++---- .../neuron_impl_left_right_readout.h | 2 +- .../models/neuron_model_eprop_adaptive_impl.c | 12 ++++----- .../models/neuron_model_eprop_adaptive_impl.h | 1 + .../neuron_model_left_right_readout_impl.c | 6 ++--- .../models/neuron/builds/eprop_adaptive.py | 6 ++--- .../neuron_model_eprop_adaptive.py | 25 +++++++++++++++---- 8 files changed, 41 insertions(+), 25 deletions(-) diff --git a/neural_modelling/makefiles/neuron/Makefile 
b/neural_modelling/makefiles/neuron/Makefile index c771579e2e3..fd1517945af 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -38,8 +38,8 @@ MODELS = eprop_adaptive \ IF_curr_exp_stdp_mad_nearest_pair_multiplicative \ IF_curr_exp_stdp_mad_pfister_triplet_additive \ IF_cond_exp_stdp_mad_nearest_pair_additive \ - IF_curr_alpha \ - IF_curr_alpha_stdp_mad_pair_additive \ + IF_curr_alpha \ + IF_curr_alpha_stdp_mad_pair_additive \ IF_cond_exp_structural \ IF_curr_exp_stdp_mad_pair_additive_structural \ IF_curr_exp_structural \ diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 611aa624623..32b530f03da 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -336,7 +336,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, if(neuron_index == 0){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } // else if (neuron_index == 0){ @@ -344,23 +344,23 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, else if(neuron_index == 1){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 
neuron->syn_state[20+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } else if(neuron_index == 2){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } else{ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; } // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->B; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index cb4928c88c6..53193b0fc13 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ 
-524,7 +524,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, if (neuron_index == 2){ //this neuron does nothing // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].delta_w; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[50].delta_w; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = is_it_right; } else if (neuron_index == 1){ diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 46896983ded..4f28c91d1f4 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -27,7 +27,6 @@ REAL local_eta; extern uint32_t time; extern global_neuron_params_pointer_t global_parameters; extern uint32_t syn_dynamics_neurons_in_partition; -//uint32_t window_size = 13000; // simple Leaky I&F ODE static inline void lif_neuron_closed_form( @@ -105,7 +104,7 @@ state_t neuron_model_state_update( // This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. This should make it possible to do a pause and resume cycle and have reliable unloading of data. uint32_t total_input_synapses_per_neuron = 40; //todo should this be fixed? - uint32_t total_recurrent_synapses_per_neuron = 20; //todo should this be fixed? + uint32_t total_recurrent_synapses_per_neuron = 0; //todo should this be fixed? 
uint32_t recurrent_offset = 100; @@ -171,7 +170,7 @@ state_t neuron_model_state_update( // new_learning_signal = learning_signal; // } // neuron->L = learning_signal; - if (time % neuron->window_size > 1300 * 2){ + if (time % neuron->window_size > test_length * 2){ //todo make this relative to number of cues neuron->L = new_learning_signal + (reg_learning_signal);// * 0.1k); } else{ @@ -180,10 +179,11 @@ state_t neuron_model_state_update( // neuron->L = learning_signal * neuron->w_fb; // turns of all reg neuron->L = new_learning_signal; + uint32_t test_length = (150*neuron->number_of_cues)+1000+150; // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // } - if (time % 1300 == 0 || time % 1300 == 1){ + if (time % test_length == 0 || time % test_length == 1){ // io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); neuron->B = neuron->b_0; neuron->b = 0.k; @@ -195,7 +195,7 @@ state_t neuron_model_state_update( // io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ - if (time % 1300 == 0, time % 1300 == 1){ + if (time % test_length == 0 || time % test_length == 1){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 0.k; neuron->syn_state[syn_ind].el_a = 0.k; @@ -274,7 +274,7 @@ state_t neuron_model_state_update( // All operations now need doing once per recurrent eprop synapse for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ - if (time % 1300 == 0){ + if (time % test_length == 0 || time % test_length == 1){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 0.k; neuron->syn_state[syn_ind].el_a = 0.k; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h 
b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 76eec9ce4e2..0e313c90a35 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -84,6 +84,7 @@ typedef struct neuron_t { REAL L; // learning signal REAL w_fb; // feedback weight uint32_t window_size; + uint32_t number_of_cues; // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index 46f2e5944e3..ed965b16595 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -76,7 +76,7 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - uint32_t total_synapses_per_neuron = 20; //todo should this be fixed? + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
// if(learning_signal){ // io_printf(IO_BUF, "learning signal = %k\n", learning_signal); @@ -131,9 +131,9 @@ state_t neuron_model_state_update( REAL this_dt_weight_change = // -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - -local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; - neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ // io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " // "z_bar_inp = %k \t z_bar = %k \t time:%u\n" diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index da783920f19..e7e19e76836 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -30,7 +30,7 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", "psi", "target_rate", "tau_err", "B", "small_b", - "l", "w_fb", "eta", "window_size" + "l", "w_fb", "eta", "window_size", "number_of_cues" }) def __init__( self, @@ -49,7 +49,7 @@ def __init__( B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0, window_size=13000 + l=0, w_fb=0.5, eta=1.0, window_size=13000, number_of_cues=1 ): # pylint: disable=too-many-arguments, too-many-locals @@ -64,7 +64,7 @@ def __init__( # Regularisation params target_rate, tau_err, # Learning signal params - l, w_fb, eta, window_size + l, w_fb, eta, window_size, number_of_cues ) synapse_type = SynapseTypeEPropAdaptive( diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index d5ff8c342b5..40a0ca3567b 100644 --- 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -47,6 +47,7 @@ L = "learning_signal" W_FB = "feedback_weight" WINDOW_SIZE = "window_size" +NUMBER_OF_CUES = "number_of_cues" DELTA_W = "delta_w" Z_BAR_OLD = "z_bar_old" @@ -106,7 +107,8 @@ class NeuronModelEPropAdaptive(AbstractNeuronModel): "__l", "__w_fb", "__eta", - "__window_size" + "__window_size", + "__number_of_cues" ] def __init__( @@ -133,7 +135,8 @@ def __init__( l, w_fb, eta, - window_size + window_size, + number_of_cues ): datatype_list = [ @@ -160,7 +163,8 @@ def __init__( # Learning signal DataType.S1615, # L DataType.S1615, # w_fb - DataType.UINT32 # window_size + DataType.UINT32, # window_size + DataType.UINT32 # number_of_cues ] # Synapse states - always initialise to zero @@ -213,6 +217,7 @@ def __init__( self.__w_fb = w_fb self.__eta = eta self.__window_size = window_size + self.__number_of_cues = number_of_cues @overrides(AbstractNeuronModel.get_n_cpu_cycles) @@ -235,6 +240,7 @@ def add_parameters(self, parameters): parameters[SCALAR] = self.__scalar parameters[W_FB] = self.__w_fb parameters[WINDOW_SIZE] = self.__window_size + parameters[NUMBER_OF_CUES] = self.__number_of_cues @overrides(AbstractNeuronModel.add_state_variables) @@ -301,7 +307,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): state_variables[L], parameters[W_FB], - parameters[WINDOW_SIZE] + parameters[WINDOW_SIZE], + parameters[NUMBER_OF_CUES] ] # create synaptic state - init all state to zero @@ -349,7 +356,7 @@ def update_values(self, values, parameters, state_variables): (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, _v_reset, _tau_refrac, psi, big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - l, __w_fb, window_size, delta_w, z_bar_old, z_bar, ep_a, e_bar, update_ready) = values + l, __w_fb, window_size, number_of_cues, delta_w, z_bar_old, z_bar, ep_a, e_bar, 
update_ready) = values # Not sure this will work with the new array of synapse!!! # (Note that this function is only called if you do e.g. run(), set(), @@ -487,3 +494,11 @@ def window_size(self): @window_size.setter def window_size(self, new_value): self.__window_size = new_value + + @property + def number_of_cues(self): + return self.__number_of_cues + + @window_size.setter + def window_size(self, new_value): + self.__number_of_cues = new_value From 8ddf983bff0bf3d12ce14d02a35c4974512951d4 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 29 Sep 2020 16:29:29 +0100 Subject: [PATCH 069/123] moved test_length --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 4f28c91d1f4..b8f0b7b3f69 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -170,6 +170,9 @@ state_t neuron_model_state_update( // new_learning_signal = learning_signal; // } // neuron->L = learning_signal; + + uint32_t test_length = (150*neuron->number_of_cues)+1000+150; + if (time % neuron->window_size > test_length * 2){ //todo make this relative to number of cues neuron->L = new_learning_signal + (reg_learning_signal);// * 0.1k); } @@ -178,8 +181,6 @@ state_t neuron_model_state_update( } // neuron->L = learning_signal * neuron->w_fb; // turns of all reg neuron->L = new_learning_signal; - - uint32_t test_length = (150*neuron->number_of_cues)+1000+150; // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // } From 01abcad31de52c3b1e91400b656498e21aead3e4 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Fri, 2 Oct 2020 18:02:10 +0100 Subject: [PATCH 070/123] random cue turned back on --- 
.../neuron_impl_left_right_readout.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 53193b0fc13..af90d31274a 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -368,14 +368,14 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // pick broadcast if just entered if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ // pick new value and broadcast -// REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 -// if (random_value < 0.5k){ -// current_cue_direction = 0; -// } -// else{ -// current_cue_direction = 1; -// } - current_cue_direction = (current_cue_direction + 1) % 2; + REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 + if (random_value < 0.5k){ + current_cue_direction = 0; + } + else{ + current_cue_direction = 1; + } +// current_cue_direction = (current_cue_direction + 1) % 2; accumulative_direction += current_cue_direction; REAL payload; payload = global_parameters->rate_on; From 01d0688f5260c28139cda1fca8f5967ef7f6890a Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 13:54:33 +0100 Subject: [PATCH 071/123] Update_ready fully added to sinusoid --- .../implementations/neuron_impl_eprop_adaptive.h | 8 ++++---- .../stdp/synapse_dynamics_sinusoid_readout_impl.c | 2 +- .../pyNN/models/neuron/builds/sinusoid_readout.py | 4 ++-- .../neuron_models/neuron_model_sinusoid_readout.py | 10 +++++++--- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 32b530f03da..bd1bd85e987 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -344,23 +344,23 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, else if(neuron_index == 1){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } else if(neuron_index == 2){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].e_bar; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } else{ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; } // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 
neuron->syn_state[neuron_index].el_a; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 7dce10edf4d..cea041ae669 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -400,7 +400,7 @@ bool synapse_dynamics_process_plastic_synapses( // Perform weight update: only if batch time has elapsed final_state_t final_state; - if (neuron->syn_state[syn_ind_from_delay].update_ready == 0){ + if (neuron->syn_state[syn_ind_from_delay].update_ready <= 0){ // enough time has elapsed - perform weight update if (PRINT_PLASTICITY){ diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index 77a39e29a75..d8ac424e615 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -26,13 +26,13 @@ def __init__( target_data =[], # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0): + l=0, w_fb=0.5, eta=1.0, update_ready=1024): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeakyIntegrateAndFireSinusoidReadout( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, target_data, # Learning signal params - l, w_fb, eta) + l, w_fb, eta, update_ready) synapse_type = SynapseTypeEPropAdaptive( tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py 
b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 945216c5f0a..a36d29f46a2 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -59,7 +59,8 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): # learning signal "_l", "_w_fb", - "_eta" + "_eta", + "_update_ready" ] def __init__( @@ -68,7 +69,8 @@ def __init__( target_data, l, w_fb, - eta): + eta, + update_ready): data_types = [ DataType.S1615, # v @@ -125,6 +127,8 @@ def __init__( self._eta = eta + self._update_ready = update_ready + @overrides(AbstractNeuronModel.get_n_cpu_cycles) def get_n_cpu_cycles(self, n_neurons): # A bit of a guess @@ -186,7 +190,7 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): 0,#, # z_bar # 0, # el_a # 0] # e_bar - 0, #int(numpy.random.rand()*1024) # update_ready + self._update_ready, #int(numpy.random.rand()*1024) # update_ready ] # extend to appropriate fan-in values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) From c77ce84bd0200ccc8efcaa19101d4c28a4c3da59 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 14:33:55 +0100 Subject: [PATCH 072/123] change recording --- .../implementations/neuron_impl_sinusoid_readout.h | 9 +++++---- spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index e1bb889363b..da62c40f3b7 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -282,8 +282,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, REAL error = result - global_parameters->target_V[target_ind]; learning_signal = error; // 
Record Error - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - error; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// error; // neuron->syn_state[3].delta_w; // neuron->syn_state[0].z_bar; @@ -304,9 +304,10 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, recorded_variable_values[V_RECORDING_INDEX] = // neuron->syn_state[0].z_bar; global_parameters->target_V[target_ind]; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - - global_parameters->target_V[target_ind]; +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// - global_parameters->target_V[target_ind]; } + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[syn_ind].z_bar // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index d8ac424e615..41850f7af74 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -14,7 +14,7 @@ class SinusoidReadout(AbstractPyNNNeuronModelStandard): @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", "isyn_inh2", - "l", "w_fb", "eta"}) + "l", "w_fb", "eta", "update_ready"}) def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, From f1c035b9ebf1131e3a7fed40e16920f7f745d97c Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 14:35:54 +0100 Subject: [PATCH 073/123] change recording --- .../src/neuron/implementations/neuron_impl_sinusoid_readout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index da62c40f3b7..6992036b000 100644 --- 
a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -307,7 +307,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = // - global_parameters->target_V[target_ind]; } - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[syn_ind].z_bar + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[syn_ind].z_bar; // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; From b1301acc577c066c0908558c6402c823e82f721f Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 14:36:55 +0100 Subject: [PATCH 074/123] change recording --- .../src/neuron/implementations/neuron_impl_sinusoid_readout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 6992036b000..11bd03d7fa2 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -307,7 +307,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = // - global_parameters->target_V[target_ind]; } - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[syn_ind].z_bar; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].z_bar; // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; From e7317317c550a2a1a42b5b598ca9698026c63981 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 15:10:23 +0100 Subject: [PATCH 075/123] removed decay 
pon zbar --- .../src/neuron/models/neuron_model_sinusoid_readout_impl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index bce4f8d6a01..ba15228d765 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -71,8 +71,8 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1 - neuron->exp_TC) * -// + +// + (1 - neuron->exp_TC) * + + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update From 3870823548fa566be3c1fb88acca71be213fc420 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 15:21:01 +0100 Subject: [PATCH 076/123] updated weight update issue with - += --- .../src/neuron/models/neuron_model_sinusoid_readout_impl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index ba15228d765..8b5103ee4ae 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -60,7 +60,7 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - uint32_t total_synapses_per_neuron = 200; //todo should this be fixed? + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
neuron->L = learning_signal * neuron->w_fb; @@ -100,9 +100,9 @@ state_t neuron_model_state_update( // ****************************************************************** REAL this_dt_weight_change = // -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - -local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; - neuron->syn_state[syn_ind].delta_w += this_dt_weight_change; + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ // io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " // "z_bar_inp = %k \t z_bar = %k \t time:%u\n" From 89a92603c10697b596af330b8e7bb46562f98ca4 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 15:27:39 +0100 Subject: [PATCH 077/123] changed recording --- .../src/neuron/implementations/neuron_impl_sinusoid_readout.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 11bd03d7fa2..7ced4b616bc 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -307,11 +307,11 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = // - global_parameters->target_V[target_ind]; } - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].z_bar; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index*20].z_bar; // Record target recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = // global_parameters->target_V[target_ind]; - neuron->syn_state[neuron_index].delta_w; + neuron->syn_state[neuron_index*20].delta_w; // 
exc_input_values[0]; From 6c5481e0a55f7f89bf69633e75dd06be13127865 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 16:29:48 +0100 Subject: [PATCH 078/123] removed reset if not left right task --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index b8f0b7b3f69..54a7d22a23e 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -172,6 +172,9 @@ state_t neuron_model_state_update( // neuron->L = learning_signal; uint32_t test_length = (150*neuron->number_of_cues)+1000+150; + if(neuron->number_of_cues == 0){ + test_length = neuron->window_size; + } if (time % neuron->window_size > test_length * 2){ //todo make this relative to number of cues neuron->L = new_learning_signal + (reg_learning_signal);// * 0.1k); @@ -184,7 +187,7 @@ state_t neuron_model_state_update( // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // } - if (time % test_length == 0 || time % test_length == 1){ + if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ // io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); neuron->B = neuron->b_0; neuron->b = 0.k; @@ -196,7 +199,7 @@ state_t neuron_model_state_update( // io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ - if (time % test_length == 0 || time % test_length == 1){ + if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 
0.k; neuron->syn_state[syn_ind].el_a = 0.k; @@ -275,7 +278,7 @@ state_t neuron_model_state_update( // All operations now need doing once per recurrent eprop synapse for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ - if (time % test_length == 0 || time % test_length == 1){ + if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 0.k; neuron->syn_state[syn_ind].el_a = 0.k; From b7b65c75160e1d1db65195b46bd41b92bc731c9a Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 16:30:18 +0100 Subject: [PATCH 079/123] default number of cues = 0 --- spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index e7e19e76836..438038fac7c 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -49,7 +49,7 @@ def __init__( B=10, small_b=0, small_b_0=10, tau_a=500, beta=1.8, # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0, window_size=13000, number_of_cues=1 + l=0, w_fb=0.5, eta=1.0, window_size=13000, number_of_cues=0 ): # pylint: disable=too-many-arguments, too-many-locals From 4d56da3eba38cfceb946b0d88eee4d6f215f69ad Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 16:57:10 +0100 Subject: [PATCH 080/123] return of the rate reg --- .../neuron_impl_eprop_adaptive.h | 18 +++++++++--------- .../models/neuron_model_eprop_adaptive_impl.c | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index bd1bd85e987..5291c057d89 100644 --- 
a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -235,11 +235,11 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, input_t external_bias, state_t *recorded_variable_values) { -// if (neuron_index == 0) { -// // Decay global rate trace (only done once per core per timestep) -// global_parameters->core_pop_rate = global_parameters->core_pop_rate -// * global_parameters->rate_exp_TC; -// } + if (neuron_index == 0) { + // Decay global rate trace (only done once per core per timestep) + global_parameters->core_pop_rate = global_parameters->core_pop_rate + * global_parameters->rate_exp_TC; + } // Get the neuron itself @@ -344,19 +344,19 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, else if(neuron_index == 1){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } else if(neuron_index == 2){ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; } else{ // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; 
recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; } // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; @@ -377,7 +377,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // / (accum_time // * (accum)syn_dynamics_neurons_in_partition)) // - global_parameters->core_target_rate; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->core_pop_rate; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 54a7d22a23e..75c0524bc17 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -156,7 +156,7 @@ state_t neuron_model_state_update( } if (time % neuron->window_size == 0){ // new_learning_signal = 0.k; - global_parameters->core_pop_rate = 0.k; +// global_parameters->core_pop_rate = 0.k; printed_value = false; } // neuron->L = learning_signal * neuron->w_fb; From 81b79ae6f9c0db27bacccca061c9b53c5ca538dd Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 17:18:13 +0100 Subject: [PATCH 081/123] return of the rate reg --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 3 ++- .../plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 5291c057d89..74af4b050e6 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -377,7 +377,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // / (accum_time // * (accum)syn_dynamics_neurons_in_partition)) // - global_parameters->core_target_rate; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->core_pop_rate; + REAL reg_learning_signal = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 08f2ea49860..34430b80c37 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -315,7 +315,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } // Calculate regularisation error - REAL reg_error = 0.k;//(global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike + REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an 
inverse multiply - too expensive to do divide on every spike // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); From 7f41581f2345b5190719529d5982fca40fcd2160 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 17:32:03 +0100 Subject: [PATCH 082/123] return of the rate reg --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 2 +- .../plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 74af4b050e6..372fc97f5da 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -377,7 +377,7 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // / (accum_time // * (accum)syn_dynamics_neurons_in_partition)) // - global_parameters->core_target_rate; - REAL reg_learning_signal = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; + REAL reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 34430b80c37..363d1fd9af2 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -315,7 +315,7 @@ static inline final_state_t 
eprop_plasticity_update(update_state_t current_state } // Calculate regularisation error - REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike + REAL reg_error = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); From ea7420c1658b4ea2eb4fb82140865d0703f2d345 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Mon, 12 Oct 2020 18:12:42 +0100 Subject: [PATCH 083/123] return of the rate reg --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 3 ++- .../plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 372fc97f5da..94c10dd6e3f 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -377,7 +377,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // / (accum_time // * (accum)syn_dynamics_neurons_in_partition)) // - global_parameters->core_target_rate; - REAL reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); +// REAL 
reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); + REAL reg_learning_signal = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 363d1fd9af2..a8a08a44f7b 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -317,6 +317,8 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // Calculate regularisation error REAL reg_error = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike + +// REAL reg_error = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); From 14791a30260eb9c2345de88fa1709fcb54576f49 Mon Sep 17 00:00:00 2001 From: such-a-git Date: Tue, 13 Oct 2020 13:57:35 +0100 Subject: [PATCH 084/123] removed printing for small windows and long runs --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.c | 6 +++--- 
.../plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c index 75c0524bc17..5f11cf2abde 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c @@ -145,9 +145,9 @@ state_t neuron_model_state_update( - global_parameters->core_target_rate; // io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); if (time % neuron->window_size == neuron->window_size - 1 & !printed_value){ //hardcoded time of reset - io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\tL:%k, v_mem:%k\n", - time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate, - learning_signal-v_mem_error, v_mem_error); +// io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\tL:%k, v_mem:%k\n", +// time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate, +// learning_signal-v_mem_error, v_mem_error); // global_parameters->core_pop_rate = 0.k; // REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) // / (accum)syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index a8a08a44f7b..5e50147f70e 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -321,6 +321,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // REAL reg_error = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - 
global_parameters->core_target_rate; // io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); +// io_printf(IO_BUF, "reg_error before: %k\n", reg_error); // Return final synaptic word and weight return synapse_structure_get_final_state(current_state, reg_error); From bafd5d062f6e86a4ec00dc6fff970bce856e35f4 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 22 Oct 2020 11:44:40 +0100 Subject: [PATCH 085/123] Delete unneeded file --- neural_modelling/src/core_pop_rate | 113 ----------------------------- 1 file changed, 113 deletions(-) delete mode 100644 neural_modelling/src/core_pop_rate diff --git a/neural_modelling/src/core_pop_rate b/neural_modelling/src/core_pop_rate deleted file mode 100644 index e50435b2880..00000000000 --- a/neural_modelling/src/core_pop_rate +++ /dev/null @@ -1,113 +0,0 @@ -neuron/implementations/neuron_impl_left_right_readout.h:152: validate_mars_kiss64_seed(global_parameters->kiss_seed); -neuron/implementations/neuron_impl_left_right_readout.h:208: io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); -neuron/implementations/neuron_impl_left_right_readout.h:209: io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); -neuron/implementations/neuron_impl_left_right_readout.h:210: io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); -neuron/implementations/neuron_impl_left_right_readout.h:211: io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); -neuron/implementations/neuron_impl_left_right_readout.h:212: io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); -neuron/implementations/neuron_impl_left_right_readout.h:213:// io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); -neuron/implementations/neuron_impl_left_right_readout.h:214: io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); 
-neuron/implementations/neuron_impl_left_right_readout.h:215: io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); -neuron/implementations/neuron_impl_left_right_readout.h:216: io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); -neuron/implementations/neuron_impl_left_right_readout.h:217: io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); -neuron/implementations/neuron_impl_left_right_readout.h:218: io_printf(IO_BUF, "poisson key: %u \n\n", global_parameters->p_key); -neuron/implementations/neuron_impl_left_right_readout.h:219: io_printf(IO_BUF, "poisson pop size: %u \n\n", global_parameters->p_pop_size); -neuron/implementations/neuron_impl_left_right_readout.h:255:// global_parameters->target_V[target_ind]); -neuron/implementations/neuron_impl_left_right_readout.h:313: global_parameters->readout_V_0 = result; -neuron/implementations/neuron_impl_left_right_readout.h:324: global_parameters->readout_V_1 = result; -neuron/implementations/neuron_impl_left_right_readout.h:327:// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_0; -neuron/implementations/neuron_impl_left_right_readout.h:330:// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_1; -neuron/implementations/neuron_impl_left_right_readout.h:340: global_parameters->cross_entropy = 0.k; -neuron/implementations/neuron_impl_left_right_readout.h:341: global_parameters->mean_0 = 0.k; -neuron/implementations/neuron_impl_left_right_readout.h:342: global_parameters->mean_1 = 0.k; -neuron/implementations/neuron_impl_left_right_readout.h:346: key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { -neuron/implementations/neuron_impl_left_right_readout.h:363: REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 -neuron/implementations/neuron_impl_left_right_readout.h:372: payload = global_parameters->rate_on; 
-neuron/implementations/neuron_impl_left_right_readout.h:374: for (int j = current_cue_direction*global_parameters->p_pop_size; -neuron/implementations/neuron_impl_left_right_readout.h:375: j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ -neuron/implementations/neuron_impl_left_right_readout.h:376: spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); -neuron/implementations/neuron_impl_left_right_readout.h:383: payload = global_parameters->rate_off; -neuron/implementations/neuron_impl_left_right_readout.h:385: for (int j = current_cue_direction*global_parameters->p_pop_size; -neuron/implementations/neuron_impl_left_right_readout.h:386: j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ -neuron/implementations/neuron_impl_left_right_readout.h:387: spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); -neuron/implementations/neuron_impl_left_right_readout.h:414: payload = global_parameters->rate_on; -neuron/implementations/neuron_impl_left_right_readout.h:416: for (int j = 2*global_parameters->p_pop_size; -neuron/implementations/neuron_impl_left_right_readout.h:417: j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ -neuron/implementations/neuron_impl_left_right_readout.h:418: spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); -neuron/implementations/neuron_impl_left_right_readout.h:426:// io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); -neuron/implementations/neuron_impl_left_right_readout.h:430:// io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); -neuron/implementations/neuron_impl_left_right_readout.h:431:// global_parameters->mean_0 += global_parameters->readout_V_0; -neuron/implementations/neuron_impl_left_right_readout.h:432:// global_parameters->mean_1 += 
global_parameters->readout_V_1; -neuron/implementations/neuron_impl_left_right_readout.h:435:// accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); -neuron/implementations/neuron_impl_left_right_readout.h:436:// accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); -neuron/implementations/neuron_impl_left_right_readout.h:437: accum exp_0 = expk(global_parameters->readout_V_0 * 0.1k); -neuron/implementations/neuron_impl_left_right_readout.h:438: accum exp_1 = expk(global_parameters->readout_V_1 * 0.1k); -neuron/implementations/neuron_impl_left_right_readout.h:441: if (global_parameters->readout_V_0 > global_parameters->readout_V_1){ -neuron/implementations/neuron_impl_left_right_readout.h:455:// io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); -neuron/implementations/neuron_impl_left_right_readout.h:458: global_parameters->cross_entropy = -logk(softmax_1); -neuron/implementations/neuron_impl_left_right_readout.h:462: global_parameters->cross_entropy = -logk(softmax_0); -neuron/implementations/neuron_impl_left_right_readout.h:472:// learning_signal = global_parameters->cross_entropy; -neuron/implementations/neuron_impl_left_right_readout.h:482: payload = global_parameters->rate_off; -neuron/implementations/neuron_impl_left_right_readout.h:483: for (int j = 2*global_parameters->p_pop_size; -neuron/implementations/neuron_impl_left_right_readout.h:484: j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ -neuron/implementations/neuron_impl_left_right_readout.h:485: spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); -neuron/implementations/neuron_impl_left_right_readout.h:491:// learning_signal = global_parameters->cross_entropy; -neuron/implementations/neuron_impl_left_right_readout.h:493: recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->cross_entropy; 
-neuron/implementations/neuron_impl_store_recall_readout.h:139: validate_mars_kiss64_seed(global_parameters->kiss_seed); -neuron/implementations/neuron_impl_store_recall_readout.h:195: io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); -neuron/implementations/neuron_impl_store_recall_readout.h:196: io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); -neuron/implementations/neuron_impl_store_recall_readout.h:197: io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); -neuron/implementations/neuron_impl_store_recall_readout.h:198: io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); -neuron/implementations/neuron_impl_store_recall_readout.h:199: io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); -neuron/implementations/neuron_impl_store_recall_readout.h:200: io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); -neuron/implementations/neuron_impl_store_recall_readout.h:201: io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); -neuron/implementations/neuron_impl_store_recall_readout.h:202: io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); -neuron/implementations/neuron_impl_store_recall_readout.h:203: io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); -neuron/implementations/neuron_impl_store_recall_readout.h:204: io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); -neuron/implementations/neuron_impl_store_recall_readout.h:205: io_printf(IO_BUF, "poisson key: %k \n\n", global_parameters->p_key); -neuron/implementations/neuron_impl_store_recall_readout.h:206: io_printf(IO_BUF, "poisson pop size: %k \n\n", global_parameters->p_pop_size); -neuron/implementations/neuron_impl_store_recall_readout.h:241: REAL random_number = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); -neuron/implementations/neuron_impl_store_recall_readout.h:242: if (random_number < 
global_parameters->prob_command){ -neuron/implementations/neuron_impl_store_recall_readout.h:246: REAL switch_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); -neuron/implementations/neuron_impl_store_recall_readout.h:259: payload = global_parameters->rate_on; -neuron/implementations/neuron_impl_store_recall_readout.h:262: payload = global_parameters->rate_off; -neuron/implementations/neuron_impl_store_recall_readout.h:264: for (int j = i*global_parameters->p_pop_size; -neuron/implementations/neuron_impl_store_recall_readout.h:265: j < i*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ -neuron/implementations/neuron_impl_store_recall_readout.h:266: spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); -neuron/implementations/neuron_impl_store_recall_readout.h:323: global_parameters->mean_0 == 0; -neuron/implementations/neuron_impl_store_recall_readout.h:324: global_parameters->mean_1 == 0; -neuron/implementations/neuron_impl_store_recall_readout.h:336: global_parameters->readout_V_0 = result; -neuron/implementations/neuron_impl_store_recall_readout.h:347: global_parameters->readout_V_1 = result; -neuron/implementations/neuron_impl_store_recall_readout.h:359: global_parameters->mean_0 += global_parameters->readout_V_0; -neuron/implementations/neuron_impl_store_recall_readout.h:360: global_parameters->mean_1 += global_parameters->readout_V_1; -neuron/implementations/neuron_impl_store_recall_readout.h:361: accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); -neuron/implementations/neuron_impl_store_recall_readout.h:362: accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); -neuron/implementations/neuron_impl_store_recall_readout.h:367: global_parameters->cross_entropy = -logk(softmax_1); -neuron/implementations/neuron_impl_store_recall_readout.h:370: global_parameters->cross_entropy = -logk(softmax_0); -neuron/implementations/neuron_impl_store_recall_readout.h:382:// if 
(global_parameters->cross_entropy < -0.7){ -neuron/implementations/neuron_impl_eprop_adaptive.h:211: global_parameters->core_target_rate = global_parameters->core_target_rate -neuron/implementations/neuron_impl_eprop_adaptive.h:213: global_parameters->core_pop_rate = global_parameters->core_pop_rate -neuron/implementations/neuron_impl_eprop_adaptive.h:240: global_parameters->core_pop_rate = global_parameters->core_pop_rate -neuron/implementations/neuron_impl_eprop_adaptive.h:241: * global_parameters->rate_exp_TC; -neuron/implementations/neuron_impl_eprop_adaptive.h:290:// global_parameters->core_pop_rate; -neuron/implementations/neuron_impl_eprop_adaptive.h:307:// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate -neuron/implementations/neuron_impl_eprop_adaptive.h:316:// // global_parameters->core_target_rate; -neuron/implementations/neuron_impl_eprop_adaptive.h:362:// global_parameters->core_pop_rate; -neuron/implementations/neuron_impl_eprop_adaptive.h:369://// global_parameters->core_target_rate; -neuron/implementations/neuron_impl_eprop_adaptive.h:388: global_parameters->core_pop_rate += 1.0k; -neuron/implementations/neuron_impl_eprop_adaptive.h:464: global_parameters->core_target_rate, global_parameters->core_pop_rate); -neuron/implementations/neuron_impl_sinusoid_readout.h:182:// io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); -neuron/implementations/neuron_impl_sinusoid_readout.h:183:// io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); -neuron/implementations/neuron_impl_sinusoid_readout.h:184:// io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); -neuron/implementations/neuron_impl_sinusoid_readout.h:185:// io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); 
-neuron/implementations/neuron_impl_sinusoid_readout.h:186: io_printf(IO_BUF, "eta: %k \n\n", global_parameters->eta); -neuron/implementations/neuron_impl_sinusoid_readout.h:222:// global_parameters->target_V[target_ind]); -neuron/implementations/neuron_impl_sinusoid_readout.h:282: REAL error = result - global_parameters->target_V[target_ind]; -neuron/implementations/neuron_impl_sinusoid_readout.h:306: global_parameters->target_V[target_ind]; -neuron/implementations/neuron_impl_sinusoid_readout.h:308: - global_parameters->target_V[target_ind]; -neuron/implementations/neuron_impl_sinusoid_readout.h:312:// global_parameters->target_V[target_ind]; -neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c:320: REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; -neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c:318: REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike -neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c:319:// REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike -neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c:320:// io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); -neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c:320: REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; -neuron/models/neuron_model_eprop_adaptive_impl.c:109: REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / 
syn_dynamics_neurons_in_partition; From f5e9a0f333f48a692d9b475482e62fc449733238 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 27 Mar 2023 10:16:55 +0100 Subject: [PATCH 086/123] Rho is calculated already during initialise so no need to do this --- .../src/neuron/models/neuron_model_eprop_adaptive_impl.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 9547dbf9ba7..86975be5684 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -365,7 +365,8 @@ state_t neuron_model_state_update( // neuron->psi = neuron->psi << 10; // REAL rho = neuron->rho;//expk(-1.k / 1500.k); // adpt - REAL rho = (accum)decay_s1615(1.k, neuron->e_to_dt_on_tau_a); + // CHECK: but I think this has already been calculated above... ? + REAL rho = neuron->e_to_dt_on_tau_a; // decay_s1615(1.k, neuron->e_to_dt_on_tau_a); // REAL rho_3 = (accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); // io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); @@ -474,7 +475,7 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].el_a = 0.k; neuron->syn_state[syn_ind].e_bar = 0.k; } - // ****************************************************************** + // ****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** neuron->syn_state[syn_ind].z_bar = @@ -489,7 +490,7 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].el_a = (neuron->psi * neuron->syn_state[syn_ind].z_bar) + - (rho - neuron->psi * neuron->beta) * + (rho - neuron->psi * neuron->beta) * neuron->syn_state[syn_ind].el_a; // (rho) * 
neuron->syn_state[syn_ind].el_a; From d46c4affb357e4e54d2d14eaf0ee7dec7412cd0f Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 3 Apr 2023 13:43:47 +0100 Subject: [PATCH 087/123] Fix merge to make basic eprop (and STDP) work --- .../Makefile | 1 - neural_modelling/src/neuron/c_main.c | 2 + .../neuron_impl_eprop_adaptive.h | 20 +- .../models/neuron_model_eprop_adaptive_impl.h | 28 ++- .../src/neuron/models/neuron_model_lif_impl.h | 1 + neural_modelling/src/neuron/neuron.c | 16 +- .../src/neuron/neuron_recording.h | 1 + .../neuron/plasticity/stdp/stdp_typedefs.h | 1 + .../synapse_dynamics_eprop_adaptive_impl.c | 195 +++++++++--------- .../synapse_structure_weight_eprop_impl.h | 103 +++++++++ .../synapse_structure_weight_impl.h | 4 +- .../timing_dependence/timing_eprop_impl.h | 12 +- .../weight_dependence/weight_eprop_reg_impl.c | 109 +++++++--- .../weight_dependence/weight_eprop_reg_impl.h | 64 +++--- neural_modelling/src/neuron/synapses.c | 16 +- spynnaker/pyNN/extra_models/__init__.py | 7 +- .../connectors/abstract_connector.py | 12 ++ .../models/neuron/builds/eprop_adaptive.py | 4 +- .../stdp/timing_dependence/__init__.py | 8 +- .../timing_dependence_eprop.py | 44 ++-- .../stdp/weight_dependence/__init__.py | 4 +- .../weight_dependence_eprop_reg.py | 32 +-- .../synapse_dynamics_static.py | 10 +- .../synapse_dynamics/synapse_dynamics_stdp.py | 3 + .../synapse_type_eprop_adaptive.py | 121 ++++++----- .../pyNN/utilities/neo_buffer_database.py | 2 - spynnaker/pyNN/utilities/neo_csv.py | 3 - 27 files changed, 521 insertions(+), 302 deletions(-) create mode 100644 neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_eprop_impl.h diff --git a/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile index a9e1bed252f..acb735ba52d 100644 --- a/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile +++ 
b/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile @@ -15,7 +15,6 @@ APP = $(notdir $(CURDIR)) -OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_eprop_adaptive_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_eprop_adaptive.h SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c diff --git a/neural_modelling/src/neuron/c_main.c b/neural_modelling/src/neuron/c_main.c index cbb1a411149..0dd56164c6d 100644 --- a/neural_modelling/src/neuron/c_main.c +++ b/neural_modelling/src/neuron/c_main.c @@ -150,6 +150,8 @@ void resume_callback(void) { static inline void process_ring_buffers(void) { uint32_t first_index = synapse_row_get_first_ring_buffer_index( time, synapse_type_index_bits, synapse_delay_mask); +// log_info("process_ring_buffers, synapse_delay_mask is %u first_index %u ring_buffer %d", +// synapse_delay_mask, first_index, ring_buffers[first_index]); neuron_transfer(&ring_buffers[first_index]); // Print the neuron inputs. diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 30e33a0edbd..e4e2321a7b3 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -71,10 +71,10 @@ enum bitfield_recording_indices { //extern uint32_t time; extern REAL learning_signal; //uint32_t neurons_in_pop; -uint32_t syn_dynamics_neurons_in_partition; +uint32_t neuron_impl_neurons_in_partition; //! Array of neuron states -static neuron_t *neuron_array; +neuron_t *neuron_array; //! 
Input states array static input_type_t *input_type_array; @@ -180,7 +180,7 @@ static void neuron_impl_load_neuron_parameters( next, n_neurons); // get number of neurons running on this core for use during execution - syn_dynamics_neurons_in_partition = n_neurons; + neuron_impl_neurons_in_partition = n_neurons; // Read the number of steps per timestep n_steps_per_timestep = address[next++]; @@ -340,7 +340,7 @@ static void neuron_impl_do_timestep_update( for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { - log_info("neuron_index %u time %u ", neuron_index, time); +// log_info("neuron_index %u time %u ", neuron_index, time); // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; @@ -485,7 +485,7 @@ static void neuron_impl_do_timestep_update( // // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; // // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; - log_info("Updating neuron parameters B_t = %k ", B_t); +// log_info("Updating neuron parameters B_t = %k ", B_t); // update neuron parameters state_t result = neuron_model_state_update( @@ -501,12 +501,12 @@ static void neuron_impl_do_timestep_update( //// / ((accum)(time%1300) //// / (1.225k // / (accum_time - // * (accum)syn_dynamics_neurons_in_partition)) + // * (accum)neuron_impl_neurons_in_partition)) // - global_parameters->core_target_rate; - // REAL reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); - // REAL reg_learning_signal = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; + // REAL reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / neuron_impl_neurons_in_partition); + // REAL reg_learning_signal = (global_parameters->core_pop_rate / neuron_impl_neurons_in_partition) - global_parameters->core_target_rate; REAL 
reg_learning_signal = ( - neuron->core_pop_rate / syn_dynamics_neurons_in_partition) - neuron->core_target_rate; + neuron->core_pop_rate / neuron_impl_neurons_in_partition) - neuron->core_target_rate; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, reg_learning_signal); @@ -515,7 +515,7 @@ static void neuron_impl_do_timestep_update( // TODO: there's quite a few divides lurking in this code, it may // be worth looking to see if any of them can be replaced - log_info("Check: voltage %k neuron->B %k time %u", voltage, neuron->B, time); +// log_info("Check: voltage %k neuron->B %k time %u", voltage, neuron->B, time); state_t nu = (voltage - neuron->B)/neuron->B; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 9547dbf9ba7..b6e24bd5ccc 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -33,6 +33,7 @@ extern uint32_t time; //extern global_neuron_params_pointer_t global_parameters; extern uint32_t syn_dynamics_neurons_in_partition; + typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply REAL z_bar_inp; @@ -178,6 +179,8 @@ struct neuron_t { }; +//neuron_t *neuron_array; + //typedef struct global_neuron_params_t { // REAL core_pop_rate; // REAL core_target_rate; @@ -239,9 +242,9 @@ static inline void neuron_model_initialise( state->V_reset = params->V_reset; state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); - log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract %k", - state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, - state->refract_timer, state->V_reset, state->T_refract); +// 
log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract %k", +// state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, +// state->refract_timer, state->V_reset, state->T_refract); // for everything else just copy across for now state->z = params->z; @@ -259,16 +262,16 @@ static inline void neuron_model_initialise( state->window_size = params->window_size; state->number_of_cues = params->number_of_cues; - log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", - state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); +// log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", +// state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); state->core_pop_rate = 0.0k; state->core_target_rate = params->target_rate; state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); state->eta = params->eta; - log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", - state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); +// log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", +// state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; @@ -289,9 +292,14 @@ static inline void lif_neuron_closed_form( REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; +// log_info("alpha %k input %k R_membrane %k V_rest %k", +// alpha, input_this_timestep, neuron->R_membrane, neuron->V_rest); + // update membrane voltage neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) - neuron->z * B_t; // this line achieves reset + +// log_info("neuron->V_membrane is %k neuron_z %k B_t %k", neuron->V_membrane, neuron->z, B_t); } //void neuron_model_set_global_neuron_params( @@ -328,6 +336,8 @@ state_t 
neuron_model_state_update( input_t input_this_timestep = exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; +// log_info("exc input 0 %k exc input 1 %k I_offset %k", exc_input[0], exc_input[1], neuron->I_offset); + lif_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep, B_t); @@ -400,8 +410,8 @@ state_t neuron_model_state_update( // * (accum)syn_dynamics_neurons_in_partition)) // - global_parameters->core_target_rate; - log_info("update learning signal syn_dynamics_neurons_in_partition %u ", - syn_dynamics_neurons_in_partition); +// log_info("update learning signal syn_dynamics_neurons_in_partition %u ", +// syn_dynamics_neurons_in_partition); REAL reg_learning_signal = (neuron->core_pop_rate // make it work for different ts // / ((accum)(time%1300) diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index f71eb8076e8..7dceaa9612f 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -146,6 +146,7 @@ static inline state_t neuron_model_state_update( // If outside of the refractory period if (neuron->refract_timer <= 0) { + REAL total_exc = ZERO; REAL total_inh = ZERO; for (int i=0; i < num_excitatory_inputs; i++) { diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index c7ddefdbc28..91b51823092 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -25,13 +25,13 @@ #include "plasticity/synapse_dynamics.h" #include -// declare spin1_wfi -extern void spin1_wfi(void); - -// Spin1 API ticks - to know when the timer wraps -extern uint ticks; - -#define SPIKE_RECORDING_CHANNEL 0 +//// declare spin1_wfi +//extern void spin1_wfi(void); +// +//// Spin1 API ticks - to know when the timer wraps +//extern uint ticks; +// +//#define SPIKE_RECORDING_CHANNEL 0 //! 
The key to be used for this core (will be ORed with neuron ID) key_t key; //MADE NON STATIC!!! @@ -219,7 +219,7 @@ void neuron_transfer(weight_t *syns) { // EXPORTED uint32_t neuron_index = 0; for (uint32_t n_i = n_neurons_peak; n_i > 0; n_i--) { weight_t value = syns[ring_buffer_index]; - if (value > 0) { + if (value != 0) { if (neuron_index > n_neurons) { log_error("Neuron index %u out of range", neuron_index); rt_error(RTE_SWERR); diff --git a/neural_modelling/src/neuron/neuron_recording.h b/neural_modelling/src/neuron/neuron_recording.h index 3ce1865dd4b..5a513771af2 100644 --- a/neural_modelling/src/neuron/neuron_recording.h +++ b/neural_modelling/src/neuron/neuron_recording.h @@ -201,6 +201,7 @@ static inline void neuron_recording_record(uint32_t time) { bf_info->count += bf_info->increment; } } + } //! \brief sets up state for next recording. diff --git a/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h b/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h index 523f987a986..06cc3b54d77 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h +++ b/neural_modelling/src/neuron/plasticity/stdp/stdp_typedefs.h @@ -45,5 +45,6 @@ //! \brief Multiply an accum by an STDP fixed point and return an accum static inline accum mul_accum_fixed(accum a, int32_t stdp_fixed) { return a * kbits(stdp_fixed << S1615_TO_STDP_RIGHT_SHIFT); +} #endif // _STDP_TYPEDEFS_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 4ef24b77799..d687ad98d49 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -25,25 +25,49 @@ #include "maths.h" #include "post_events.h" +//--------------------------------------- +// Structures +//--------------------------------------- +//! 
\brief The type of history data of pre-events +//! +//! This data is stored in SDRAM in the plastic part of the synaptic matrix +typedef struct { + //! The event time + uint32_t prev_time; + //! The event trace + pre_trace_t prev_trace; +} pre_event_history_t; + +//! The format of the plastic data region of a synaptic row +struct synapse_row_plastic_data_t { + //! The pre-event history + pre_event_history_t history; + //! The per-synapse information + plastic_synapse_t synapses[]; +}; + #include "weight_dependence/weight.h" #include "timing_dependence/timing.h" #include #include #include +// TODO: make work with stdp common? (is this even really STDP?) -#include +//#include #include +//#include -extern neuron_pointer_t neuron_array; -extern global_neuron_params_pointer_t global_parameters; +extern neuron_t *neuron_array; +//extern global_neuron_params_pointer_t global_parameters; -static uint32_t synapse_type_index_bits; -static uint32_t synapse_index_bits; -static uint32_t synapse_index_mask; -static uint32_t synapse_type_index_mask; -static uint32_t synapse_delay_index_type_bits; -static uint32_t synapse_type_mask; +// These are now defined earlier +//static uint32_t synapse_type_index_bits; +//static uint32_t synapse_index_bits; +//static uint32_t synapse_index_mask; +//static uint32_t synapse_type_index_mask; +//static uint32_t synapse_delay_index_type_bits; +//static uint32_t synapse_type_mask; uint32_t num_plastic_pre_synaptic_events = 0; uint32_t plastic_saturation_count = 0; @@ -82,18 +106,23 @@ uint32_t syn_dynamics_neurons_in_partition; uint32_t RECURRENT_SYNAPSE_OFFSET = 100; -//--------------------------------------- -// Structures -//--------------------------------------- -typedef struct { - pre_trace_t prev_trace; - uint32_t prev_time; -} pre_event_history_t; +////--------------------------------------- +//// Structures +////--------------------------------------- +//typedef struct { +// pre_trace_t prev_trace; +// uint32_t prev_time; +//} 
pre_event_history_t; post_event_history_t *post_event_history; /* PRIVATE FUNCTIONS */ +// Mark a value as possibly unused while not using any instructions, guaranteed +#ifndef __use +#define __use(x) do { (void) (x); } while (0) +#endif + //--------------------------------------- // Synapse update loop //--------------------------------------- @@ -177,20 +206,19 @@ static inline pre_event_history_t *plastic_event_history( } void synapse_dynamics_print_plastic_synapses( - address_t plastic_region_address, address_t fixed_region_address, + synapse_row_plastic_data_t *plastic_region_data, + synapse_row_fixed_part_t *fixed_region, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - use(plastic_region_address); - use(fixed_region_address); - use(ring_buffer_to_input_buffer_left_shifts); + __use(plastic_region_data); + __use(fixed_region); + __use(ring_buffer_to_input_buffer_left_shifts); #if LOG_LEVEL >= LOG_DEBUG // Extract separate arrays of weights (from plastic region), // Control words (from fixed region) and number of plastic synapses - plastic_synapse_t *plastic_words = plastic_synapses(plastic_region_address); - const control_t *control_words = - synapse_row_plastic_controls(fixed_region_address); - size_t plastic_synapse = - synapse_row_num_plastic_controls(fixed_region_address); + const plastic_synapse_t *plastic_words = plastic_region_data->synapses; + const control_t *control_words = synapse_row_plastic_controls(fixed_region); + size_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); log_debug("Plastic region %u synapses\n", plastic_synapse); @@ -212,10 +240,10 @@ void synapse_dynamics_print_plastic_synapses( synapses_print_weight( weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", - synapse_row_sparse_delay(control_word, synapse_type_index_bits), + synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), 
synapse_types_get_type_char(synapse_type), synapse_row_sparse_index(control_word, synapse_index_mask), - SYNAPSE_DELAY_MASK, synapse_type_index_bits); + synapse_delay_mask, synapse_type_index_bits); } #endif // LOG_LEVEL >= LOG_DEBUG } @@ -230,7 +258,7 @@ static inline index_t sparse_axonal_delay(uint32_t x) { #endif } -address_t synapse_dynamics_initialise( +bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { // Load timing dependence data @@ -254,35 +282,12 @@ address_t synapse_dynamics_initialise( return NULL; } - uint32_t n_neurons_power_2 = n_neurons; - uint32_t log_n_neurons = 1; - if (n_neurons != 1) { - if (!is_power_of_2(n_neurons)) { - n_neurons_power_2 = next_power_of_2(n_neurons); - } - log_n_neurons = ilog_2(n_neurons_power_2); - } - - uint32_t n_synapse_types_power_2 = n_synapse_types; - if (!is_power_of_2(n_synapse_types)) { - n_synapse_types_power_2 = next_power_of_2(n_synapse_types); - } - uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); - - synapse_type_index_bits = log_n_neurons + log_n_synapse_types; - synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; - synapse_index_bits = log_n_neurons; - synapse_index_mask = (1 << synapse_index_bits) - 1; - synapse_delay_index_type_bits = - SYNAPSE_DELAY_BITS + synapse_type_index_bits; - synapse_type_mask = (1 << log_n_synapse_types) - 1; - - return weight_result; + return true; // weight_result; } -static inline final_state_t eprop_plasticity_update(update_state_t current_state, - REAL delta_w){ +static inline final_state_t eprop_plasticity_update( + update_state_t current_state, REAL delta_w) { // Test weight change // delta_w = -0.1k; @@ -290,16 +295,16 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state int32_t delta_w_int = (int32_t) roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING if (delta_w){ - if (PRINT_PLASTICITY){ - 
io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" -// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" - "\n", - delta_w, delta_w_int -// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) - ); -// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", -// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); - } +// if (PRINT_PLASTICITY){ +// io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" +//// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" +// "\n", +// delta_w, delta_w_int +//// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) +// ); +//// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", +//// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); +// } if (delta_w_int < 0){ current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); @@ -311,11 +316,12 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // if (PRINT_PLASTICITY){ // io_printf(IO_BUF, "delta_w: %k\n", delta_w); // } - current_state = current_state; +// current_state = current_state; // ?? what? 
} // Calculate regularisation error - REAL reg_error = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike +// REAL reg_error = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike + REAL reg_error = neuron_array[0].core_target_rate - (neuron_array[0].core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; @@ -331,20 +337,20 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state bool synapse_dynamics_process_plastic_synapses( - address_t plastic_region_address, address_t fixed_region_address, - weight_t *ring_buffers, uint32_t time) { + synapse_row_plastic_data_t *plastic_region_address, + synapse_row_fixed_part_t *fixed_region, + weight_t *ring_buffers, uint32_t time, uint32_t colour_delay, + bool *write_back) { // Extract separate arrays of plastic synapses (from plastic region), // Control words (from fixed region) and number of plastic synapses - plastic_synapse_t *plastic_words = - plastic_synapses(plastic_region_address); - const control_t *control_words = - synapse_row_plastic_controls(fixed_region_address); - size_t plastic_synapse = - synapse_row_num_plastic_controls(fixed_region_address); + plastic_synapse_t *plastic_words = plastic_region_address->synapses; + const 
control_t *control_words = synapse_row_plastic_controls(fixed_region); + size_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); num_plastic_pre_synaptic_events += plastic_synapse; - // Could maybe have a single z_bar for the entire synaptic row and update it once here for all synaptic words? + // Could maybe have a single z_bar for the entire synaptic row and + // update it once here for all synaptic words? // Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { @@ -356,9 +362,9 @@ bool synapse_dynamics_process_plastic_synapses( // 16-bits of 32-bit fixed synapse so same functions can be used // uint32_t delay_axonal = sparse_axonal_delay(control_word); - uint32_t delay = 1.0k; - uint32_t syn_ind_from_delay = - synapse_row_sparse_delay(control_word, synapse_type_index_bits); + uint32_t delay = 1; // why was this 1.0k? + uint32_t syn_ind_from_delay = synapse_row_sparse_delay( + control_word, synapse_type_index_bits, synapse_delay_mask); // uint32_t delay_dendritic = synapse_row_sparse_delay( // control_word, synapse_type_index_bits); @@ -370,12 +376,13 @@ bool synapse_dynamics_process_plastic_synapses( control_word, synapse_type_index_mask); - int32_t neuron_ind = synapse_row_sparse_index(control_word, synapse_index_mask); + int32_t neuron_ind = synapse_row_sparse_index( + control_word, synapse_index_mask); // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, - if (type==1){ + if (type==1) { // this is a recurrent synapse: add 100 to index to correct array location syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } @@ -384,7 +391,7 @@ bool synapse_dynamics_process_plastic_synapses( update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); - neuron_pointer_t neuron = &neuron_array[neuron_ind]; + neuron_t *neuron = &neuron_array[neuron_ind]; neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024k; // !!!! 
Check what units this is in - same as weight? !!!! // io_printf(IO_BUF, "initial_weight: d%d, k%k, u%u - ", current_state.initial_weight, current_state.initial_weight, current_state.initial_weight); @@ -400,15 +407,15 @@ bool synapse_dynamics_process_plastic_synapses( // } - if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", -// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", - neuron_ind, syn_ind_from_delay, type, - current_state.initial_weight, - neuron->syn_state[syn_ind_from_delay].delta_w, time); - } +// if (PRINT_PLASTICITY){ +//// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", +//// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); +// +// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", +// neuron_ind, syn_ind_from_delay, type, +// current_state.weight, +// neuron->syn_state[syn_ind_from_delay].delta_w, time); +// } // Perform weight update: only if batch time has elapsed @@ -419,6 +426,7 @@ bool synapse_dynamics_process_plastic_synapses( io_printf(IO_BUF, "update_ready=0\n"); } + log_info("delta_w is %k", neuron->syn_state[syn_ind_from_delay].delta_w); // Go through typical weight update process to clip to limits final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); @@ -444,12 +452,12 @@ bool synapse_dynamics_process_plastic_synapses( // Add contribution to synaptic input // Convert into ring buffer offset - uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index_combined( // delay_axonal + delay_dendritic + time, type_index, - synapse_type_index_bits); + synapse_type_index_bits, synapse_delay_mask); - // 
Check for ring buffer saturation + // Check for ring buffer saturation (? - again? - is int16_t correct here now?) int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); @@ -476,6 +484,7 @@ bool synapse_dynamics_process_plastic_synapses( *plastic_words++ = synapse_structure_get_final_synaptic_word(final_state); } + *write_back = true; return true; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_eprop_impl.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_eprop_impl.h new file mode 100644 index 00000000000..a6350b7fce7 --- /dev/null +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_eprop_impl.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2015 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//! \file +//! \brief Synapses just hold weight +#ifndef _SYNAPSE_STRUCUTRE_WEIGHT_EPROP_IMPL_H_ +#define _SYNAPSE_STRUCUTRE_WEIGHT_EPROP_IMPL_H_ + +//--------------------------------------- +// Structures +//--------------------------------------- +//! Plastic synapse types are just weights; +typedef weight_t plastic_synapse_t; + +//! The update state is purely a weight state +typedef weight_state_t update_state_t; + +// The final state is just a weight as this is +//! 
Both the weight and the synaptic word +typedef weight_t final_state_t; + +#include "synapse_structure.h" + +//--------------------------------------- +// Synapse interface functions +//--------------------------------------- +//! \brief Get the update state from the synapse structure +//! \param[in] synaptic_word: The plastic synapse data +//! \param[in] synapse_type: What (supported) type of synapse is this? +//! \return The update state +static inline update_state_t synapse_structure_get_update_state( + plastic_synapse_t synaptic_word, index_t synapse_type) { + return weight_get_initial(synaptic_word, synapse_type); +} + +//--------------------------------------- +//! \brief Get the final state from the update state. +//! \param[in] state: the update state +//! \return the final state +static inline final_state_t synapse_structure_get_final_state( + update_state_t state, REAL reg_error) { + return weight_get_final(state, reg_error); +} + +//--------------------------------------- +//! \brief Get the final weight from the final state +//! \param[in] final_state: the final state +//! \return the final weight +static inline weight_t synapse_structure_get_final_weight( + final_state_t final_state) { + return final_state; +} + +//--------------------------------------- +//! \brief Get the final plastic synapse data from the final state +//! \param[in] final_state: the final state +//! \return the final plastic synapse data, ready to be stored +static inline plastic_synapse_t synapse_structure_get_final_synaptic_word( + final_state_t final_state) { + return final_state; +} + +//--------------------------------------- +//! \brief Create the initial plastic synapse data +//! \param[in] weight: the initial synaptic weight +//! \return the plastic synapse data +static inline plastic_synapse_t synapse_structure_create_synapse( + weight_t weight) { + return weight; +} + +//--------------------------------------- +//! 
\brief Get the current synaptic weight from the plastic synapse data +//! \param[in] synaptic_word: the plastic synapse data +//! \return the current synaptic weight +static inline weight_t synapse_structure_get_weight( + plastic_synapse_t synaptic_word) { + return synaptic_word; +} + +static inline void synapse_structure_decay_weight( + update_state_t *state, uint32_t decay) { + return weight_decay(state, decay); +} + +static inline accum synapse_structure_get_update_weight(update_state_t state) { + return weight_get_update(state); +} + +#endif // _SYNAPSE_STRUCUTRE_WEIGHT_EPROP_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h index cd15c2a3464..269256e61ef 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h @@ -51,8 +51,8 @@ static inline update_state_t synapse_structure_get_update_state( //! \param[in] state: the update state //! \return the final state static inline final_state_t synapse_structure_get_final_state( - update_state_t state, REAL reg_error) { - return weight_get_final(state, reg_error); + update_state_t state) { + return weight_get_final(state); } //--------------------------------------- diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h index f7026b8d761..bb2c5550662 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h @@ -15,8 +15,8 @@ * along with this program. If not, see . 
*/ -#ifndef _TIMING_PAIR_IMPL_H_ -#define _TIMING_PAIR_IMPL_H_ +#ifndef _TIMING_EPROP_IMPL_H_ +#define _TIMING_EPROP_IMPL_H_ //--------------------------------------- // Typedefines @@ -24,7 +24,7 @@ typedef int16_t post_trace_t; typedef int16_t pre_trace_t; -#include +#include #include "timing.h" #include @@ -48,10 +48,10 @@ typedef int16_t pre_trace_t; // Helper macros for looking up decays #define DECAY_LOOKUP_TAU_PLUS(time) \ maths_lut_exponential_decay( \ - time, TAU_PLUS_TIME_SHIFT, TAU_PLUS_SIZE, tau_plus_lookup) + time, tau_plus_lookup) #define DECAY_LOOKUP_TAU_MINUS(time) \ maths_lut_exponential_decay( \ - time, TAU_MINUS_TIME_SHIFT, TAU_MINUS_SIZE, tau_minus_lookup) + time, tau_minus_lookup) //--------------------------------------- // Externals @@ -157,4 +157,4 @@ static inline update_state_t timing_apply_post_spike( } } -#endif // _TIMING_PAIR_IMPL_H_ +#endif // _TIMING_EPROP_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c index e13daf601c5..a74165befd5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c @@ -23,46 +23,101 @@ // Global plasticity parameter data plasticity_weight_region_data_t *plasticity_weight_region_data; +uint32_t *weight_shift; + +//! \brief How the configuration data for additive_one_term is laid out in +//! SDRAM. The layout is an array of these. 
+typedef struct { + accum min_weight; + accum max_weight; + accum a2_plus; + accum a2_minus; + accum reg_rate; +} eprop_one_term_config_t; + //--------------------------------------- // Functions //--------------------------------------- address_t weight_initialise( address_t address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { - use(ring_buffer_to_input_buffer_left_shifts); - - io_printf(IO_BUF, "weight_initialise: starting\n"); - io_printf(IO_BUF, "\teprop_reg weight dependence\n"); - + UNUSED uint32_t *ring_buffer_to_input_buffer_left_shifts) { // Copy plasticity region data from address // **NOTE** this seems somewhat safer than relying on sizeof - int32_t *plasticity_word = (int32_t *) address; - plasticity_weight_region_data = + eprop_one_term_config_t *config = (eprop_one_term_config_t *) address; + + plasticity_weight_region_data_t *dtcm_copy = plasticity_weight_region_data = spin1_malloc(sizeof(plasticity_weight_region_data_t) * n_synapse_types); - if (plasticity_weight_region_data == NULL) { - io_printf(IO_BUF, "Could not initialise weight region data\n"); + if (dtcm_copy == NULL) { + log_error("Could not initialise weight region data"); return NULL; } - for (uint32_t s = 0; s < n_synapse_types; s++) { - plasticity_weight_region_data[s].min_weight = *plasticity_word++; - plasticity_weight_region_data[s].max_weight = *plasticity_word++; - plasticity_weight_region_data[s].reg_rate = kbits(*plasticity_word++); -// plasticity_weight_region_data[s].a2_plus = *plasticity_word++; -// plasticity_weight_region_data[s].a2_minus = *plasticity_word++; + weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); + if (weight_shift == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + + for (uint32_t s = 0; s < n_synapse_types; s++, config++) { + dtcm_copy[s].min_weight = config->min_weight; + dtcm_copy[s].max_weight = config->max_weight; + dtcm_copy[s].a2_plus = config->a2_plus; + 
dtcm_copy[s].a2_minus = config->a2_minus; + dtcm_copy[s].reg_rate = config->reg_rate; - io_printf(IO_BUF, "\tSynapse type %u: Min weight:%d, Max weight:%d, reg_rate: %k \n" -// "A2+:%d, A2-:%d" - , - s, plasticity_weight_region_data[s].min_weight, - plasticity_weight_region_data[s].max_weight, - plasticity_weight_region_data[s].reg_rate -// plasticity_weight_region_data[s].a2_plus, -// plasticity_weight_region_data[s].a2_minus - ); + // Copy weight shift + weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; + + log_debug("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k reg_rate:%k", + s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, + dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, + dtcm_copy[s].reg_rate); } - io_printf(IO_BUF, "weight_initialise: completed successfully\n"); // Return end address of region - return (address_t) plasticity_word; + return (address_t) config; } + +////--------------------------------------- +//// Functions +////--------------------------------------- +//address_t weight_initialise( +// address_t address, uint32_t n_synapse_types, +// uint32_t *ring_buffer_to_input_buffer_left_shifts) { +// use(ring_buffer_to_input_buffer_left_shifts); +// +// io_printf(IO_BUF, "weight_initialise: starting\n"); +// io_printf(IO_BUF, "\teprop_reg weight dependence\n"); +// +// // Copy plasticity region data from address +// // **NOTE** this seems somewhat safer than relying on sizeof +// int32_t *plasticity_word = (int32_t *) address; +// plasticity_weight_region_data = +// spin1_malloc(sizeof(plasticity_weight_region_data_t) * n_synapse_types); +// if (plasticity_weight_region_data == NULL) { +// io_printf(IO_BUF, "Could not initialise weight region data\n"); +// return NULL; +// } +// for (uint32_t s = 0; s < n_synapse_types; s++) { +// plasticity_weight_region_data[s].min_weight = *plasticity_word++; +// plasticity_weight_region_data[s].max_weight = *plasticity_word++; +// plasticity_weight_region_data[s].reg_rate = 
kbits(*plasticity_word++); +// +//// plasticity_weight_region_data[s].a2_plus = *plasticity_word++; +//// plasticity_weight_region_data[s].a2_minus = *plasticity_word++; +// +// io_printf(IO_BUF, "\tSynapse type %u: Min weight:%d, Max weight:%d, reg_rate: %k \n" +//// "A2+:%d, A2-:%d" +// , +// s, plasticity_weight_region_data[s].min_weight, +// plasticity_weight_region_data[s].max_weight, +// plasticity_weight_region_data[s].reg_rate +//// plasticity_weight_region_data[s].a2_plus, +//// plasticity_weight_region_data[s].a2_minus +// ); +// } +// io_printf(IO_BUF, "weight_initialise: completed successfully\n"); +// +// // Return end address of region +// return (address_t) plasticity_word; +//} diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index e867308c216..e996915b589 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -15,8 +15,8 @@ * along with this program. If not, see . 
*/ -#ifndef _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ -#define _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ +#ifndef _WEIGHT_EPROPREG_ONE_TERM_IMPL_H_ +#define _WEIGHT_EPROPREG_ONE_TERM_IMPL_H_ // Include generic plasticity maths functions #include @@ -29,18 +29,18 @@ // Structures //--------------------------------------- typedef struct { - int32_t min_weight; - int32_t max_weight; + accum min_weight; + accum max_weight; + accum a2_plus; + accum a2_minus; + REAL reg_rate; -// int32_t a2_plus; -// int32_t a2_minus; } plasticity_weight_region_data_t; typedef struct { - int32_t initial_weight; + accum weight; - int32_t a2_plus; - int32_t a2_minus; + uint32_t weight_shift; const plasticity_weight_region_data_t *weight_region; } weight_state_t; @@ -50,17 +50,21 @@ typedef struct { //--------------------------------------- // Externals //--------------------------------------- -extern plasticity_weight_region_data_t *plasticity_weight_region_data; +//extern plasticity_weight_region_data_t *plasticity_weight_region_data; //--------------------------------------- -// STDP weight dependance functions +// STDP weight dependence functions //--------------------------------------- static inline weight_state_t weight_get_initial( weight_t weight, index_t synapse_type) { + extern plasticity_weight_region_data_t *plasticity_weight_region_data; + extern uint32_t *weight_shift; + + accum s1615_weight = kbits(weight << weight_shift[synapse_type]); + return (weight_state_t) { - .initial_weight = (int32_t) weight, - .a2_plus = 0, - .a2_minus = 0, + .weight = s1615_weight, + .weight_shift = weight_shift[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -73,18 +77,25 @@ static inline weight_state_t weight_one_term_apply_depression( if (PRINT_PLASTICITY){ io_printf(IO_BUF, "depressing: %d\n", a2_minus); } - state.a2_minus += a2_minus; + state.weight -= mul_accum_fixed(state.weight_region->a2_minus, a2_minus); + state.weight = kbits(MAX(bitsk(state.weight), 
bitsk(state.weight_region->min_weight))); return state; +// state.a2_minus += a2_minus; +// return state; } //--------------------------------------- static inline weight_state_t weight_one_term_apply_potentiation( weight_state_t state, int32_t a2_plus) { + if (PRINT_PLASTICITY){ io_printf(IO_BUF, "potentiating: %d\n", a2_plus); } - state.a2_plus += a2_plus; + state.weight += mul_accum_fixed(state.weight_region->a2_plus, a2_plus); + state.weight = kbits(MIN(bitsk(state.weight), bitsk(state.weight_region->max_weight))); return state; +// state.a2_plus += a2_plus; +// return state; } //--------------------------------------- @@ -98,11 +109,12 @@ static inline weight_t weight_get_final(weight_state_t new_state, // new_state.a2_minus, new_state.weight_region->a2_minus); // Apply eprop plasticity updates to initial weight - int32_t new_weight = - new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; - int32_t reg_weight = new_weight; - int32_t reg_change = 0; - REAL reg_boundary = 1k; + accum new_weight = bitsk(new_state.weight) >> new_state.weight_shift; +// int32_t new_weight = +// new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; + accum reg_weight = new_weight; + accum reg_change = 0.0k; + REAL reg_boundary = 1.0k; // Calculate regularisation if (new_state.weight_region->reg_rate > 0.0k && (reg_error > reg_boundary || reg_error < -reg_boundary)){ // if reg rate is zero or error small, regularisation is turned off @@ -119,15 +131,15 @@ static inline weight_t weight_get_final(weight_state_t new_state, if (PRINT_PLASTICITY){ io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, max:%d", reg_weight, reg_change, new_state.weight_region->max_weight); } - // Clamp new weight to bounds - reg_weight = MIN(new_state.weight_region->max_weight, - MAX(reg_weight, new_state.weight_region->min_weight)); + // Clamp new weight to bounds (not sure this is needed now?) 
+// reg_weight = MIN(new_state.weight_region->max_weight, +// MAX(reg_weight, new_state.weight_region->min_weight)); if (PRINT_PLASTICITY){ io_printf(IO_BUF, "\told_weight:%d, a2+:%d, a2-:%d, " // "scaled a2+:%d, scaled a2-:%d," " new_weight:%d, reg_weight:%d, reg_l_rate:%k, reg_error:%k\n", - new_state.initial_weight, new_state.a2_plus, new_state.a2_minus, + new_state.weight, new_state.weight_region->a2_plus, new_state.weight_region->a2_minus, // scaled_a2_plus, scaled_a2_minus, new_weight, reg_weight, new_state.weight_region->reg_rate, reg_error); } @@ -135,4 +147,4 @@ static inline weight_t weight_get_final(weight_state_t new_state, return (weight_t) reg_weight; } -#endif // _WEIGHT_ADDITIVE_ONE_TERM_IMPL_H_ +#endif // _WEIGHT_EPROPREG_ONE_TERM_IMPL_H_ diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 4f473749bc5..ebf26474900 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -24,7 +24,7 @@ #include #include #include -#include "models/neuron_model_eprop_adaptive_impl.h" +//#include "models/neuron_model_eprop_adaptive_impl.h" //! if using profiler import profiler tags #ifdef PROFILER_ENABLED @@ -33,7 +33,7 @@ //! Globals required for synapse benchmarking to work. uint32_t num_fixed_pre_synaptic_events = 0; -//extern neuron_pointer_t neuron_array; +//extern neuron_t *neuron_array; //! The number of neurons static uint32_t n_neurons; @@ -239,6 +239,10 @@ static inline bool process_fixed_synapses( // Add weight to current ring buffer value int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // switch to saturated arithmetic to avoid complicated saturation check, will it check saturation at both ends? 
+// int32_t test = -22; +// log_info("Check weight: %d accumulation %d test %d buffer %d", weight, accumulation, test, +// ring_buffers[ring_buffer_index]); + // If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) // **NOTE** 0x10000 can be expressed as an ARM literal, // but 0xFFFF cannot. Therefore, we use (0x10000 - 1) @@ -308,16 +312,16 @@ bool synapses_initialise( synapse_index_mask = (1 << synapse_index_bits) - 1; synapse_type_bits = log_n_synapse_types; synapse_type_mask = (1 << log_n_synapse_types) - 1; - synapse_delay_bits = log_max_delay; + synapse_delay_bits = 1; // log_max_delay; synapse_delay_mask = (1 << synapse_delay_bits) - 1; synapse_delay_mask_shifted = synapse_delay_mask << synapse_type_index_bits; n_neurons_peak = 1 << log_n_neurons; -// uint32_t n_ring_buffer_bits = -// log_n_neurons + log_n_synapse_types + synapse_delay_bits; // synapse_delay_bits = 1? uint32_t n_ring_buffer_bits = - log_n_neurons + log_n_synapse_types + 1; // synapse_delay_bits = 1? + log_n_neurons + log_n_synapse_types + synapse_delay_bits; // synapse_delay_bits = 1? +// uint32_t n_ring_buffer_bits = +// log_n_neurons + log_n_synapse_types + 1; // synapse_delay_bits = 1? 
ring_buffer_size = 1 << (n_ring_buffer_bits); ring_buffer_mask = ring_buffer_size - 1; diff --git a/spynnaker/pyNN/extra_models/__init__.py b/spynnaker/pyNN/extra_models/__init__.py index 7bc61e373ca..031a1ec34d5 100644 --- a/spynnaker/pyNN/extra_models/__init__.py +++ b/spynnaker/pyNN/extra_models/__init__.py @@ -20,7 +20,9 @@ from spynnaker.pyNN.models.neuron.synapse_dynamics import ( SynapseDynamicsNeuromodulation as Neuromodulation) from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence import ( - WeightDependenceAdditiveTriplet) + WeightDependenceAdditiveTriplet, WeightDependenceEpropReg) +from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence import ( + TimingDependenceEprop) from spynnaker.pyNN.models.neuron.builds import ( IFCondExpStoc, IFCurrDelta as IFCurDelta, @@ -47,6 +49,9 @@ 'PfisterSpikeTriplet', 'SpikeNearestPairRule', 'RecurrentRule', 'Vogels2011Rule', + # eprop plastic stuff + 'TimingDependenceEprop', + 'WeightDependenceEprop', # Variable rate Poisson 'SpikeSourcePoissonVariable'] diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 1a8cd2e8b40..0f09260b0da 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -143,6 +143,8 @@ def _get_delay_maximum(self, delays, n_connections, synapse_info): return numpy.max(_expr_context.eval(delays, d=d)) elif numpy.isscalar(delays): return delays + elif hasattr(delays, "__getitem__"): + return numpy.max(delays) raise SpynnakerException(f"Unrecognised delay format: {type(delays)}") @abstractmethod @@ -177,6 +179,8 @@ def get_delay_variance(self, delays, synapse_info): return numpy.var(_expr_context.eval(delays, d=d)) elif numpy.isscalar(delays): return 0.0 + elif hasattr(delays, "__getitem__"): + return numpy.var(delays) raise 
SpynnakerException("Unrecognised delay format") def _get_n_connections_from_pre_vertex_with_delay_maximum( @@ -266,6 +270,8 @@ def get_weight_mean(self, weights, synapse_info): return numpy.mean(_expr_context.eval(weights, d=d)) elif numpy.isscalar(weights): return abs(weights) + elif hasattr(weights, "__getitem__"): + return numpy.mean(weights) raise SpynnakerException("Unrecognised weight format") def _get_weight_maximum(self, weights, n_connections, synapse_info): @@ -297,6 +303,8 @@ def _get_weight_maximum(self, weights, n_connections, synapse_info): return numpy.max(_expr_context.eval(weights, d=d)) elif numpy.isscalar(weights): return abs(weights) + elif hasattr(weights, "__getitem__"): + return numpy.amax(numpy.abs(weights)) raise SpynnakerException("Unrecognised weight format") @abstractmethod @@ -321,6 +329,8 @@ def get_weight_variance(self, weights, synapse_info): return numpy.var(_expr_context.eval(weights, d=d)) elif numpy.isscalar(weights): return 0.0 + elif hasattr(weights, "__getitem__"): + return numpy.var(weights) raise SpynnakerException("Unrecognised weight format") def _expand_distances(self, d_expression): @@ -418,6 +428,8 @@ def _generate_values( return values(d) elif numpy.isscalar(values): return numpy.repeat([values], n_connections).astype("float64") + elif hasattr(values, "__getitem__"): + return numpy.array(values).astype("float64") raise SpynnakerException("Unrecognised values {}".format(values)) def _generate_weights( diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index 7dbb6e83b91..aece63d8fd0 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -39,7 +39,7 @@ def __init__( tau_refrac=5.0, i_offset=0.0, v=0.0, psi=0.0, #synapse type params - tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, + # tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, isyn_exc=0.0, 
isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, # Regularisation params @@ -73,7 +73,7 @@ def __init__( ) synapse_type = SynapseTypeEPropAdaptive( - tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + # tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) input_type = InputTypeCurrent() diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py index cb226ab1f18..cb129b6b76b 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/__init__.py @@ -20,11 +20,11 @@ from .timing_dependence_spike_nearest_pair import ( TimingDependenceSpikeNearestPair) from .timing_dependence_vogels_2011 import TimingDependenceVogels2011 -# from .timing_dependence_eprop import TimingDependenceEprop +from .timing_dependence_eprop import TimingDependenceEprop __all__ = [ "AbstractTimingDependence", "TimingDependenceSpikePair", "TimingDependencePfisterSpikeTriplet", "TimingDependenceRecurrent", - "TimingDependenceSpikeNearestPair", "TimingDependenceVogels2011"] # , - # "TimingDependenceEprop" -# ] + "TimingDependenceSpikeNearestPair", "TimingDependenceVogels2011", + "TimingDependenceEprop" +] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py index 6e0376c6573..e80a9c4e8ff 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py @@ -15,8 +15,8 @@ import logging from spinn_utilities.overrides import overrides -from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( - plasticity_helpers) +# from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( +# 
plasticity_helpers) from .abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) @@ -32,15 +32,13 @@ class TimingDependenceEprop(AbstractTimingDependence): __slots__ = [ "__synapse_structure", -# "__tau_minus", -# "__tau_minus_last_entry", -# "__tau_plus", -# "__tau_plus_last_entry" - ] + "__a_plus", + "__a_minus"] - def __init__(self): #, tau_plus=20.0, tau_minus=20.0): -# self.__tau_plus = tau_plus -# self.__tau_minus = tau_minus + def __init__(self, A_plus=0.01, A_minus=0.01): + + self.__a_plus = A_plus + self.__a_minus = A_minus self.__synapse_structure = SynapseStructureWeightOnly() @@ -48,13 +46,21 @@ def __init__(self): #, tau_plus=20.0, tau_minus=20.0): # self.__tau_plus_last_entry = None # self.__tau_minus_last_entry = None -# @property -# def tau_plus(self): -# return self.__tau_plus -# -# @property -# def tau_minus(self): -# return self.__tau_minus + @property + def A_plus(self): + return self.__a_plus + + @A_plus.setter + def A_plus(self, new_value): + self.__a_plus = new_value + + @property + def A_minus(self): + return self.__a_minus + + @A_minus.setter + def A_minus(self, new_value): + self.__a_minus = new_value @overrides(AbstractTimingDependence.is_same_as) def is_same_as(self, timing_dependence): @@ -83,8 +89,8 @@ def n_weight_terms(self): return 1 @overrides(AbstractTimingDependence.write_parameters) - def write_parameters(self, spec, machine_time_step, weight_scales): - + def write_parameters( + self, spec, global_weight_scale, synapse_weight_scales): # There are currently no parameters to write for this rule pass diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py index 657ae499790..d3fc03c4c2f 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py +++ 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/__init__.py @@ -17,8 +17,8 @@ from .weight_dependence_additive import WeightDependenceAdditive from .weight_dependence_multiplicative import WeightDependenceMultiplicative from .weight_dependence_additive_triplet import WeightDependenceAdditiveTriplet -# from .weight_dependence_eprop_reg import WeightDependenceEpropReg +from .weight_dependence_eprop_reg import WeightDependenceEpropReg __all__ = ["AbstractHasAPlusAMinus", "AbstractWeightDependence", "WeightDependenceAdditive", "WeightDependenceMultiplicative", - "WeightDependenceAdditiveTriplet"] # , "WeightDependenceEpropReg"] + "WeightDependenceAdditiveTriplet", "WeightDependenceEpropReg"] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py index 4326454b06d..6f066ab193f 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py @@ -19,6 +19,7 @@ from .abstract_weight_dependence import AbstractWeightDependence +# TODO: this doesn't have Aplus and Aminus? 
class WeightDependenceEpropReg( AbstractHasAPlusAMinus, AbstractWeightDependence): __slots__ = [ @@ -27,7 +28,7 @@ class WeightDependenceEpropReg( "__reg_rate"] def __init__(self, w_min=0.0, w_max=1.0, reg_rate=0.0): - super(WeightDependenceEpropReg, self).__init__() + super().__init__() self.__w_min = w_min self.__w_max = w_max self.__reg_rate = reg_rate @@ -39,7 +40,7 @@ def w_min(self): @property def w_max(self): return self.__w_max - + @property def reg_rate(self): return self.__reg_rate @@ -64,29 +65,34 @@ def get_parameters_sdram_usage_in_bytes( self, n_synapse_types, n_weight_terms): if n_weight_terms != 1: raise NotImplementedError( - "Multiplicative weight dependence only supports single terms") + "Eprop_reg weight dependence only supports single terms") - return (3 # Number of 32-bit parameters + return (5 # Number of 32-bit parameters * 4) * n_synapse_types @overrides(AbstractWeightDependence.write_parameters) def write_parameters( - self, spec, machine_time_step, weight_scales, n_weight_terms): + self, spec, global_weight_scale, synapse_weight_scales, + n_weight_terms): if n_weight_terms != 1: raise NotImplementedError( - "Multiplicative weight dependence only supports single terms") + "Eprop_reg weight dependence only supports single terms") # Loop through each synapse type's weight scale - for w in weight_scales: + for w in synapse_weight_scales: spec.write_value( - data=int(round(self.__w_min * w)), data_type=DataType.INT32) + data=self.__w_min * global_weight_scale, + data_type=DataType.S1615) spec.write_value( - data=int(round(self.__w_max * w)), data_type=DataType.INT32) + data=self.__w_max * global_weight_scale, + data_type=DataType.S1615) -# spec.write_value( -# data=int(round(self.A_plus * w)), data_type=DataType.INT32) -# spec.write_value( -# data=int(round(self.A_minus * w)), data_type=DataType.INT32) + spec.write_value( + data=self.A_plus * global_weight_scale, + data_type=DataType.S1615) + spec.write_value( + data=self.A_minus * 
global_weight_scale, + data_type=DataType.S1615) spec.write_value(self.__reg_rate, data_type=DataType.S1615) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index e998f30ac3c..33981a32a11 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -103,12 +103,12 @@ def get_static_synaptic_data( n_synapse_type_bits = get_n_bits(n_synapse_types) fixed_fixed = ( - # ((numpy.rint(connections["weight"]).astype("uint16") & - # 0xFFFF).astype("uint32") << 16) | + ((numpy.rint(connections["weight"]).astype("uint16") & + 0xFFFF).astype("uint32") << 16) | # ((connections["delay"].astype("uint32") & 0xFF) << - # master is commented bit below (branch commented above) - ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & - 0xFFFF) << 16) | + # master is commented bit below (branch commented above) + # ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & + # 0xFFFF) << 16) | (connections["delay"].astype("uint32") << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype( diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index d73794b2744..b502e17f8f7 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -346,6 +346,9 @@ def get_plastic_synaptic_data( n_neuron_id_bits = get_n_bits(max_atoms_per_core) neuron_id_mask = (1 << n_neuron_id_bits) - 1 + dendritic_delays = ( + connections["delay"] * self.__dendritic_delay_fraction) + # Get the fixed data fixed_plastic = ( ((dendritic_delays.astype("uint16") & 0xFF) << diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py 
b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index 34033e196d0..5e72e6724cf 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -6,15 +6,15 @@ from spynnaker.pyNN.utilities.struct import Struct from spynnaker.pyNN.data import SpynnakerDataView -TAU_SYN_E = 'tau_syn_E' -TAU_SYN_E2 = 'tau_syn_E2' -TAU_SYN_I = 'tau_syn_I' -TAU_SYN_I2 = 'tau_syn_I2' +# TAU_SYN_E = 'tau_syn_E' +# TAU_SYN_E2 = 'tau_syn_E2' +# TAU_SYN_I = 'tau_syn_I' +# TAU_SYN_I2 = 'tau_syn_I2' ISYN_EXC = "isyn_exc" ISYN_EXC2 = "isyn_exc2" ISYN_INH = "isyn_inh" ISYN_INH2 = "isyn_inh2" -TIMESTEP_MS = "timestep_ms" +# TIMESTEP_MS = "timestep_ms" # UNITS = { # TAU_SYN_E: "mV", @@ -28,38 +28,38 @@ class SynapseTypeEPropAdaptive(AbstractSynapseType): __slots__ = [ - "_tau_syn_E", - "_tau_syn_E2", - "_tau_syn_I", - "_tau_syn_I2", + # "_tau_syn_E", + # "_tau_syn_E2", + # "_tau_syn_I", + # "_tau_syn_I2", "_isyn_exc", "_isyn_exc2", "_isyn_inh", "_isyn_inh2"] def __init__( - self, tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, - isyn_exc, isyn_exc2, isyn_inh, isyn_inh2 + self, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2 ): super().__init__( [Struct([ - (DataType.S1615, TAU_SYN_E), + # (DataType.S1615, TAU_SYN_E), (DataType.S1615, ISYN_EXC), - (DataType.S1615, TAU_SYN_E2), + # (DataType.S1615, TAU_SYN_E2), (DataType.S1615, ISYN_EXC2), - (DataType.S1615, TAU_SYN_I), + # (DataType.S1615, TAU_SYN_I), (DataType.S1615, ISYN_INH), - (DataType.S1615, TAU_SYN_I2), - (DataType.S1615, ISYN_INH2), - (DataType.S1615, TIMESTEP_MS)])], - {TAU_SYN_E: "mV", TAU_SYN_E2: "mV", TAU_SYN_I: "mV", - TAU_SYN_I2: "mV", ISYN_EXC: "", ISYN_EXC2: "", + # (DataType.S1615, TAU_SYN_I2), + (DataType.S1615, ISYN_INH2)])], + {ISYN_EXC: "", ISYN_EXC2: "", ISYN_INH: "", ISYN_INH2: ""}) - - self._tau_syn_E = tau_syn_E - self._tau_syn_E2 = tau_syn_E2 - self._tau_syn_I = tau_syn_I - self._tau_syn_I2 = 
tau_syn_I2 + # {TAU_SYN_E: "mV", TAU_SYN_E2: "mV", TAU_SYN_I: "mV", + # TAU_SYN_I2: "mV", ISYN_EXC: "", ISYN_EXC2: "", + # ISYN_INH: "", ISYN_INH2: ""}) + + # self._tau_syn_E = tau_syn_E + # self._tau_syn_E2 = tau_syn_E2 + # self._tau_syn_I = tau_syn_I + # self._tau_syn_I2 = tau_syn_I2 self._isyn_exc = isyn_exc self._isyn_exc2 = isyn_exc2 self._isyn_inh = isyn_inh @@ -71,12 +71,7 @@ def __init__( @overrides(AbstractSynapseType.add_parameters) def add_parameters(self, parameters): - parameters[TAU_SYN_E] = self._tau_syn_E - parameters[TAU_SYN_E2] = self._tau_syn_E2 - parameters[TAU_SYN_I] = self._tau_syn_I - parameters[TAU_SYN_I2] = self._tau_syn_I2 - parameters[TIMESTEP_MS] = ( - SpynnakerDataView.get_simulation_time_step_ms()) + pass @overrides(AbstractSynapseType.add_state_variables) def add_state_variables(self, state_variables): @@ -147,37 +142,37 @@ def get_synapse_id_by_target(self, target): def get_synapse_targets(self): return "input_connections", "recurrent_connections", "learning_signal", "unused" - @property - def tau_syn_E(self): - return self._tau_syn_E - - @tau_syn_E.setter - def tau_syn_E(self, tau_syn_E): - self._tau_syn_E = tau_syn_E - - @property - def tau_syn_E2(self): - return self._tau_syn_E2 - - @tau_syn_E2.setter - def tau_syn_E2(self, tau_syn_E2): - self._tau_syn_E2 = tau_syn_E2 - - @property - def tau_syn_I(self): - return self._tau_syn_I - - @tau_syn_I.setter - def tau_syn_I(self, tau_syn_I): - self._tau_syn_I = tau_syn_I - - @property - def tau_syn_I2(self): - return self._tau_syn_I2 - - @tau_syn_I2.setter - def tau_syn_I2(self, tau_syn_I2): - self._tau_syn_I2 = tau_syn_I2 + # @property + # def tau_syn_E(self): + # return self._tau_syn_E + # + # @tau_syn_E.setter + # def tau_syn_E(self, tau_syn_E): + # self._tau_syn_E = tau_syn_E + # + # @property + # def tau_syn_E2(self): + # return self._tau_syn_E2 + # + # @tau_syn_E2.setter + # def tau_syn_E2(self, tau_syn_E2): + # self._tau_syn_E2 = tau_syn_E2 + # + # @property + # def 
tau_syn_I(self): + # return self._tau_syn_I + # + # @tau_syn_I.setter + # def tau_syn_I(self, tau_syn_I): + # self._tau_syn_I = tau_syn_I + # + # @property + # def tau_syn_I2(self): + # return self._tau_syn_I2 + # + # @tau_syn_I2.setter + # def tau_syn_I2(self, tau_syn_I2): + # self._tau_syn_I2 = tau_syn_I2 @property def isyn_exc(self): @@ -199,8 +194,8 @@ def isyn_inh(self, isyn_inh): def isyn_inh2(self): return self._isyn_inh2 - @isyn_inh.setter - def isyn_inh(self, isyn_inh2): + @isyn_inh2.setter + def isyn_inh2(self, isyn_inh2): self._isyn_inh2 = isyn_inh2 @property diff --git a/spynnaker/pyNN/utilities/neo_buffer_database.py b/spynnaker/pyNN/utilities/neo_buffer_database.py index 75449dafc46..ca84e3c55bb 100644 --- a/spynnaker/pyNN/utilities/neo_buffer_database.py +++ b/spynnaker/pyNN/utilities/neo_buffer_database.py @@ -800,8 +800,6 @@ def __get_matrix_data_by_region( record_raw = self._read_contents(cursor, region_id) record_length = len(record_raw) - print("record_length, region_id: ", record_length, region_id) - # There is one column for time and one for each neuron recording data_row_length = len(neurons) * data_type.size full_row_length = data_row_length + self.__N_BYTES_FOR_TIMESTAMP diff --git a/spynnaker/pyNN/utilities/neo_csv.py b/spynnaker/pyNN/utilities/neo_csv.py index a057b16be88..2ab738d6da6 100644 --- a/spynnaker/pyNN/utilities/neo_csv.py +++ b/spynnaker/pyNN/utilities/neo_csv.py @@ -285,14 +285,11 @@ def _insert_matrix_data( source_population=block.name, source_ids=ids) - print("insert_matrix_data variable, segment, block, source_ids, t_start, sampling_rate ", - variable, segment, block, ids, t_start, sampling_rate) channel_index = self.__get_channel_index(indexes, segment.block) data_array.channel_index = channel_index data_array.shape = (data_array.shape[0], data_array.shape[1]) segment.analogsignals.append(data_array) channel_index.analogsignals.append(data_array) - print("data_array shape: ", data_array.shape) def _csv_matrix_data(self, 
csv_writer, signal_array, indexes): """ Writes data to a csv file From 5a17f838e77f5b7f70a9e01424feb7326a827619 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 14 Apr 2023 16:10:40 +0100 Subject: [PATCH 088/123] Get sinusoid readout neuron working; fix STDP? --- .../neuron/sinusoid_readout/Makefile | 2 +- .../Makefile | 2 +- .../neuron_impl_eprop_adaptive.h | 19 +- .../neuron_impl_sinusoid_readout.h | 568 ++++++++++-------- .../models/neuron_model_eprop_adaptive_impl.h | 37 +- .../neuron_model_sinusoid_readout_impl.c | 354 +++++------ .../neuron_model_sinusoid_readout_impl.h | 295 ++++++++- .../synapse_dynamics_eprop_adaptive_impl.c | 88 +-- .../synapse_dynamics_sinusoid_readout_impl.c | 246 ++++---- .../weight_dependence/weight_eprop_reg_impl.c | 2 +- .../weight_dependence/weight_eprop_reg_impl.h | 17 +- .../src/neuron/spike_processing.c | 8 +- neural_modelling/src/neuron/synapses.c | 6 +- .../poisson/spike_source_poisson.c | 10 +- spynnaker/pyNN/extra_models/__init__.py | 7 +- .../pyNN/models/neuron/builds/__init__.py | 4 +- .../models/neuron/builds/sinusoid_readout.py | 10 +- .../models/neuron/neuron_models/__init__.py | 8 +- .../neuron_model_eprop_adaptive.py | 6 +- .../neuron_model_sinusoid_readout.py | 379 ++++++------ 20 files changed, 1255 insertions(+), 813 deletions(-) diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile index 771ae1c7b58..fbd1c7cb43d 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile +++ b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile @@ -1,6 +1,6 @@ APP = $(notdir $(CURDIR)) -OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c +#OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c diff --git 
a/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile index e0e5c4ae587..8841fa7ca31 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile +++ b/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile @@ -1,6 +1,6 @@ APP = $(notdir $(CURDIR)) -OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c +#OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h #SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index e4e2321a7b3..4dfb609bce9 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -112,7 +112,8 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { if (sizeof(neuron_t)) { neuron_array = spin1_malloc(n_neurons * sizeof(neuron_t)); if (neuron_array == NULL) { - log_error("Unable to allocate neuron array - Out of DTCM"); + log_error("Unable to allocate neuron array - Out of DTCM %u %u", + n_neurons, sizeof(neuron_t)); return false; } } @@ -240,14 +241,6 @@ static void neuron_impl_load_neuron_parameters( spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); } -#if LOG_LEVEL >= LOG_DEBUG - for (index_t n = 0; n < n_neurons; n++) { - neuron_model_print_parameters(&neuron_array[n]); - neuron_model_print_state_variables(&neuron_array[n]); - } -#endif // LOG_LEVEL >= LOG_DEBUG - - // // if (sizeof(global_neuron_params_t)) { @@ -321,6 +314,7 @@ static void neuron_impl_load_neuron_parameters( log_debug("-------------------------------------\n"); for 
(index_t n = 0; n < n_neurons; n++) { neuron_model_print_parameters(&neuron_array[n]); + neuron_model_print_state_variables(&neuron_array[n]); } log_debug("-------------------------------------\n"); #endif // LOG_LEVEL >= LOG_DEBUG @@ -565,8 +559,11 @@ static void neuron_impl_do_timestep_update( additional_input_has_spiked(additional_input); // Add contribution from this neuron's spike to global rate trace - // TODO: this needs to be global somehow... - neuron->core_pop_rate += 1.0k; + // Make sure it's the same across all neurons + for (uint32_t n_ind=0; n_ind < n_neurons; n_ind++) { + neuron_t *global_neuron = &neuron_array[n_ind]; + global_neuron->core_pop_rate += 1.0k; + } // Record spike neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 7ced4b616bc..440d5585a9f 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -10,17 +10,34 @@ #include #include +#include + // Further includes -#include #include #include #include #include -#include - -#define V_RECORDING_INDEX 0 -#define GSYN_EXCITATORY_RECORDING_INDEX 1 -#define GSYN_INHIBITORY_RECORDING_INDEX 2 +#include // maybe not needed? + +//! Indices for recording of words +enum word_recording_indices { + //! V (somatic potential) recording index + V_RECORDING_INDEX = 0, + //! Gsyn_exc (excitatory synaptic conductance/current) recording index + GSYN_EXC_RECORDING_INDEX = 1, + //! Gsyn_inh (excitatory synaptic conductance/current) recording index + GSYN_INH_RECORDING_INDEX = 2, + //! Number of recorded word-sized state variables + N_RECORDED_VARS = 3 +}; + +//! Indices for recording of bitfields +enum bitfield_recording_indices { + //! Spike event recording index + SPIKE_RECORDING_BITFIELD = 0, + //! 
Number of recorded bitfields + N_BITFIELD_VARS = 1 +}; #ifndef NUM_EXCITATORY_RECEPTORS #define NUM_EXCITATORY_RECEPTORS 1 @@ -34,24 +51,30 @@ shaping include #endif +#include + //! Array of neuron states -neuron_pointer_t neuron_array; +neuron_t *neuron_array; //! Input states array -static input_type_pointer_t input_type_array; +static input_type_t *input_type_array; //! Additional input array -static additional_input_pointer_t additional_input_array; +static additional_input_t *additional_input_array; //! Threshold states array -static threshold_type_pointer_t threshold_type_array; +static threshold_type_t *threshold_type_array; -//! Global parameters for the neurons -static global_neuron_params_pointer_t global_parameters; +////! Global parameters for the neurons +//static global_neuron_params_pointer_t global_parameters; // The synapse shaping parameters -static synapse_param_t *neuron_synapse_shaping_params; +static synapse_types_t *synapse_types_array; +//! The number of steps to run per timestep +static uint n_steps_per_timestep; + +// TODO: check if these other parameters are needed static REAL next_spike_time = 0; extern uint32_t time; extern key_t key; @@ -59,24 +82,22 @@ extern REAL learning_signal; static uint32_t target_ind = 0; - - static bool neuron_impl_initialise(uint32_t n_neurons) { // allocate DTCM for the global parameter details - if (sizeof(global_neuron_params_t) > 0) { - global_parameters = (global_neuron_params_t *) spin1_malloc( - sizeof(global_neuron_params_t)); - if (global_parameters == NULL) { - log_error("Unable to allocate global neuron parameters" - "- Out of DTCM"); - return false; - } - } +// if (sizeof(global_neuron_params_t) > 0) { +// global_parameters = (global_neuron_params_t *) spin1_malloc( +// sizeof(global_neuron_params_t)); +// if (global_parameters == NULL) { +// log_error("Unable to allocate global neuron parameters" +// "- Out of DTCM"); +// return false; +// } +// } // Allocate DTCM for neuron array - if 
(sizeof(neuron_t) != 0) { - neuron_array = (neuron_t *) spin1_malloc(n_neurons * sizeof(neuron_t)); + if (sizeof(neuron_t)) { + neuron_array = spin1_malloc(n_neurons * sizeof(neuron_t)); if (neuron_array == NULL) { log_error("Unable to allocate neuron array - Out of DTCM"); return false; @@ -84,9 +105,8 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Allocate DTCM for input type array and copy block of data - if (sizeof(input_type_t) != 0) { - input_type_array = (input_type_t *) spin1_malloc( - n_neurons * sizeof(input_type_t)); + if (sizeof(input_type_t)) { + input_type_array = spin1_malloc(n_neurons * sizeof(input_type_t)); if (input_type_array == NULL) { log_error("Unable to allocate input type array - Out of DTCM"); return false; @@ -94,9 +114,9 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Allocate DTCM for additional input array and copy block of data - if (sizeof(additional_input_t) != 0) { - additional_input_array = (additional_input_pointer_t) spin1_malloc( - n_neurons * sizeof(additional_input_t)); + if (sizeof(additional_input_t)) { + additional_input_array = + spin1_malloc(n_neurons * sizeof(additional_input_t)); if (additional_input_array == NULL) { log_error("Unable to allocate additional input array" " - Out of DTCM"); @@ -105,9 +125,9 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Allocate DTCM for threshold type array and copy block of data - if (sizeof(threshold_type_t) != 0) { - threshold_type_array = (threshold_type_t *) spin1_malloc( - n_neurons * sizeof(threshold_type_t)); + if (sizeof(threshold_type_t)) { + threshold_type_array = + spin1_malloc(n_neurons * sizeof(threshold_type_t)); if (threshold_type_array == NULL) { log_error("Unable to allocate threshold type array - Out of DTCM"); return false; @@ -115,10 +135,10 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Allocate DTCM for synapse shaping parameters - if (sizeof(synapse_param_t) != 0) { - 
neuron_synapse_shaping_params = (synapse_param_t *) spin1_malloc( - n_neurons * sizeof(synapse_param_t)); - if (neuron_synapse_shaping_params == NULL) { + if (sizeof(synapse_types_t) != 0) { + synapse_types_array = + spin1_malloc(n_neurons * sizeof(synapse_types_t)); + if (synapse_types_array == NULL) { log_error("Unable to allocate synapse parameters array" " - Out of DTCM"); return false; @@ -127,7 +147,7 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { // Initialise pointers to Neuron parameters in STDP code // synapse_dynamics_set_neuron_array(neuron_array); - log_info("set pointer to neuron array in stdp code"); +// log_info("set pointer to neuron array in stdp code"); return true; } @@ -136,54 +156,86 @@ static void neuron_impl_add_inputs( index_t synapse_type_index, index_t neuron_index, input_t weights_this_timestep) { // simple wrapper to synapse type input function - synapse_param_pointer_t parameters = - &(neuron_synapse_shaping_params[neuron_index]); + synapse_types_t *parameters = + &(synapse_types_array[neuron_index]); synapse_types_add_neuron_input(synapse_type_index, parameters, weights_this_timestep); } +static uint32_t n_words_needed(uint32_t size) { + return (size + (sizeof(uint32_t) - 1)) / sizeof(uint32_t); +} + static void neuron_impl_load_neuron_parameters( - address_t address, uint32_t next, uint32_t n_neurons) { + address_t address, uint32_t next, uint32_t n_neurons, + address_t save_initial_state) { log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); - //log_debug("writing neuron global parameters"); - spin1_memcpy(global_parameters, &address[next], - sizeof(global_neuron_params_t)); - next += (sizeof(global_neuron_params_t) + 3) / 4; + // Number of steps per timestep + n_steps_per_timestep = address[next++]; + if (n_steps_per_timestep == 0) { + log_error("bad number of steps per timestep: 0"); + rt_error(RTE_SWERR); + } - log_debug("reading neuron local parameters"); - spin1_memcpy(neuron_array, 
&address[next], n_neurons * sizeof(neuron_t)); - next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + if (sizeof(neuron_t)) { + neuron_params_t *params = (neuron_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + neuron_model_initialise(&neuron_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(neuron_params_t)); + } - log_debug("reading input type parameters"); - spin1_memcpy(input_type_array, &address[next], - n_neurons * sizeof(input_type_t)); - next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + if (sizeof(input_type_t)) { + input_type_params_t *params = (input_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + input_type_initialise(&input_type_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(input_type_params_t)); + } - log_debug("reading threshold type parameters"); - spin1_memcpy(threshold_type_array, &address[next], - n_neurons * sizeof(threshold_type_t)); - next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + if (sizeof(threshold_type_t)) { + threshold_type_params_t *params = (threshold_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + threshold_type_initialise(&threshold_type_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(threshold_type_params_t)); + } - log_debug("reading synapse parameters"); - spin1_memcpy(neuron_synapse_shaping_params, &address[next], - n_neurons * sizeof(synapse_param_t)); - next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + if (sizeof(synapse_types_t)) { + synapse_types_params_t *params = (synapse_types_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + synapse_types_initialise(&synapse_types_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(synapse_types_params_t)); + } - log_debug("reading additional input type parameters"); - 
spin1_memcpy(additional_input_array, &address[next], - n_neurons * sizeof(additional_input_t)); - next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + if (sizeof(additional_input_t)) { + additional_input_params_t *params = (additional_input_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + additional_input_initialise(&additional_input_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(additional_input_params_t)); + } - neuron_model_set_global_neuron_params(global_parameters); + // If we are to save the initial state, copy the whole of the parameters + // to the initial state + if (save_initial_state) { + spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); + } // io_printf(IO_BUF, "\nPrinting global params\n"); // io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); // io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); // io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); // io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); - io_printf(IO_BUF, "eta: %k \n\n", global_parameters->eta); +// io_printf(IO_BUF, "eta: %k \n\n", neuron_array[0]->eta); for (index_t n = 0; n < n_neurons; n++) { @@ -193,8 +245,6 @@ static void neuron_impl_load_neuron_parameters( // io_printf(IO_BUF, "size of global params: %u", // sizeof(global_neuron_params_t)); - - #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { @@ -206,201 +256,243 @@ static void neuron_impl_load_neuron_parameters( } +static void neuron_impl_do_timestep_update( + uint32_t timer_count, uint32_t time, uint32_t n_neurons) { + + for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { + // Get the neuron itself + neuron_t *neuron = &neuron_array[neuron_index]; + bool spike = false; +// neuron_t *neuron_zero = &neuron_array[0]; + + target_ind = 
time & 0x3ff; // repeats on a cycle of 1024 entries in array + + // io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); + // io_printf(IO_BUF, "Target: %k\n\n", + // global_parameters->target_V[target_ind]); + + // Get the input_type parameters and voltage for this neuron + input_type_t *input_type = &input_type_array[neuron_index]; + + // Get threshold and additional input parameters for this neuron + threshold_type_t *threshold_type = + &threshold_type_array[neuron_index]; + additional_input_t *additional_input = + &additional_input_array[neuron_index]; + synapse_types_t *synapse_type = + &synapse_types_array[neuron_index]; + + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + + + // Get the exc and inh values from the synapses + input_t exc_values[NUM_EXCITATORY_RECEPTORS]; + input_t* exc_syn_values = synapse_types_get_excitatory_input( + exc_values, synapse_type); + input_t inh_values[NUM_INHIBITORY_RECEPTORS]; + input_t* inh_syn_values = synapse_types_get_inhibitory_input( + inh_values, synapse_type); + + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_syn_values, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_syn_values, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording + // REAL total_exc = 0; + // REAL total_inh = 0; + // + // for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ + // total_exc += exc_input_values[i]; + // } + // for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ + // total_inh += inh_input_values[i]; + // } + + // Call functions to get the input values to be recorded + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; + // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + + // Call functions to convert exc_input and inh_input to current + 
input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); + + REAL current_offset = current_source_get_offset(time, neuron_index); + input_t external_bias = additional_input_get_input_value_as_current( + additional_input, voltage); + + // This is clearly overwritten so why is it here? +// recorded_variable_values[V_RECORDING_INDEX] = voltage; +// neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, voltage); + if (neuron_index == 0){ + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, current_offset, neuron, 0.0k); + + // Calculate error + REAL error = result - neuron->target_V[target_ind]; + learning_signal = error; + // Record Error + // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = + // error; + // neuron->syn_state[3].delta_w; + // neuron->syn_state[0].z_bar; + + log_info("neuron_index %u time %u voltage %k result %k exc input %k targetV %k", + neuron_index, time, voltage, result, exc_input_values[0], + neuron->target_V[target_ind]); + + // Record readout + neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, result); +// neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, voltage); +// recorded_variable_values[V_RECORDING_INDEX] = +// result; + // neuron->syn_state[0].z_bar; + + // Send error (learning signal) as packet with payload + // ToDo can't I just alter the global variable here? + // Another option is just to use "send_spike" instead... ? 
+ while (!spin1_send_mc_packet( + key | neuron_index, bitsk(error), 1 )) { + spin1_delay_us(1); + } + } + else{ + // Record 'Error' + neuron_recording_record_accum( + V_RECORDING_INDEX, neuron_index, + neuron->target_V[target_ind]); +// recorded_variable_values[V_RECORDING_INDEX] = +// // neuron->syn_state[0].z_bar; +// global_parameters->target_V[target_ind]; +// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = +// // - global_parameters->target_V[target_ind]; + } +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index*20].z_bar; + neuron_recording_record_accum( + GSYN_INH_RECORDING_INDEX, neuron_index, + neuron->syn_state[neuron_index*20].z_bar); + // Record target +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = +// // global_parameters->target_V[target_ind]; +// neuron->syn_state[neuron_index*20].delta_w; +// // exc_input_values[0]; + neuron_recording_record_accum( + GSYN_EXC_RECORDING_INDEX, neuron_index, + neuron->syn_state[neuron_index*20].delta_w); + + // If spike occurs, communicate to relevant parts of model + // TODO I don't know why this is here + if (spike) { + // Call relevant model-based functions + // Tell the neuron model + // neuron_model_has_spiked(neuron); + + // Tell the additional input + additional_input_has_spiked(additional_input); + } + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); -static bool neuron_impl_do_timestep_update(index_t neuron_index, - input_t external_bias, state_t *recorded_variable_values) { - - // Get the neuron itself - neuron_pointer_t neuron = &neuron_array[neuron_index]; - bool spike = false; - - target_ind = time & 0x3ff; // repeats on a cycle of 1024 entries in array - -// io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); -// io_printf(IO_BUF, "Target: %k\n\n", -// global_parameters->target_V[target_ind]); - - // Get the input_type parameters and voltage for this neuron - 
input_type_pointer_t input_type = &input_type_array[neuron_index]; - - // Get threshold and additional input parameters for this neuron - threshold_type_pointer_t threshold_type = - &threshold_type_array[neuron_index]; - additional_input_pointer_t additional_input = - &additional_input_array[neuron_index]; - synapse_param_pointer_t synapse_type = - &neuron_synapse_shaping_params[neuron_index]; - - // Get the voltage - state_t voltage = neuron_model_get_membrane_voltage(neuron); + #if LOG_LEVEL >= LOG_DEBUG + neuron_model_print_state_variables(neuron); + #endif // LOG_LEVEL >= LOG_DEBUG + // Return the boolean to the model timestep update +// return spike; + } +} - // Get the exc and inh values from the synapses - input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); - input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); - // Call functions to obtain exc_input and inh_input - input_t* exc_input_values = input_type_get_input_value( - exc_value, input_type, NUM_EXCITATORY_RECEPTORS); - input_t* inh_input_values = input_type_get_input_value( - inh_value, input_type, NUM_INHIBITORY_RECEPTORS); +//! \brief stores neuron parameter back into sdram +//! 
\param[in] address: the address in sdram to start the store +static void neuron_impl_store_neuron_parameters( + address_t address, uint32_t next, uint32_t n_neurons) { + log_debug("writing parameters"); - // Sum g_syn contributions from all receptors for recording -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ -// total_exc += exc_input_values[i]; -// } -// for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ -// total_inh += inh_input_values[i]; -// } + // Skip steps per timestep + next += 1; - // Call functions to get the input values to be recorded -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - - // Call functions to convert exc_input and inh_input to current - input_type_convert_excitatory_input_to_current( - exc_input_values, input_type, voltage); - input_type_convert_inhibitory_input_to_current( - inh_input_values, input_type, voltage); - - external_bias += additional_input_get_input_value_as_current( - additional_input, voltage); - - recorded_variable_values[V_RECORDING_INDEX] = voltage; - if (neuron_index == 0){ - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, 0.0k); - - // Calculate error - REAL error = result - global_parameters->target_V[target_ind]; - learning_signal = error; - // Record Error -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// error; -// neuron->syn_state[3].delta_w; -// neuron->syn_state[0].z_bar; - - // Record readout - recorded_variable_values[V_RECORDING_INDEX] = - result; - // neuron->syn_state[0].z_bar; - - // Send error (learning signal) as packet with payload - // ToDo can't I just alter the global variable here? 
- while (!spin1_send_mc_packet( - key | neuron_index, bitsk(error), 1 )) { - spin1_delay_us(1); + if (sizeof(neuron_t)) { + log_debug("writing neuron local parameters"); +// spin1_memcpy(&address[next], neuron_array, +// n_neurons * sizeof(neuron_t)); + neuron_params_t *params = (neuron_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + neuron_model_save_state(&neuron_array[i], ¶ms[i]); } + next += n_words_needed(n_neurons * sizeof(neuron_params_t)); } - else{ - // Record 'Error' - recorded_variable_values[V_RECORDING_INDEX] = -// neuron->syn_state[0].z_bar; - global_parameters->target_V[target_ind]; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// - global_parameters->target_V[target_ind]; - } - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index*20].z_bar; - // Record target - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// global_parameters->target_V[target_ind]; - neuron->syn_state[neuron_index*20].delta_w; -// exc_input_values[0]; - - - // If spike occurs, communicate to relevant parts of model - if (spike) { - // Call relevant model-based functions - // Tell the neuron model -// neuron_model_has_spiked(neuron); - - // Tell the additional input - additional_input_has_spiked(additional_input); - } - - // Shape the existing input according to the included rule - synapse_types_shape_input(synapse_type); - - #if LOG_LEVEL >= LOG_DEBUG - neuron_model_print_state_variables(neuron); - #endif // LOG_LEVEL >= LOG_DEBUG - - // Return the boolean to the model timestep update - return spike; -} - - + if (sizeof(input_type_t)) { + log_debug("writing input type parameters"); + input_type_params_t *params = (input_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + input_type_save_state(&input_type_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(input_type_params_t)); + } + if (sizeof(threshold_type_t)) { + log_debug("writing 
threshold type parameters"); + threshold_type_params_t *params = (threshold_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + threshold_type_save_state(&threshold_type_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(threshold_type_params_t)); + } -//! \brief stores neuron parameter back into sdram -//! \param[in] address: the address in sdram to start the store -static void neuron_impl_store_neuron_parameters( - address_t address, uint32_t next, uint32_t n_neurons) { - log_debug("writing parameters"); + if (sizeof(synapse_types_t)) { + log_debug("writing synapse parameters"); + synapse_types_params_t *params = (synapse_types_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + synapse_types_save_state(&synapse_types_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(synapse_types_params_t)); + } - //log_debug("writing neuron global parameters"); - spin1_memcpy(&address[next], global_parameters, - sizeof(global_neuron_params_t)); - next += (sizeof(global_neuron_params_t) + 3) / 4; - - log_debug("writing neuron local parameters"); - spin1_memcpy(&address[next], neuron_array, - n_neurons * sizeof(neuron_t)); - next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; - - log_debug("writing input type parameters"); - spin1_memcpy(&address[next], input_type_array, - n_neurons * sizeof(input_type_t)); - next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; - - log_debug("writing threshold type parameters"); - spin1_memcpy(&address[next], threshold_type_array, - n_neurons * sizeof(threshold_type_t)); - next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; - - log_debug("writing synapse parameters"); - spin1_memcpy(&address[next], neuron_synapse_shaping_params, - n_neurons * sizeof(synapse_param_t)); - next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; - - log_debug("writing additional input type parameters"); - spin1_memcpy(&address[next], additional_input_array, - n_neurons * 
sizeof(additional_input_t)); - next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + if (sizeof(additional_input_t)) { + log_debug("writing additional input type parameters"); + additional_input_params_t *params = (additional_input_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + additional_input_save_state(&additional_input_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(additional_input_params_t)); + } } #if LOG_LEVEL >= LOG_DEBUG void neuron_impl_print_inputs(uint32_t n_neurons) { bool empty = true; for (index_t i = 0; i < n_neurons; i++) { - empty = empty - && (bitsk(synapse_types_get_excitatory_input( - &(neuron_synapse_shaping_params[i])) - - synapse_types_get_inhibitory_input( - &(neuron_synapse_shaping_params[i]))) == 0); + synapse_types_t *params = &synapse_types_array[i]; + input_t exc_values[NUM_EXCITATORY_RECEPTORS]; + input_t inh_values[NUM_INHIBITORY_RECEPTORS]; + empty = empty && (0 == bitsk( + synapse_types_get_excitatory_input(exc_values, params)[0] + - synapse_types_get_inhibitory_input(inh_values, params)[0])); } if (!empty) { log_debug("-------------------------------------\n"); for (index_t i = 0; i < n_neurons; i++) { + synapse_types_t *params = &synapse_types_array[i]; + input_t exc_values[NUM_EXCITATORY_RECEPTORS]; + input_t inh_values[NUM_INHIBITORY_RECEPTORS]; input_t input = - synapse_types_get_excitatory_input( - &(neuron_synapse_shaping_params[i])) - - synapse_types_get_inhibitory_input( - &(neuron_synapse_shaping_params[i])); + synapse_types_get_excitatory_input(exc_values, params)[0] + - synapse_types_get_inhibitory_input(inh_values, params)[1]; if (bitsk(input) != 0) { log_debug("%3u: %12.6k (= ", i, input); - synapse_types_print_input( - &(neuron_synapse_shaping_params[i])); + synapse_types_print_input(params); log_debug(")\n"); } } @@ -411,7 +503,7 @@ void neuron_impl_print_inputs(uint32_t n_neurons) { void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { 
log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { - synapse_types_print_parameters(&(neuron_synapse_shaping_params[n])); + synapse_types_print_parameters(&(synapse_types_array[n])); } log_debug("-------------------------------------\n"); } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index d48bd605230..bc59f7bf58a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -101,6 +101,7 @@ struct neuron_params_t { uint32_t window_size; uint32_t number_of_cues; + REAL pop_rate; REAL target_rate; REAL tau_err; REAL eta; // learning rate @@ -265,7 +266,7 @@ static inline void neuron_model_initialise( // log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", // state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); - state->core_pop_rate = 0.0k; + state->core_pop_rate = params->pop_rate; state->core_target_rate = params->target_rate; state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); state->eta = params->eta; @@ -276,30 +277,50 @@ static inline void neuron_model_initialise( for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; } - } static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { // TODO: probably more parameters need copying across at this point, syn_state for a start params->V_init = state->V_membrane; params->refract_timer_init = state->refract_timer; + params->z = state->z; + params->A = state->A; + params->psi = state->psi; + params->B = state->B; + params->b = state->b; + params->b_0 = state->b_0; +// state->e_to_dt_on_tau_a = expk(-kdivk(ts, params->tau_a)); + params->beta = state->beta; +// state->adpt = 1 - expk(-kdivk(ts, params->tau_a)); + params->scalar = 
state->scalar; + params->L = state->L; + params->w_fb = state->w_fb; + params->window_size = state->window_size; + params->number_of_cues = state->number_of_cues; + +// log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", +// state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); + +// state->core_pop_rate = 0.0k; + params->pop_rate = state->core_pop_rate; + params->target_rate = state->core_target_rate; +// state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); + params->eta = state->eta; + + for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { + params->syn_state[n_syn] = state->syn_state[n_syn]; + } } // simple Leaky I&F ODE static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep, REAL B_t) { - REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; -// log_info("alpha %k input %k R_membrane %k V_rest %k", -// alpha, input_this_timestep, neuron->R_membrane, neuron->V_rest); - // update membrane voltage neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) - neuron->z * B_t; // this line achieves reset - -// log_info("neuron->V_membrane is %k neuron_z %k B_t %k", neuron->V_membrane, neuron->z, B_t); } //void neuron_model_set_global_neuron_params( diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c index 8b5103ee4ae..2e3eacfbd1d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c @@ -1,178 +1,178 @@ -#include "neuron_model_sinusoid_readout_impl.h" - -#include - -extern uint32_t time; -extern REAL learning_signal; -REAL local_eta; - -// simple Leaky I&F ODE -static inline void _lif_neuron_closed_form( - neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { - - REAL alpha = input_this_timestep * 
neuron->R_membrane + neuron->V_rest; - - // update membrane voltage - neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); -} - -void neuron_model_set_global_neuron_params( - global_neuron_params_pointer_t params) { - use(params); - - local_eta = params->eta; - io_printf(IO_BUF, "local eta = %k\n", local_eta); - - // Does Nothing - no params -} - -state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, neuron_pointer_t neuron, REAL dummy) { - - log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); - log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); - use(dummy); - - // If outside of the refractory period - if (neuron->refract_timer <= 0) { -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// total_exc += exc_input[0]; -// total_inh += inh_input[0]; -// for (int i=0; i < num_excitatory_inputs; i++){ -// total_exc += exc_input[i]; -// } -// for (int i=0; i< num_inhibitory_inputs; i++){ -// total_inh += inh_input[i]; -// } - // Get the input in nA - input_t input_this_timestep = - exc_input[0] + exc_input[1] + neuron->I_offset; - - _lif_neuron_closed_form( - neuron, neuron->V_membrane, input_this_timestep); - } else { - - // countdown refractory timer - neuron->refract_timer -= 1; - } - - uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
- - neuron->L = learning_signal * neuron->w_fb; - - // All operations now need doing once per eprop synapse - for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ - // ****************************************************************** - // Low-pass filter incoming spike train - // ****************************************************************** - neuron->syn_state[syn_ind].z_bar = - neuron->syn_state[syn_ind].z_bar * neuron->exp_TC -// + (1 - neuron->exp_TC) * - + - neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - - - // ****************************************************************** - // Update eligibility vector - // ****************************************************************** -// neuron->syn_state[syn_ind].el_a = -// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -// (rho - neuron->psi * neuron->beta) * -// neuron->syn_state[syn_ind].el_a; - - - // ****************************************************************** - // Update eligibility trace - // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * neuron->syn_state[syn_ind].el_a); -// -// neuron->syn_state[syn_ind].e_bar = -// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -// + (1 - neuron->exp_TC) * temp_elig_trace; - - // ****************************************************************** - // Update cached total weight change - // ****************************************************************** - REAL this_dt_weight_change = -// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; - - neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; -// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -// "z_bar_inp = 
%k \t z_bar = %k \t time:%u\n" -// "L = %k = %k * %k = l * w_fb\n" -// "this dw = %k \t tot dw %k\n" -// , -// total_synapses_per_neuron, -// syn_ind, -// neuron->syn_state[syn_ind].z_bar_inp, -// neuron->syn_state[syn_ind].z_bar, -// time, -// neuron->L, learning_signal, neuron -> w_fb, -// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -// ); -// } - // reset input (can't have more than one spike per timestep - neuron->syn_state[syn_ind].z_bar_inp = 0; - - - // decrease timestep counter preventing rapid updates - if (neuron->syn_state[syn_ind].update_ready > 0){ - neuron->syn_state[syn_ind].update_ready -= 1; - } - - } - - return neuron->V_membrane; -} - -void neuron_model_has_spiked(neuron_pointer_t neuron) { - - // reset membrane voltage - neuron->V_membrane = neuron->V_reset; - - // reset refractory timer - neuron->refract_timer = neuron->T_refract; -} - -state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { - return neuron->V_membrane; -} - -void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { - log_debug("V membrane = %11.4k mv", neuron->V_membrane); -} - -void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { - io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); - io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); - - io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); - io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); - - io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); - - io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); - - io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); - - io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); - +//#include "neuron_model_sinusoid_readout_impl.h" +// +//#include +// +//extern uint32_t time; +//extern REAL learning_signal; +//REAL local_eta; +// +//// simple Leaky I&F ODE +//static inline void _lif_neuron_closed_form( +// neuron_pointer_t neuron, REAL V_prev, 
input_t input_this_timestep) { +// +// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; +// +// // update membrane voltage +// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +//} +// +//void neuron_model_set_global_neuron_params( +// global_neuron_params_pointer_t params) { +// use(params); +// +// local_eta = params->eta; +// io_printf(IO_BUF, "local eta = %k\n", local_eta); +// +// // Does Nothing - no params +//} +// +//state_t neuron_model_state_update( +// uint16_t num_excitatory_inputs, input_t* exc_input, +// uint16_t num_inhibitory_inputs, input_t* inh_input, +// input_t external_bias, neuron_pointer_t neuron, REAL dummy) { +// +// log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); +// log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); +// use(dummy); +// +// // If outside of the refractory period +// if (neuron->refract_timer <= 0) { +//// REAL total_exc = 0; +//// REAL total_inh = 0; +//// +//// total_exc += exc_input[0]; +//// total_inh += inh_input[0]; +//// for (int i=0; i < num_excitatory_inputs; i++){ +//// total_exc += exc_input[i]; +//// } +//// for (int i=0; i< num_inhibitory_inputs; i++){ +//// total_inh += inh_input[i]; +//// } +// // Get the input in nA +// input_t input_this_timestep = +// exc_input[0] + exc_input[1] + neuron->I_offset; +// +// _lif_neuron_closed_form( +// neuron, neuron->V_membrane, input_this_timestep); +// } else { +// +// // countdown refractory timer +// neuron->refract_timer -= 1; +// } +// +// uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
+// +// neuron->L = learning_signal * neuron->w_fb; +// +// // All operations now need doing once per eprop synapse +// for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ +// // ****************************************************************** +// // Low-pass filter incoming spike train +// // ****************************************************************** +// neuron->syn_state[syn_ind].z_bar = +// neuron->syn_state[syn_ind].z_bar * neuron->exp_TC +//// + (1 - neuron->exp_TC) * +// + +// neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update +// +// +// // ****************************************************************** +// // Update eligibility vector +// // ****************************************************************** +//// neuron->syn_state[syn_ind].el_a = +//// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + +//// (rho - neuron->psi * neuron->beta) * +//// neuron->syn_state[syn_ind].el_a; +// +// +// // ****************************************************************** +// // Update eligibility trace +// // ****************************************************************** +//// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +//// neuron->beta * neuron->syn_state[syn_ind].el_a); +//// +//// neuron->syn_state[syn_ind].e_bar = +//// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar +//// + (1 - neuron->exp_TC) * temp_elig_trace; +// +// // ****************************************************************** +// // Update cached total weight change +// // ****************************************************************** +// REAL this_dt_weight_change = +//// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; +// local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; +// +// neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; +//// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || 
neuron->syn_state[syn_ind].z_bar_inp){ +//// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +//// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +//// "L = %k = %k * %k = l * w_fb\n" +//// "this dw = %k \t tot dw %k\n" +//// , +//// total_synapses_per_neuron, +//// syn_ind, +//// neuron->syn_state[syn_ind].z_bar_inp, +//// neuron->syn_state[syn_ind].z_bar, +//// time, +//// neuron->L, learning_signal, neuron -> w_fb, +//// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +//// ); +//// } +// // reset input (can't have more than one spike per timestep +// neuron->syn_state[syn_ind].z_bar_inp = 0; +// +// +// // decrease timestep counter preventing rapid updates +// if (neuron->syn_state[syn_ind].update_ready > 0){ +// neuron->syn_state[syn_ind].update_ready -= 1; +// } +// +// } +// +// return neuron->V_membrane; +//} +// +//void neuron_model_has_spiked(neuron_pointer_t neuron) { +// +// // reset membrane voltage +// neuron->V_membrane = neuron->V_reset; +// +// // reset refractory timer +// neuron->refract_timer = neuron->T_refract; +//} +// +//state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { +// return neuron->V_membrane; +//} +// +//void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { +// log_debug("V membrane = %11.4k mv", neuron->V_membrane); +//} +// +//void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { +// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); +// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); +// +// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); +// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); +// +// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); +// // io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); -// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", -// neuron->time_to_spike_ticks); - -// 
io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); -} +// +// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); +// +// io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); +// +//// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +//// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); +//// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", +//// neuron->time_to_spike_ticks); +// +//// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); +//// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +//// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +//// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +////// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +//// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +//} diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index 11ed38f80ae..f8e36242f25 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -3,9 +3,14 @@ #include "neuron_model.h" #include "random.h" +#include #define SYNAPSES_PER_NEURON 250 +extern uint32_t time; +extern REAL learning_signal; +//extern REAL local_eta; + typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply @@ -17,7 +22,49 @@ typedef struct eprop_syn_state_t { }eprop_syn_state_t; ///////////////////////////////////////////////////////////// 
-// definition for LIF neuron parameters +// definition for LIF-sinusoid neuron parameters +struct neuron_params_t { + // membrane voltage [mV] + REAL V_init; + + // membrane resting voltage [mV] + REAL V_rest; + + // membrane capacitance [nF] + REAL c_m; + + // membrane decay time constant + REAL tau_m; + + // offset current (nA) + REAL I_offset; + + // post-spike reset membrane voltage (mV) + REAL V_reset; + + // refractory time of neuron [ms] + REAL T_refract_ms; + + // initial refractory timer value (saved) + int32_t refract_timer_init; + + // The time step in milliseconds + REAL time_step; + + // TODO: double-check that everything above this point is needed + + REAL L; // learning signal + REAL w_fb; // feedback weight + + // former globals + REAL target_V[1024]; + REAL eta; + + // array of synaptic states - peak fan-in of 250 for this case + eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; +}; + + typedef struct neuron_t { // membrane voltage [mV] REAL V_membrane; @@ -48,10 +95,13 @@ typedef struct neuron_t { REAL L; // learning signal REAL w_fb; // feedback weight + // former globals + REAL target_V[1024]; // this could be problematic for DTCM usage + REAL eta; + // array of synaptic states - peak fan-in of >250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; - // Poisson compartment params // REAL mean_isi_ticks; // REAL time_to_spike_ticks; @@ -60,7 +110,6 @@ typedef struct neuron_t { // REAL rate_at_last_setting; // REAL rate_update_threshold; - // // Should be in global params // mars_kiss64_seed_t spike_source_seed; // array of 4 values //// UFRACT seconds_per_tick; @@ -68,12 +117,238 @@ typedef struct neuron_t { } neuron_t; -typedef struct global_neuron_params_t { -// mars_kiss64_seed_t spike_source_seed; // array of 4 values -// REAL ticks_per_second; -// REAL readout_V; - REAL target_V[1024]; - REAL eta; -} global_neuron_params_t; +//typedef struct global_neuron_params_t { +//// mars_kiss64_seed_t spike_source_seed; // array of 4 
values +//// REAL ticks_per_second; +//// REAL readout_V; +// REAL target_V[1024]; +// REAL eta; +//} global_neuron_params_t; + +//! \brief Performs a ceil operation on an accum +//! \param[in] value The value to ceil +//! \return The ceil of the value +static inline int32_t lif_ceil_accum(REAL value) { + int32_t bits = bitsk(value); + int32_t integer = bits >> 15; + int32_t fraction = bits & 0x7FFF; + if (fraction > 0) { + return integer + 1; + } + return integer; +} + +static inline void neuron_model_initialise( + neuron_t *state, neuron_params_t *params, uint32_t n_steps_per_timestep) { + REAL ts = kdivui(params->time_step, n_steps_per_timestep); + state->V_membrane = params->V_init; + state->V_rest = params->V_rest; + state->R_membrane = kdivk(params->tau_m, params->c_m); + state->exp_TC = expk(-kdivk(ts, params->tau_m)); + state->I_offset = params->I_offset; + state->refract_timer = params->refract_timer_init; + state->V_reset = params->V_reset; + state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); + + // for everything else just copy across for now + state->L = params->L; + state->w_fb = params->w_fb; + + for (uint32_t n_v = 0; n_v < 1024; n_v++) { + state->target_V[n_v] = params->target_V[n_v]; + } + state->eta = params->eta; +// local_eta = params->eta; + + for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { + state->syn_state[n_syn] = params->syn_state[n_syn]; + } + +} + +static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { + // TODO: probably more parameters need copying across at this point, syn_state for a start + params->V_init = state->V_membrane; + params->refract_timer_init = state->refract_timer; + params->L = state->L; + params->w_fb = state->w_fb; + + for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { + params->syn_state[n_syn] = state->syn_state[n_syn]; + } +} + +// simple Leaky I&F ODE +static inline void lif_neuron_closed_form( + neuron_t *neuron, REAL V_prev, input_t 
input_this_timestep) { + + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + + // update membrane voltage + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +} + +//void neuron_model_set_global_neuron_params( +// global_neuron_params_pointer_t params) { +// use(params); +// +// local_eta = params->eta; +// io_printf(IO_BUF, "local eta = %k\n", local_eta); +// +// // Does Nothing - no params +//} + +state_t neuron_model_state_update( + uint16_t num_excitatory_inputs, input_t* exc_input, + uint16_t num_inhibitory_inputs, input_t* inh_input, + input_t external_bias, REAL current_offset, neuron_t *restrict neuron, + REAL B_t) { + + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); + log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); + use(B_t); + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// total_exc += exc_input[0]; +// total_inh += inh_input[0]; +// for (int i=0; i < num_excitatory_inputs; i++){ +// total_exc += exc_input[i]; +// } +// for (int i=0; i< num_inhibitory_inputs; i++){ +// total_inh += inh_input[i]; +// } + // Get the input in nA + input_t input_this_timestep = + exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; + + lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep); + } else { + + // countdown refractory timer + neuron->refract_timer -= 1; + } + + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
+ + neuron->L = learning_signal * neuron->w_fb; + REAL local_eta = neuron->eta; + + // All operations now need doing once per eprop synapse + for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ + // ****************************************************************** + // Low-pass filter incoming spike train + // ****************************************************************** + neuron->syn_state[syn_ind].z_bar = + neuron->syn_state[syn_ind].z_bar * neuron->exp_TC +// + (1 - neuron->exp_TC) * + + + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + + // ****************************************************************** + // Update eligibility vector + // ****************************************************************** +// neuron->syn_state[syn_ind].el_a = +// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + +// (rho - neuron->psi * neuron->beta) * +// neuron->syn_state[syn_ind].el_a; + + + // ****************************************************************** + // Update eligibility trace + // ****************************************************************** +// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +// neuron->beta * neuron->syn_state[syn_ind].el_a); +// +// neuron->syn_state[syn_ind].e_bar = +// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar +// + (1 - neuron->exp_TC) * temp_elig_trace; + + // ****************************************************************** + // Update cached total weight change + // ****************************************************************** + REAL this_dt_weight_change = +// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; +// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +// io_printf(IO_BUF, "total synapses = %u \t 
syn_ind = %u \t " +// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +// "L = %k = %k * %k = l * w_fb\n" +// "this dw = %k \t tot dw %k\n" +// , +// total_synapses_per_neuron, +// syn_ind, +// neuron->syn_state[syn_ind].z_bar_inp, +// neuron->syn_state[syn_ind].z_bar, +// time, +// neuron->L, learning_signal, neuron -> w_fb, +// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +// ); +// } + // reset input (can't have more than one spike per timestep + neuron->syn_state[syn_ind].z_bar_inp = 0; + + + // decrease timestep counter preventing rapid updates + if (neuron->syn_state[syn_ind].update_ready > 0){ + neuron->syn_state[syn_ind].update_ready -= 1; + } + + } + + return neuron->V_membrane; +} + +void neuron_model_has_spiked(neuron_t *restrict neuron) { + + // reset membrane voltage + neuron->V_membrane = neuron->V_reset; + + // reset refractory timer + neuron->refract_timer = neuron->T_refract; +} + +state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) { + return neuron->V_membrane; +} + +void neuron_model_print_state_variables(const neuron_t *neuron) { + log_debug("V membrane = %11.4k mv", neuron->V_membrane); +} + +void neuron_model_print_parameters(const neuron_t *neuron) { +// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); +// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); +// +// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); +// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); +// +// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); +// +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// +// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); +// +// io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); +// +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); +// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", +// 
neuron->time_to_spike_ticks); + +// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); +// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +} #endif // _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index d687ad98d49..a6a354e1240 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -24,6 +24,7 @@ // Plasticity includes #include "maths.h" #include "post_events.h" +#include "synapse_dynamics_stdp_common.h" //--------------------------------------- // Structures @@ -31,12 +32,12 @@ //! \brief The type of history data of pre-events //! //! This data is stored in SDRAM in the plastic part of the synaptic matrix -typedef struct { - //! The event time - uint32_t prev_time; - //! The event trace - pre_trace_t prev_trace; -} pre_event_history_t; +//typedef struct { +// //! The event time +// uint32_t prev_time; +// //! The event trace +// pre_trace_t prev_trace; +//} pre_event_history_t; //! The format of the plastic data region of a synaptic row struct synapse_row_plastic_data_t { @@ -46,11 +47,11 @@ struct synapse_row_plastic_data_t { plastic_synapse_t synapses[]; }; -#include "weight_dependence/weight.h" -#include "timing_dependence/timing.h" -#include -#include -#include +//#include "weight_dependence/weight.h" +//#include "timing_dependence/timing.h" +//#include +//#include +//#include // TODO: make work with stdp common? (is this even really STDP?) 
@@ -69,8 +70,8 @@ extern neuron_t *neuron_array; //static uint32_t synapse_delay_index_type_bits; //static uint32_t synapse_type_mask; -uint32_t num_plastic_pre_synaptic_events = 0; -uint32_t plastic_saturation_count = 0; +//uint32_t num_plastic_pre_synaptic_events = 0; +//uint32_t plastic_saturation_count = 0; uint32_t syn_dynamics_neurons_in_partition; @@ -114,14 +115,14 @@ uint32_t RECURRENT_SYNAPSE_OFFSET = 100; // uint32_t prev_time; //} pre_event_history_t; -post_event_history_t *post_event_history; +//post_event_history_t *post_event_history; /* PRIVATE FUNCTIONS */ -// Mark a value as possibly unused while not using any instructions, guaranteed -#ifndef __use -#define __use(x) do { (void) (x); } while (0) -#endif +//// Mark a value as possibly unused while not using any instructions, guaranteed +//#ifndef __use +//#define __use(x) do { (void) (x); } while (0) +//#endif //--------------------------------------- // Synapse update loop @@ -294,7 +295,9 @@ static inline final_state_t eprop_plasticity_update( int32_t delta_w_int = (int32_t) roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - if (delta_w){ + log_info("delta_w_int %d", delta_w_int); + + if (delta_w){ // TODO: This should probably be delta_w_int // if (PRINT_PLASTICITY){ // io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" //// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" @@ -307,9 +310,11 @@ static inline final_state_t eprop_plasticity_update( // } if (delta_w_int < 0){ - current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); + current_state = weight_one_term_apply_depression( + current_state, delta_w_int << 3); } else { - current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + current_state = weight_one_term_apply_potentiation( + current_state, delta_w_int << 3); } } else { @@ -375,15 +380,15 @@ bool synapse_dynamics_process_plastic_synapses( uint32_t type_index = 
synapse_row_sparse_type_index( control_word, synapse_type_index_mask); - - int32_t neuron_ind = synapse_row_sparse_index( + uint32_t neuron_ind = synapse_row_sparse_index( control_word, synapse_index_mask); // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, if (type==1) { - // this is a recurrent synapse: add 100 to index to correct array location + // this is a recurrent synapse: add 100 to index to + // correct array location syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } @@ -392,7 +397,7 @@ bool synapse_dynamics_process_plastic_synapses( synapse_structure_get_update_state(*plastic_words, type); neuron_t *neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024k; // !!!! Check what units this is in - same as weight? !!!! + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; // !!!! Check what units this is in - same as weight? !!!! // io_printf(IO_BUF, "initial_weight: d%d, k%k, u%u - ", current_state.initial_weight, current_state.initial_weight, current_state.initial_weight); // if (current_state.initial_weight > 0){ @@ -426,7 +431,7 @@ bool synapse_dynamics_process_plastic_synapses( io_printf(IO_BUF, "update_ready=0\n"); } - log_info("delta_w is %k", neuron->syn_state[syn_ind_from_delay].delta_w); +// log_info("delta_w is %k", neuron->syn_state[syn_ind_from_delay].delta_w); // Go through typical weight update process to clip to limits final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); @@ -461,6 +466,9 @@ bool synapse_dynamics_process_plastic_synapses( int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); + log_info("Check: accumulation %d ring_buffer %d time %u", + accumulation, ring_buffers[ring_buffer_index], time); + // uint32_t sat_test = accumulation & 0x10000; // if (sat_test) { // accumulation = sat_test - 1; @@ -501,20 +509,22 @@ void 
synapse_dynamics_process_post_synaptic_event( timing_add_post_spike(time, last_post_time, last_post_trace)); } -input_t synapse_dynamics_get_intrinsic_bias( - uint32_t time, index_t neuron_index) { - use(time); - use(neuron_index); - return 0.0k; -} - -uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { - return num_plastic_pre_synaptic_events; -} +//input_t synapse_dynamics_get_intrinsic_bias( +// uint32_t time, index_t neuron_index) { +// use(time); +// use(neuron_index); +// return 0.0k; +//} +// +//uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { +// return num_plastic_pre_synaptic_events; +//} +// +//uint32_t synapse_dynamics_get_plastic_saturation_count(void) { +// return plastic_saturation_count; +//} -uint32_t synapse_dynamics_get_plastic_saturation_count(void) { - return plastic_saturation_count; -} +// TODO: fix below to match other dynamics impls #if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index cea041ae669..83891837f2f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -24,30 +24,31 @@ // Plasticity includes #include "maths.h" #include "post_events.h" +#include "synapse_dynamics_stdp_common.h" -#include "weight_dependence/weight.h" -#include "timing_dependence/timing.h" -#include -#include -#include +//#include "weight_dependence/weight.h" +//#include "timing_dependence/timing.h" +//#include +//#include +//#include -#include +//#include //#include #include -extern neuron_pointer_t neuron_array; +extern neuron_t *neuron_array; //extern global_neuron_params_pointer_t global_parameters; -static uint32_t synapse_type_index_bits; -static uint32_t synapse_index_bits; -static uint32_t synapse_index_mask; -static uint32_t 
synapse_type_index_mask; -static uint32_t synapse_delay_index_type_bits; -static uint32_t synapse_type_mask; +//static uint32_t synapse_type_index_bits; +//static uint32_t synapse_index_bits; +//static uint32_t synapse_index_mask; +//static uint32_t synapse_type_index_mask; +//static uint32_t synapse_delay_index_type_bits; +//static uint32_t synapse_type_mask; -uint32_t num_plastic_pre_synaptic_events = 0; -uint32_t plastic_saturation_count = 0; +//uint32_t num_plastic_pre_synaptic_events = 0; +//uint32_t plastic_saturation_count = 0; //--------------------------------------- // Macros @@ -84,12 +85,20 @@ uint32_t RECURRENT_SYNAPSE_OFFSET = 100; //--------------------------------------- // Structures //--------------------------------------- -typedef struct { - pre_trace_t prev_trace; - uint32_t prev_time; -} pre_event_history_t; +//typedef struct { +// pre_trace_t prev_trace; +// uint32_t prev_time; +//} pre_event_history_t; -post_event_history_t *post_event_history; +//post_event_history_t *post_event_history; + +//! The format of the plastic data region of a synaptic row +struct synapse_row_plastic_data_t { + //! The pre-event history + pre_event_history_t history; + //! 
The per-synapse information + plastic_synapse_t synapses[]; +}; /* PRIVATE FUNCTIONS */ @@ -176,20 +185,19 @@ static inline pre_event_history_t *plastic_event_history( } void synapse_dynamics_print_plastic_synapses( - address_t plastic_region_address, address_t fixed_region_address, + synapse_row_plastic_data_t *plastic_region_data, + synapse_row_fixed_part_t *fixed_region, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - use(plastic_region_address); - use(fixed_region_address); - use(ring_buffer_to_input_buffer_left_shifts); + __use(plastic_region_data); + __use(fixed_region); + __use(ring_buffer_to_input_buffer_left_shifts); #if LOG_LEVEL >= LOG_DEBUG // Extract separate arrays of weights (from plastic region), // Control words (from fixed region) and number of plastic synapses - plastic_synapse_t *plastic_words = plastic_synapses(plastic_region_address); - const control_t *control_words = - synapse_row_plastic_controls(fixed_region_address); - size_t plastic_synapse = - synapse_row_num_plastic_controls(fixed_region_address); + const plastic_synapse_t *plastic_words = plastic_region_data->synapses; + const control_t *control_words = synapse_row_plastic_controls(fixed_region); + size_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); log_debug("Plastic region %u synapses\n", plastic_synapse); @@ -211,10 +219,10 @@ void synapse_dynamics_print_plastic_synapses( synapses_print_weight( weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", - synapse_row_sparse_delay(control_word, synapse_type_index_bits), + synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), synapse_types_get_type_char(synapse_type), synapse_row_sparse_index(control_word, synapse_index_mask), - SYNAPSE_DELAY_MASK, synapse_type_index_bits); + synapse_delay_mask, synapse_type_index_bits); } #endif // LOG_LEVEL >= LOG_DEBUG } @@ -229,57 +237,44 @@ static inline index_t 
sparse_axonal_delay(uint32_t x) { #endif } -address_t synapse_dynamics_initialise( +bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - // Load timing dependence data - address_t weight_region_address = timing_initialise(address); - if (address == NULL) { - return NULL; - } +// // Load timing dependence data +// address_t weight_region_address = timing_initialise(address); +// if (address == NULL) { +// return NULL; +// } +// +// // Load weight dependence data +// address_t weight_result = weight_initialise( +// weight_region_address, n_synapse_types, +// ring_buffer_to_input_buffer_left_shifts); +// if (weight_result == NULL) { +// return NULL; +// } +// +// post_event_history = post_events_init_buffers(n_neurons); +// if (post_event_history == NULL) { +// return NULL; +// } - // Load weight dependence data - address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts); - if (weight_result == NULL) { - return NULL; + if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, + ring_buffer_to_input_buffer_left_shifts)) { + return false; } post_event_history = post_events_init_buffers(n_neurons); if (post_event_history == NULL) { - return NULL; - } - - uint32_t n_neurons_power_2 = n_neurons; - uint32_t log_n_neurons = 1; - if (n_neurons != 1) { - if (!is_power_of_2(n_neurons)) { - n_neurons_power_2 = next_power_of_2(n_neurons); - } - log_n_neurons = ilog_2(n_neurons_power_2); + return false; } - uint32_t n_synapse_types_power_2 = n_synapse_types; - if (!is_power_of_2(n_synapse_types)) { - n_synapse_types_power_2 = next_power_of_2(n_synapse_types); - } - uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); - - synapse_type_index_bits = log_n_neurons + log_n_synapse_types; - synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; - synapse_index_bits = log_n_neurons; - 
synapse_index_mask = (1 << synapse_index_bits) - 1; - synapse_delay_index_type_bits = - SYNAPSE_DELAY_BITS + synapse_type_index_bits; - synapse_type_mask = (1 << log_n_synapse_types) - 1; - - return weight_result; + return true; // weight_result; } -static inline final_state_t eprop_plasticity_update(update_state_t current_state, - REAL delta_w){ +static inline final_state_t eprop_plasticity_update( + update_state_t current_state, REAL delta_w){ // Test weight change // delta_w = -0.1k; @@ -291,7 +286,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; - if (delta_w){ + if (delta_w){ // TODO: This should probably be delta_w_int if (PRINT_PLASTICITY){ io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" // ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" @@ -304,46 +299,43 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } if (delta_w_int < 0){ - current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); + current_state = weight_one_term_apply_depression( + current_state, delta_w_int << 3); } else { - current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + current_state = weight_one_term_apply_potentiation( + current_state, delta_w_int << 3); } } else { // if (PRINT_PLASTICITY){ // io_printf(IO_BUF, "delta_w: %k\n", delta_w); // } - current_state = current_state; +// current_state = current_state; what? 
} // Calculate regularisation error REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; - // Return final synaptic word and weight return synapse_structure_get_final_state(current_state, reg_error); } - - bool synapse_dynamics_process_plastic_synapses( - address_t plastic_region_address, address_t fixed_region_address, - weight_t *ring_buffers, uint32_t time) { + synapse_row_plastic_data_t *plastic_region_address, + synapse_row_fixed_part_t *fixed_region, + weight_t *ring_buffers, uint32_t time, uint32_t colour_delay, + bool *write_back) { // Extract separate arrays of plastic synapses (from plastic region), // Control words (from fixed region) and number of plastic synapses - plastic_synapse_t *plastic_words = - plastic_synapses(plastic_region_address); - const control_t *control_words = - synapse_row_plastic_controls(fixed_region_address); - size_t plastic_synapse = - synapse_row_num_plastic_controls(fixed_region_address); + plastic_synapse_t *plastic_words = plastic_region_address->synapses; + const control_t *control_words = synapse_row_plastic_controls(fixed_region); + size_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); num_plastic_pre_synaptic_events += plastic_synapse; - // Could maybe have a single z_bar for the entire synaptic row and update it once here for all synaptic words? - - + // Could maybe have a single z_bar for the entire synaptic row + // and update it once here for all synaptic words? // Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { @@ -355,9 +347,9 @@ bool synapse_dynamics_process_plastic_synapses( // 16-bits of 32-bit fixed synapse so same functions can be used // uint32_t delay_axonal = sparse_axonal_delay(control_word); - uint32_t delay = 1.0k; - uint32_t syn_ind_from_delay = - synapse_row_sparse_delay(control_word, synapse_type_index_bits); + uint32_t delay = 1; // 1.0k; ?? 
+ uint32_t syn_ind_from_delay = synapse_row_sparse_delay( + control_word, synapse_type_index_bits, synapse_delay_mask); // uint32_t delay_dendritic = synapse_row_sparse_delay( // control_word, synapse_type_index_bits); @@ -368,34 +360,35 @@ bool synapse_dynamics_process_plastic_synapses( uint32_t type_index = synapse_row_sparse_type_index( control_word, synapse_type_index_mask); - - int32_t neuron_ind = synapse_row_sparse_index(control_word, synapse_index_mask); + int32_t neuron_ind = synapse_row_sparse_index( + control_word, synapse_index_mask); // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, if (type==1){ - // this is a recurrent synapse: add 100 to index to correct array location + // this is a recurrent synapse: add 100 to index to + // correct array location syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } - neuron_pointer_t neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! Check what units this is in - same as weight? !!!! - + neuron_t *neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; + // !!!! Check what units this is in - same as weight? !!!! 
// Create update state from the plastic synaptic word update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); - if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", -// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", - neuron_ind, syn_ind_from_delay, type, - current_state.initial_weight, - neuron->syn_state[syn_ind_from_delay].delta_w, time); - } +// if (PRINT_PLASTICITY){ +//// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", +//// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); +// +// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", +// neuron_ind, syn_ind_from_delay, type, +// current_state.initial_weight, +// neuron->syn_state[syn_ind_from_delay].delta_w, time); +// } // Perform weight update: only if batch time has elapsed final_state_t final_state; @@ -407,6 +400,9 @@ bool synapse_dynamics_process_plastic_synapses( io_printf(IO_BUF, "update_ready=0\n"); } + log_info("Check: eprop plasticity update, delta_w %k syn_ind %u time %u", + neuron->syn_state[syn_ind_from_delay].delta_w, syn_ind_from_delay, time); + // Go through typical weight update process to clip to limits final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); @@ -431,15 +427,18 @@ bool synapse_dynamics_process_plastic_synapses( // Add contribution to synaptic input // Convert into ring buffer offset - uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index_combined( // delay_axonal + delay_dendritic + time, type_index, - synapse_type_index_bits); + synapse_type_index_bits, synapse_delay_mask); // Check for ring buffer 
saturation int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); + log_info("Check: accumulation %d ring_buffer %d time %u", + accumulation, ring_buffers[ring_buffer_index], time); + // uint32_t sat_test = accumulation & 0x10000; // if (sat_test) { // accumulation = sat_test - 1; @@ -448,10 +447,13 @@ bool synapse_dynamics_process_plastic_synapses( ring_buffers[ring_buffer_index] = accumulation; + // no overflow or underflow checking? + // Write back updated synaptic word to plastic region *plastic_words++ = synapse_structure_get_final_synaptic_word(final_state); } + *write_back = true; return true; } @@ -468,20 +470,22 @@ void synapse_dynamics_process_post_synaptic_event( timing_add_post_spike(time, last_post_time, last_post_trace)); } -input_t synapse_dynamics_get_intrinsic_bias( - uint32_t time, index_t neuron_index) { - use(time); - use(neuron_index); - return 0.0k; -} - -uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { - return num_plastic_pre_synaptic_events; -} +//input_t synapse_dynamics_get_intrinsic_bias( +// uint32_t time, index_t neuron_index) { +// use(time); +// use(neuron_index); +// return 0.0k; +//} +// +//uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { +// return num_plastic_pre_synaptic_events; +//} +// +//uint32_t synapse_dynamics_get_plastic_saturation_count(void) { +// return plastic_saturation_count; +//} -uint32_t synapse_dynamics_get_plastic_saturation_count(void) { - return plastic_saturation_count; -} +// TODO: fix below to match other dynamics impls #if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c index a74165befd5..9887bec5a64 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +++ 
b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c @@ -68,7 +68,7 @@ address_t weight_initialise( // Copy weight shift weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; - log_debug("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k reg_rate:%k", + log_info("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k reg_rate:%k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, dtcm_copy[s].reg_rate); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index e996915b589..48afd72713d 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -77,7 +77,8 @@ static inline weight_state_t weight_one_term_apply_depression( if (PRINT_PLASTICITY){ io_printf(IO_BUF, "depressing: %d\n", a2_minus); } - state.weight -= mul_accum_fixed(state.weight_region->a2_minus, a2_minus); +// state.weight -= mul_accum_fixed(state.weight_region->a2_minus, a2_minus); + state.weight -= kbits(a2_minus); state.weight = kbits(MAX(bitsk(state.weight), bitsk(state.weight_region->min_weight))); return state; // state.a2_minus += a2_minus; @@ -91,8 +92,12 @@ static inline weight_state_t weight_one_term_apply_potentiation( if (PRINT_PLASTICITY){ io_printf(IO_BUF, "potentiating: %d\n", a2_plus); } - state.weight += mul_accum_fixed(state.weight_region->a2_plus, a2_plus); + log_info("weight %k a2_plus %d converted to %k bitsk(weight) %d", + state.weight, a2_plus, kbits(a2_plus), bitsk(state.weight)); +// state.weight += mul_accum_fixed(state.weight_region->a2_plus, a2_plus); + state.weight += kbits(a2_plus); state.weight = kbits(MIN(bitsk(state.weight), bitsk(state.weight_region->max_weight))); + log_info("weight after min 
of max %k", state.weight); return state; // state.a2_plus += a2_plus; // return state; @@ -109,7 +114,8 @@ static inline weight_t weight_get_final(weight_state_t new_state, // new_state.a2_minus, new_state.weight_region->a2_minus); // Apply eprop plasticity updates to initial weight - accum new_weight = bitsk(new_state.weight) >> new_state.weight_shift; +// accum new_weight = bitsk(new_state.weight) >> new_state.weight_shift; + accum new_weight = new_state.weight; // int32_t new_weight = // new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; accum reg_weight = new_weight; @@ -144,7 +150,10 @@ static inline weight_t weight_get_final(weight_state_t new_state, new_weight, reg_weight, new_state.weight_region->reg_rate, reg_error); } - return (weight_t) reg_weight; + log_info("reg_weight %k new_weight %k reg_error %k reg_change %k reg_boundary %k", + reg_weight, new_weight, reg_error, reg_change, reg_boundary); + + return (weight_t) (bitsk(reg_weight) >> new_state.weight_shift); } #endif // _WEIGHT_EPROPREG_ONE_TERM_IMPL_H_ diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index a1c8b12c0c8..0698d6e6ccc 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -488,8 +488,8 @@ bool spike_processing_initialise( // EXPORTED // Set up the callbacks spin1_callback_on(MC_PACKET_RECEIVED, multicast_packet_received_callback, mc_packet_callback_priority); - spin1_callback_on(MCPL_PACKET_RECEIVED, - multicast_packet_pl_received_callback, mc_packet_callback_priority); +// spin1_callback_on(MCPL_PACKET_RECEIVED, +// multicast_packet_pl_received_callback, mc_packet_callback_priority); simulation_dma_transfer_done_callback_on( DMA_TAG_READ_SYNAPTIC_ROW, dma_complete_callback); spin1_callback_on(USER_EVENT, user_event_callback, user_event_priority); @@ -499,8 +499,8 @@ bool spike_processing_initialise( // EXPORTED // // // This is unlikely to work 
properly now because of the use of MCPL for multiple spikes // // Register MC_PACKET_RECEIVED_PAYLOAD -// spin1_callback_on(MCPL_PACKET_RECEIVED, -// multicast_packet_wpayload_received_callback, mc_packet_callback_priority); + spin1_callback_on(MCPL_PACKET_RECEIVED, + multicast_packet_wpayload_received_callback, mc_packet_callback_priority); // // io_printf(IO_BUF, "Registered MCPL callback successfully\n"); diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index ebf26474900..fa12994e6b6 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -239,9 +239,9 @@ static inline bool process_fixed_synapses( // Add weight to current ring buffer value int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // switch to saturated arithmetic to avoid complicated saturation check, will it check saturation at both ends? -// int32_t test = -22; -// log_info("Check weight: %d accumulation %d test %d buffer %d", weight, accumulation, test, -// ring_buffers[ring_buffer_index]); + int32_t test = -22; + log_info("Check weight: %d accumulation %d test %d buffer %d time %u", + weight, accumulation, test, ring_buffers[ring_buffer_index], time); // If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) // **NOTE** 0x10000 can be expressed as an ARM literal, diff --git a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c index 6abb768c08b..a53749f7d01 100644 --- a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c +++ b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c @@ -881,7 +881,10 @@ static void process_fast_source(index_t s_id, spike_source_t *source) { if (ssp_params.has_key) { // Send spikes const uint32_t spike_key = keys[s_id] | colour; - send_spike_mc_payload(spike_key, num_spikes); + for (uint32_t n_spike = 0; n_spike < num_spikes; n_spike++) { + 
send_spike_mc(spike_key); + } +// send_spike_mc_payload(spike_key, num_spikes); } else if (sdram_inputs->address != 0) { input_this_timestep[sdram_inputs->offset + s_id] += sdram_inputs->weights[s_id] * num_spikes; @@ -918,7 +921,10 @@ static void process_slow_source(index_t s_id, spike_source_t *source) { if (ssp_params.has_key) { // Send package const uint32_t spike_key = keys[s_id] | colour; - send_spike_mc_payload(spike_key, count); + for (uint32_t n_spike = 0; n_spike < count; n_spike++) { + send_spike_mc(spike_key); + } +// send_spike_mc_payload(spike_key, count); } else if (sdram_inputs->address != 0) { input_this_timestep[sdram_inputs->offset + s_id] += sdram_inputs->weights[s_id] * count; diff --git a/spynnaker/pyNN/extra_models/__init__.py b/spynnaker/pyNN/extra_models/__init__.py index 031a1ec34d5..3a31e445e9e 100644 --- a/spynnaker/pyNN/extra_models/__init__.py +++ b/spynnaker/pyNN/extra_models/__init__.py @@ -30,7 +30,8 @@ IFCurrDualExpBase as IF_curr_dual_exp, IzkCondExpBase as Izhikevich_cond, IFCurrExpSEMDBase as IF_curr_exp_sEMD, - EPropAdaptive as EPropAdaptive) + EPropAdaptive as EPropAdaptive, + SinusoidReadout as SinusoidReadout) # Variable rate poisson from spynnaker.pyNN.models.spike_source import SpikeSourcePoissonVariable @@ -39,7 +40,9 @@ # sPyNNaker models not currently part of full pyNN 'IFCurDelta', 'IFCurrExpCa2Adaptive', 'IFCondExpStoc', 'Izhikevich_cond', 'IF_curr_dual_exp', 'IF_curr_exp_sEMD', - 'EPropAdaptive', + + # Eprop implementation and related models (Adam Perrett/Oliver Rhodes) + 'EPropAdaptive', 'SinusoidReadout', # Neuromodulation synapse dynamics (Mantas Mikaitis) 'Neuromodulation', diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index 5eb4fb6ef3a..2ee749f0e19 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -28,7 +28,7 @@ from .if_curr_exp_semd_base import IFCurrExpSEMDBase from 
.eprop_adaptive import EPropAdaptive # from .store_recall_readout import StoreRecallReadout -# from .sinusoid_readout import SinusoidReadout +from .sinusoid_readout import SinusoidReadout # from .left_right_readout import LeftRightReadout __all__ = ["EIFConductanceAlphaPopulation", "HHCondExp", "IFCondAlpha", @@ -36,4 +36,4 @@ "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", "IzkCurrExpBase", "IFCondExpStoc", "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", - "EPropAdaptive"] # , "StoreRecallReadout", "SinusoidReadout", "LeftRightReadout"] + "EPropAdaptive", "SinusoidReadout"] # , "StoreRecallReadout", "LeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index 41850f7af74..ac720ce4d42 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -13,17 +13,17 @@ class SinusoidReadout(AbstractPyNNNeuronModelStandard): """ @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - "isyn_inh2", + "isyn_inh2", "target_data", "l", "w_fb", "eta", "update_ready"}) def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, + # tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, # mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, - target_data =[], + target_data=[], # Learning signal and weight update constants l=0, w_fb=0.5, eta=1.0, update_ready=1024): @@ -35,7 +35,7 @@ def __init__( l, w_fb, eta, update_ready) synapse_type = SynapseTypeEPropAdaptive( - tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + # tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) input_type = InputTypeCurrent() @@ -46,4 +46,4 @@ def __init__( 
model_name="sinusoid_readout", binary="sinusoid_readout.aplx", neuron_model=neuron_model, input_type=input_type, - synapse_type=synapse_type, threshold_type=threshold_type) \ No newline at end of file + synapse_type=synapse_type, threshold_type=threshold_type) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index a36ae4f1888..3a66050b8ab 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -17,12 +17,12 @@ NeuronModelLeakyIntegrateAndFire) from .neuron_model_eprop_adaptive import NeuronModelEPropAdaptive # from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout -# from .neuron_model_sinusoid_readout import ( -# NeuronModelLeakyIntegrateAndFireSinusoidReadout) +from .neuron_model_sinusoid_readout import ( + NeuronModelLeakyIntegrateAndFireSinusoidReadout) # from .neuron_model_left_right_readout import NeuronModelLeftRightReadout __all__ = ["NeuronModelIzh", "NeuronModelLeakyIntegrateAndFire", - "NeuronModelEPropAdaptive"] #, + "NeuronModelEPropAdaptive", + "NeuronModelLeakyIntegrateAndFireSinusoidReadout"] # "NeuronModelStoreRecallReadout", - # "NeuronModelLeakyIntegrateAndFireSinusoidReadout", # "NeuronModelLeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 85b5bdc0d15..dfa6f0bf986 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -54,6 +54,7 @@ NUMBER_OF_CUES = "number_of_cues" # eprop "global" +CORE_POP_RATE = "core_pop_rate" TARGET_RATE = "target_rate" TAU_ERR = "tau_err" ETA = "eta" # (global learning rate) @@ -181,6 +182,7 @@ def __init__( (DataType.S1615, W_FB), (DataType.UINT32, WINDOW_SIZE), (DataType.UINT32, NUMBER_OF_CUES), + 
(DataType.S1615, CORE_POP_RATE), # core_pop_rate (DataType.S1615, TARGET_RATE), # core_target_rate (DataType.S1615, TAU_ERR), # rate_exp_TC (DataType.S1615, ETA)] # eta (learning rate) @@ -262,7 +264,7 @@ def add_parameters(self, parameters): parameters[NUMBER_OF_CUES] = self.__number_of_cues # Are these parameters or variables? - # parameters[CORE_POP_RATE] = self.__core_pop_rate + parameters[CORE_POP_RATE] = 0.0 # initialise here, not in C # parameters[CORE_TARGET_RATE] = self.__core_target_rate # parameters[RATE_EXP_TC] = self.__rate_exp_TC parameters[TARGET_RATE] = self.__target_rate @@ -527,3 +529,5 @@ def number_of_cues(self): @window_size.setter def window_size(self, new_value): self.__number_of_cues = new_value + + # setter for "globals" like target rate etc. ? diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 3bc0d4bb14c..543c381b778 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -1,14 +1,16 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items -from .abstract_neuron_model import AbstractNeuronModel +from spynnaker.pyNN.models.neuron.implementations import ( + AbstractStandardNeuronComponent) +from spynnaker.pyNN.utilities.struct import Struct +from spynnaker.pyNN.data import SpynnakerDataView # constants SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state -MICROSECONDS_PER_SECOND = 1000000.0 -MICROSECONDS_PER_MILLISECOND = 1000.0 +# MICROSECONDS_PER_SECOND = 1000000.0 +# MICROSECONDS_PER_MILLISECOND = 1000.0 V = "v" V_REST = "v_rest" @@ -17,7 +19,9 @@ I_OFFSET = "i_offset" V_RESET = "v_reset" TAU_REFRAC = "tau_refrac" -COUNT_REFRAC = "count_refrac" +# COUNT_REFRAC = "count_refrac" 
+TIMESTEP = "timestep" +REFRACT_TIMER = "refract_timer" # MEAN_ISI_TICKS = "mean_isi_ticks" # TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" # SEED1 = "seed1" @@ -32,35 +36,42 @@ # Learning signal L = "learning_signal" W_FB = "feedback_weight" - -UNITS = { - V: 'mV', - V_REST: 'mV', - TAU_M: 'ms', - CM: 'nF', - I_OFFSET: 'nA', - V_RESET: 'mV', - TAU_REFRAC: 'ms' -} - - -class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractNeuronModel): +ETA = "eta" + +# eprop synapse +DELTA_W = "delta_w" +Z_BAR_OLD = "z_bar_old" +Z_BAR = "z_bar" +UPDATE_READY = "update_ready" + +# UNITS = { +# V: 'mV', +# V_REST: 'mV', +# TAU_M: 'ms', +# CM: 'nF', +# I_OFFSET: 'nA', +# V_RESET: 'mV', +# TAU_REFRAC: 'ms' +# } + + +class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractStandardNeuronComponent): __slots__ = [ - "_v", - "_v_init", - "_v_rest", - "_tau_m", - "_cm", - "_i_offset", - "_v_reset", - "_tau_refrac", - "_target_data", + "__v_init", + "__v_rest", + "__tau_m", + "__cm", + "__i_offset", + "__v_reset", + "__tau_refrac", + + "__target_data", # learning signal - "_l", - "_w_fb", - "_eta", - "_update_ready" + "__l", + "__w_fb", + "__eta", + "__update_ready" ] def __init__( @@ -72,227 +83,237 @@ def __init__( eta, update_ready): - data_types = [ - DataType.S1615, # v - DataType.S1615, # v_rest - DataType.S1615, # r_membrane (= tau_m / cm) - DataType.S1615, # exp_tc (= e^(-ts / tau_m)) - DataType.S1615, # i_offset - DataType.INT32, # count_refrac - DataType.S1615, # v_reset - DataType.INT32, # tau_refrac + struct_neuron_vals = [ + (DataType.S1615, V), # v + (DataType.S1615, V_REST), # v_rest + (DataType.S1615, CM), # r_membrane (= tau_m / cm) + (DataType.S1615, TAU_M), # exp_tc (= e^(-ts / tau_m)) + (DataType.S1615, I_OFFSET), # i_offset + (DataType.S1615, V_RESET), # v_reset + (DataType.S1615, TAU_REFRAC), # tau_refrac + (DataType.INT32, REFRACT_TIMER), # count_refrac + (DataType.S1615, TIMESTEP), # timestep # Learning signal - DataType.S1615, # L - DataType.S1615 # w_fb 
+ (DataType.S1615, L), # L + (DataType.S1615, W_FB), # w_fb ] - # Synapse states - always initialise to zero - eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) - DataType.S1615, # delta_w - DataType.S1615, # z_bar_old - DataType.S1615, # z_bar - # DataType.S1615, # ep_a - # DataType.S1615, # e_bar - DataType.UINT32 # update_ready - ] - # Extend to include fan-in for each neuron - data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) - - global_data_types=[] - global_data_types.extend([DataType.S1615 for i in range(1024)]) - global_data_types.extend([DataType.S1615]) # eta (learning rate) + # former global parameters + for n in range(1024): + struct_neuron_vals.extend( + [(DataType.S1615, TARGET_DATA+str(n))]) + struct_neuron_vals.extend([(DataType.S1615, ETA)]) - - super(NeuronModelLeakyIntegrateAndFireSinusoidReadout, self).__init__( - data_types=data_types, - - global_data_types=global_data_types - ) + # Synapse states - always initialise to zero + for n in range(SYNAPSES_PER_NEURON): + struct_neuron_vals.extend( + # eprop_syn_state + [(DataType.S1615, DELTA_W+str(n)), # delta_w + (DataType.S1615, Z_BAR_OLD+str(n)), # z_bar_old + (DataType.S1615, Z_BAR+str(n)), # z_bar + (DataType.UINT32, UPDATE_READY+str(n))]) # update_ready + + super().__init__( + [Struct(struct_neuron_vals)], + {V: 'mV', V_REST: 'mV', TAU_M: 'ms', CM: 'nF', I_OFFSET: 'nA', + V_RESET: 'mV', TAU_REFRAC: 'ms'}) if v_init is None: v_init = v_rest - self._v_init = v_init - self._v_rest = v_rest - self._tau_m = tau_m - self._cm = cm - self._i_offset = i_offset - self._v_reset = v_reset - self._tau_refrac = tau_refrac - self._target_data = target_data + self.__v_init = v_init + self.__v_rest = v_rest + self.__tau_m = tau_m + self.__cm = cm + self.__i_offset = i_offset + self.__v_reset = v_reset + self.__tau_refrac = tau_refrac + + self.__target_data = target_data # learning signal - self._l = l - self._w_fb = w_fb + self.__l = l + self.__w_fb = w_fb - self._eta = eta + 
self.__eta = eta - self._update_ready = update_ready + self.__update_ready = update_ready - @overrides(AbstractNeuronModel.get_n_cpu_cycles) - def get_n_cpu_cycles(self, n_neurons): - # A bit of a guess - return 100 * n_neurons + # @overrides(AbstractNeuronModel.get_n_cpu_cycles) + # def get_n_cpu_cycles(self, n_neurons): + # # A bit of a guess + # return 100 * n_neurons - @overrides(AbstractNeuronModel.add_parameters) + @overrides(AbstractStandardNeuronComponent.add_parameters) def add_parameters(self, parameters): - parameters[V_REST] = self._v_rest - parameters[TAU_M] = self._tau_m - parameters[CM] = self._cm - parameters[I_OFFSET] = self._i_offset - parameters[V_RESET] = self._v_reset - parameters[TAU_REFRAC] = self._tau_refrac - parameters[TARGET_DATA] = 0.0 - - #learning params - parameters[W_FB] = self._w_fb - - - @overrides(AbstractNeuronModel.add_state_variables) - def add_state_variables(self, state_variables): - state_variables[V] = self._v_init - state_variables[COUNT_REFRAC] = 0 + parameters[V_REST] = self.__v_rest + parameters[TAU_M] = self.__tau_m + parameters[CM] = self.__cm + parameters[I_OFFSET] = self.__i_offset + parameters[V_RESET] = self.__v_reset + parameters[TAU_REFRAC] = self.__tau_refrac + parameters[TIMESTEP] = SpynnakerDataView.get_simulation_time_step_ms() - #learning params - state_variables[L] = self._l + # learning params + parameters[W_FB] = self.__w_fb + # Target data (formerly global data) + for n in range(1024): + parameters[TARGET_DATA+str(n)] = self.__target_data[n] - @overrides(AbstractNeuronModel.get_units) - def get_units(self, variable): - return UNITS[variable] + parameters[ETA] = self.__eta - @overrides(AbstractNeuronModel.has_variable) - def has_variable(self, variable): - return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) - def get_values(self, parameters, state_variables, vertex_slice, ts): - - # Add the rest of the data - values 
= [state_variables[V], - parameters[V_REST], - parameters[TAU_M] / parameters[CM], - parameters[TAU_M].apply_operation( - operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - parameters[I_OFFSET], state_variables[COUNT_REFRAC], - parameters[V_RESET], - parameters[TAU_REFRAC].apply_operation( - operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + @overrides(AbstractStandardNeuronComponent.add_state_variables) + def add_state_variables(self, state_variables): + state_variables[V] = self.__v_init + state_variables[REFRACT_TIMER] = 0 - state_variables[L], - parameters[W_FB] - ] + # learning params + state_variables[L] = self.__l - # create synaptic state - init all state to zero for n in range(SYNAPSES_PER_NEURON): - eprop_syn_init = [0, # delta w - 0, # z_bar_inp - 0,#, # z_bar - # 0, # el_a - # 0] # e_bar - self._update_ready, #int(numpy.random.rand()*1024) # update_ready - ] - # extend to appropriate fan-in - values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) - - return values - - @overrides(AbstractNeuronModel.update_values) - def update_values(self, values, parameters, state_variables): - - # Read the data - (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, - _v_reset, _tau_refrac, - _l, _w_fb) = values # Not sure this will work with the new array of synapse!!! 
- # todo check alignment on this - - # Copy the changed data only - state_variables[V] = _v - - state_variables[L] = _l - - - # Global params - @inject_items({"machine_time_step": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_global_values, - additional_arguments={'machine_time_step'}) - def get_global_values(self, machine_time_step): - vals = [] - - vals.extend(self._target_data) - vals.extend([self._eta]) - return vals + state_variables[DELTA_W+str(n)] = 0 + state_variables[Z_BAR_OLD+str(n)] = 0 + state_variables[Z_BAR+str(n)] = 0 + state_variables[UPDATE_READY+str(n)] = self.__update_ready + + + # @overrides(AbstractNeuronModel.get_units) + # def get_units(self, variable): + # return UNITS[variable] + # + # @overrides(AbstractNeuronModel.has_variable) + # def has_variable(self, variable): + # return variable in UNITS + # + # @inject_items({"ts": "MachineTimeStep"}) + # @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + # def get_values(self, parameters, state_variables, vertex_slice, ts): + # + # # Add the rest of the data + # values = [state_variables[V], + # parameters[V_REST], + # parameters[TAU_M] / parameters[CM], + # parameters[TAU_M].apply_operation( + # operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), + # parameters[I_OFFSET], state_variables[COUNT_REFRAC], + # parameters[V_RESET], + # parameters[TAU_REFRAC].apply_operation( + # operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + # + # state_variables[L], + # parameters[W_FB] + # ] + # + # # create synaptic state - init all state to zero + # for n in range(SYNAPSES_PER_NEURON): + # eprop_syn_init = [0, # delta w + # 0, # z_bar_inp + # 0,#, # z_bar + # # 0, # el_a + # # 0] # e_bar + # self._update_ready, #int(numpy.random.rand()*1024) # update_ready + # ] + # # extend to appropriate fan-in + # values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) + # + # return values + # + # @overrides(AbstractNeuronModel.update_values) + # def update_values(self, 
values, parameters, state_variables): + # + # # Read the data + # (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, + # _v_reset, _tau_refrac, + # _l, _w_fb) = values # Not sure this will work with the new array of synapse!!! + # # todo check alignment on this + # + # # Copy the changed data only + # state_variables[V] = _v + # + # state_variables[L] = _l + # + # + # # Global params + # @inject_items({"machine_time_step": "MachineTimeStep"}) + # @overrides(AbstractNeuronModel.get_global_values, + # additional_arguments={'machine_time_step'}) + # def get_global_values(self, machine_time_step): + # vals = [] + # + # vals.extend(self._target_data) + # vals.extend([self._eta]) + # return vals @property def target_data(self): - return self._target_data + return self.__target_data @target_data.setter def target_data(self, target_data): - self._target_data = target_data + self.__target_data = target_data @property def v_init(self): - return self._v + return self.__v_init @v_init.setter def v_init(self, v_init): - self._v = v_init + self.__v_init = v_init @property def v_rest(self): - return self._v_rest + return self.__v_rest @v_rest.setter def v_rest(self, v_rest): - self._v_rest = v_rest + self.__v_rest = v_rest @property def tau_m(self): - return self._tau_m + return self.__tau_m @tau_m.setter def tau_m(self, tau_m): - self._tau_m = tau_m + self.__tau_m = tau_m @property def cm(self): - return self._cm + return self.__cm @cm.setter def cm(self, cm): - self._cm = cm + self.__cm = cm @property def i_offset(self): - return self._i_offset + return self.__i_offset @i_offset.setter def i_offset(self, i_offset): - self._i_offset = i_offset + self.__i_offset = i_offset @property def v_reset(self): - return self._v_reset + return self.__v_reset @v_reset.setter def v_reset(self, v_reset): - self._v_reset = v_reset + self.__v_reset = v_reset @property def tau_refrac(self): - return self._tau_refrac + return self.__tau_refrac @tau_refrac.setter def tau_refrac(self, 
tau_refrac): - self._tau_refrac = tau_refrac + self.__tau_refrac = tau_refrac @property def w_fb(self): - return self._w_fb + return self.__w_fb @w_fb.setter - def w_fb(self, new_value): - self._w_fb = new_value - + def w_fb(self, w_fb): + self.__w_fb = w_fb From b82a0cd0ff562502c0edc4db4e46ec3d9c014855 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 14 Apr 2023 16:47:40 +0100 Subject: [PATCH 089/123] Get rid of debug prints for now --- .../implementations/neuron_impl_sinusoid_readout.h | 6 +++--- .../stdp/synapse_dynamics_eprop_adaptive_impl.c | 6 +++--- .../stdp/synapse_dynamics_sinusoid_readout_impl.c | 8 ++++---- .../stdp/weight_dependence/weight_eprop_reg_impl.h | 10 +++++----- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 440d5585a9f..79738beeab0 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -344,9 +344,9 @@ static void neuron_impl_do_timestep_update( // neuron->syn_state[3].delta_w; // neuron->syn_state[0].z_bar; - log_info("neuron_index %u time %u voltage %k result %k exc input %k targetV %k", - neuron_index, time, voltage, result, exc_input_values[0], - neuron->target_V[target_ind]); +// log_info("neuron_index %u time %u voltage %k result %k exc input %k targetV %k", +// neuron_index, time, voltage, result, exc_input_values[0], +// neuron->target_V[target_ind]); // Record readout neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, result); diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index a6a354e1240..a2ac5f47295 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c 
+++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -295,7 +295,7 @@ static inline final_state_t eprop_plasticity_update( int32_t delta_w_int = (int32_t) roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - log_info("delta_w_int %d", delta_w_int); +// log_info("delta_w_int %d", delta_w_int); if (delta_w){ // TODO: This should probably be delta_w_int // if (PRINT_PLASTICITY){ @@ -466,8 +466,8 @@ bool synapse_dynamics_process_plastic_synapses( int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); - log_info("Check: accumulation %d ring_buffer %d time %u", - accumulation, ring_buffers[ring_buffer_index], time); +// log_info("Check: accumulation %d ring_buffer %d time %u", +// accumulation, ring_buffers[ring_buffer_index], time); // uint32_t sat_test = accumulation & 0x10000; // if (sat_test) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 83891837f2f..3c0d060d792 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -400,8 +400,8 @@ bool synapse_dynamics_process_plastic_synapses( io_printf(IO_BUF, "update_ready=0\n"); } - log_info("Check: eprop plasticity update, delta_w %k syn_ind %u time %u", - neuron->syn_state[syn_ind_from_delay].delta_w, syn_ind_from_delay, time); +// log_info("Check: eprop plasticity update, delta_w %k syn_ind %u time %u", +// neuron->syn_state[syn_ind_from_delay].delta_w, syn_ind_from_delay, time); // Go through typical weight update process to clip to limits final_state = eprop_plasticity_update(current_state, @@ -436,8 +436,8 @@ bool synapse_dynamics_process_plastic_synapses( int16_t accumulation = ring_buffers[ring_buffer_index] + 
synapse_structure_get_final_weight(final_state); - log_info("Check: accumulation %d ring_buffer %d time %u", - accumulation, ring_buffers[ring_buffer_index], time); +// log_info("Check: accumulation %d ring_buffer %d time %u", +// accumulation, ring_buffers[ring_buffer_index], time); // uint32_t sat_test = accumulation & 0x10000; // if (sat_test) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 48afd72713d..d715f531f38 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -92,12 +92,12 @@ static inline weight_state_t weight_one_term_apply_potentiation( if (PRINT_PLASTICITY){ io_printf(IO_BUF, "potentiating: %d\n", a2_plus); } - log_info("weight %k a2_plus %d converted to %k bitsk(weight) %d", - state.weight, a2_plus, kbits(a2_plus), bitsk(state.weight)); +// log_info("weight %k a2_plus %d converted to %k bitsk(weight) %d", +// state.weight, a2_plus, kbits(a2_plus), bitsk(state.weight)); // state.weight += mul_accum_fixed(state.weight_region->a2_plus, a2_plus); state.weight += kbits(a2_plus); state.weight = kbits(MIN(bitsk(state.weight), bitsk(state.weight_region->max_weight))); - log_info("weight after min of max %k", state.weight); +// log_info("weight after min of max %k", state.weight); return state; // state.a2_plus += a2_plus; // return state; @@ -150,8 +150,8 @@ static inline weight_t weight_get_final(weight_state_t new_state, new_weight, reg_weight, new_state.weight_region->reg_rate, reg_error); } - log_info("reg_weight %k new_weight %k reg_error %k reg_change %k reg_boundary %k", - reg_weight, new_weight, reg_error, reg_change, reg_boundary); +// log_info("reg_weight %k new_weight %k reg_error %k reg_change %k reg_boundary %k", +// reg_weight, new_weight, reg_error, 
reg_change, reg_boundary); return (weight_t) (bitsk(reg_weight) >> new_state.weight_shift); } From f4618abaa00afa8abc8426832714033a38966571 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 17 Apr 2023 13:18:49 +0100 Subject: [PATCH 090/123] Do STDP initialise correctly here as well --- .../synapse_dynamics_eprop_adaptive_impl.c | 38 ++++++++++++------- .../neuron_model_eprop_adaptive.py | 2 +- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index a2ac5f47295..3a9565ccf8f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -262,25 +262,35 @@ static inline index_t sparse_axonal_delay(uint32_t x) { bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - // Load timing dependence data - address_t weight_region_address = timing_initialise(address); - if (address == NULL) { - return NULL; - } - - syn_dynamics_neurons_in_partition = n_neurons; +// // Load timing dependence data +// address_t weight_region_address = timing_initialise(address); +// if (address == NULL) { +// return NULL; +// } +// +// syn_dynamics_neurons_in_partition = n_neurons; +// +// // Load weight dependence data +// address_t weight_result = weight_initialise( +// weight_region_address, n_synapse_types, +// ring_buffer_to_input_buffer_left_shifts); +// if (weight_result == NULL) { +// return NULL; +// } +// +// post_event_history = post_events_init_buffers(n_neurons); +// if (post_event_history == NULL) { +// return NULL; +// } - // Load weight dependence data - address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, - 
ring_buffer_to_input_buffer_left_shifts); - if (weight_result == NULL) { - return NULL; + if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, + ring_buffer_to_input_buffer_left_shifts)) { + return false; } post_event_history = post_events_init_buffers(n_neurons); if (post_event_history == NULL) { - return NULL; + return false; } return true; // weight_result; diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index dfa6f0bf986..209fc9785b8 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -276,7 +276,7 @@ def add_state_variables(self, state_variables): state_variables[V] = self.__v_init state_variables[REFRACT_TIMER] = 0 state_variables[PSI] = self.__psi - state_variables[Z] = 0 # initalise to zero + state_variables[Z] = 0 # initialise to zero state_variables[A] = 0 # initialise to zero state_variables[BIG_B] = self.__B From cee70f16a3613427598d83edd31866374b148732 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 24 Apr 2023 16:55:14 +0100 Subject: [PATCH 091/123] Make work for delays and add use_key to sinusoid readout --- .../neuron_impl_eprop_adaptive.h | 6 ++- .../neuron_impl_sinusoid_readout.h | 16 +++++-- .../models/neuron_model_eprop_adaptive_impl.h | 45 +++++++++++++++---- neural_modelling/src/neuron/neuron.c | 2 +- .../synapse_dynamics_eprop_adaptive_impl.c | 1 + .../src/neuron/spike_processing.c | 26 +++++------ neural_modelling/src/neuron/synapse_row.h | 12 ++--- neural_modelling/src/neuron/synapses.c | 2 +- .../abstract_spynnaker_splitter_delay.py | 2 +- .../neuron/abstract_population_vertex.py | 4 +- spynnaker/pyNN/utilities/constants.py | 4 +- 11 files changed, 80 insertions(+), 40 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 4dfb609bce9..f7d526d5ff4 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -334,7 +334,7 @@ static void neuron_impl_do_timestep_update( for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { -// log_info("neuron_index %u time %u ", neuron_index, time); +// log_info("timestep_update neuron_index %u time %u ", neuron_index, time); // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; @@ -440,11 +440,15 @@ static void neuron_impl_do_timestep_update( // Simplified what was below this to choose which delta_w to record for different indices if ((neuron_index == 0) || (neuron_index == 1) || (neuron_index == 2)) { +// log_info("z_bar_inp %k (10 + neuron_index) %u L %k time %u", +// neuron->syn_state[10+neuron_index].z_bar_inp, 10+neuron_index, neuron->L, time); neuron_recording_record_accum( GSYN_INH_RECORDING_INDEX, neuron_index, neuron->syn_state[10+neuron_index].delta_w); } else { +// log_info("z_bar_inp %k neuron_index %u L %k time %u", +// neuron->syn_state[0+neuron_index].z_bar_inp, neuron_index, neuron->L, time); neuron_recording_record_accum( GSYN_INH_RECORDING_INDEX, neuron_index, neuron->syn_state[0+neuron_index].delta_w); diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 79738beeab0..b42af2db917 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -74,10 +74,13 @@ static synapse_types_t *synapse_types_array; //! The number of steps to run per timestep static uint n_steps_per_timestep; +//! 
Whether key is set, from neuron.c +extern bool use_key; + // TODO: check if these other parameters are needed static REAL next_spike_time = 0; extern uint32_t time; -extern key_t key; +extern uint32_t *neuron_keys; extern REAL learning_signal; static uint32_t target_ind = 0; @@ -358,10 +361,15 @@ static void neuron_impl_do_timestep_update( // Send error (learning signal) as packet with payload // ToDo can't I just alter the global variable here? // Another option is just to use "send_spike" instead... ? - while (!spin1_send_mc_packet( - key | neuron_index, bitsk(error), 1 )) { - spin1_delay_us(1); +// send_spike_mc_payload(key, bitsk(error)); + if (use_key) { + send_spike_mc_payload(neuron_keys[neuron_index], bitsk(error)); } +// log_info("send learning signal key %u neuron_index %u", neuron_keys[neuron_index], neuron_index); +// while (!spin1_send_mc_packet( +// neuron_keys[neuron_index], bitsk(error), 1 )) { +// spin1_delay_us(1); +// } } else{ // Record 'Error' diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index bc59f7bf58a..44f9668ad7d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -243,9 +243,9 @@ static inline void neuron_model_initialise( state->V_reset = params->V_reset; state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); -// log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract %k", -// state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, -// state->refract_timer, state->V_reset, state->T_refract); + log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract %k", + state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, + state->refract_timer, state->V_reset, 
state->T_refract); // for everything else just copy across for now state->z = params->z; @@ -263,16 +263,16 @@ static inline void neuron_model_initialise( state->window_size = params->window_size; state->number_of_cues = params->number_of_cues; -// log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", -// state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); + log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", + state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); state->core_pop_rate = params->pop_rate; state->core_target_rate = params->target_rate; state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); state->eta = params->eta; -// log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", -// state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); + log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", + state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; @@ -344,6 +344,9 @@ state_t neuron_model_state_update( log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); +// log_info("z_bar_inp (0) %k z_bar_inp (1) %k", +// neuron->syn_state[0].z_bar_inp, neuron->syn_state[1].z_bar_inp); + // REAL total_exc = 0; // REAL total_inh = 0; // @@ -387,6 +390,9 @@ state_t neuron_model_state_update( // } neuron->psi *= neuron->A; +// log_info("check psi %k and A %k psi_temp1 %k psi_temp2 %k", +// neuron->psi, neuron->A, psi_temp1, psi_temp2); + // This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. // This should make it possible to do a pause and resume cycle and have reliable unloading of data. 
uint32_t total_input_synapses_per_neuron = 40; //todo should this be fixed? @@ -485,6 +491,9 @@ state_t neuron_model_state_update( neuron->L = new_learning_signal; // Copy eta here instead? REAL local_eta = neuron->eta; + +// log_info("neuron L %k local_eta %k learning_signal %k w_fb %k v_mem_error %k", +// neuron->L, local_eta, learning_signal, neuron->w_fb, v_mem_error); // if (time % 99 == 0){ // io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // } @@ -497,7 +506,11 @@ state_t neuron_model_state_update( neuron->z = 0.k; // io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); } -// io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); + +// log_info("Before eprop synapse update z_bar_inp (0) %k z_bar_inp (1) %k time %u", +// neuron->syn_state[0].z_bar_inp, neuron->syn_state[1].z_bar_inp, time); + + // io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); // All operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ @@ -515,6 +528,10 @@ state_t neuron_model_state_update( (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update +// if (syn_ind < 13) { +// log_info("z_bar %k syn_ind %u", neuron->syn_state[syn_ind].z_bar, syn_ind); +// } + // ****************************************************************** // Update eligibility vector @@ -525,6 +542,10 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].el_a; // (rho) * neuron->syn_state[syn_ind].el_a; +// if (syn_ind < 13) { +// log_info("el_a %k syn_ind %u", neuron->syn_state[syn_ind].el_a, syn_ind); +// } + // ****************************************************************** // Update eligibility trace @@ -537,6 +558,10 @@ state_t 
neuron_model_state_update( neuron->exp_TC * neuron->syn_state[syn_ind].e_bar + (1 - neuron->exp_TC) * temp_elig_trace; +// if (syn_ind < 13) { +// log_info("e_bar %k syn_ind %u", neuron->syn_state[syn_ind].e_bar, syn_ind); +// } + // ****************************************************************** // Update cached total weight change // ****************************************************************** @@ -544,6 +569,10 @@ state_t neuron_model_state_update( local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) +// if (syn_ind < 13) { +// log_info("delta_w %k syn_ind %u", neuron->syn_state[syn_ind].delta_w, syn_ind); +// } + // if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ // io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " // "z_bar_inp = %k \t z_bar = %k \t time:%u\n" diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index 91b51823092..502f17fa373 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -34,7 +34,7 @@ //#define SPIKE_RECORDING_CHANNEL 0 //! The key to be used for this core (will be ORed with neuron ID) -key_t key; //MADE NON STATIC!!! +//key_t key; //MADE NON STATIC!!! //! 
The keys to be used by the neurons (one per neuron) uint32_t *neuron_keys; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 3a9565ccf8f..140a693ffa0 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -408,6 +408,7 @@ bool synapse_dynamics_process_plastic_synapses( neuron_t *neuron = &neuron_array[neuron_ind]; neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; // !!!! Check what units this is in - same as weight? !!!! +// log_info("plastic update of z_bar_inp for neuron_ind %u syn_ind %u", neuron_ind, syn_ind_from_delay); // io_printf(IO_BUF, "initial_weight: d%d, k%k, u%u - ", current_state.initial_weight, current_state.initial_weight, current_state.initial_weight); // if (current_state.initial_weight > 0){ diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 0698d6e6ccc..4ff756cd64c 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -310,18 +310,18 @@ static void multicast_packet_received_callback(uint key, UNUSED uint unused) { //! \brief Called when a multicast packet is received //! \param[in] key: The key of the packet. The spike. //! \param[in] payload: the payload of the packet. The count. 
-static void multicast_packet_pl_received_callback(uint key, uint payload) { - p_per_ts_struct.packets_this_time_step += 1; - - // cycle through the packet insertion - bool added = false; - for (uint count = payload; count > 0; count--) { - added = in_spikes_add_spike(key); - } - if (added) { - start_dma_loop(); - } -} +//static void multicast_packet_pl_received_callback(uint key, uint payload) { +// p_per_ts_struct.packets_this_time_step += 1; +// +// // cycle through the packet insertion +// bool added = false; +// for (uint count = payload; count > 0; count--) { +// added = in_spikes_add_spike(key); +// } +// if (added) { +// start_dma_loop(); +// } +//} //! \brief Called when a DMA completes //! \param unused: unused @@ -419,7 +419,7 @@ static void multicast_packet_wpayload_received_callback(uint key, uint payload){ learning_signal = kbits(payload); // Print payload to test transmission of error -// io_printf(IO_BUF, "payload: %k\n", learning_signal); +// io_printf(IO_BUF, " payload (learning signal): %k\n", learning_signal); // Assign learning signal to global memory diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index ec9d989a74d..35364ed862b 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -96,13 +96,13 @@ #endif //! how many bits the synapse delay will take -#ifndef SYNAPSE_DELAY_BITS -#define SYNAPSE_DELAY_BITS 8 -#endif +//#ifndef SYNAPSE_DELAY_BITS +//#define SYNAPSE_DELAY_BITS 8 +//#endif -// Create some masks based on the number of bits -//! the mask for the synapse delay in the row -#define SYNAPSE_DELAY_MASK ((1 << SYNAPSE_DELAY_BITS) - 1) +//// Create some masks based on the number of bits +////! the mask for the synapse delay in the row +//#define SYNAPSE_DELAY_MASK ((1 << SYNAPSE_DELAY_BITS) - 1) //! should this go somewhere else, perhaps in the models that use it? 
#define SYNAPSE_WEIGHTS_SIGNED true diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index fa12994e6b6..34c54acf185 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -312,7 +312,7 @@ bool synapses_initialise( synapse_index_mask = (1 << synapse_index_bits) - 1; synapse_type_bits = log_n_synapse_types; synapse_type_mask = (1 << log_n_synapse_types) - 1; - synapse_delay_bits = 1; // log_max_delay; + synapse_delay_bits = log_max_delay; // 8; 3; synapse_delay_mask = (1 << synapse_delay_bits) - 1; synapse_delay_mask_shifted = synapse_delay_mask << synapse_type_index_bits; diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py index 8299a06e045..f68d7ae4cc6 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py @@ -27,7 +27,7 @@ class AbstractSpynnakerSplitterDelay(object, metaclass=AbstractBase): __slots__ = [] # max delays supported by a slice split machine vertex - MAX_SUPPORTED_DELAY_TICS = 255 # 16 ?? at least, I think so... + MAX_SUPPORTED_DELAY_TICS = 64 # 16 ?? at least, I think so... def max_support_delay(self): """ diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 0ec4870e9bb..282aacf17cf 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -682,8 +682,8 @@ def set_current_state_values(self, name, value, selector=None): selector, value) - # This was added to the eprop_adaptive branch; does it need adding to generate_data_spec - # (which has now been moved to the machine vertex...) 
+ # This was added to the eprop_adaptive branch; does it need adding to + # generate_data_spec (which has now been moved to the machine vertex...) # if isinstance(self.__pynn_model._model.neuron_model, NeuronModelLeftRightReadout): # poisson_key = routing_info.get_first_key_from_pre_vertex(placement.vertex, "CONTROL") diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 292952a930e..b9c0dda2ad7 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -54,9 +54,7 @@ SCALE = WEIGHT_FLOAT_TO_FIXED_SCALE * NA_TO_PA_SCALE # natively supported delays for all abstract_models -MAX_SUPPORTED_DELAY_TICS = 256 -MAX_DELAY_BLOCKS = 0 -MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 256 +MAX_SUPPORTED_DELAY_TICS = 64 #: the minimum supported delay slot between two neurons MIN_SUPPORTED_DELAY = 1 From aa937db679210c49788099d76709289f2308df70 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 4 May 2023 13:59:36 +0100 Subject: [PATCH 092/123] Update left-right task to where it now seems to work --- .../Makefile | 2 +- .../neuron_impl_eprop_adaptive.h | 3 + .../neuron_impl_left_right_readout.h | 963 ++++++++++-------- .../models/neuron_model_eprop_adaptive_impl.h | 4 +- .../neuron_model_left_right_readout_impl.c | 424 ++++---- .../neuron_model_left_right_readout_impl.h | 414 +++++++- .../neuron_model_sinusoid_readout_impl.h | 2 +- ...synapse_dynamics_left_right_readout_impl.c | 244 +++-- .../synapse_dynamics_sinusoid_readout_impl.c | 3 +- .../poisson/spike_source_poisson.c | 6 + spynnaker/pyNN/extra_models/__init__.py | 5 +- .../neuron/abstract_population_vertex.py | 28 +- .../pyNN/models/neuron/builds/__init__.py | 4 +- .../neuron/builds/left_right_readout.py | 9 +- .../implementations/neuron_impl_standard.py | 8 +- .../models/neuron/neuron_models/__init__.py | 6 +- .../neuron_model_left_right_readout.py | 528 +++++----- .../neuron_model_sinusoid_readout.py | 2 +- .../neuron/population_machine_vertex.py | 
28 +- 19 files changed, 1633 insertions(+), 1050 deletions(-) diff --git a/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile index 6417929a30a..5d69049c0aa 100644 --- a/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile +++ b/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile @@ -1,6 +1,6 @@ APP = $(notdir $(CURDIR)) -OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_left_right_readout_impl.c +#OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_left_right_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_left_right_readout.h #SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index f7d526d5ff4..cd3b9ab800e 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -521,8 +521,11 @@ static void neuron_impl_do_timestep_update( neuron->z = 1.0k * neuron->A; // implements refractory period } + bool spike = z_t; +// log_info("time %u neuron_index %u z_t %u spike %u z %k nu %k", +// time, neuron_index, z_t, spike, neuron->z, nu); // ********************************************************* diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index af90d31274a..40229872db3 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -10,17 +10,34 @@ #include #include +#include + // Further includes -#include #include #include #include 
#include #include -#define V_RECORDING_INDEX 0 -#define GSYN_EXCITATORY_RECORDING_INDEX 1 -#define GSYN_INHIBITORY_RECORDING_INDEX 2 +//! Indices for recording of words +enum word_recording_indices { + //! V (somatic potential) recording index + V_RECORDING_INDEX = 0, + //! Gsyn_exc (excitatory synaptic conductance/current) recording index + GSYN_EXC_RECORDING_INDEX = 1, + //! Gsyn_inh (excitatory synaptic conductance/current) recording index + GSYN_INH_RECORDING_INDEX = 2, + //! Number of recorded word-sized state variables + N_RECORDED_VARS = 3 +}; + +//! Indices for recording of bitfields +enum bitfield_recording_indices { + //! Spike event recording index + SPIKE_RECORDING_BITFIELD = 0, + //! Number of recorded bitfields + N_BITFIELD_VARS = 1 +}; #ifndef NUM_EXCITATORY_RECEPTORS #define NUM_EXCITATORY_RECEPTORS 1 @@ -34,31 +51,40 @@ shaping include #endif +#include + //! Array of neuron states -neuron_pointer_t neuron_array; +neuron_t *neuron_array; //! Input states array -static input_type_pointer_t input_type_array; +static input_type_t *input_type_array; //! Additional input array -static additional_input_pointer_t additional_input_array; +static additional_input_t *additional_input_array; //! Threshold states array -static threshold_type_pointer_t threshold_type_array; +static threshold_type_t *threshold_type_array; //! Global parameters for the neurons -static global_neuron_params_pointer_t global_parameters; +//static global_neuron_params_pointer_t global_parameters; // The synapse shaping parameters -static synapse_param_t *neuron_synapse_shaping_params; +static synapse_types_t *synapse_types_array; + +//! The number of steps to run per timestep +static uint n_steps_per_timestep; +//! Whether key is set, from neuron.c +extern bool use_key; + +// TODO: are these parameters needed? 
static REAL next_spike_time = 0; -extern uint32_t time; -extern key_t key; +//extern uint32_t time; +extern uint32_t *neuron_keys; extern REAL learning_signal; static uint32_t target_ind = 0; -// recording prams +// recording params (?) uint32_t is_it_right = 0; //uint32_t choice = 0; @@ -68,9 +94,9 @@ typedef enum STATE_CUE, STATE_WAITING, STATE_PROMPT, -} current_state_t; +} left_right_state_t; -current_state_t current_state = 0; +left_right_state_t current_state = 0; uint32_t current_time = 0; uint32_t cue_number = 0; uint32_t current_cue_direction = 2; // 0 = left, 1 = right @@ -90,19 +116,19 @@ bool completed_broadcast = true; static bool neuron_impl_initialise(uint32_t n_neurons) { // allocate DTCM for the global parameter details - if (sizeof(global_neuron_params_t) > 0) { - global_parameters = (global_neuron_params_t *) spin1_malloc( - sizeof(global_neuron_params_t)); - if (global_parameters == NULL) { - log_error("Unable to allocate global neuron parameters" - "- Out of DTCM"); - return false; - } - } +// if (sizeof(global_neuron_params_t) > 0) { +// global_parameters = (global_neuron_params_t *) spin1_malloc( +// sizeof(global_neuron_params_t)); +// if (global_parameters == NULL) { +// log_error("Unable to allocate global neuron parameters" +// "- Out of DTCM"); +// return false; +// } +// } // Allocate DTCM for neuron array if (sizeof(neuron_t) != 0) { - neuron_array = (neuron_t *) spin1_malloc(n_neurons * sizeof(neuron_t)); + neuron_array = spin1_malloc(n_neurons * sizeof(neuron_t)); if (neuron_array == NULL) { log_error("Unable to allocate neuron array - Out of DTCM"); return false; @@ -111,8 +137,7 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { // Allocate DTCM for input type array and copy block of data if (sizeof(input_type_t) != 0) { - input_type_array = (input_type_t *) spin1_malloc( - n_neurons * sizeof(input_type_t)); + input_type_array = spin1_malloc(n_neurons * sizeof(input_type_t)); if (input_type_array == NULL) { 
log_error("Unable to allocate input type array - Out of DTCM"); return false; @@ -121,8 +146,8 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { // Allocate DTCM for additional input array and copy block of data if (sizeof(additional_input_t) != 0) { - additional_input_array = (additional_input_pointer_t) spin1_malloc( - n_neurons * sizeof(additional_input_t)); + additional_input_array = spin1_malloc( + n_neurons * sizeof(additional_input_t)); if (additional_input_array == NULL) { log_error("Unable to allocate additional input array" " - Out of DTCM"); @@ -132,7 +157,7 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { // Allocate DTCM for threshold type array and copy block of data if (sizeof(threshold_type_t) != 0) { - threshold_type_array = (threshold_type_t *) spin1_malloc( + threshold_type_array = spin1_malloc( n_neurons * sizeof(threshold_type_t)); if (threshold_type_array == NULL) { log_error("Unable to allocate threshold type array - Out of DTCM"); @@ -141,10 +166,10 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Allocate DTCM for synapse shaping parameters - if (sizeof(synapse_param_t) != 0) { - neuron_synapse_shaping_params = (synapse_param_t *) spin1_malloc( - n_neurons * sizeof(synapse_param_t)); - if (neuron_synapse_shaping_params == NULL) { + if (sizeof(synapse_types_t) != 0) { + synapse_types_array = spin1_malloc( + n_neurons * sizeof(synapse_types_t)); + if (synapse_types_array == NULL) { log_error("Unable to allocate synapse parameters array" " - Out of DTCM"); return false; @@ -152,11 +177,11 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } // Seed the random input - validate_mars_kiss64_seed(global_parameters->kiss_seed); + validate_mars_kiss64_seed(neuron_array->kiss_seed); // Initialise pointers to Neuron parameters in STDP code // synapse_dynamics_set_neuron_array(neuron_array); - log_info("set pointer to neuron array in stdp code"); +// log_info("set pointer to neuron array in stdp code"); 
return true; } @@ -165,48 +190,79 @@ static void neuron_impl_add_inputs( index_t synapse_type_index, index_t neuron_index, input_t weights_this_timestep) { // simple wrapper to synapse type input function - synapse_param_pointer_t parameters = - &(neuron_synapse_shaping_params[neuron_index]); + synapse_types_t *parameters = + &(synapse_types_array[neuron_index]); synapse_types_add_neuron_input(synapse_type_index, parameters, weights_this_timestep); } +static uint32_t n_words_needed(uint32_t size) { + return (size + (sizeof(uint32_t) - 1)) / sizeof(uint32_t); +} + static void neuron_impl_load_neuron_parameters( - address_t address, uint32_t next, uint32_t n_neurons) { + address_t address, uint32_t next, uint32_t n_neurons, + address_t save_initial_state) { log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); - //log_debug("writing neuron global parameters"); - spin1_memcpy(global_parameters, &address[next], - sizeof(global_neuron_params_t)); - next += (sizeof(global_neuron_params_t) + 3) / 4; - - log_debug("reading neuron local parameters"); - spin1_memcpy(neuron_array, &address[next], n_neurons * sizeof(neuron_t)); - next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + // Number of steps per timestep + n_steps_per_timestep = address[next++]; + if (n_steps_per_timestep == 0) { + log_error("bad number of steps per timestep: 0"); + rt_error(RTE_SWERR); + } - log_debug("reading input type parameters"); - spin1_memcpy(input_type_array, &address[next], - n_neurons * sizeof(input_type_t)); - next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + if (sizeof(neuron_t)) { + neuron_params_t *params = (neuron_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + neuron_model_initialise(&neuron_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(neuron_params_t)); + } - log_debug("reading threshold type parameters"); - spin1_memcpy(threshold_type_array, &address[next], - n_neurons * 
sizeof(threshold_type_t)); - next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + if (sizeof(input_type_t)) { + input_type_params_t *params = (input_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + input_type_initialise(&input_type_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(input_type_params_t)); + } - log_debug("reading synapse parameters"); - spin1_memcpy(neuron_synapse_shaping_params, &address[next], - n_neurons * sizeof(synapse_param_t)); - next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + if (sizeof(threshold_type_t)) { + threshold_type_params_t *params = (threshold_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + threshold_type_initialise(&threshold_type_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(threshold_type_params_t)); + } - log_debug("reading additional input type parameters"); - spin1_memcpy(additional_input_array, &address[next], - n_neurons * sizeof(additional_input_t)); - next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + if (sizeof(synapse_types_t)) { + synapse_types_params_t *params = (synapse_types_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + synapse_types_initialise(&synapse_types_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(synapse_types_params_t)); + } - neuron_model_set_global_neuron_params(global_parameters); + if (sizeof(additional_input_t)) { + additional_input_params_t *params = (additional_input_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + additional_input_initialise(&additional_input_array[i], ¶ms[i], + n_steps_per_timestep); + } + next += n_words_needed(n_neurons * sizeof(additional_input_params_t)); + } + // If we are to save the initial state, copy the whole of the parameters + // to the initial state + if (save_initial_state) { + 
spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); + } // io_printf(IO_BUF, "\nPrinting global params\n"); // io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); // io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); @@ -244,321 +300,393 @@ static void neuron_impl_load_neuron_parameters( -static bool neuron_impl_do_timestep_update(index_t neuron_index, - input_t external_bias, state_t *recorded_variable_values) { - - // Get the neuron itself - neuron_pointer_t neuron = &neuron_array[neuron_index]; - bool spike = false; - -// current_time = time & 0x3ff; // repeats on a cycle of 1024 entries in array - -// io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); -// io_printf(IO_BUF, "Target: %k\n\n", -// global_parameters->target_V[target_ind]); - - // Get the input_type parameters and voltage for this neuron - input_type_pointer_t input_type = &input_type_array[neuron_index]; - - // Get threshold and additional input parameters for this neuron - threshold_type_pointer_t threshold_type = - &threshold_type_array[neuron_index]; - additional_input_pointer_t additional_input = - &additional_input_array[neuron_index]; - synapse_param_pointer_t synapse_type = - &neuron_synapse_shaping_params[neuron_index]; - - // Get the voltage - state_t voltage = neuron_model_get_membrane_voltage(neuron); - - - // Get the exc and inh values from the synapses - input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); - input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); - - // Call functions to obtain exc_input and inh_input - input_t* exc_input_values = input_type_get_input_value( - exc_value, input_type, NUM_EXCITATORY_RECEPTORS); - input_t* inh_input_values = input_type_get_input_value( - inh_value, input_type, NUM_INHIBITORY_RECEPTORS); - - // Sum g_syn contributions from all receptors for recording -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// for (int i = 0; i < 
NUM_EXCITATORY_RECEPTORS-1; i++){ -// total_exc += exc_input_values[i]; -// } -// for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ -// total_inh += inh_input_values[i]; -// } - - // Call functions to get the input values to be recorded -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - - // Call functions to convert exc_input and inh_input to current - input_type_convert_excitatory_input_to_current( - exc_input_values, input_type, voltage); - input_type_convert_inhibitory_input_to_current( - inh_input_values, input_type, voltage); - - external_bias += additional_input_get_input_value_as_current( - additional_input, voltage); - - if (neuron_index == 0){ -// io_printf(IO_BUF, "n0 - "); - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, -50k); - // Finally, set global membrane potential to updated value - global_parameters->readout_V_0 = result; - - } else if (neuron_index == 1){ -// io_printf(IO_BUF, "n1 - "); - // update neuron parameters -// learning_signal *= -1.k; - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, -50k); -// learning_signal *= -1.k; - // Finally, set global membrane potential to updated value - global_parameters->readout_V_1 = result; - } -// if (neuron_index == 0){ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_0; -// } -// else if (neuron_index == 1){ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_1; -// } -// io_printf(IO_BUF, "state = %u - %u\n", current_state, time); - if (cue_number == 0 && completed_broadcast){ // reset start of new test -// io_printf(IO_BUF, "time entering reset %u\n", time); 
-// io_printf(IO_BUF, "Resetting\n"); - completed_broadcast = false; - current_time = time; - current_state = STATE_CUE; - accumulative_direction = 0; - // error params - global_parameters->cross_entropy = 0.k; - learning_signal = 0.k; - global_parameters->mean_0 = 0.k; - global_parameters->mean_1 = 0.k; - softmax_0 = 0k; - softmax_1 = 0k; - while (!spin1_send_mc_packet( - key | neuron_index, bitsk(global_parameters->cross_entropy), 1 )) { - spin1_delay_us(1); - } - } -// io_printf(IO_BUF, "current_state = %u, cue_number = %u, direction = %u, time = %u\n", current_state, cue_number, current_cue_direction, time); - // In this state the environment is giving the left/right cues to the agent - if (current_state == STATE_CUE){ -// io_printf(IO_BUF, "time entering cue %u\n", time); - if (neuron_index == 0){ - // if it's current in the waiting time between cues do nothing -// if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ -// do nothing? -// } - // begin sending left/right cue - if ((time - current_time) % (wait_between_cues + duration_of_cue) >= wait_between_cues){ - // pick broadcast if just entered - if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ - // pick new value and broadcast - REAL random_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); // 0-1 - if (random_value < 0.5k){ - current_cue_direction = 0; - } - else{ - current_cue_direction = 1; - } -// current_cue_direction = (current_cue_direction + 1) % 2; - accumulative_direction += current_cue_direction; - REAL payload; - payload = global_parameters->rate_on; -// io_printf(IO_BUF, "poisson setting 1, direction = %u\n", current_cue_direction); - for (int j = current_cue_direction*global_parameters->p_pop_size; - j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); - } - } - } 
- // turn off and reset if finished - else if ((time - current_time) % (wait_between_cues + duration_of_cue) == 0 && (time - current_time) > 0){//(wait_between_cues + duration_of_cue) - 1){ - cue_number += 1; - REAL payload; - payload = global_parameters->rate_off; -// io_printf(IO_BUF, "poisson setting 2, direction = %u\n", current_cue_direction); - for (int j = current_cue_direction*global_parameters->p_pop_size; - j < current_cue_direction*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); - } - if (cue_number >= global_parameters->number_of_cues){ - current_state = (current_state + 1) % 3; - } - } - } - } - else if (current_state == STATE_WAITING){ -// io_printf(IO_BUF, "time entering wait %u\n", time); - // waiting for prompt, all things ok - if (cue_number >= global_parameters->number_of_cues){ - current_time = time; - cue_number = 0; - } - if ((time - current_time) >= wait_before_result){ - current_state = (current_state + 1) % 3; - start_prompt = true; - } - } - else if (current_state == STATE_PROMPT){ -// io_printf(IO_BUF, "time entering prompt %u\n", time); - if (start_prompt && neuron_index == 1){ - current_time = time; - // send packets to the variable poissons with the updated states - for (int i = 0; i < 4; i++){ - REAL payload; - payload = global_parameters->rate_on; -// io_printf(IO_BUF, "poisson setting 3, turning on prompt\n"); - for (int j = 2*global_parameters->p_pop_size; - j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, bitsk(payload), WITH_PAYLOAD); - } - } - } - if (neuron_index == 2){ // this is the error source - // Switched to always broadcasting error but with packet -// ticks_for_mean += 1; //todo is it a running error like this over prompt? 
- start_prompt = false; -// io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); -// io_printf(IO_BUF, "ticks %u - accum %k - ", ticks_for_mean, (accum)ticks_for_mean); - // Softmax of the exc and inh inputs representing 1 and 0 respectively - // may need to scale to stop huge numbers going in the exp -// io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); -// global_parameters->mean_0 += global_parameters->readout_V_0; -// global_parameters->mean_1 += global_parameters->readout_V_1; - // divide -> * 1/x -// io_printf(IO_BUF, " umm "); -// accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); -// accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); - accum exp_0 = expk(global_parameters->readout_V_0);// * 0.1k); - accum exp_1 = expk(global_parameters->readout_V_1);// * 0.1k); -// io_printf(IO_BUF, "or here - "); - if (exp_0 == 0k && exp_1 == 0k){ - if (global_parameters->readout_V_0 > global_parameters->readout_V_1){ - softmax_0 = 1k; - softmax_1 = 0k; - } - else{ - softmax_0 = 0k; - softmax_1 = 1k; - } - } - else{ -// accum denominator = 1.k / (exp_1 + exp_0); - softmax_0 = exp_0 / (exp_1 + exp_0); - softmax_1 = exp_1 / (exp_1 + exp_0); - } -// io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); - // What to do if log(0)? 
- if (accumulative_direction > global_parameters->number_of_cues >> 1){ - global_parameters->cross_entropy = -logk(softmax_1); - learning_signal = softmax_0; - is_it_right = 1; - } - else{ - global_parameters->cross_entropy = -logk(softmax_0); - learning_signal = softmax_0 - 1.k; - is_it_right = 0; - } -// if (learning_signal > 0.5){ -// learning_signal = 1k; -// } -// else if (learning_signal < -0.5){ -// learning_signal = -1k; -// } -// else{ -// learning_signal = 0k; -// } - while (!spin1_send_mc_packet( - key | neuron_index, bitsk(learning_signal), 1 )) { - spin1_delay_us(1); - } -// if(learning_signal){ -// io_printf(IO_BUF, "learning signal before cast = %k\n", learning_signal); -// } -// learning_signal = global_parameters->cross_entropy; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// io_printf(IO_BUF, "broadcasting error\n"); - } - if ((time - current_time) >= prompt_duration && neuron_index == 0){ -// io_printf(IO_BUF, "time entering end of test %u\n", time); -// io_printf(IO_BUF, "poisson setting 4, turning off prompt\n"); - current_state = 0; - completed_broadcast = true; - for (int i = 0; i < 4; i++){ - REAL payload; - payload = global_parameters->rate_off; - for (int j = 2*global_parameters->p_pop_size; - j < 2*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); - } - } - } - } +static void neuron_impl_do_timestep_update( + uint32_t timer_count, uint32_t time, uint32_t n_neurons) { + + for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { + +// log_info("neuron_index %u time %u n_neurons %u", neuron_index, time, n_neurons); + + // Get the neuron itself + neuron_t *neuron = &neuron_array[neuron_index]; + bool spike = false; + + // current_time = time & 0x3ff; // repeats on a cycle of 1024 entries in array + + // io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); + // io_printf(IO_BUF, "Target: %k\n\n", + 
// global_parameters->target_V[target_ind]); + + // Get the input_type parameters and voltage for this neuron + input_type_t *input_type = &input_type_array[neuron_index]; + + // Get threshold and additional input parameters for this neuron + threshold_type_t *threshold_type = + &threshold_type_array[neuron_index]; + additional_input_t *additional_input = + &additional_input_array[neuron_index]; + synapse_types_t *synapse_type = + &synapse_types_array[neuron_index]; + + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + + // Get the exc and inh values from the synapses + input_t exc_values[NUM_EXCITATORY_RECEPTORS]; + input_t* exc_syn_values = synapse_types_get_excitatory_input( + exc_values, synapse_type); + input_t inh_values[NUM_INHIBITORY_RECEPTORS]; + input_t* inh_syn_values = synapse_types_get_inhibitory_input( + inh_values, synapse_type); + + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_syn_values, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_syn_values, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording + // REAL total_exc = 0; + // REAL total_inh = 0; + // + // for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ + // total_exc += exc_input_values[i]; + // } + // for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ + // total_inh += inh_input_values[i]; + // } + + // Call functions to get the input values to be recorded + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; + // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + + // Call functions to convert exc_input and inh_input to current + input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); + + REAL current_offset = 
current_source_get_offset(time, neuron_index); + + input_t external_bias = additional_input_get_input_value_as_current( + additional_input, voltage); + + if (neuron_index == 0){ + // io_printf(IO_BUF, "n0 - "); + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, current_offset, neuron, -50k); + // Finally, set global membrane potential to updated value + for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { + // Get the neuron itself + neuron_t *glob_neuron = &neuron_array[glob_n]; + glob_neuron->readout_V_0 = result; + } +// global_parameters->readout_V_0 = result; + + } else if (neuron_index == 1){ + // io_printf(IO_BUF, "n1 - "); + // update neuron parameters + // learning_signal *= -1.k; + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, current_offset, neuron, -50k); + // learning_signal *= -1.k; + // Finally, set global membrane potential to updated value + for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { + // Get the neuron itself + neuron_t *glob_neuron = &neuron_array[glob_n]; + glob_neuron->readout_V_1 = result; + } +// global_parameters->readout_V_1 = result; + } + // if (neuron_index == 0){ + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_0; + // } + // else if (neuron_index == 1){ + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_1; + // } + // io_printf(IO_BUF, "state = %u - %u\n", current_state, time); + if (cue_number == 0 && completed_broadcast){ // reset start of new test + // io_printf(IO_BUF, "time entering reset %u\n", time); + // io_printf(IO_BUF, "Resetting\n"); + completed_broadcast = false; + current_time = time; + current_state = STATE_CUE; + accumulative_direction = 0; + // error params +// 
global_parameters->cross_entropy = 0.k; + neuron->cross_entropy = 0.k; + learning_signal = 0.k; +// global_parameters->mean_0 = 0.k; +// global_parameters->mean_1 = 0.k; + neuron->mean_0 = 0.k; + neuron->mean_1 = 0.k; + softmax_0 = 0k; + softmax_1 = 0k; + if (use_key) { + // I don't understand, this just sends zero + // Oh, maybe it's a "completed" signal + send_spike_mc_payload( + neuron_keys[neuron_index], bitsk(neuron->cross_entropy)); +// while (!spin1_send_mc_packet( +// neuron_keys[neuron_index], +// bitsk(neuron->cross_entropy), 1)) { +// spin1_delay_us(1); +// } + } + } + // io_printf(IO_BUF, "current_state = %u, cue_number = %u, direction = %u, time = %u\n", current_state, cue_number, current_cue_direction, time); + // In this state the environment is giving the left/right cues to the agent + if (current_state == STATE_CUE){ + // io_printf(IO_BUF, "time entering cue %u\n", time); + if (neuron_index == 0){ + // if it's current in the waiting time between cues do nothing + // if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ + // do nothing? 
+ // } + // begin sending left/right cue + if ((time - current_time) % + (wait_between_cues + duration_of_cue) >= wait_between_cues){ + // pick broadcast if just entered + if ((time - current_time) % + (wait_between_cues + duration_of_cue) == wait_between_cues){ + // pick new value and broadcast + REAL random_value = (REAL)(mars_kiss64_seed( + neuron->kiss_seed) / (REAL)0xffffffff); // 0-1 + if (random_value < 0.5k){ + current_cue_direction = 0; + } + else{ + current_cue_direction = 1; + } + // current_cue_direction = (current_cue_direction + 1) % 2; + accumulative_direction += current_cue_direction; + REAL payload; + payload = neuron->rate_on; +// io_printf(IO_BUF, "poisson setting 1, direction = %u\n", current_cue_direction); + for (int j = current_cue_direction*neuron->p_pop_size; + j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++){ +// log_info("current cue direction %u payload %k key index %u time %u neuron_index %u", +// current_cue_direction, payload, j, time, neuron_index); + send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); +// spin1_send_mc_packet( +// neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); + } + } + } + // turn off and reset if finished + else if ((time - current_time) % (wait_between_cues + duration_of_cue) == 0 && (time - current_time) > 0){//(wait_between_cues + duration_of_cue) - 1){ + cue_number += 1; + REAL payload; + payload = neuron->rate_off; + // io_printf(IO_BUF, "poisson setting 2, direction = %u\n", current_cue_direction); + for (int j = current_cue_direction*neuron->p_pop_size; + j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++){ + send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); +// spin1_send_mc_packet( +// neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); + } + if (cue_number >= neuron->number_of_cues){ + current_state = (current_state + 1) % 3; + } + } + } + } + else if (current_state == STATE_WAITING){ + // io_printf(IO_BUF, "time entering wait %u\n", time); + // 
waiting for prompt, all things ok + if (cue_number >= neuron->number_of_cues){ + current_time = time; + cue_number = 0; + } + if ((time - current_time) >= wait_before_result){ + current_state = (current_state + 1) % 3; + start_prompt = true; + } + } + else if (current_state == STATE_PROMPT){ + // io_printf(IO_BUF, "time entering prompt %u\n", time); + if (start_prompt && neuron_index == 1){ + current_time = time; + // send packets to the variable poissons with the updated states + for (int i = 0; i < 4; i++){ + REAL payload; + payload = neuron->rate_on; + // io_printf(IO_BUF, "poisson setting 3, turning on prompt\n"); + for (int j = 2*neuron->p_pop_size; + j < 2*neuron->p_pop_size + neuron->p_pop_size; j++){ + send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); +// spin1_send_mc_packet( +// neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); + } + } + } + if (neuron_index == 2){ // this is the error source + // Switched to always broadcasting error but with packet + // ticks_for_mean += 1; //todo is it a running error like this over prompt? 
+ start_prompt = false; + // io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); + // io_printf(IO_BUF, "ticks %u - accum %k - ", ticks_for_mean, (accum)ticks_for_mean); + // Softmax of the exc and inh inputs representing 1 and 0 respectively + // may need to scale to stop huge numbers going in the exp + // io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); + // global_parameters->mean_0 += global_parameters->readout_V_0; + // global_parameters->mean_1 += global_parameters->readout_V_1; + // divide -> * 1/x + // io_printf(IO_BUF, " umm "); + // accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); + // accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); + accum exp_0 = expk(neuron->readout_V_0);// * 0.1k); + accum exp_1 = expk(neuron->readout_V_1);// * 0.1k); + // io_printf(IO_BUF, "or here - "); + // Um... how can an exponential be zero? + if (exp_0 == 0k && exp_1 == 0k){ + if (neuron->readout_V_0 > neuron->readout_V_1){ + softmax_0 = 1k; + softmax_1 = 0k; + } + else{ + softmax_0 = 0k; + softmax_1 = 1k; + } + } + else{ + // accum denominator = 1.k / (exp_1 + exp_0); + softmax_0 = exp_0 / (exp_1 + exp_0); + softmax_1 = exp_1 / (exp_1 + exp_0); + } + // io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); + // What to do if log(0)? 
+ if (accumulative_direction > neuron->number_of_cues >> 1){ + for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { + // Get the neuron itself + neuron_t *glob_neuron = &neuron_array[glob_n]; + glob_neuron->cross_entropy = -logk(softmax_1); + } + learning_signal = softmax_0; + is_it_right = 1; + } + else{ + for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { + // Get the neuron itself + neuron_t *glob_neuron = &neuron_array[glob_n]; + glob_neuron->cross_entropy = -logk(softmax_0); + } + learning_signal = softmax_0 - 1.k; + is_it_right = 0; + } + // if (learning_signal > 0.5){ + // learning_signal = 1k; + // } + // else if (learning_signal < -0.5){ + // learning_signal = -1k; + // } + // else{ + // learning_signal = 0k; + // } + if (use_key) { + send_spike_mc_payload(neuron_keys[neuron_index], bitsk(learning_signal)); +// while (!spin1_send_mc_packet( +// neuron_keys[neuron_index], bitsk(learning_signal), 1 )) { +// spin1_delay_us(1); +// } + } + // if(learning_signal){ + // io_printf(IO_BUF, "learning signal before cast = %k\n", learning_signal); + // } + // learning_signal = global_parameters->cross_entropy; + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = + // io_printf(IO_BUF, "broadcasting error\n"); + } + if ((time - current_time) >= prompt_duration && neuron_index == 0){ + // io_printf(IO_BUF, "time entering end of test %u\n", time); + // io_printf(IO_BUF, "poisson setting 4, turning off prompt\n"); + current_state = 0; + completed_broadcast = true; + for (int i = 0; i < 4; i++){ + REAL payload; + payload = neuron->rate_off; + for (int j = 2*neuron->p_pop_size; + j < 2*neuron->p_pop_size + neuron->p_pop_size; j++){ + send_spike_mc_payload(neuron->p_key | j, payload); +// spin1_send_mc_packet( +// neuron->p_key | j, payload, WITH_PAYLOAD); + } + } + } + } -// learning_signal = global_parameters->cross_entropy; + // learning_signal = global_parameters->cross_entropy; + +// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = 
learning_signal;//exc_input_values[0];//neuron->syn_state[1].update_ready;// +// recorded_variable_values[V_RECORDING_INDEX] = voltage; +// log_info("neuron_index %u time %u record learning signal %k", +// neuron_index, time, learning_signal); + neuron_recording_record_accum( + GSYN_INH_RECORDING_INDEX, neuron_index, learning_signal); + neuron_recording_record_accum( + V_RECORDING_INDEX, neuron_index, voltage); + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = ; + // if (neuron_index == 2){ + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = accumulative_direction; + // } + // else { + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; + // } + if (neuron_index == 2){ //this neuron does nothing + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; + // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[50].delta_w; + neuron_recording_record_accum( + GSYN_EXC_RECORDING_INDEX, neuron_index, + neuron->syn_state[50].delta_w); + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = is_it_right; + } + else if (neuron_index == 1){ + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].z_bar; + // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].delta_w; + neuron_recording_record_accum( + GSYN_EXC_RECORDING_INDEX, neuron_index, + neuron->syn_state[40].delta_w); + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; + } + else{ + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].z_bar; + // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; +// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; + 
neuron_recording_record_accum( + GSYN_EXC_RECORDING_INDEX, neuron_index, + neuron->syn_state[0].delta_w); + // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; + } - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = learning_signal;//exc_input_values[0];//neuron->syn_state[1].update_ready;// - recorded_variable_values[V_RECORDING_INDEX] = voltage; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = ; -// if (neuron_index == 2){ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = accumulative_direction; -// } -// else { -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; -// } - if (neuron_index == 2){ //this neuron does nothing -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; -// recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[50].delta_w; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = is_it_right; - } - else if (neuron_index == 1){ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].z_bar; -// recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].delta_w; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; - } - else{ -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].z_bar; -// recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; - } + // If spike occurs, communicate to relevant parts of model + if (spike) { + // Call relevant model-based functions + // Tell the neuron model + // neuron_model_has_spiked(neuron); - // If spike occurs, communicate to 
relevant parts of model - if (spike) { - // Call relevant model-based functions - // Tell the neuron model -// neuron_model_has_spiked(neuron); + // Tell the additional input + additional_input_has_spiked(additional_input); + } - // Tell the additional input - additional_input_has_spiked(additional_input); - } + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); - // Shape the existing input according to the included rule - synapse_types_shape_input(synapse_type); + #if LOG_LEVEL >= LOG_DEBUG + neuron_model_print_state_variables(neuron); + #endif // LOG_LEVEL >= LOG_DEBUG - #if LOG_LEVEL >= LOG_DEBUG - neuron_model_print_state_variables(neuron); - #endif // LOG_LEVEL >= LOG_DEBUG + // Return the boolean to the model timestep update + // return spike; + } - // Return the boolean to the model timestep update - return spike; +// log_info("end of do_timestep_update time %u", time); } @@ -571,61 +699,82 @@ static void neuron_impl_store_neuron_parameters( address_t address, uint32_t next, uint32_t n_neurons) { log_debug("writing parameters"); - //log_debug("writing neuron global parameters"); - spin1_memcpy(&address[next], global_parameters, - sizeof(global_neuron_params_t)); - next += (sizeof(global_neuron_params_t) + 3) / 4; + // Skip steps per timestep + next += 1; - log_debug("writing neuron local parameters"); - spin1_memcpy(&address[next], neuron_array, - n_neurons * sizeof(neuron_t)); - next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; + if (sizeof(neuron_t)) { + log_debug("writing neuron local parameters"); +// spin1_memcpy(&address[next], neuron_array, +// n_neurons * sizeof(neuron_t)); + neuron_params_t *params = (neuron_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + neuron_model_save_state(&neuron_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(neuron_params_t)); + } - log_debug("writing input type parameters"); - spin1_memcpy(&address[next], input_type_array, - 
n_neurons * sizeof(input_type_t)); - next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; + if (sizeof(input_type_t)) { + log_debug("writing input type parameters"); + input_type_params_t *params = (input_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + input_type_save_state(&input_type_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(input_type_params_t)); + } - log_debug("writing threshold type parameters"); - spin1_memcpy(&address[next], threshold_type_array, - n_neurons * sizeof(threshold_type_t)); - next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; + if (sizeof(threshold_type_t)) { + log_debug("writing threshold type parameters"); + threshold_type_params_t *params = (threshold_type_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + threshold_type_save_state(&threshold_type_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(threshold_type_params_t)); + } - log_debug("writing synapse parameters"); - spin1_memcpy(&address[next], neuron_synapse_shaping_params, - n_neurons * sizeof(synapse_param_t)); - next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; + if (sizeof(synapse_types_t)) { + log_debug("writing synapse parameters"); + synapse_types_params_t *params = (synapse_types_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + synapse_types_save_state(&synapse_types_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(synapse_types_params_t)); + } - log_debug("writing additional input type parameters"); - spin1_memcpy(&address[next], additional_input_array, - n_neurons * sizeof(additional_input_t)); - next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; + if (sizeof(additional_input_t)) { + log_debug("writing additional input type parameters"); + additional_input_params_t *params = (additional_input_params_t *) &address[next]; + for (uint32_t i = 0; i < n_neurons; i++) { + 
additional_input_save_state(&additional_input_array[i], ¶ms[i]); + } + next += n_words_needed(n_neurons * sizeof(additional_input_params_t)); + } } #if LOG_LEVEL >= LOG_DEBUG void neuron_impl_print_inputs(uint32_t n_neurons) { bool empty = true; for (index_t i = 0; i < n_neurons; i++) { - empty = empty - && (bitsk(synapse_types_get_excitatory_input( - &(neuron_synapse_shaping_params[i])) - - synapse_types_get_inhibitory_input( - &(neuron_synapse_shaping_params[i]))) == 0); + synapse_types_t *params = &synapse_types_array[i]; + input_t exc_values[NUM_EXCITATORY_RECEPTORS]; + input_t inh_values[NUM_INHIBITORY_RECEPTORS]; + empty = empty && (0 == bitsk( + synapse_types_get_excitatory_input(exc_values, params)[0] + - synapse_types_get_inhibitory_input(inh_values, params)[0])); } if (!empty) { log_debug("-------------------------------------\n"); for (index_t i = 0; i < n_neurons; i++) { + synapse_types_t *params = &synapse_types_array[i]; + input_t exc_values[NUM_EXCITATORY_RECEPTORS]; + input_t inh_values[NUM_INHIBITORY_RECEPTORS]; input_t input = - synapse_types_get_excitatory_input( - &(neuron_synapse_shaping_params[i])) - - synapse_types_get_inhibitory_input( - &(neuron_synapse_shaping_params[i])); + synapse_types_get_excitatory_input(exc_values, params)[0] + - synapse_types_get_inhibitory_input(inh_values, params)[1]; if (bitsk(input) != 0) { log_debug("%3u: %12.6k (= ", i, input); - synapse_types_print_input( - &(neuron_synapse_shaping_params[i])); + synapse_types_print_input(params); log_debug(")\n"); } } diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 44f9668ad7d..8e143ec4fe2 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -243,9 +243,9 @@ static inline void neuron_model_initialise( state->V_reset = params->V_reset; state->T_refract = 
lif_ceil_accum(kdivk(params->T_refract_ms, ts)); - log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract %k", + log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract_ms %k T_refract %d", state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, - state->refract_timer, state->V_reset, state->T_refract); + state->refract_timer, state->V_reset, params->T_refract_ms, state->T_refract); // for everything else just copy across for now state->z = params->z; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c index ed965b16595..9ddb65d370a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c @@ -1,214 +1,214 @@ -#include "neuron_model_left_right_readout_impl.h" - -#include - -extern uint32_t time; -extern REAL learning_signal; -REAL local_eta; -REAL v_mem_error; - -// simple Leaky I&F ODE -static inline void _lif_neuron_closed_form( - neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { - - REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; - - // update membrane voltage - neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); -} - -void neuron_model_set_global_neuron_params( - global_neuron_params_pointer_t params) { - use(params); - - local_eta = params->eta; - -// io_printf(IO_BUF, "local eta = %k\n", local_eta); -// io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); -// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -// io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); -// io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); -// io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); -// io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); 
-// io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); -// io_printf(IO_BUF, "p_key = %u\n", params->p_key); -// io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); -// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -// io_printf(IO_BUF, "local eta = %k\n", params->); - - // Does Nothing - no params -} - -state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, neuron_pointer_t neuron, REAL dummy) { - - log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); - log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); -// io_printf(IO_BUF, "Exc 1: %12.6k, Exc 2: %12.6k - ", exc_input[0], exc_input[1]); -// io_printf(IO_BUF, "Inh 1: %12.6k, Inh 2: %12.6k - %u\n", inh_input[0], inh_input[1], time); - use(dummy); - - // If outside of the refractory period - if (neuron->refract_timer <= 0) { -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// total_exc += exc_input[0]; -// total_inh += inh_input[0]; -// for (int i=0; i < num_excitatory_inputs; i++){ -// total_exc += exc_input[i]; -// } -// for (int i=0; i< num_inhibitory_inputs; i++){ -// total_inh += inh_input[i]; -// } - // Get the input in nA - input_t input_this_timestep = - exc_input[0] + exc_input[1] + neuron->I_offset; - - _lif_neuron_closed_form( - neuron, neuron->V_membrane, input_this_timestep); - } else { - - // countdown refractory timer - neuron->refract_timer -= 1; - } - - uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
- -// if(learning_signal){ -// io_printf(IO_BUF, "learning signal = %k\n", learning_signal); -// } -// if (neuron->V_membrane > 10.k){ -// v_mem_error = neuron->V_membrane - 10.k; -//// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); -// } -// else if (neuron->V_membrane < -10.k){ -// v_mem_error = neuron->V_membrane + 10.k; -//// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); +//#include "neuron_model_left_right_readout_impl.h" +// +//#include +// +//extern uint32_t time; +//extern REAL learning_signal; +//REAL local_eta; +//REAL v_mem_error; +// +//// simple Leaky I&F ODE +//static inline void _lif_neuron_closed_form( +// neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { +// +// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; +// +// // update membrane voltage +// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +//} +// +//void neuron_model_set_global_neuron_params( +// global_neuron_params_pointer_t params) { +// use(params); +// +// local_eta = params->eta; +// +//// io_printf(IO_BUF, "local eta = %k\n", local_eta); +//// io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); +//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +//// io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); +//// io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); +//// io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); +//// io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); +//// io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); +//// io_printf(IO_BUF, "p_key = %u\n", params->p_key); +//// io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); +//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +//// io_printf(IO_BUF, "local eta = %k\n", params->); +// +// // Does Nothing - no params +//} +// +//state_t 
neuron_model_state_update( +// uint16_t num_excitatory_inputs, input_t* exc_input, +// uint16_t num_inhibitory_inputs, input_t* inh_input, +// input_t external_bias, neuron_pointer_t neuron, REAL dummy) { +// +// log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); +// log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); +//// io_printf(IO_BUF, "Exc 1: %12.6k, Exc 2: %12.6k - ", exc_input[0], exc_input[1]); +//// io_printf(IO_BUF, "Inh 1: %12.6k, Inh 2: %12.6k - %u\n", inh_input[0], inh_input[1], time); +// use(dummy); +// +// // If outside of the refractory period +// if (neuron->refract_timer <= 0) { +//// REAL total_exc = 0; +//// REAL total_inh = 0; +//// +//// total_exc += exc_input[0]; +//// total_inh += inh_input[0]; +//// for (int i=0; i < num_excitatory_inputs; i++){ +//// total_exc += exc_input[i]; +//// } +//// for (int i=0; i< num_inhibitory_inputs; i++){ +//// total_inh += inh_input[i]; +//// } +// // Get the input in nA +// input_t input_this_timestep = +// exc_input[0] + exc_input[1] + neuron->I_offset; +// +// _lif_neuron_closed_form( +// neuron, neuron->V_membrane, input_this_timestep); +// } else { +// +// // countdown refractory timer +// neuron->refract_timer -= 1; // } -// else{ -// v_mem_error = 0.k; +// +// uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
+// +//// if(learning_signal){ +//// io_printf(IO_BUF, "learning signal = %k\n", learning_signal); +//// } +//// if (neuron->V_membrane > 10.k){ +//// v_mem_error = neuron->V_membrane - 10.k; +////// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); +//// } +//// else if (neuron->V_membrane < -10.k){ +//// v_mem_error = neuron->V_membrane + 10.k; +////// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); +//// } +//// else{ +//// v_mem_error = 0.k; +//// } +//// learning_signal += v_mem_error * 0.1; +// +// neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); +//// REAL tau_decay = expk(-1.k / 1500.k); +// // All operations now need doing once per eprop synapse +// for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ +// // ****************************************************************** +// // Low-pass filter incoming spike train +// // ****************************************************************** +// neuron->syn_state[syn_ind].z_bar = +// neuron->syn_state[syn_ind].z_bar * neuron->exp_TC +// + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update +// +// +// // ****************************************************************** +// // Update eligibility vector +// // ****************************************************************** +//// neuron->syn_state[syn_ind].el_a = +//// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + +//// (rho - neuron->psi * neuron->beta) * +//// neuron->syn_state[syn_ind].el_a; +// +// +// // ****************************************************************** +// // Update eligibility trace +// // ****************************************************************** +//// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +//// neuron->beta * neuron->syn_state[syn_ind].el_a); +//// +//// 
neuron->syn_state[syn_ind].e_bar = +//// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar +//// + (1 - neuron->exp_TC) * temp_elig_trace; +// +// // ****************************************************************** +// // Update cached total weight change +// // ****************************************************************** +// +// REAL this_dt_weight_change = +//// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; +// local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; +// +// neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; +//// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +//// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +//// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +//// "L = %k = %k * %k = l * w_fb\n" +//// "this dw = %k \t tot dw %k\n" +//// , +//// total_synapses_per_neuron, +//// syn_ind, +//// neuron->syn_state[syn_ind].z_bar_inp, +//// neuron->syn_state[syn_ind].z_bar, +//// time, +//// neuron->L, learning_signal, neuron -> w_fb, +//// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +//// ); +//// } +// // reset input (can't have more than one spike per timestep +// neuron->syn_state[syn_ind].z_bar_inp = 0; +// +// // decrease timestep counter preventing rapid updates +//// if (neuron->syn_state[syn_ind].update_ready > 0){ +//// io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); +// neuron->syn_state[syn_ind].update_ready -= 1; +//// } +//// else{ +//// io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); +//// } +// // } -// learning_signal += v_mem_error * 0.1; - - neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); -// REAL tau_decay = expk(-1.k / 1500.k); - // All operations now need doing once per eprop synapse - for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ - // ****************************************************************** - // 
Low-pass filter incoming spike train - // ****************************************************************** - neuron->syn_state[syn_ind].z_bar = - neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - - - // ****************************************************************** - // Update eligibility vector - // ****************************************************************** -// neuron->syn_state[syn_ind].el_a = -// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -// (rho - neuron->psi * neuron->beta) * -// neuron->syn_state[syn_ind].el_a; - - - // ****************************************************************** - // Update eligibility trace - // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * neuron->syn_state[syn_ind].el_a); -// -// neuron->syn_state[syn_ind].e_bar = -// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -// + (1 - neuron->exp_TC) * temp_elig_trace; - - // ****************************************************************** - // Update cached total weight change - // ****************************************************************** - - REAL this_dt_weight_change = -// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; - - neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; -// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -// "L = %k = %k * %k = l * w_fb\n" -// "this dw = %k \t tot dw %k\n" -// , -// total_synapses_per_neuron, -// syn_ind, -// neuron->syn_state[syn_ind].z_bar_inp, -// neuron->syn_state[syn_ind].z_bar, -// time, -// neuron->L, 
learning_signal, neuron -> w_fb, -// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -// ); -// } - // reset input (can't have more than one spike per timestep - neuron->syn_state[syn_ind].z_bar_inp = 0; - - // decrease timestep counter preventing rapid updates -// if (neuron->syn_state[syn_ind].update_ready > 0){ -// io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); - neuron->syn_state[syn_ind].update_ready -= 1; -// } -// else{ -// io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); -// } - - } - - return neuron->V_membrane; -} - -void neuron_model_has_spiked(neuron_pointer_t neuron) { - - // reset membrane voltage - neuron->V_membrane = neuron->V_reset; - - // reset refractory timer - neuron->refract_timer = neuron->T_refract; -} - -state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { - return neuron->V_membrane; -} - -void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { - log_debug("V membrane = %11.4k mv", neuron->V_membrane); -} - -void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { -// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); -// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); -// -// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); -// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); -// -// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); -// -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// -// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); -// -// io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); -// -// io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); - -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); -// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", -// neuron->time_to_spike_ticks); - -// 
io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); -} +// +// return neuron->V_membrane; +//} +// +//void neuron_model_has_spiked(neuron_pointer_t neuron) { +// +// // reset membrane voltage +// neuron->V_membrane = neuron->V_reset; +// +// // reset refractory timer +// neuron->refract_timer = neuron->T_refract; +//} +// +//state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { +// return neuron->V_membrane; +//} +// +//void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { +// log_debug("V membrane = %11.4k mv", neuron->V_membrane); +//} +// +//void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { +//// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); +//// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); +//// +//// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); +//// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); +//// +//// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); +//// +//// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +//// +//// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); +//// +//// io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); +//// +//// io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); +// +//// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +//// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); +//// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", +//// neuron->time_to_spike_ticks); +// +//// io_printf(IO_BUF, "Seed 1: %u\n", 
neuron->spike_source_seed[0]); +//// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +//// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +//// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +////// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +//// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +//} diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 7c966732fd4..2852784498b 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -6,6 +6,9 @@ #define SYNAPSES_PER_NEURON 250 +//extern uint32_t time; +extern REAL learning_signal; + typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply @@ -16,6 +19,86 @@ typedef struct eprop_syn_state_t { int32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). 
}eprop_syn_state_t; +///////////////////////////////////////////////////////////// +// definition for LIF neuron parameters +typedef struct neuron_params_t { + // membrane voltage [mV] + REAL V_init; + + // membrane resting voltage [mV] + REAL V_rest; + + // membrane capacitance [nF] + REAL c_m; + + // membrane decay time constant + REAL tau_m; + + // offset current [nA] + REAL I_offset; + + // post-spike reset membrane voltage [mV] + REAL V_reset; + + // refractory time of neuron [ms] + REAL T_refract_ms; + + // initial refractory timer value (saved) + int32_t refract_timer_init; + + // The time step in milliseconds + REAL time_step; + + + // Poisson compartment params +// REAL mean_isi_ticks; +// REAL time_to_spike_ticks; +// +// int32_t time_since_last_spike; +// REAL rate_at_last_setting; +// REAL rate_update_threshold; + + REAL L; // learning signal + REAL w_fb; // feedback weight + uint32_t window_size; + + // globals here + mars_kiss64_seed_t kiss_seed; // array of 4 values (?) + REAL ticks_per_second; + REAL readout_V_0; + REAL readout_V_1; +// REAL prob_command; + REAL rate_on; + REAL rate_off; + REAL mean_0; + REAL mean_1; + REAL cross_entropy; + uint32_t p_key; + uint32_t p_pop_size; + REAL eta; + uint32_t number_of_cues; + + + // array of synaptic states - peak fan-in of >250 for this case + eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; + + + // Poisson compartment params +// REAL mean_isi_ticks; +// REAL time_to_spike_ticks; +// +// int32_t time_since_last_spike; +// REAL rate_at_last_setting; +// REAL rate_update_threshold; + + +// // Should be in global params +// mars_kiss64_seed_t spike_source_seed; // array of 4 values +//// UFRACT seconds_per_tick; +// REAL ticks_per_second; + +}; + ///////////////////////////////////////////////////////////// // definition for LIF neuron parameters typedef struct neuron_t { @@ -58,10 +141,25 @@ typedef struct neuron_t { REAL w_fb; // feedback weight uint32_t window_size; + // former globals + 
mars_kiss64_seed_t kiss_seed; // array of 4 values (?) + REAL ticks_per_second; + REAL readout_V_0; + REAL readout_V_1; +// REAL prob_command; + REAL rate_on; + REAL rate_off; + REAL mean_0; + REAL mean_1; + REAL cross_entropy; + uint32_t p_key; + uint32_t p_pop_size; + REAL eta; + uint32_t number_of_cues; + // array of synaptic states - peak fan-in of >250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; - // Poisson compartment params // REAL mean_isi_ticks; // REAL time_to_spike_ticks; @@ -78,21 +176,305 @@ typedef struct neuron_t { } neuron_t; -typedef struct global_neuron_params_t { - mars_kiss64_seed_t kiss_seed; // array of 4 values - REAL ticks_per_second; - REAL readout_V_0; - REAL readout_V_1; +//typedef struct global_neuron_params_t { +// mars_kiss64_seed_t kiss_seed; // array of 4 values +// REAL ticks_per_second; +// REAL readout_V_0; +// REAL readout_V_1; +//// REAL prob_command; +// REAL rate_on; +// REAL rate_off; +// REAL mean_0; +// REAL mean_1; +// REAL cross_entropy; +// uint32_t p_key; +// uint32_t p_pop_size; +// REAL eta; +// uint32_t number_of_cues; +//} global_neuron_params_t; + +//! \brief Performs a ceil operation on an accum +//! \param[in] value The value to ceil +//! 
\return The ceil of the value +static inline int32_t lif_ceil_accum(REAL value) { + int32_t bits = bitsk(value); + int32_t integer = bits >> 15; + int32_t fraction = bits & 0x7FFF; + if (fraction > 0) { + return integer + 1; + } + return integer; +} + +static inline void neuron_model_initialise( + neuron_t *state, neuron_params_t *params, uint32_t n_steps_per_timestep) { + REAL ts = kdivui(params->time_step, n_steps_per_timestep); + state->V_membrane = params->V_init; + state->V_rest = params->V_rest; + state->R_membrane = kdivk(params->tau_m, params->c_m); + state->exp_TC = expk(-kdivk(ts, params->tau_m)); + state->I_offset = params->I_offset; + state->refract_timer = params->refract_timer_init; + state->V_reset = params->V_reset; + state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); + + // for everything else just copy across for now + state->L = params->L; + state->w_fb = params->w_fb; + + state->window_size = params->window_size; + + // former globals + for (uint32_t n_seed = 0; n_seed < 4; n_seed++) { + state->kiss_seed[n_seed] = params->kiss_seed[n_seed]; // array of 4 values (?) 
+ } + state->ticks_per_second = params->ticks_per_second; + state->readout_V_0 = params->readout_V_0; + state->readout_V_1 = params->readout_V_1; // REAL prob_command; - REAL rate_on; - REAL rate_off; - REAL mean_0; - REAL mean_1; - REAL cross_entropy; - uint32_t p_key; - uint32_t p_pop_size; - REAL eta; - uint32_t number_of_cues; -} global_neuron_params_t; + state->rate_on = params->rate_on; + state->rate_off = params->rate_off; + state->mean_0 = params->mean_0; + state->mean_1 = params->mean_1; + state->cross_entropy = params->cross_entropy; + state->p_key = params->p_key; + state->p_pop_size = params->p_pop_size; + state->eta = params->eta; + state->number_of_cues = params->number_of_cues; +// local_eta = params->eta; + + log_info("Check p_key %u p_pop_size %u", params->p_key, params->p_pop_size); + log_info("Check number_of_cues %u eta %k", params->number_of_cues, params->eta); + log_info("mean_0 %k mean_1 %k rate_on %k rate_off %k readout_V_0 %k readout_V_1 %k", + params->mean_0, params->mean_1, params->rate_on, params->rate_off, + params->readout_V_0, params->readout_V_1); + + for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { + state->syn_state[n_syn] = params->syn_state[n_syn]; + } + +} + +static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { + // TODO: probably more parameters need copying across at this point, syn_state for a start + params->V_init = state->V_membrane; + params->refract_timer_init = state->refract_timer; + params->L = state->L; + params->w_fb = state->w_fb; + + for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { + params->syn_state[n_syn] = state->syn_state[n_syn]; + } +} + + +// simple Leaky I&F ODE +static inline void lif_neuron_closed_form( + neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { + + REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + + // update membrane voltage + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); 
+} + +//void neuron_model_set_global_neuron_params( +// global_neuron_params_pointer_t params) { +// use(params); +// +// local_eta = params->eta; +// +//// io_printf(IO_BUF, "local eta = %k\n", local_eta); +//// io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); +//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +//// io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); +//// io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); +//// io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); +//// io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); +//// io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); +//// io_printf(IO_BUF, "p_key = %u\n", params->p_key); +//// io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); +//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); +//// io_printf(IO_BUF, "local eta = %k\n", params->); +// +// // Does Nothing - no params +//} + +state_t neuron_model_state_update( + uint16_t num_excitatory_inputs, input_t* exc_input, + uint16_t num_inhibitory_inputs, input_t* inh_input, + input_t external_bias, REAL current_offset, neuron_t *restrict neuron, + REAL B_t) { + + log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); + log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); +// io_printf(IO_BUF, "Exc 1: %12.6k, Exc 2: %12.6k - ", exc_input[0], exc_input[1]); +// io_printf(IO_BUF, "Inh 1: %12.6k, Inh 2: %12.6k - %u\n", inh_input[0], inh_input[1], time); + use(B_t); + + // If outside of the refractory period + if (neuron->refract_timer <= 0) { +// REAL total_exc = 0; +// REAL total_inh = 0; +// +// total_exc += exc_input[0]; +// total_inh += inh_input[0]; +// for (int i=0; i < num_excitatory_inputs; i++){ +// total_exc += exc_input[i]; +// } +// for (int i=0; i< num_inhibitory_inputs; i++){ +// total_inh += inh_input[i]; +// } + // Get the input in nA + input_t 
input_this_timestep = + exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; + + lif_neuron_closed_form( + neuron, neuron->V_membrane, input_this_timestep); + } else { + + // countdown refractory timer + neuron->refract_timer -= 1; + } + + uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? + +// if(learning_signal){ +// io_printf(IO_BUF, "learning signal = %k\n", learning_signal); +// } +// if (neuron->V_membrane > 10.k){ +// v_mem_error = neuron->V_membrane - 10.k; +//// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); +// } +// else if (neuron->V_membrane < -10.k){ +// v_mem_error = neuron->V_membrane + 10.k; +//// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); +// } +// else{ +// v_mem_error = 0.k; +// } +// learning_signal += v_mem_error * 0.1; + + neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); + REAL local_eta = neuron->eta; + +// REAL tau_decay = expk(-1.k / 1500.k); + // All operations now need doing once per eprop synapse + for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ + // ****************************************************************** + // Low-pass filter incoming spike train + // ****************************************************************** + neuron->syn_state[syn_ind].z_bar = + neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + + // ****************************************************************** + // Update eligibility vector + // ****************************************************************** +// neuron->syn_state[syn_ind].el_a = +// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + +// (rho - neuron->psi * neuron->beta) * +// neuron->syn_state[syn_ind].el_a; + + + // 
****************************************************************** + // Update eligibility trace + // ****************************************************************** +// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - +// neuron->beta * neuron->syn_state[syn_ind].el_a); +// +// neuron->syn_state[syn_ind].e_bar = +// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar +// + (1 - neuron->exp_TC) * temp_elig_trace; + + // ****************************************************************** + // Update cached total weight change + // ****************************************************************** + + REAL this_dt_weight_change = +// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; + local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; + + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; +// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ +// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " +// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" +// "L = %k = %k * %k = l * w_fb\n" +// "this dw = %k \t tot dw %k\n" +// , +// total_synapses_per_neuron, +// syn_ind, +// neuron->syn_state[syn_ind].z_bar_inp, +// neuron->syn_state[syn_ind].z_bar, +// time, +// neuron->L, learning_signal, neuron -> w_fb, +// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w +// ); +// } + // reset input (can't have more than one spike per timestep + neuron->syn_state[syn_ind].z_bar_inp = 0; + + // decrease timestep counter preventing rapid updates +// if (neuron->syn_state[syn_ind].update_ready > 0){ +// io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); + neuron->syn_state[syn_ind].update_ready -= 1; +// } +// else{ +// io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); +// } + + } + + return neuron->V_membrane; +} + +void neuron_model_has_spiked(neuron_t *restrict neuron) { + + // reset membrane voltage + neuron->V_membrane = 
neuron->V_reset; + + // reset refractory timer + neuron->refract_timer = neuron->T_refract; +} + +state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) { + return neuron->V_membrane; +} + +void neuron_model_print_state_variables(const neuron_t *neuron) { + log_debug("V membrane = %11.4k mv", neuron->V_membrane); +} + +void neuron_model_print_parameters(const neuron_t *neuron) { +// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); +// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); +// +// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); +// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); +// +// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); +// +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// +// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); +// +// io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); +// +// io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); + +// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); +// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); +// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", +// neuron->time_to_spike_ticks); + +// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); +// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); +// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); +// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); +//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); +// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); +} #endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index f8e36242f25..a9775f7bb7d 100644 --- 
a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -7,7 +7,7 @@ #define SYNAPSES_PER_NEURON 250 -extern uint32_t time; +//extern uint32_t time; extern REAL learning_signal; //extern REAL local_eta; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index e66278504ce..80e5c90dfe9 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -24,30 +24,31 @@ // Plasticity includes #include "maths.h" #include "post_events.h" +#include "synapse_dynamics_stdp_common.h" -#include "weight_dependence/weight.h" -#include "timing_dependence/timing.h" -#include -#include -#include +//#include "weight_dependence/weight.h" +//#include "timing_dependence/timing.h" +//#include +//#include +//#include -#include +//#include //#include #include -extern neuron_pointer_t neuron_array; +extern neuron_t *neuron_array; //extern global_neuron_params_pointer_t global_parameters; -static uint32_t synapse_type_index_bits; -static uint32_t synapse_index_bits; -static uint32_t synapse_index_mask; -static uint32_t synapse_type_index_mask; -static uint32_t synapse_delay_index_type_bits; -static uint32_t synapse_type_mask; +//static uint32_t synapse_type_index_bits; +//static uint32_t synapse_index_bits; +//static uint32_t synapse_index_mask; +//static uint32_t synapse_type_index_mask; +//static uint32_t synapse_delay_index_type_bits; +//static uint32_t synapse_type_mask; -uint32_t num_plastic_pre_synaptic_events = 0; -uint32_t plastic_saturation_count = 0; +//uint32_t num_plastic_pre_synaptic_events = 0; +//uint32_t plastic_saturation_count = 0; //--------------------------------------- // Macros @@ -81,15 +82,23 @@ uint32_t 
plastic_saturation_count = 0; uint32_t RECURRENT_SYNAPSE_OFFSET = 100; -//--------------------------------------- -// Structures -//--------------------------------------- -typedef struct { - pre_trace_t prev_trace; - uint32_t prev_time; -} pre_event_history_t; +////--------------------------------------- +//// Structures +////--------------------------------------- +//typedef struct { +// pre_trace_t prev_trace; +// uint32_t prev_time; +//} pre_event_history_t; +// +//post_event_history_t *post_event_history; -post_event_history_t *post_event_history; +//! The format of the plastic data region of a synaptic row +struct synapse_row_plastic_data_t { + //! The pre-event history + pre_event_history_t history; + //! The per-synapse information + plastic_synapse_t synapses[]; +}; /* PRIVATE FUNCTIONS */ @@ -176,20 +185,19 @@ static inline pre_event_history_t *plastic_event_history( } void synapse_dynamics_print_plastic_synapses( - address_t plastic_region_address, address_t fixed_region_address, + synapse_row_plastic_data_t *plastic_region_data, + synapse_row_fixed_part_t *fixed_region, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - use(plastic_region_address); - use(fixed_region_address); - use(ring_buffer_to_input_buffer_left_shifts); + __use(plastic_region_data); + __use(fixed_region); + __use(ring_buffer_to_input_buffer_left_shifts); #if LOG_LEVEL >= LOG_DEBUG // Extract separate arrays of weights (from plastic region), // Control words (from fixed region) and number of plastic synapses - plastic_synapse_t *plastic_words = plastic_synapses(plastic_region_address); - const control_t *control_words = - synapse_row_plastic_controls(fixed_region_address); - size_t plastic_synapse = - synapse_row_num_plastic_controls(fixed_region_address); + const plastic_synapse_t *plastic_words = plastic_region_data->synapses; + const control_t *control_words = synapse_row_plastic_controls(fixed_region); + size_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); 
log_debug("Plastic region %u synapses\n", plastic_synapse); @@ -211,10 +219,10 @@ void synapse_dynamics_print_plastic_synapses( synapses_print_weight( weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", - synapse_row_sparse_delay(control_word, synapse_type_index_bits), + synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), synapse_types_get_type_char(synapse_type), synapse_row_sparse_index(control_word, synapse_index_mask), - SYNAPSE_DELAY_MASK, synapse_type_index_bits); + synapse_delay_mask, synapse_type_index_bits); } #endif // LOG_LEVEL >= LOG_DEBUG } @@ -229,57 +237,44 @@ static inline index_t sparse_axonal_delay(uint32_t x) { #endif } -address_t synapse_dynamics_initialise( +bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - // Load timing dependence data - address_t weight_region_address = timing_initialise(address); - if (address == NULL) { - return NULL; - } +// // Load timing dependence data +// address_t weight_region_address = timing_initialise(address); +// if (address == NULL) { +// return NULL; +// } +// +// // Load weight dependence data +// address_t weight_result = weight_initialise( +// weight_region_address, n_synapse_types, +// ring_buffer_to_input_buffer_left_shifts); +// if (weight_result == NULL) { +// return NULL; +// } +// +// post_event_history = post_events_init_buffers(n_neurons); +// if (post_event_history == NULL) { +// return NULL; +// } - // Load weight dependence data - address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts); - if (weight_result == NULL) { - return NULL; + if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, + ring_buffer_to_input_buffer_left_shifts)) { + return false; } post_event_history = post_events_init_buffers(n_neurons); if 
(post_event_history == NULL) { - return NULL; - } - - uint32_t n_neurons_power_2 = n_neurons; - uint32_t log_n_neurons = 1; - if (n_neurons != 1) { - if (!is_power_of_2(n_neurons)) { - n_neurons_power_2 = next_power_of_2(n_neurons); - } - log_n_neurons = ilog_2(n_neurons_power_2); + return false; } - uint32_t n_synapse_types_power_2 = n_synapse_types; - if (!is_power_of_2(n_synapse_types)) { - n_synapse_types_power_2 = next_power_of_2(n_synapse_types); - } - uint32_t log_n_synapse_types = ilog_2(n_synapse_types_power_2); - - synapse_type_index_bits = log_n_neurons + log_n_synapse_types; - synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; - synapse_index_bits = log_n_neurons; - synapse_index_mask = (1 << synapse_index_bits) - 1; - synapse_delay_index_type_bits = - SYNAPSE_DELAY_BITS + synapse_type_index_bits; - synapse_type_mask = (1 << log_n_synapse_types) - 1; - - return weight_result; + return true; // weight_result; } -static inline final_state_t eprop_plasticity_update(update_state_t current_state, - REAL delta_w){ +static inline final_state_t eprop_plasticity_update( + update_state_t current_state, REAL delta_w){ // Test weight change // delta_w = -0.1k; @@ -291,7 +286,7 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state // int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING // int16_t delta_w_int = (int) delta_w; // >> 15; - if (delta_w){ + if (delta_w){ // TODO: delta_w_int instead? 
if (PRINT_PLASTICITY){ io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" // ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" @@ -304,16 +299,20 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } if (delta_w_int < 0){ - current_state = weight_one_term_apply_depression(current_state, (int16_t)(delta_w_int << 0)); + current_state = weight_one_term_apply_depression( + current_state, delta_w_int << 3); +// current_state, (int16_t)(delta_w_int << 0)); } else { - current_state = weight_one_term_apply_potentiation(current_state, (int16_t)(delta_w_int << 0)); + current_state = weight_one_term_apply_potentiation( + current_state, delta_w_int << 3); +// current_state, (int16_t)(delta_w_int << 0)); } } else { // if (PRINT_PLASTICITY){ // io_printf(IO_BUF, "delta_w: %k\n", delta_w); // } - current_state = current_state; +// current_state = current_state; } // Calculate regularisation error @@ -325,25 +324,21 @@ static inline final_state_t eprop_plasticity_update(update_state_t current_state } - - bool synapse_dynamics_process_plastic_synapses( - address_t plastic_region_address, address_t fixed_region_address, - weight_t *ring_buffers, uint32_t time) { + synapse_row_plastic_data_t *plastic_region_address, + synapse_row_fixed_part_t *fixed_region, + weight_t *ring_buffers, uint32_t time, uint32_t colour_delay, + bool *write_back) { // Extract separate arrays of plastic synapses (from plastic region), // Control words (from fixed region) and number of plastic synapses - plastic_synapse_t *plastic_words = - plastic_synapses(plastic_region_address); - const control_t *control_words = - synapse_row_plastic_controls(fixed_region_address); - size_t plastic_synapse = - synapse_row_num_plastic_controls(fixed_region_address); + plastic_synapse_t *plastic_words = plastic_region_address->synapses; + const control_t *control_words = synapse_row_plastic_controls(fixed_region); + size_t plastic_synapse = 
synapse_row_num_plastic_controls(fixed_region); num_plastic_pre_synaptic_events += plastic_synapse; - // Could maybe have a single z_bar for the entire synaptic row and update it once here for all synaptic words? - - + // Could maybe have a single z_bar for the entire synaptic row + // and update it once here for all synaptic words? // Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { @@ -355,9 +350,9 @@ bool synapse_dynamics_process_plastic_synapses( // 16-bits of 32-bit fixed synapse so same functions can be used // uint32_t delay_axonal = sparse_axonal_delay(control_word); - uint32_t delay = 1.0k; - uint32_t syn_ind_from_delay = - synapse_row_sparse_delay(control_word, synapse_type_index_bits); + uint32_t delay = 1; // 1.0k; ?? + uint32_t syn_ind_from_delay = synapse_row_sparse_delay( + control_word, synapse_type_index_bits, synapse_delay_mask); // uint32_t delay_dendritic = synapse_row_sparse_delay( // control_word, synapse_type_index_bits); @@ -368,8 +363,8 @@ bool synapse_dynamics_process_plastic_synapses( uint32_t type_index = synapse_row_sparse_type_index( control_word, synapse_type_index_mask); - - int32_t neuron_ind = synapse_row_sparse_index(control_word, synapse_index_mask); + int32_t neuron_ind = synapse_row_sparse_index( + control_word, synapse_index_mask); // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, @@ -379,23 +374,23 @@ bool synapse_dynamics_process_plastic_synapses( syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } + + neuron_t *neuron = &neuron_array[neuron_ind]; + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; // !!!! Check what units this is in - same as weight? !!!! + // Create update state from the plastic synaptic word update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); - neuron_pointer_t neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024; // !!!! 
Check what units this is in - same as weight? !!!! - - - if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", -// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); - - io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", - neuron_ind, syn_ind_from_delay, type, - current_state.initial_weight, - neuron->syn_state[syn_ind_from_delay].delta_w, time); - } +// if (PRINT_PLASTICITY){ +//// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", +//// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); +// +// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", +// neuron_ind, syn_ind_from_delay, type, +// current_state.initial_weight, +// neuron->syn_state[syn_ind_from_delay].delta_w, time); +// } // Perform weight update: only if batch time has elapsed final_state_t final_state; @@ -431,10 +426,10 @@ bool synapse_dynamics_process_plastic_synapses( // Add contribution to synaptic input // Convert into ring buffer offset - uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined( + uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index_combined( // delay_axonal + delay_dendritic + time, type_index, - synapse_type_index_bits); + synapse_type_index_bits, synapse_delay_mask); // Check for ring buffer saturation int16_t accumulation = ring_buffers[ring_buffer_index] + @@ -483,20 +478,23 @@ void synapse_dynamics_process_post_synaptic_event( timing_add_post_spike(time, last_post_time, last_post_trace)); } -input_t synapse_dynamics_get_intrinsic_bias( - uint32_t time, index_t neuron_index) { - use(time); - use(neuron_index); - return 0.0k; -} - -uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { - return num_plastic_pre_synaptic_events; -} +//input_t synapse_dynamics_get_intrinsic_bias( +// 
uint32_t time, index_t neuron_index) { +// use(time); +// use(neuron_index); +// return 0.0k; +//} +// +//uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { +// return num_plastic_pre_synaptic_events; +//} +// +//uint32_t synapse_dynamics_get_plastic_saturation_count(void) { +// return plastic_saturation_count; +//} -uint32_t synapse_dynamics_get_plastic_saturation_count(void) { - return plastic_saturation_count; -} +// TODO: fix below to match other dynamics impls so that structural +// plasticity can be used #if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 3c0d060d792..193d186a9cf 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -485,7 +485,8 @@ void synapse_dynamics_process_post_synaptic_event( // return plastic_saturation_count; //} -// TODO: fix below to match other dynamics impls +// TODO: fix below to match other dynamics impls so that structural +// plasticity can be used #if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c index a53749f7d01..2190fbc82e7 100644 --- a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c +++ b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c @@ -1034,7 +1034,13 @@ static void timer_callback(UNUSED uint timer_count, UNUSED uint unused) { //! \param[in] key: Received multicast key //! 
\param[in] payload: Received multicast payload static void multicast_packet_callback(uint key, uint payload) { + +// log_info("multicast packet callback SSP key %u payload %u time %u", +// key, payload, time); + uint32_t id = key & ssp_params.set_rate_neuron_id_mask; + +// log_info("id value is %u mask %u", id, ssp_params.set_rate_neuron_id_mask); if ((id < ssp_params.first_source_id) || (id - ssp_params.first_source_id >= ssp_params.n_spike_sources)) { return; diff --git a/spynnaker/pyNN/extra_models/__init__.py b/spynnaker/pyNN/extra_models/__init__.py index 3a31e445e9e..2ec3fff66e4 100644 --- a/spynnaker/pyNN/extra_models/__init__.py +++ b/spynnaker/pyNN/extra_models/__init__.py @@ -31,7 +31,8 @@ IzkCondExpBase as Izhikevich_cond, IFCurrExpSEMDBase as IF_curr_exp_sEMD, EPropAdaptive as EPropAdaptive, - SinusoidReadout as SinusoidReadout) + SinusoidReadout as SinusoidReadout, + LeftRightReadout as LeftRightReadout) # Variable rate poisson from spynnaker.pyNN.models.spike_source import SpikeSourcePoissonVariable @@ -42,7 +43,7 @@ 'Izhikevich_cond', 'IF_curr_dual_exp', 'IF_curr_exp_sEMD', # Eprop implementation and related models (Adam Perrett/Oliver Rhodes) - 'EPropAdaptive', 'SinusoidReadout', + 'EPropAdaptive', 'SinusoidReadout', 'LeftRightReadout', # Neuromodulation synapse dynamics (Mantas Mikaitis) 'Neuromodulation', diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 282aacf17cf..f34a945af06 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -681,20 +681,6 @@ def set_current_state_values(self, name, value, selector=None): self.__state_variables[name].set_value_by_selector( selector, value) - - # This was added to the eprop_adaptive branch; does it need adding to - # generate_data_spec (which has now been moved to the machine vertex...) 
- - # if isinstance(self.__pynn_model._model.neuron_model, NeuronModelLeftRightReadout): - # poisson_key = routing_info.get_first_key_from_pre_vertex(placement.vertex, "CONTROL") - # self.__pynn_model._model.neuron_model.set_poisson_key(poisson_key) - # - # # Get the poisson key - # p_key = routing_info.get_first_key_from_pre_vertex( - # vertex, constants.LIVE_POISSON_CONTROL_PARTITION_ID) - # if hasattr(self.__neuron_impl, "set_poisson_key"): - # self.__neuron_impl.set_poisson_key(p_key) - @overrides(PopulationApplicationVertex.get_state_variables) def get_state_variables(self): return self.__pynn_model.default_initial_values.keys() @@ -888,17 +874,9 @@ def __str__(self): def __repr__(self): return self.__str__() - # This was in the eprop_adpative branch but I don't think it's needed now? - - # def get_n_keys_for_partition(self, partition, graph_mapper): - # if partition.identifier == constants.LIVE_POISSON_CONTROL_PARTITION_ID: - # n_keys = 0 - # for edge in partition.edges: - # slice = graph_mapper.get_slice(edge.post_vertex) - # n_keys += slice.n_atoms - # return n_keys - # else: - # return self.n_atoms + @property + def _pynn_model(self): + return self.__pynn_model @overrides(AbstractCanReset.reset_to_first_timestep) def reset_to_first_timestep(self): diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index 2ee749f0e19..8d38df11441 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -29,11 +29,11 @@ from .eprop_adaptive import EPropAdaptive # from .store_recall_readout import StoreRecallReadout from .sinusoid_readout import SinusoidReadout -# from .left_right_readout import LeftRightReadout +from .left_right_readout import LeftRightReadout __all__ = ["EIFConductanceAlphaPopulation", "HHCondExp", "IFCondAlpha", "IFCondExpBase", "IFCurrAlpha", "IFCurrDualExpBase", "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", 
"IzkCurrExpBase", "IFCondExpStoc", "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", - "EPropAdaptive", "SinusoidReadout"] # , "StoreRecallReadout", "LeftRightReadout"] + "EPropAdaptive", "SinusoidReadout", "LeftRightReadout"] # , "StoreRecallReadout" diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index af4f5d73c7d..1f8f5ec5384 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -19,7 +19,7 @@ def __init__( v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, + # tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, # mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, rate_on=40, rate_off=0, poisson_pop_size=10, @@ -36,10 +36,11 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, number_of_cues) + rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, + number_of_cues) synapse_type = SynapseTypeEPropAdaptive( - tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, + # tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) input_type = InputTypeCurrent() @@ -50,4 +51,4 @@ def __init__( model_name="left_right_readout", binary="left_right_readout.aplx", neuron_model=neuron_model, input_type=input_type, - synapse_type=synapse_type, threshold_type=threshold_type) \ No newline at end of file + synapse_type=synapse_type, threshold_type=threshold_type) diff --git a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py index 434aa868f65..b2d7c34fe23 100644 --- 
a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py +++ b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py @@ -86,11 +86,9 @@ def __init__( if self.__additional_input_type is not None: self.__components.append(self.__additional_input_type) - # TODO: not convinced this is needed... ? - # @property - # @overrides(AbstractNeuronImpl.neuron_model) - # def neuron_model(self): - # return self.__neuron_model + @property + def neuron_model(self): + return self.__neuron_model @property def n_steps_per_timestep(self): diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index 3a66050b8ab..e5b616db1f2 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -19,10 +19,10 @@ # from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout from .neuron_model_sinusoid_readout import ( NeuronModelLeakyIntegrateAndFireSinusoidReadout) -# from .neuron_model_left_right_readout import NeuronModelLeftRightReadout +from .neuron_model_left_right_readout import NeuronModelLeftRightReadout __all__ = ["NeuronModelIzh", "NeuronModelLeakyIntegrateAndFire", "NeuronModelEPropAdaptive", - "NeuronModelLeakyIntegrateAndFireSinusoidReadout"] + "NeuronModelLeakyIntegrateAndFireSinusoidReadout", + "NeuronModelLeftRightReadout"] # "NeuronModelStoreRecallReadout", - # "NeuronModelLeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 57040469623..151975a200b 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -1,13 +1,16 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from 
pacman.executor.injection_decorator import inject_items -from .abstract_neuron_model import AbstractNeuronModel +from spynnaker.pyNN.models.neuron.implementations import ( + AbstractStandardNeuronComponent) +from spynnaker.pyNN.utilities.struct import Struct +from spynnaker.pyNN.data import SpynnakerDataView # constants SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state -MICROSECONDS_PER_SECOND = 1000000.0 -MICROSECONDS_PER_MILLISECOND = 1000.0 + +# MICROSECONDS_PER_SECOND = 1000000.0 +# MICROSECONDS_PER_MILLISECOND = 1000.0 V = "v" V_REST = "v_rest" TAU_M = "tau_m" @@ -15,7 +18,10 @@ I_OFFSET = "i_offset" V_RESET = "v_reset" TAU_REFRAC = "tau_refrac" -COUNT_REFRAC = "count_refrac" +# COUNT_REFRAC = "count_refrac" +TIMESTEP = "timestep" +REFRACT_TIMER = "refract_timer" + # Learning signal L = "learning_signal" W_FB = "feedback_weight" @@ -32,9 +38,15 @@ RATE_AT_LAST_SETTING = "rate_at_last_setting" # RATE_UPDATE_THRESHOLD = "rate_update_threshold" # PROB_COMMAND = "prob_command" +MEAN_L = "mean_l" +MEAN_R = "mean_r" RATE_ON = "rate_on" RATE_OFF = "rate_off" POISSON_POP_SIZE = 'poisson_pop_size' +POISSON_KEY = 'poisson_key' +CROSS_ENTROPY = 'cross_entropy' +ETA = 'eta' +NUMBER_OF_CUES = 'number_of_cues' DELTA_W = "delta_w" Z_BAR_OLD = "z_bar_old" @@ -43,45 +55,45 @@ # E_BAR = "e_bar" UPDATE_READY = "update_ready" -UNITS = { - V: 'mV', - V_REST: 'mV', - TAU_M: 'ms', - CM: 'nF', - I_OFFSET: 'nA', - V_RESET: 'mV', - TAU_REFRAC: 'ms' -} +# UNITS = { +# V: 'mV', +# V_REST: 'mV', +# TAU_M: 'ms', +# CM: 'nF', +# I_OFFSET: 'nA', +# V_RESET: 'mV', +# TAU_REFRAC: 'ms' +# } -class NeuronModelLeftRightReadout(AbstractNeuronModel): +class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): __slots__ = [ - "_v_init", - "_v_rest", - "_tau_m", - "_cm", - "_i_offset", - "_v_reset", - "_tau_refrac", + "__v_init", + "__v_rest", + "__tau_m", + "__cm", + "__i_offset", + "__v_reset", + "__tau_refrac", # "_mean_isi_ticks", # "_time_to_spike_ticks", # 
"_time_since_last_spike", # "_rate_at_last_setting", # "_rate_update_threshold", # "_prob_command", - "_rate_off", - "_rate_on", - "_l", - "_w_fb", - "_window_size", - "_eta", - "_mean_l", - "_mean_r", - "_cross_entropy", - "_poisson_key", - "_poisson_pop_size", - "_n_keys_in_target", - "_number_of_cues" + "__rate_off", + "__rate_on", + "__l", + "__w_fb", + "__window_size", + "__eta", + "__mean_l", + "__mean_r", + "__cross_entropy", + "__poisson_key", + "__poisson_pop_size", + "__n_keys_in_target", + "__number_of_cues" ] def __init__( @@ -89,130 +101,158 @@ def __init__( # mean_isi_ticks, time_to_spike_ticks, # rate_update_threshold, # prob_command, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, number_of_cues): - - global_data_types = [ - DataType.UINT32, # MARS KISS seed - DataType.UINT32, # MARS KISS seed - DataType.UINT32, # MARS KISS seed - DataType.UINT32, # MARS KISS seed - DataType.S1615, # ticks_per_second - DataType.S1615, # global mem pot - DataType.S1615, # global mem pot 2 - DataType.S1615, # rate on - DataType.S1615, # rate off - DataType.S1615, # mean left activation - DataType.S1615, # mean right activation - DataType.S1615, # cross entropy - DataType.UINT32, # poisson key - DataType.UINT32, # poisson pop size - DataType.S1615, # eta - DataType.UINT32, # number of cues - ] - data_types = [ - DataType.S1615, # v - DataType.S1615, # v_rest - DataType.S1615, # r_membrane (= tau_m / cm) - DataType.S1615, # exp_tc (= e^(-ts / tau_m)) - DataType.S1615, # i_offset - DataType.INT32, # count_refrac - DataType.S1615, # v_reset - DataType.INT32, # tau_refrac + rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, + number_of_cues): + + # global_data_types = [ + # DataType.UINT32, # MARS KISS seed + # DataType.UINT32, # MARS KISS seed + # DataType.UINT32, # MARS KISS seed + # DataType.UINT32, # MARS KISS seed + # DataType.S1615, # ticks_per_second + # DataType.S1615, # global mem pot + # DataType.S1615, # global mem pot 2 + # 
DataType.S1615, # rate on + # DataType.S1615, # rate off + # DataType.S1615, # mean left activation + # DataType.S1615, # mean right activation + # DataType.S1615, # cross entropy + # DataType.UINT32, # poisson key + # DataType.UINT32, # poisson pop size + # DataType.S1615, # eta + # DataType.UINT32, # number of cues + # ] + struct_neuron_vals = [ + (DataType.S1615, V), # v + (DataType.S1615, V_REST), # v_rest + (DataType.S1615, CM), # r_membrane (= tau_m / cm) + (DataType.S1615, TAU_M), # exp_tc (= e^(-ts / tau_m)) + (DataType.S1615, I_OFFSET), # i_offset + (DataType.S1615, V_RESET), # v_reset + (DataType.S1615, TAU_REFRAC), # tau_refrac + (DataType.INT32, REFRACT_TIMER), # count_refrac + (DataType.S1615, TIMESTEP), # timestep # Learning signal - DataType.S1615, # L - DataType.S1615, # w_fb - DataType.UINT32 # window_size + (DataType.S1615, L), # L + (DataType.S1615, W_FB), # w_fb + (DataType.UINT32, WINDOW_SIZE), # window_size + # former global parameters + (DataType.UINT32, SEED1), + (DataType.UINT32, SEED2), + (DataType.UINT32, SEED3), + (DataType.UINT32, SEED4), # + (DataType.S1615, TICKS_PER_SECOND), + (DataType.S1615, TIME_SINCE_LAST_SPIKE), # apparently set to 0.0 on first timestep + (DataType.S1615, RATE_AT_LAST_SETTING), # apparently set to 0.0 on first timestep + (DataType.S1615, RATE_ON), + (DataType.S1615, RATE_OFF), + (DataType.S1615, MEAN_L), + (DataType.S1615, MEAN_R), + (DataType.S1615, CROSS_ENTROPY), + (DataType.UINT32, POISSON_KEY), + (DataType.UINT32, POISSON_POP_SIZE), + (DataType.S1615, ETA), + (DataType.UINT32, NUMBER_OF_CUES) ] + # Synapse states - always initialise to zero - eprop_syn_state = [ # synaptic state, one per synapse (kept in DTCM) - DataType.S1615, # delta_w - DataType.S1615, # z_bar_old - DataType.S1615, # z_bar - # DataType.S1615, # ep_a - # DataType.S1615, # e_bar - DataType.INT32 # update_ready - ] - # Extend to include fan-in for each neuron - data_types.extend(eprop_syn_state * SYNAPSES_PER_NEURON) - - 
super(NeuronModelLeftRightReadout, self).__init__( - data_types=data_types, - - global_data_types=global_data_types - ) + for n in range(SYNAPSES_PER_NEURON): + struct_neuron_vals.extend( + [(DataType.S1615, DELTA_W+str(n)), + (DataType.S1615, Z_BAR_OLD+str(n)), + (DataType.S1615, Z_BAR+str(n)), + (DataType.UINT32, UPDATE_READY+str(n))]) + + super().__init__( + [Struct(struct_neuron_vals)], + {V: 'mV', V_REST: 'mV', TAU_M: 'ms', CM: 'nF', I_OFFSET: 'nA', + V_RESET: 'mV', TAU_REFRAC: 'ms'}) if v_init is None: v_init = v_rest - self._v_init = v_init - self._v_rest = v_rest - self._tau_m = tau_m - self._cm = cm - self._i_offset = i_offset - self._v_reset = v_reset - self._tau_refrac = tau_refrac + self.__v_init = v_init + self.__v_rest = v_rest + self.__tau_m = tau_m + self.__cm = cm + self.__i_offset = i_offset + self.__v_reset = v_reset + self.__tau_refrac = tau_refrac # self._mean_isi_ticks = mean_isi_ticks # self._time_to_spike_ticks = time_to_spike_ticks # self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation # self._rate_at_last_setting = 0 # self._rate_update_threshold = 2 # self._prob_command = prob_command - self._rate_off = rate_off - self._rate_on = rate_on - self._mean_l = 0.0 - self._mean_r = 0.0 - self._cross_entropy = 0.0 - self._poisson_key = None - self._poisson_pop_size = poisson_pop_size - self._l = l - self._w_fb = w_fb - self._eta = eta - self._window_size = window_size - self._number_of_cues = number_of_cues - - self._n_keys_in_target = poisson_pop_size * 4 + self.__rate_off = rate_off + self.__rate_on = rate_on + self.__mean_l = 0.0 + self.__mean_r = 0.0 + self.__cross_entropy = 0.0 + self.__poisson_key = 0 # None TODO: work out how to pass this in + self.__poisson_pop_size = poisson_pop_size + self.__l = l + self.__w_fb = w_fb + self.__eta = eta + self.__window_size = window_size + self.__number_of_cues = number_of_cues + + self.__n_keys_in_target = poisson_pop_size * 4 def 
set_poisson_key(self, p_key): - self._poisson_key = p_key + self.__poisson_key = p_key - @overrides(AbstractNeuronModel.get_n_cpu_cycles) - def get_n_cpu_cycles(self, n_neurons): - # A bit of a guess - return 100 * n_neurons + # @overrides(AbstractNeuronModel.get_n_cpu_cycles) + # def get_n_cpu_cycles(self, n_neurons): + # # A bit of a guess + # return 100 * n_neurons - @overrides(AbstractNeuronModel.add_parameters) + @overrides(AbstractStandardNeuronComponent.add_parameters) def add_parameters(self, parameters): - parameters[V_REST] = self._v_rest - parameters[TAU_M] = self._tau_m - parameters[CM] = self._cm - parameters[I_OFFSET] = self._i_offset - parameters[V_RESET] = self._v_reset - parameters[TAU_REFRAC] = self._tau_refrac - parameters[L] = self._l - parameters[W_FB] = self._w_fb - parameters[WINDOW_SIZE] = self._window_size + parameters[V_REST] = self.__v_rest + parameters[TAU_M] = self.__tau_m + parameters[CM] = self.__cm + parameters[I_OFFSET] = self.__i_offset + parameters[V_RESET] = self.__v_reset + parameters[TAU_REFRAC] = self.__tau_refrac + parameters[TIMESTEP] = SpynnakerDataView.get_simulation_time_step_ms() + + parameters[L] = self.__l + parameters[W_FB] = self.__w_fb + parameters[WINDOW_SIZE] = self.__window_size + # These should probably have defaults earlier than this parameters[SEED1] = 10065 parameters[SEED2] = 232 parameters[SEED3] = 3634 parameters[SEED4] = 4877 # parameters[PROB_COMMAND] = self._prob_command - parameters[RATE_ON] = self._rate_on - parameters[RATE_OFF] = self._rate_off + parameters[RATE_ON] = self.__rate_on + parameters[RATE_OFF] = self.__rate_off - parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() - parameters[POISSON_POP_SIZE] = self._poisson_pop_size + parameters[TICKS_PER_SECOND] = 0.0 # set in get_valuers() + parameters[TIME_SINCE_LAST_SPIKE] = 0.0 + parameters[RATE_AT_LAST_SETTING] = 0.0 + parameters[POISSON_POP_SIZE] = self.__poisson_pop_size # parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold # 
parameters[TARGET_DATA] = self._target_data - - @overrides(AbstractNeuronModel.add_state_variables) + parameters[MEAN_L] = self.__mean_l + parameters[MEAN_R] = self.__mean_r + parameters[CROSS_ENTROPY] = self.__cross_entropy + parameters[POISSON_KEY] = self.__poisson_key # not sure this is needed here + print("in add_parameters, poisson key is ", self.__poisson_key) + parameters[POISSON_POP_SIZE] = self.__poisson_pop_size + parameters[ETA] = self.__eta + parameters[NUMBER_OF_CUES] = self.__number_of_cues + + @overrides(AbstractStandardNeuronComponent.add_state_variables) def add_state_variables(self, state_variables): - state_variables[V] = self._v_init - state_variables[COUNT_REFRAC] = 0 + state_variables[V] = self.__v_init + state_variables[REFRACT_TIMER] = 0 #learning params - state_variables[L] = self._l + state_variables[L] = self.__l # state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks # state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential # state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike @@ -224,101 +264,101 @@ def add_state_variables(self, state_variables): state_variables[Z_BAR+str(n)] = 0 # state_variables[EP_A+str(n)] = 0 # state_variables[E_BAR+str(n)] = 0 - state_variables[UPDATE_READY+str(n)] = self._window_size - - - @overrides(AbstractNeuronModel.get_units) - def get_units(self, variable): - return UNITS[variable] - - @overrides(AbstractNeuronModel.has_variable) - def has_variable(self, variable): - return variable in UNITS - - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) - def get_values(self, parameters, state_variables, vertex_slice, ts): - - # Add the rest of the data - values = [state_variables[V], - parameters[V_REST], - parameters[TAU_M] / parameters[CM], - parameters[TAU_M].apply_operation( - operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - parameters[I_OFFSET], 
state_variables[COUNT_REFRAC], - parameters[V_RESET], - parameters[TAU_REFRAC].apply_operation( - operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), - - state_variables[L], - parameters[W_FB], - parameters[WINDOW_SIZE] - ] - - # create synaptic state - init all state to zero - eprop_syn_init = [0, # delta w - 0, # z_bar_inp - 0,#, # z_bar - # 0, # el_a - # 0] # e_bar - self._window_size, #int(numpy.random.rand()*1024) # update_ready - ] - # extend to appropriate fan-in - values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) - - return values - - @overrides(AbstractNeuronModel.update_values) - def update_values(self, values, parameters, state_variables): - - # Read the data - (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, - _v_reset, _tau_refrac, - _l, _w_fb, window_size, delta_w, z_bar_old, z_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! - # todo check alignment on this - - # Copy the changed data only - state_variables[V] = _v - - state_variables[L] = _l + state_variables[UPDATE_READY+str(n)] = self.__window_size - for n in range(SYNAPSES_PER_NEURON): - state_variables[DELTA_W+str(n)] = delta_w[n] - state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] - state_variables[Z_BAR+str(n)] = z_bar[n] - # state_variables[EP_A+str(n)] = ep_a[n] - # state_variables[E_BAR+str(n)] = e_bar[n] - state_variables[UPDATE_READY] = update_ready[n] - - # Global params - @inject_items({"machine_time_step": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_global_values, - additional_arguments={'machine_time_step'}) - def get_global_values(self, machine_time_step): - vals = [ - 1, # seed 1 - 2, # seed 2 - 3, # seed 3 - 4, # seed 4 - MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second - 0.0, # set to 0, as will be set in first timestep of model anyway - 0.0, # set to 0, as will be set in first timestep of model anyway - self._rate_on, - self._rate_off, - self._mean_l, - self._mean_r, - self._cross_entropy, 
- self._poisson_key, - self._poisson_pop_size, - self._eta, - self._number_of_cues - ] - - return vals - @property - def prob_command(self): - return self._prob_command + # @overrides(AbstractNeuronModel.get_units) + # def get_units(self, variable): + # return UNITS[variable] + # + # @overrides(AbstractNeuronModel.has_variable) + # def has_variable(self, variable): + # return variable in UNITS + # + # @inject_items({"ts": "MachineTimeStep"}) + # @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + # def get_values(self, parameters, state_variables, vertex_slice, ts): + # + # # Add the rest of the data + # values = [state_variables[V], + # parameters[V_REST], + # parameters[TAU_M] / parameters[CM], + # parameters[TAU_M].apply_operation( + # operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), + # parameters[I_OFFSET], state_variables[COUNT_REFRAC], + # parameters[V_RESET], + # parameters[TAU_REFRAC].apply_operation( + # operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), + # + # state_variables[L], + # parameters[W_FB], + # parameters[WINDOW_SIZE] + # ] + # + # # create synaptic state - init all state to zero + # eprop_syn_init = [0, # delta w + # 0, # z_bar_inp + # 0,#, # z_bar + # # 0, # el_a + # # 0] # e_bar + # self._window_size, #int(numpy.random.rand()*1024) # update_ready + # ] + # # extend to appropriate fan-in + # values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) + # + # return values + # + # @overrides(AbstractNeuronModel.update_values) + # def update_values(self, values, parameters, state_variables): + # + # # Read the data + # (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, + # _v_reset, _tau_refrac, + # _l, _w_fb, window_size, delta_w, z_bar_old, z_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! 
+ # # todo check alignment on this + # + # # Copy the changed data only + # state_variables[V] = _v + # + # state_variables[L] = _l + # + # for n in range(SYNAPSES_PER_NEURON): + # state_variables[DELTA_W+str(n)] = delta_w[n] + # state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] + # state_variables[Z_BAR+str(n)] = z_bar[n] + # # state_variables[EP_A+str(n)] = ep_a[n] + # # state_variables[E_BAR+str(n)] = e_bar[n] + # state_variables[UPDATE_READY] = update_ready[n] + # + # # Global params + # @inject_items({"machine_time_step": "MachineTimeStep"}) + # @overrides(AbstractNeuronModel.get_global_values, + # additional_arguments={'machine_time_step'}) + # def get_global_values(self, machine_time_step): + # vals = [ + # 1, # seed 1 + # 2, # seed 2 + # 3, # seed 3 + # 4, # seed 4 + # MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second + # 0.0, # set to 0, as will be set in first timestep of model anyway + # 0.0, # set to 0, as will be set in first timestep of model anyway + # self._rate_on, + # self._rate_off, + # self._mean_l, + # self._mean_r, + # self._cross_entropy, + # self._poisson_key, + # self._poisson_pop_size, + # self._eta, + # self._number_of_cues + # ] + # + # return vals + + # @property + # def prob_command(self): + # return self.__prob_command # @prob_command.setter # def prob_command(self, prob_command): @@ -326,91 +366,91 @@ def prob_command(self): @property def rate_on(self): - return self._rate_on + return self.__rate_on @rate_on.setter def rate_on(self, rate_on): - self._rate_on = rate_on + self.__rate_on = rate_on @property def rate_off(self): - return self._rate_off + return self.__rate_off @rate_on.setter def rate_on(self, rate_off): - self._rate_off = rate_off + self.__rate_off = rate_off @property def v_init(self): - return self._v + return self.__v_init @v_init.setter def v_init(self, v_init): - self._v = v_init + self.__v_init = v_init @property def v_rest(self): - return self._v_rest + return self.__v_rest @v_rest.setter def 
v_rest(self, v_rest): - self._v_rest = v_rest + self.__v_rest = v_rest @property def tau_m(self): - return self._tau_m + return self.__tau_m @tau_m.setter def tau_m(self, tau_m): - self._tau_m = tau_m + self.__tau_m = tau_m @property def cm(self): - return self._cm + return self.__cm @cm.setter def cm(self, cm): - self._cm = cm + self.__cm = cm @property def i_offset(self): - return self._i_offset + return self.__i_offset @i_offset.setter def i_offset(self, i_offset): - self._i_offset = i_offset + self.__i_offset = i_offset @property def v_reset(self): - return self._v_reset + return self.__v_reset @v_reset.setter def v_reset(self, v_reset): - self._v_reset = v_reset + self.__v_reset = v_reset @property def tau_refrac(self): - return self._tau_refrac + return self.__tau_refrac @tau_refrac.setter def tau_refrac(self, tau_refrac): - self._tau_refrac = tau_refrac + self.__tau_refrac = tau_refrac @property def w_fb(self): - return self._w_fb + return self.__w_fb @w_fb.setter def w_fb(self, new_value): - self._w_fb = new_value + self.__w_fb = new_value @property def window_size(self): - return self._window_size + return self.__window_size @window_size.setter def window_size(self, new_value): - self._window_size = new_value + self.__window_size = new_value # @property # def mean_isi_ticks(self): @@ -426,4 +466,4 @@ def window_size(self, new_value): # # @mean_isi_ticks.setter # def time_to_spike_ticks(self, new_time_to_spike_ticks): - # self._time_to_spike_ticks = new_time_to_spike_ticks \ No newline at end of file + # self._time_to_spike_ticks = new_time_to_spike_ticks diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 543c381b778..0db78bff775 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -95,7 +95,7 @@ def __init__( 
(DataType.S1615, TIMESTEP), # timestep # Learning signal (DataType.S1615, L), # L - (DataType.S1615, W_FB), # w_fb + (DataType.S1615, W_FB) # w_fb ] # former global parameters diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index e6406c5d149..76e3a694cee 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -19,6 +19,10 @@ from spinn_front_end_common.abstract_models import ( AbstractGeneratesDataSpecification, AbstractRewritesDataSpecification) from spinn_front_end_common.interface.provenance import ProvenanceWriter +from spynnaker.pyNN.data import SpynnakerDataView +from spynnaker.pyNN.models.neuron.neuron_models import ( + NeuronModelLeftRightReadout) +from spynnaker.pyNN.utilities import constants from .population_machine_common import CommonRegions, PopulationMachineCommon from .population_machine_neurons import ( NeuronRegions, PopulationMachineNeurons, NeuronProvenance) @@ -293,6 +297,15 @@ def generate_data_specification(self, spec, placement): self.vertex_slice)) self._write_common_data_spec(spec, rec_regions) + # Set the poisson key for eprop left-right + routing_info = SpynnakerDataView.get_routing_infos() + if isinstance(self._app_vertex._pynn_model._model.neuron_model, + NeuronModelLeftRightReadout): + poisson_key = routing_info.get_first_key_from_pre_vertex( + placement.vertex, constants.LIVE_POISSON_CONTROL_PARTITION_ID) + self._app_vertex._pynn_model._model.neuron_model.set_poisson_key( + poisson_key) + self._write_neuron_data_spec(spec, self.__ring_buffer_shifts) self._write_synapse_data_spec( @@ -400,4 +413,17 @@ def set_do_synapse_regeneration(self): @overrides(PopulationMachineCommon.get_n_keys_for_partition) def get_n_keys_for_partition(self, partition_id): n_colours = 2 ** self._app_vertex.n_colour_bits - return self._vertex_slice.n_atoms * n_colours + if partition_id == 
constants.LIVE_POISSON_CONTROL_PARTITION_ID: + n_keys = 0 + partitions = ( + SpynnakerDataView.get_outgoing_edge_partitions_starting_at_vertex( + self._app_vertex)) + for partition in partitions: + if partition.identifier == constants.LIVE_POISSON_CONTROL_PARTITION_ID: + for edge in partition.edges: + n_keys += edge.post_vertex.n_atoms + # n_keys += slice.n_atoms + partition_id, n_keys, n_colours) + return n_keys * n_colours + else: + return self._vertex_slice.n_atoms * n_colours From af35d2c0b1c06bcd1bd7fced911371f654df8e0e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 4 May 2023 14:25:55 +0100 Subject: [PATCH 093/123] Add comment and fix broken loop --- spynnaker/pyNN/models/neuron/population_machine_vertex.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 76e3a694cee..665b703f8c2 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -415,6 +415,7 @@ def get_n_keys_for_partition(self, partition_id): n_colours = 2 ** self._app_vertex.n_colour_bits if partition_id == constants.LIVE_POISSON_CONTROL_PARTITION_ID: n_keys = 0 + # Seems like overkill, there should be a simpler way to do this partitions = ( SpynnakerDataView.get_outgoing_edge_partitions_starting_at_vertex( self._app_vertex)) @@ -422,8 +423,6 @@ def get_n_keys_for_partition(self, partition_id): if partition.identifier == constants.LIVE_POISSON_CONTROL_PARTITION_ID: for edge in partition.edges: n_keys += edge.post_vertex.n_atoms - # n_keys += slice.n_atoms - partition_id, n_keys, n_colours) return n_keys * n_colours else: return self._vertex_slice.n_atoms * n_colours From afe47db23be81295352eeb89f23c80e288613050 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 5 May 2023 14:47:24 +0100 Subject: [PATCH 094/123] Tidying up for earlier compiler versions to fit in ITCM --- 
.../neuron_impl_eprop_adaptive.h | 17 ++++--- .../neuron_impl_left_right_readout.h | 47 +++++++++---------- .../models/neuron_model_eprop_adaptive_impl.h | 14 +++--- .../neuron_model_left_right_readout_impl.h | 10 ++-- .../weight_dependence/weight_eprop_reg_impl.c | 2 +- .../synapse_type_eprop_adaptive.h | 11 ++--- neural_modelling/src/neuron/synapses.c | 4 -- 7 files changed, 50 insertions(+), 55 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index cd3b9ab800e..202f369c598 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -304,11 +304,11 @@ static void neuron_impl_load_neuron_parameters( // } // This can probably be commented out at some point as well - for (index_t n = 0; n < n_neurons; n++) { - neuron_model_print_parameters(&neuron_array[n]); - log_debug("Neuron id %u", n); - neuron_model_print_state_variables(&neuron_array[n]); - } +// for (index_t n = 0; n < n_neurons; n++) { +// neuron_model_print_parameters(&neuron_array[n]); +// log_debug("Neuron id %u", n); +// neuron_model_print_state_variables(&neuron_array[n]); +// } #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); @@ -503,8 +503,10 @@ static void neuron_impl_do_timestep_update( // - global_parameters->core_target_rate; // REAL reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / neuron_impl_neurons_in_partition); // REAL reg_learning_signal = (global_parameters->core_pop_rate / neuron_impl_neurons_in_partition) - global_parameters->core_target_rate; - REAL reg_learning_signal = ( - neuron->core_pop_rate / neuron_impl_neurons_in_partition) - neuron->core_target_rate; +// REAL reg_learning_signal = ( +// neuron->core_pop_rate / neuron_impl_neurons_in_partition) - neuron->core_target_rate; + 
REAL reg_learning_signal = kdivui( + neuron->core_pop_rate, neuron_impl_neurons_in_partition) - neuron->core_target_rate; // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, reg_learning_signal); @@ -515,6 +517,7 @@ static void neuron_impl_do_timestep_update( // log_info("Check: voltage %k neuron->B %k time %u", voltage, neuron->B, time); + // Can't replace this (yet) with kdivk as that only works for positive accums state_t nu = (voltage - neuron->B)/neuron->B; if (nu > ZERO){ diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 40229872db3..4380290ed61 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -78,17 +78,15 @@ static uint n_steps_per_timestep; extern bool use_key; // TODO: are these parameters needed? -static REAL next_spike_time = 0; //extern uint32_t time; extern uint32_t *neuron_keys; extern REAL learning_signal; -static uint32_t target_ind = 0; // recording params (?) 
-uint32_t is_it_right = 0; +//uint32_t is_it_right = 0; //uint32_t choice = 0; -// Left right parameters +// Left right state parameters typedef enum { STATE_CUE, @@ -309,7 +307,7 @@ static void neuron_impl_do_timestep_update( // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; - bool spike = false; +// bool spike = false; // current_time = time & 0x3ff; // repeats on a cycle of 1024 entries in array @@ -428,8 +426,7 @@ static void neuron_impl_do_timestep_update( softmax_0 = 0k; softmax_1 = 0k; if (use_key) { - // I don't understand, this just sends zero - // Oh, maybe it's a "completed" signal + // This sends a "completed" signal send_spike_mc_payload( neuron_keys[neuron_index], bitsk(neuron->cross_entropy)); // while (!spin1_send_mc_packet( @@ -455,8 +452,8 @@ static void neuron_impl_do_timestep_update( if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ // pick new value and broadcast - REAL random_value = (REAL)(mars_kiss64_seed( - neuron->kiss_seed) / (REAL)0xffffffff); // 0-1 + REAL random_value = kdivui( + (REAL)(mars_kiss64_seed(neuron->kiss_seed)), UINT32_MAX); // 0-1 if (random_value < 0.5k){ current_cue_direction = 0; } @@ -556,8 +553,11 @@ static void neuron_impl_do_timestep_update( } else{ // accum denominator = 1.k / (exp_1 + exp_0); - softmax_0 = exp_0 / (exp_1 + exp_0); - softmax_1 = exp_1 / (exp_1 + exp_0); +// softmax_0 = exp_0 / (exp_1 + exp_0); +// softmax_1 = exp_1 / (exp_1 + exp_0); + // These divides are okay in kdivk because exp is always positive + softmax_0 = kdivk(exp_0, (exp_1 + exp_0)); + softmax_1 = kdivk(exp_1, (exp_1 + exp_0)); } // io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); // What to do if log(0)? 
@@ -568,7 +568,7 @@ static void neuron_impl_do_timestep_update( glob_neuron->cross_entropy = -logk(softmax_1); } learning_signal = softmax_0; - is_it_right = 1; +// is_it_right = 1; } else{ for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { @@ -577,7 +577,7 @@ static void neuron_impl_do_timestep_update( glob_neuron->cross_entropy = -logk(softmax_0); } learning_signal = softmax_0 - 1.k; - is_it_right = 0; +// is_it_right = 0; } // if (learning_signal > 0.5){ // learning_signal = 1k; @@ -665,15 +665,15 @@ static void neuron_impl_do_timestep_update( // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } - // If spike occurs, communicate to relevant parts of model - if (spike) { - // Call relevant model-based functions - // Tell the neuron model - // neuron_model_has_spiked(neuron); - - // Tell the additional input - additional_input_has_spiked(additional_input); - } + // This model doesn't spike so this can be commented out +// if (spike) { +// // Call relevant model-based functions +// // Tell the neuron model +// // neuron_model_has_spiked(neuron); +// +// // Tell the additional input +// additional_input_has_spiked(additional_input); +// } // Shape the existing input according to the included rule synapse_types_shape_input(synapse_type); @@ -681,9 +681,6 @@ static void neuron_impl_do_timestep_update( #if LOG_LEVEL >= LOG_DEBUG neuron_model_print_state_variables(neuron); #endif // LOG_LEVEL >= LOG_DEBUG - - // Return the boolean to the model timestep update - // return spike; } // log_info("end of do_timestep_update time %u", time); diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 8e143ec4fe2..9e06c08c7f1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -243,9 +243,9 @@ static inline void neuron_model_initialise( 
state->V_reset = params->V_reset; state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); - log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract_ms %k T_refract %d", - state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, - state->refract_timer, state->V_reset, params->T_refract_ms, state->T_refract); +// log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract_ms %k T_refract %d", +// state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, +// state->refract_timer, state->V_reset, params->T_refract_ms, state->T_refract); // for everything else just copy across for now state->z = params->z; @@ -263,16 +263,16 @@ static inline void neuron_model_initialise( state->window_size = params->window_size; state->number_of_cues = params->number_of_cues; - log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", - state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); +// log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", +// state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); state->core_pop_rate = params->pop_rate; state->core_target_rate = params->target_rate; state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); state->eta = params->eta; - log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", - state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); +// log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", +// state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h 
b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 2852784498b..d3257b66990 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -243,11 +243,11 @@ static inline void neuron_model_initialise( state->number_of_cues = params->number_of_cues; // local_eta = params->eta; - log_info("Check p_key %u p_pop_size %u", params->p_key, params->p_pop_size); - log_info("Check number_of_cues %u eta %k", params->number_of_cues, params->eta); - log_info("mean_0 %k mean_1 %k rate_on %k rate_off %k readout_V_0 %k readout_V_1 %k", - params->mean_0, params->mean_1, params->rate_on, params->rate_off, - params->readout_V_0, params->readout_V_1); +// log_info("Check p_key %u p_pop_size %u", params->p_key, params->p_pop_size); +// log_info("Check number_of_cues %u eta %k", params->number_of_cues, params->eta); +// log_info("mean_0 %k mean_1 %k rate_on %k rate_off %k readout_V_0 %k readout_V_1 %k", +// params->mean_0, params->mean_1, params->rate_on, params->rate_off, +// params->readout_V_0, params->readout_V_1); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c index 9887bec5a64..a74165befd5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c @@ -68,7 +68,7 @@ address_t weight_initialise( // Copy weight shift weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; - log_info("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k reg_rate:%k", + log_debug("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k reg_rate:%k", s, 
dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, dtcm_copy[s].reg_rate); diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h index b955b37162c..6c4bda48f3c 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h @@ -205,8 +205,7 @@ static inline const char *synapse_types_get_type_char( //! \return Nothing static inline void synapse_types_print_input( synapse_types_t *parameter) { - io_printf( - IO_BUF, "%12.6k + %12.6k - %12.6k - %12.6k", + log_debug("%12.6k + %12.6k - %12.6k - %12.6k", parameter->exc, parameter->exc2, parameter->inh, parameter->inh2); } @@ -215,10 +214,10 @@ static inline void synapse_types_print_input( //! \param[in] parameter: the pointer to the parameters to print static inline void synapse_types_print_parameters( synapse_types_t *parameter) { - log_info("exc_init = %11.4k\n", parameter->exc); - log_info("exc2_init = %11.4k\n", parameter->exc2); - log_info("inh_init = %11.4k\n", parameter->inh); - log_info("inh2_init = %11.4k\n", parameter->inh2); + log_debug("exc_init = %11.4k\n", parameter->exc); + log_debug("exc2_init = %11.4k\n", parameter->exc2); + log_debug("inh_init = %11.4k\n", parameter->inh); + log_debug("inh2_init = %11.4k\n", parameter->inh2); } #endif // _SYNAPSE_TYPES_ERBP_IMPL_H_ diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 34c54acf185..ed929227357 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -239,10 +239,6 @@ static inline bool process_fixed_synapses( // Add weight to current ring buffer value int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // switch to saturated arithmetic to avoid complicated saturation check, will it check saturation at both ends? 
- int32_t test = -22; - log_info("Check weight: %d accumulation %d test %d buffer %d time %u", - weight, accumulation, test, ring_buffers[ring_buffer_index], time); - // If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) // **NOTE** 0x10000 can be expressed as an ARM literal, // but 0xFFFF cannot. Therefore, we use (0x10000 - 1) From 3e6da015520f1425d4d6bfc079958cf27d091c4c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 5 May 2023 14:59:16 +0100 Subject: [PATCH 095/123] Switch this calculation back for now as kdivui doesn't seem to work --- .../neuron/implementations/neuron_impl_left_right_readout.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 4380290ed61..7e73cc46ccb 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -452,8 +452,10 @@ static void neuron_impl_do_timestep_update( if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ // pick new value and broadcast - REAL random_value = kdivui( - (REAL)(mars_kiss64_seed(neuron->kiss_seed)), UINT32_MAX); // 0-1 +// REAL random_value = kdivui( +// (REAL)(mars_kiss64_seed(neuron->kiss_seed)), UINT32_MAX); // 0-1 + REAL random_value = ( + (REAL)mars_kiss64_seed(neuron->kiss_seed) / (REAL)UINT32_MAX); // 0-1 if (random_value < 0.5k){ current_cue_direction = 0; } From 07e09f55954f755947b1f7ac2caf1f860e02b81e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 5 May 2023 17:03:48 +0100 Subject: [PATCH 096/123] More comments on dividing negative values --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 202f369c598..831dc49c827 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -519,6 +519,9 @@ static void neuron_impl_do_timestep_update( // Can't replace this (yet) with kdivk as that only works for positive accums state_t nu = (voltage - neuron->B)/neuron->B; + // The below works but doesn't save ITCM +// state_t nu = kdivi(bitsk(voltage-neuron->B), bitsk(neuron->B)); + if (nu > ZERO){ neuron->z = 1.0k * neuron->A; // implements refractory period From 3f14ee2ad4cf602a4f6127a7cbb000f95e2ced28 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 9 May 2023 15:14:27 +0100 Subject: [PATCH 097/123] Tidying up (working?) C code --- .../neuron_impl_eprop_adaptive.h | 283 +------------ .../neuron_impl_left_right_readout.h | 229 ++--------- .../neuron_impl_sinusoid_readout.h | 109 +---- .../models/neuron_model_eprop_adaptive_impl.c | 380 ------------------ .../models/neuron_model_eprop_adaptive_impl.h | 293 ++------------ .../neuron_model_left_right_readout_impl.c | 214 ---------- .../neuron_model_left_right_readout_impl.h | 224 +---------- .../neuron_model_sinusoid_readout_impl.c | 178 -------- .../neuron_model_sinusoid_readout_impl.h | 125 +----- 9 files changed, 135 insertions(+), 1900 deletions(-) delete mode 100644 neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c delete mode 100644 neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c delete mode 100644 neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 831dc49c827..1e4e9cb302d 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ 
b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -10,7 +10,7 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. -x * + * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ @@ -68,9 +68,7 @@ enum bitfield_recording_indices { #include -//extern uint32_t time; extern REAL learning_signal; -//uint32_t neurons_in_pop; uint32_t neuron_impl_neurons_in_partition; //! Array of neuron states @@ -85,28 +83,13 @@ static additional_input_t *additional_input_array; //! Threshold states array static threshold_type_t *threshold_type_array; -//! Global parameters for the neurons -//global_neuron_params_t global_parameters; - //! The synapse shaping parameters static synapse_types_t *synapse_types_array; -//! The number of steps to run per timestep +//! The number of steps to run per timestep (not set in this impl yet) static uint n_steps_per_timestep; -// Bool to regularise on the first run -static bool initial_regularise = true; - static bool neuron_impl_initialise(uint32_t n_neurons) { - // allocate DTCM for the global parameter details -// if (sizeof(global_neuron_params_t)) { -// global_parameters = spin1_malloc(sizeof(global_neuron_params_t)); -// if (global_parameters == NULL) { -// log_error("Unable to allocate global neuron parameters" -// "- Out of DTCM"); -// return false; -// } -// } // Allocate DTCM for neuron array if (sizeof(neuron_t)) { @@ -241,75 +224,6 @@ static void neuron_impl_load_neuron_parameters( spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); } - // - -// if (sizeof(global_neuron_params_t)) { -// log_debug("writing neuron global parameters"); -// spin1_memcpy(global_parameters, &address[next], -// sizeof(global_neuron_params_t)); -// next += n_words_needed(sizeof(global_neuron_params_t)); -// } - -// if (sizeof(neuron_t)) { 
-// log_debug("reading neuron local parameters"); -// spin1_memcpy(neuron_array, &address[next], -// n_neurons * sizeof(neuron_t)); -// next += n_words_needed(n_neurons * sizeof(neuron_t)); -// } -// -// if (sizeof(input_type_t)) { -// log_debug("reading input type parameters"); -// spin1_memcpy(input_type_array, &address[next], -// n_neurons * sizeof(input_type_t)); -// next += n_words_needed(n_neurons * sizeof(input_type_t)); -// } -// -// if (sizeof(threshold_type_t)) { -// log_debug("reading threshold type parameters"); -// spin1_memcpy(threshold_type_array, &address[next], -// n_neurons * sizeof(threshold_type_t)); -// next += n_words_needed(n_neurons * sizeof(threshold_type_t)); -// } -// -// if (sizeof(synapse_param_t)) { -// log_debug("reading synapse parameters"); -// spin1_memcpy(neuron_synapse_shaping_params, &address[next], -// n_neurons * sizeof(synapse_param_t)); -// next += n_words_needed(n_neurons * sizeof(synapse_param_t)); -// } -// -// if (sizeof(additional_input_t)) { -// log_debug("reading additional input type parameters"); -// spin1_memcpy(additional_input_array, &address[next], -// n_neurons * sizeof(additional_input_t)); -// next += n_words_needed(n_neurons * sizeof(additional_input_t)); -// } - - // Global parameters are no longer a thing, so work out where these need to go -// neuron_model_set_global_neuron_params(global_parameters); - - // ********************************************** - // ******** for eprop regularisation ************ - // ********************************************** - - // I don't think the below is necessary any more assuming these get set correctly - // in the neuron model (these are now set in neuron_model_initialise() ) -// if (initial_regularise) { -// global_parameters->core_target_rate = global_parameters->core_target_rate; -//// * n_neurons; // scales target rate depending on number of neurons -// global_parameters->core_pop_rate = 0.k;//global_parameters->core_pop_rate; -//// * n_neurons; // scale initial 
value, too -// -// initial_regularise = false; -// } - - // This can probably be commented out at some point as well -// for (index_t n = 0; n < n_neurons; n++) { -// neuron_model_print_parameters(&neuron_array[n]); -// log_debug("Neuron id %u", n); -// neuron_model_print_state_variables(&neuron_array[n]); -// } - #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { @@ -320,22 +234,11 @@ static void neuron_impl_load_neuron_parameters( #endif // LOG_LEVEL >= LOG_DEBUG } -//static bool neuron_impl_do_timestep_update(index_t neuron_index, -// input_t external_bias, state_t *recorded_variable_values) { - static void neuron_impl_do_timestep_update( uint32_t timer_count, uint32_t time, uint32_t n_neurons) { -// if (neuron_index == 0) { -// // Decay global rate trace (only done once per core per timestep) -// global_parameters->core_pop_rate = global_parameters->core_pop_rate -// * global_parameters->rate_exp_TC; -// } - for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { -// log_info("timestep_update neuron_index %u time %u ", neuron_index, time); - // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; @@ -346,8 +249,8 @@ static void neuron_impl_do_timestep_update( input_type_t *input_type = &input_type_array[neuron_index]; // Get threshold and additional input parameters for this neuron - // threshold_type_pointer_t threshold_type = - // &threshold_type_array[neuron_index]; +// threshold_type_pointer_t threshold_type = +// &threshold_type_array[neuron_index]; additional_input_t *additional_input = &additional_input_array[neuron_index]; synapse_types_t *synapse_type = @@ -360,8 +263,6 @@ static void neuron_impl_do_timestep_update( state_t B_t = neuron->B; // cache last timestep threshold level state_t z_t = neuron->z; - // recorded_variable_values[V_RECORDING_INDEX] = voltage; - // Get the exc and inh values from the synapses input_t 
exc_values[NUM_EXCITATORY_RECEPTORS]; input_t* exc_syn_values = synapse_types_get_excitatory_input( @@ -376,197 +277,66 @@ static void neuron_impl_do_timestep_update( input_t* inh_input_values = input_type_get_input_value( inh_syn_values, input_type, NUM_INHIBITORY_RECEPTORS); - // // Sum g_syn contributions from all receptors for recording - // REAL total_exc = 0; - // REAL total_inh = 0; - // - // for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { - // total_exc += exc_input_values[i]; - // } - // for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { - // total_inh += inh_input_values[i]; - // } - - // // Call functions to get the input values to be recorded - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - // global_parameters->core_pop_rate; - // Call functions to convert exc_input and inh_input to current input_type_convert_excitatory_input_to_current( exc_input_values, input_type, voltage); input_type_convert_inhibitory_input_to_current( inh_input_values, input_type, voltage); + // Get current offset REAL current_offset = current_source_get_offset(time, neuron_index); + // Get any additional bias input_t external_bias = additional_input_get_input_value_as_current( additional_input, voltage); // determine if a spike should occur threshold_type_update_threshold(neuron->z, neuron); - // Record B - // if (neuron_index == 0){ - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = global_parameters->core_pop_rate / neurons_in_pop; // divide by neurons on core to get average per neuron contribution to core pop rate - //// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; // divide by neurons on core to get average per neuron contribution to core pop rate - // } - // else{ - // - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - // B_t; // neuron->B; - //// neuron->L; - // // neuron->syn_state[0].z_bar; 
- // // global_parameters->core_target_rate; - // // neuron->syn_state[0].e_bar; - // // neuron->syn_state[neuron_index].el_a; - // // exc_input_values[0]; // record input input (signed) - // // learning_signal * neuron->w_fb; - // } - // if(neuron_index % 2 == 0){ - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; - //// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; - //// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; - // } - //// else if (neuron_index == 0){ - //// } - // else{ - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].el_a; - //// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; - //// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; - // } - - // Simplified what was below this to choose which delta_w to record for different indices + // Record different synapse delta_w for different indices if ((neuron_index == 0) || (neuron_index == 1) || (neuron_index == 2)) { -// log_info("z_bar_inp %k (10 + neuron_index) %u L %k time %u", -// neuron->syn_state[10+neuron_index].z_bar_inp, 10+neuron_index, neuron->L, time); neuron_recording_record_accum( GSYN_INH_RECORDING_INDEX, neuron_index, neuron->syn_state[10+neuron_index].delta_w); } else { -// log_info("z_bar_inp %k neuron_index %u L %k time %u", -// neuron->syn_state[0+neuron_index].z_bar_inp, neuron_index, neuron->L, time); neuron_recording_record_accum( GSYN_INH_RECORDING_INDEX, neuron_index, neuron->syn_state[0+neuron_index].delta_w); } -// if(neuron_index == 0){ -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = 
neuron->syn_state[0+neuron_index].el_a; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; -// // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; -// } -// // else if (neuron_index == 0){ -// // } -// else if(neuron_index == 1){ -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; -// // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; -// } -// else if(neuron_index == 2){ -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[20+neuron_index].el_a; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].delta_w; -// // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].e_bar; -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_value; -// } -// else{ -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[10+neuron_index].el_a; -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].delta_w; -// // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0+neuron_index].e_bar; -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = *exc_input_values; -// } -// // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[neuron_index].el_a; -// // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->B; - -// log_info("Updating neuron parameters B_t = %k ", B_t); - // update neuron parameters state_t result = 
neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, external_bias, current_offset, neuron, B_t); -// REAL accum_time = (accum)(time%13000) * 0.001; -// if (!accum_time){ -// accum_time += 1.k; -// } - // REAL reg_learning_signal = (global_parameters->core_pop_rate - //// / ((accum)(time%1300) - //// / (1.225k - // / (accum_time - // * (accum)neuron_impl_neurons_in_partition)) - // - global_parameters->core_target_rate; - // REAL reg_learning_signal = global_parameters->core_target_rate - (global_parameters->core_pop_rate / neuron_impl_neurons_in_partition); - // REAL reg_learning_signal = (global_parameters->core_pop_rate / neuron_impl_neurons_in_partition) - global_parameters->core_target_rate; -// REAL reg_learning_signal = ( -// neuron->core_pop_rate / neuron_impl_neurons_in_partition) - neuron->core_target_rate; + // Calculate and record regularised learning signal REAL reg_learning_signal = kdivui( neuron->core_pop_rate, neuron_impl_neurons_in_partition) - neuron->core_target_rate; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = reg_learning_signal;//global_parameters->core_pop_rate; neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, reg_learning_signal); - // Also update Z (including using refractory period information) - // TODO: there's quite a few divides lurking in this code, it may - // be worth looking to see if any of them can be replaced - -// log_info("Check: voltage %k neuron->B %k time %u", voltage, neuron->B, time); - // Can't replace this (yet) with kdivk as that only works for positive accums state_t nu = (voltage - neuron->B)/neuron->B; // The below works but doesn't save ITCM // state_t nu = kdivi(bitsk(voltage-neuron->B), bitsk(neuron->B)); - + // Also update Z (including using refractory period information) if (nu > ZERO){ neuron->z = 1.0k * neuron->A; // implements refractory period } - bool spike = z_t; -// log_info("time %u neuron_index %u 
z_t %u spike %u z %k nu %k", -// time, neuron_index, z_t, spike, neuron->z, nu); - - - // ********************************************************* // Record updated state - // Record V (just as cheap to set then to gate later) -// recorded_variable_values[V_RECORDING_INDEX] = voltage; // result; neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, voltage); - - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - // neuron->syn_state[0].delta_w; - // neuron->syn_state[0].z_bar; - // exc_input_values[0]; // record input input (signed) - // z_t; - // global_parameters->core_pop_rate; - // neuron->B; - // neuron->syn_state[0].z_bar; - - // // Record B - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - //// B_t; // neuron->B; - //// global_parameters->core_target_rate; - //// neuron->syn_state[0].e_bar; - // neuron->syn_state[0].el_a; - // total_inh; // total synaptic input from input layer - // ********************************************************* - - // If spike occurs, communicate to relevant parts of model if (spike) { - // io_printf(IO_BUF, "neuron %u spiked with beta = %k, B_t = %k\n", neuron_index, neuron->beta, neuron->B); // Call relevant model-based functions // Tell the neuron model neuron_model_has_spiked(neuron); - // io_printf(IO_BUF, "neuron %u thresholded beta = %k, B_t = %k\n", neuron_index, neuron->beta, neuron->B); // Tell the additional input additional_input_has_spiked(additional_input); @@ -594,8 +364,6 @@ static void neuron_impl_do_timestep_update( neuron_model_print_state_variables(neuron); #endif // LOG_LEVEL >= LOG_DEBUG -// // Return the boolean to the model timestep update -// return spike; } } @@ -604,26 +372,12 @@ static void neuron_impl_do_timestep_update( static void neuron_impl_store_neuron_parameters( address_t address, uint32_t next, uint32_t n_neurons) { log_debug("writing parameters"); - //if (global_parameters == NULL) { - // log_error("global parameter storage not allocated"); - // 
rt_error(RTE_SWERR); - // return; - //} // Skip steps per timestep next += 1; -// if (sizeof(global_neuron_params_t)) { -// log_debug("writing neuron global parameters"); -// spin1_memcpy(&address[next], global_parameters, -// sizeof(global_neuron_params_t)); -// next += n_words_needed(sizeof(global_neuron_params_t)); -// } - if (sizeof(neuron_t)) { log_debug("writing neuron local parameters"); -// spin1_memcpy(&address[next], neuron_array, -// n_neurons * sizeof(neuron_t)); neuron_params_t *params = (neuron_params_t *) &address[next]; for (uint32_t i = 0; i < n_neurons; i++) { neuron_model_save_state(&neuron_array[i], ¶ms[i]); @@ -631,15 +385,6 @@ static void neuron_impl_store_neuron_parameters( next += n_words_needed(n_neurons * sizeof(neuron_params_t)); } - // Not completely sure this next loop for printing is necessary - log_debug("****** STORING ******"); - for (index_t n = 0; n < n_neurons; n++) { - neuron_model_print_parameters(&neuron_array[n]); - log_debug("Neuron id %u", n); - neuron_model_print_state_variables(&neuron_array[n]); - } - log_debug("****** STORING COMPLETE ******"); - if (sizeof(input_type_t)) { log_debug("writing input type parameters"); input_type_params_t *params = (input_type_params_t *) &address[next]; @@ -676,6 +421,15 @@ static void neuron_impl_store_neuron_parameters( next += n_words_needed(n_neurons * sizeof(additional_input_params_t)); } + // Not completely sure this next loop for printing is necessary + log_debug("****** STORING ******"); + for (index_t n = 0; n < n_neurons; n++) { + neuron_model_print_parameters(&neuron_array[n]); + log_debug("Neuron id %u", n); + neuron_model_print_state_variables(&neuron_array[n]); + } + log_debug("****** STORING COMPLETE ******"); + log_debug("neuron 0 'global' parameters, core_target_rate, core_pop_rate %k %k", &neuron_array[0].core_target_rate, &neuron_array[0].core_pop_rate); } @@ -694,7 +448,6 @@ void neuron_impl_print_inputs(uint32_t n_neurons) { if (!empty) { 
log_debug("-------------------------------------\n"); - for (index_t i = 0; i < n_neurons; i++) { synapse_types_t *params = &synapse_types_array[i]; input_t exc_values[NUM_EXCITATORY_RECEPTORS]; diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 7e73cc46ccb..56d0bfa51bd 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -65,9 +65,6 @@ static additional_input_t *additional_input_array; //! Threshold states array static threshold_type_t *threshold_type_array; -//! Global parameters for the neurons -//static global_neuron_params_pointer_t global_parameters; - // The synapse shaping parameters static synapse_types_t *synapse_types_array; @@ -77,12 +74,10 @@ static uint n_steps_per_timestep; //! Whether key is set, from neuron.c extern bool use_key; -// TODO: are these parameters needed? -//extern uint32_t time; extern uint32_t *neuron_keys; extern REAL learning_signal; -// recording params (?) +// recording params (TODO: check these aren't needed?) 
//uint32_t is_it_right = 0; //uint32_t choice = 0; @@ -94,6 +89,7 @@ typedef enum STATE_PROMPT, } left_right_state_t; +// Left right parameters left_right_state_t current_state = 0; uint32_t current_time = 0; uint32_t cue_number = 0; @@ -103,27 +99,14 @@ uint32_t wait_between_cues = 50; // ms uint32_t duration_of_cue = 100; // ms uint32_t wait_before_result = 1000; // ms but should be a range between 500-1500 uint32_t prompt_duration = 150; //ms -//uint32_t ticks_for_mean = 0; bool start_prompt = false; accum softmax_0 = 0k; accum softmax_1 = 0k; -//REAL payload; bool completed_broadcast = true; static bool neuron_impl_initialise(uint32_t n_neurons) { - // allocate DTCM for the global parameter details -// if (sizeof(global_neuron_params_t) > 0) { -// global_parameters = (global_neuron_params_t *) spin1_malloc( -// sizeof(global_neuron_params_t)); -// if (global_parameters == NULL) { -// log_error("Unable to allocate global neuron parameters" -// "- Out of DTCM"); -// return false; -// } -// } - // Allocate DTCM for neuron array if (sizeof(neuron_t) != 0) { neuron_array = spin1_malloc(n_neurons * sizeof(neuron_t)); @@ -177,10 +160,6 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { // Seed the random input validate_mars_kiss64_seed(neuron_array->kiss_seed); - // Initialise pointers to Neuron parameters in STDP code -// synapse_dynamics_set_neuron_array(neuron_array); -// log_info("set pointer to neuron array in stdp code"); - return true; } @@ -261,29 +240,6 @@ static void neuron_impl_load_neuron_parameters( if (save_initial_state) { spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); } -// io_printf(IO_BUF, "\nPrinting global params\n"); -// io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); -// io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); -// io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); -// io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); -// 
io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); -//// io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); -// io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); -// io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); -// io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); -// io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); -// io_printf(IO_BUF, "poisson key: %u \n\n", global_parameters->p_key); -// io_printf(IO_BUF, "poisson pop size: %u \n\n", global_parameters->p_pop_size); - - - for (index_t n = 0; n < n_neurons; n++) { - neuron_model_print_parameters(&neuron_array[n]); - } - -// io_printf(IO_BUF, "size of global params: %u", -// sizeof(global_neuron_params_t)); - - #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); @@ -295,26 +251,15 @@ static void neuron_impl_load_neuron_parameters( #endif // LOG_LEVEL >= LOG_DEBUG } - - - static void neuron_impl_do_timestep_update( uint32_t timer_count, uint32_t time, uint32_t n_neurons) { for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { -// log_info("neuron_index %u time %u n_neurons %u", neuron_index, time, n_neurons); - // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; // bool spike = false; - // current_time = time & 0x3ff; // repeats on a cycle of 1024 entries in array - - // io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); - // io_printf(IO_BUF, "Target: %k\n\n", - // global_parameters->target_V[target_ind]); - // Get the input_type parameters and voltage for this neuron input_type_t *input_type = &input_type_array[neuron_index]; @@ -343,21 +288,6 @@ static void neuron_impl_do_timestep_update( input_t* inh_input_values = input_type_get_input_value( inh_syn_values, input_type, NUM_INHIBITORY_RECEPTORS); - // Sum g_syn contributions from all receptors for recording - // REAL total_exc = 0; - // REAL 
total_inh = 0; - // - // for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ - // total_exc += exc_input_values[i]; - // } - // for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ - // total_inh += inh_input_values[i]; - // } - - // Call functions to get the input values to be recorded - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - // Call functions to convert exc_input and inh_input to current input_type_convert_excitatory_input_to_current( exc_input_values, input_type, voltage); @@ -370,7 +300,6 @@ static void neuron_impl_do_timestep_update( additional_input, voltage); if (neuron_index == 0){ - // io_printf(IO_BUF, "n0 - "); // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, @@ -382,45 +311,29 @@ static void neuron_impl_do_timestep_update( neuron_t *glob_neuron = &neuron_array[glob_n]; glob_neuron->readout_V_0 = result; } -// global_parameters->readout_V_0 = result; - } else if (neuron_index == 1){ - // io_printf(IO_BUF, "n1 - "); // update neuron parameters - // learning_signal *= -1.k; state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, external_bias, current_offset, neuron, -50k); - // learning_signal *= -1.k; + // Finally, set global membrane potential to updated value for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { // Get the neuron itself neuron_t *glob_neuron = &neuron_array[glob_n]; glob_neuron->readout_V_1 = result; } -// global_parameters->readout_V_1 = result; } - // if (neuron_index == 0){ - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_0; - // } - // else if (neuron_index == 1){ - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = global_parameters->readout_V_1; - // } - // io_printf(IO_BUF, "state = %u - %u\n", current_state, 
time); + if (cue_number == 0 && completed_broadcast){ // reset start of new test - // io_printf(IO_BUF, "time entering reset %u\n", time); - // io_printf(IO_BUF, "Resetting\n"); completed_broadcast = false; current_time = time; current_state = STATE_CUE; accumulative_direction = 0; // error params -// global_parameters->cross_entropy = 0.k; neuron->cross_entropy = 0.k; learning_signal = 0.k; -// global_parameters->mean_0 = 0.k; -// global_parameters->mean_1 = 0.k; neuron->mean_0 = 0.k; neuron->mean_1 = 0.k; softmax_0 = 0k; @@ -436,18 +349,15 @@ static void neuron_impl_do_timestep_update( // } } } - // io_printf(IO_BUF, "current_state = %u, cue_number = %u, direction = %u, time = %u\n", current_state, cue_number, current_cue_direction, time); + // In this state the environment is giving the left/right cues to the agent - if (current_state == STATE_CUE){ - // io_printf(IO_BUF, "time entering cue %u\n", time); - if (neuron_index == 0){ - // if it's current in the waiting time between cues do nothing - // if ((time - current_time) % (wait_between_cues + duration_of_cue) < wait_between_cues){ - // do nothing? 
- // } - // begin sending left/right cue + if (current_state == STATE_CUE) { + if (neuron_index == 0) { + // if it's currently in the waiting time between cues do nothing + + // Otherwise, begin sending left/right cue if ((time - current_time) % - (wait_between_cues + duration_of_cue) >= wait_between_cues){ + (wait_between_cues + duration_of_cue) >= wait_between_cues) { // pick broadcast if just entered if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ @@ -456,21 +366,17 @@ static void neuron_impl_do_timestep_update( // (REAL)(mars_kiss64_seed(neuron->kiss_seed)), UINT32_MAX); // 0-1 REAL random_value = ( (REAL)mars_kiss64_seed(neuron->kiss_seed) / (REAL)UINT32_MAX); // 0-1 - if (random_value < 0.5k){ + if (random_value < 0.5k) { current_cue_direction = 0; } else{ current_cue_direction = 1; } - // current_cue_direction = (current_cue_direction + 1) % 2; accumulative_direction += current_cue_direction; REAL payload; payload = neuron->rate_on; -// io_printf(IO_BUF, "poisson setting 1, direction = %u\n", current_cue_direction); for (int j = current_cue_direction*neuron->p_pop_size; j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++){ -// log_info("current cue direction %u payload %k key index %u time %u neuron_index %u", -// current_cue_direction, payload, j, time, neuron_index); send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); // spin1_send_mc_packet( // neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); @@ -478,44 +384,41 @@ static void neuron_impl_do_timestep_update( } } // turn off and reset if finished - else if ((time - current_time) % (wait_between_cues + duration_of_cue) == 0 && (time - current_time) > 0){//(wait_between_cues + duration_of_cue) - 1){ + else if ((time - current_time) % (wait_between_cues + duration_of_cue) == 0 && + (time - current_time) > 0) {//(wait_between_cues + duration_of_cue) - 1){ cue_number += 1; REAL payload; payload = neuron->rate_off; - // io_printf(IO_BUF, "poisson 
setting 2, direction = %u\n", current_cue_direction); for (int j = current_cue_direction*neuron->p_pop_size; - j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++){ + j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++) { send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); // spin1_send_mc_packet( // neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); } - if (cue_number >= neuron->number_of_cues){ + if (cue_number >= neuron->number_of_cues) { current_state = (current_state + 1) % 3; } } } } else if (current_state == STATE_WAITING){ - // io_printf(IO_BUF, "time entering wait %u\n", time); // waiting for prompt, all things ok - if (cue_number >= neuron->number_of_cues){ + if (cue_number >= neuron->number_of_cues) { current_time = time; cue_number = 0; } - if ((time - current_time) >= wait_before_result){ + if ((time - current_time) >= wait_before_result) { current_state = (current_state + 1) % 3; start_prompt = true; } } else if (current_state == STATE_PROMPT){ - // io_printf(IO_BUF, "time entering prompt %u\n", time); if (start_prompt && neuron_index == 1){ current_time = time; // send packets to the variable poissons with the updated states for (int i = 0; i < 4; i++){ REAL payload; payload = neuron->rate_on; - // io_printf(IO_BUF, "poisson setting 3, turning on prompt\n"); for (int j = 2*neuron->p_pop_size; j < 2*neuron->p_pop_size + neuron->p_pop_size; j++){ send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); @@ -524,44 +427,32 @@ static void neuron_impl_do_timestep_update( } } } - if (neuron_index == 2){ // this is the error source + + // This is the error source + if (neuron_index == 2) { // Switched to always broadcasting error but with packet - // ticks_for_mean += 1; //todo is it a running error like this over prompt? 
start_prompt = false; - // io_printf(IO_BUF, "maybe here - %k - %k\n", global_parameters->mean_0, global_parameters->mean_1); - // io_printf(IO_BUF, "ticks %u - accum %k - ", ticks_for_mean, (accum)ticks_for_mean); - // Softmax of the exc and inh inputs representing 1 and 0 respectively - // may need to scale to stop huge numbers going in the exp - // io_printf(IO_BUF, "v0 %k - v1 %k\n", global_parameters->readout_V_0, global_parameters->readout_V_1); - // global_parameters->mean_0 += global_parameters->readout_V_0; - // global_parameters->mean_1 += global_parameters->readout_V_1; - // divide -> * 1/x - // io_printf(IO_BUF, " umm "); - // accum exp_0 = expk(global_parameters->mean_0 / (accum)ticks_for_mean); - // accum exp_1 = expk(global_parameters->mean_1 / (accum)ticks_for_mean); accum exp_0 = expk(neuron->readout_V_0);// * 0.1k); accum exp_1 = expk(neuron->readout_V_1);// * 0.1k); - // io_printf(IO_BUF, "or here - "); - // Um... how can an exponential be zero? - if (exp_0 == 0k && exp_1 == 0k){ - if (neuron->readout_V_0 > neuron->readout_V_1){ + + // TODO: I'm not sure how an exponential can be zero? + // Set up softmax calculation + if (exp_0 == 0k && exp_1 == 0k) { + if (neuron->readout_V_0 > neuron->readout_V_1) { softmax_0 = 1k; softmax_1 = 0k; } - else{ + else { softmax_0 = 0k; softmax_1 = 1k; } } - else{ - // accum denominator = 1.k / (exp_1 + exp_0); -// softmax_0 = exp_0 / (exp_1 + exp_0); -// softmax_1 = exp_1 / (exp_1 + exp_0); + else { // These divides are okay in kdivk because exp is always positive softmax_0 = kdivk(exp_0, (exp_1 + exp_0)); softmax_1 = kdivk(exp_1, (exp_1 + exp_0)); } - // io_printf(IO_BUF, "soft0 %k - soft1 %k - v0 %k - v1 %k\n", softmax_0, softmax_1, global_parameters->readout_V_0, global_parameters->readout_V_1); + // What to do if log(0)? 
if (accumulative_direction > neuron->number_of_cues >> 1){ for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { @@ -581,15 +472,6 @@ static void neuron_impl_do_timestep_update( learning_signal = softmax_0 - 1.k; // is_it_right = 0; } - // if (learning_signal > 0.5){ - // learning_signal = 1k; - // } - // else if (learning_signal < -0.5){ - // learning_signal = -1k; - // } - // else{ - // learning_signal = 0k; - // } if (use_key) { send_spike_mc_payload(neuron_keys[neuron_index], bitsk(learning_signal)); // while (!spin1_send_mc_packet( @@ -597,16 +479,10 @@ static void neuron_impl_do_timestep_update( // spin1_delay_us(1); // } } - // if(learning_signal){ - // io_printf(IO_BUF, "learning signal before cast = %k\n", learning_signal); - // } - // learning_signal = global_parameters->cross_entropy; - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = - // io_printf(IO_BUF, "broadcasting error\n"); } + + // The current broadcast may have completed so check if ((time - current_time) >= prompt_duration && neuron_index == 0){ - // io_printf(IO_BUF, "time entering end of test %u\n", time); - // io_printf(IO_BUF, "poisson setting 4, turning off prompt\n"); current_state = 0; completed_broadcast = true; for (int i = 0; i < 4; i++){ @@ -622,49 +498,27 @@ static void neuron_impl_do_timestep_update( } } - // learning_signal = global_parameters->cross_entropy; - -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = learning_signal;//exc_input_values[0];//neuron->syn_state[1].update_ready;// -// recorded_variable_values[V_RECORDING_INDEX] = voltage; -// log_info("neuron_index %u time %u record learning signal %k", -// neuron_index, time, learning_signal); + // Record learning signal and voltage neuron_recording_record_accum( GSYN_INH_RECORDING_INDEX, neuron_index, learning_signal); neuron_recording_record_accum( V_RECORDING_INDEX, neuron_index, voltage); - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = ; - // if (neuron_index == 2){ - // 
recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = accumulative_direction; - // } - // else { - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 3.5; - // } - if (neuron_index == 2){ //this neuron does nothing - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[90].z_bar; - // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[90].z_bar; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[50].delta_w; + + // Record delta_w from different synapse states depending on neuron index + if (neuron_index == 2) { neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, neuron->syn_state[50].delta_w); - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = is_it_right; } - else if (neuron_index == 1){ - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].z_bar; - // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[55].z_bar; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[40].delta_w; + else if (neuron_index == 1) { neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, neuron->syn_state[40].delta_w); - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } - else{ - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].z_bar; - // recorded_variable_values[V_RECORDING_INDEX] = neuron->syn_state[1].z_bar; -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = neuron->syn_state[0].delta_w; + else { neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, neuron->syn_state[0].delta_w); - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = softmax_0; } // This model doesn't spike so this can be commented out @@ -684,14 +538,8 @@ static void neuron_impl_do_timestep_update( neuron_model_print_state_variables(neuron); #endif // LOG_LEVEL >= LOG_DEBUG } - -// log_info("end of 
do_timestep_update time %u", time); } - - - - //! \brief stores neuron parameter back into sdram //! \param[in] address: the address in sdram to start the store static void neuron_impl_store_neuron_parameters( @@ -703,8 +551,6 @@ static void neuron_impl_store_neuron_parameters( if (sizeof(neuron_t)) { log_debug("writing neuron local parameters"); -// spin1_memcpy(&address[next], neuron_array, -// n_neurons * sizeof(neuron_t)); neuron_params_t *params = (neuron_params_t *) &address[next]; for (uint32_t i = 0; i < n_neurons; i++) { neuron_model_save_state(&neuron_array[i], ¶ms[i]); @@ -763,7 +609,6 @@ void neuron_impl_print_inputs(uint32_t n_neurons) { if (!empty) { log_debug("-------------------------------------\n"); - for (index_t i = 0; i < n_neurons; i++) { synapse_types_t *params = &synapse_types_array[i]; input_t exc_values[NUM_EXCITATORY_RECEPTORS]; diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index b42af2db917..6d20952d245 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -17,7 +17,7 @@ #include #include #include -#include // maybe not needed? +#include // TODO: maybe not needed? //! Indices for recording of words enum word_recording_indices { @@ -65,9 +65,6 @@ static additional_input_t *additional_input_array; //! Threshold states array static threshold_type_t *threshold_type_array; -////! 
Global parameters for the neurons -//static global_neuron_params_pointer_t global_parameters; - // The synapse shaping parameters static synapse_types_t *synapse_types_array; @@ -78,26 +75,14 @@ static uint n_steps_per_timestep; extern bool use_key; // TODO: check if these other parameters are needed -static REAL next_spike_time = 0; +//static REAL next_spike_time = 0; extern uint32_t time; extern uint32_t *neuron_keys; extern REAL learning_signal; static uint32_t target_ind = 0; - static bool neuron_impl_initialise(uint32_t n_neurons) { - // allocate DTCM for the global parameter details -// if (sizeof(global_neuron_params_t) > 0) { -// global_parameters = (global_neuron_params_t *) spin1_malloc( -// sizeof(global_neuron_params_t)); -// if (global_parameters == NULL) { -// log_error("Unable to allocate global neuron parameters" -// "- Out of DTCM"); -// return false; -// } -// } - // Allocate DTCM for neuron array if (sizeof(neuron_t)) { neuron_array = spin1_malloc(n_neurons * sizeof(neuron_t)); @@ -148,10 +133,6 @@ static bool neuron_impl_initialise(uint32_t n_neurons) { } } - // Initialise pointers to Neuron parameters in STDP code -// synapse_dynamics_set_neuron_array(neuron_array); -// log_info("set pointer to neuron array in stdp code"); - return true; } @@ -233,21 +214,6 @@ static void neuron_impl_load_neuron_parameters( spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); } -// io_printf(IO_BUF, "\nPrinting global params\n"); -// io_printf(IO_BUF, "seed 1: %u \n", global_parameters->spike_source_seed[0]); -// io_printf(IO_BUF, "seed 2: %u \n", global_parameters->spike_source_seed[1]); -// io_printf(IO_BUF, "seed 3: %u \n", global_parameters->spike_source_seed[2]); -// io_printf(IO_BUF, "seed 4: %u \n", global_parameters->spike_source_seed[3]); -// io_printf(IO_BUF, "eta: %k \n\n", neuron_array[0]->eta); - - - for (index_t n = 0; n < n_neurons; n++) { - neuron_model_print_parameters(&neuron_array[n]); - } - -// io_printf(IO_BUF, "size of 
global params: %u", -// sizeof(global_neuron_params_t)); - #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { @@ -258,22 +224,16 @@ static void neuron_impl_load_neuron_parameters( #endif // LOG_LEVEL >= LOG_DEBUG } - static void neuron_impl_do_timestep_update( uint32_t timer_count, uint32_t time, uint32_t n_neurons) { for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; - bool spike = false; -// neuron_t *neuron_zero = &neuron_array[0]; + bool spike = false; // TODO: don't think this is needed target_ind = time & 0x3ff; // repeats on a cycle of 1024 entries in array - // io_printf(IO_BUF, "Updating Neuron Index: %u\n", neuron_index); - // io_printf(IO_BUF, "Target: %k\n\n", - // global_parameters->target_V[target_ind]); - // Get the input_type parameters and voltage for this neuron input_type_t *input_type = &input_type_array[neuron_index]; @@ -288,7 +248,6 @@ static void neuron_impl_do_timestep_update( // Get the voltage state_t voltage = neuron_model_get_membrane_voltage(neuron); - // Get the exc and inh values from the synapses input_t exc_values[NUM_EXCITATORY_RECEPTORS]; input_t* exc_syn_values = synapse_types_get_excitatory_input( @@ -303,21 +262,6 @@ static void neuron_impl_do_timestep_update( input_t* inh_input_values = input_type_get_input_value( inh_syn_values, input_type, NUM_INHIBITORY_RECEPTORS); - // Sum g_syn contributions from all receptors for recording - // REAL total_exc = 0; - // REAL total_inh = 0; - // - // for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ - // total_exc += exc_input_values[i]; - // } - // for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ - // total_inh += inh_input_values[i]; - // } - - // Call functions to get the input values to be recorded - // recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - // 
recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - // Call functions to convert exc_input and inh_input to current input_type_convert_excitatory_input_to_current( exc_input_values, input_type, voltage); @@ -328,11 +272,8 @@ static void neuron_impl_do_timestep_update( input_t external_bias = additional_input_get_input_value_as_current( additional_input, voltage); - // This is clearly overwritten so why is it here? -// recorded_variable_values[V_RECORDING_INDEX] = voltage; -// neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, voltage); + // Update neuron only on index 0 if (neuron_index == 0){ - // update neuron parameters state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, @@ -341,62 +282,33 @@ static void neuron_impl_do_timestep_update( // Calculate error REAL error = result - neuron->target_V[target_ind]; learning_signal = error; - // Record Error - // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = - // error; - // neuron->syn_state[3].delta_w; - // neuron->syn_state[0].z_bar; - -// log_info("neuron_index %u time %u voltage %k result %k exc input %k targetV %k", -// neuron_index, time, voltage, result, exc_input_values[0], -// neuron->target_V[target_ind]); // Record readout neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, result); -// neuron_recording_record_accum(V_RECORDING_INDEX, neuron_index, voltage); -// recorded_variable_values[V_RECORDING_INDEX] = -// result; - // neuron->syn_state[0].z_bar; - // Send error (learning signal) as packet with payload - // ToDo can't I just alter the global variable here? - // Another option is just to use "send_spike" instead... ? 
// send_spike_mc_payload(key, bitsk(error)); if (use_key) { send_spike_mc_payload(neuron_keys[neuron_index], bitsk(error)); } -// log_info("send learning signal key %u neuron_index %u", neuron_keys[neuron_index], neuron_index); -// while (!spin1_send_mc_packet( -// neuron_keys[neuron_index], bitsk(error), 1 )) { -// spin1_delay_us(1); -// } } else{ // Record 'Error' neuron_recording_record_accum( V_RECORDING_INDEX, neuron_index, neuron->target_V[target_ind]); -// recorded_variable_values[V_RECORDING_INDEX] = -// // neuron->syn_state[0].z_bar; -// global_parameters->target_V[target_ind]; -// // recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = -// // - global_parameters->target_V[target_ind]; } -// recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = neuron->syn_state[neuron_index*20].z_bar; + + // Record z_bar neuron_recording_record_accum( GSYN_INH_RECORDING_INDEX, neuron_index, neuron->syn_state[neuron_index*20].z_bar); - // Record target -// recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = -// // global_parameters->target_V[target_ind]; -// neuron->syn_state[neuron_index*20].delta_w; -// // exc_input_values[0]; + // Record target delta_w neuron_recording_record_accum( GSYN_EXC_RECORDING_INDEX, neuron_index, neuron->syn_state[neuron_index*20].delta_w); // If spike occurs, communicate to relevant parts of model - // TODO I don't know why this is here + // TODO I don't know why this is here since this can (currently) never happen if (spike) { // Call relevant model-based functions // Tell the neuron model @@ -413,8 +325,6 @@ static void neuron_impl_do_timestep_update( neuron_model_print_state_variables(neuron); #endif // LOG_LEVEL >= LOG_DEBUG - // Return the boolean to the model timestep update -// return spike; } } @@ -430,8 +340,6 @@ static void neuron_impl_store_neuron_parameters( if (sizeof(neuron_t)) { log_debug("writing neuron local parameters"); -// spin1_memcpy(&address[next], neuron_array, -// n_neurons * sizeof(neuron_t)); 
neuron_params_t *params = (neuron_params_t *) &address[next]; for (uint32_t i = 0; i < n_neurons; i++) { neuron_model_save_state(&neuron_array[i], ¶ms[i]); @@ -490,7 +398,6 @@ void neuron_impl_print_inputs(uint32_t n_neurons) { if (!empty) { log_debug("-------------------------------------\n"); - for (index_t i = 0; i < n_neurons; i++) { synapse_types_t *params = &synapse_types_array[i]; input_t exc_values[NUM_EXCITATORY_RECEPTORS]; diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c deleted file mode 100644 index 18f91d6722d..00000000000 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.c +++ /dev/null @@ -1,380 +0,0 @@ -///* -// * Copyright (c) 2017-2019 The University of Manchester -// * -// * This program is free software: you can redistribute it and/or modify -// * it under the terms of the GNU General Public License as published by -// * the Free Software Foundation, either version 3 of the License, or -// * (at your option) any later version. -// * -// * This program is distributed in the hope that it will be useful, -// * but WITHOUT ANY WARRANTY; without even the implied warranty of -// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// * GNU General Public License for more details. -// * -// * You should have received a copy of the GNU General Public License -// * along with this program. If not, see . 
-// */ -// -//#include "neuron_model_eprop_adaptive_impl.h" -// -//#include -// -//bool printed_value = false; -//REAL v_mem_error; -//REAL new_learning_signal; -//extern REAL learning_signal; -////REAL local_eta; -//extern uint32_t time; -////extern global_neuron_params_pointer_t global_parameters; -//extern uint32_t syn_dynamics_neurons_in_partition; -// -//// simple Leaky I&F ODE -//static inline void lif_neuron_closed_form( -// neuron_t *neuron, REAL V_prev, input_t input_this_timestep, -// REAL B_t) { -// -// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; -// -// // update membrane voltage -// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) -// - neuron->z * B_t; // this line achieves reset -//} -// -////void neuron_model_set_global_neuron_params( -//// global_neuron_params_pointer_t params) { -//// use(params); -//// -//// local_eta = params->eta; -//// io_printf(IO_BUF, "local eta = %k\n", local_eta); -//// io_printf(IO_BUF, "core_pop_rate = %k\n", params->core_pop_rate); -//// io_printf(IO_BUF, "core_target_rate = %k\n", params->core_target_rate); -//// io_printf(IO_BUF, "rate_exp_TC = %k\n\n", params->rate_exp_TC); -//// // Does Nothing - no params -////} -// -//state_t neuron_model_state_update( -// uint16_t num_excitatory_inputs, input_t* exc_input, -// uint16_t num_inhibitory_inputs, input_t* inh_input, -// input_t external_bias, REAL current_offset, neuron_t *restrict neuron, // this has a *restrict on it in LIF? 
-// REAL B_t) { -// -// log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); -// log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); -// -//// REAL total_exc = 0; -//// REAL total_inh = 0; -//// -//// for (int i=0; i < num_excitatory_inputs; i++) { -//// total_exc += exc_input[i]; -//// } -//// for (int i=0; i< num_inhibitory_inputs; i++) { -//// total_inh += inh_input[i]; -//// } -// // Get the input in nA -// input_t input_this_timestep = -// exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; -// -// lif_neuron_closed_form( -// neuron, neuron->V_membrane, input_this_timestep, B_t); -// -// // If outside of the refractory period -// if (neuron->refract_timer <= 0) { -// // Allow spiking again -// neuron->A = 1; -// } else { -// // Neuron cannot fire, as neuron->A=0; -// // countdown refractory timer -// neuron->refract_timer -= 1; -// } -// -// -// // ****************************************************************** -// // Update Psi (pseudo-derivative) (done once for each postsynaptic neuron) -// // ****************************************************************** -// REAL psi_temp1 = (neuron->V_membrane - neuron->B) * (1/neuron->b_0); -// REAL psi_temp2 = ((absk(psi_temp1))); -// neuron->psi = ((1.0k - psi_temp2) > 0.0k)? -// (1.0k/neuron->b_0) * -// 0.3k * //todo why is this commented? -// (1.0k - psi_temp2) : 0.0k; -//// if (neuron->refract_timer){ -//// neuron->psi = 0.0k; -//// } -// neuron->psi *= neuron->A; -// -//// This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. -//// This should make it possible to do a pause and resume cycle and have reliable unloading of data. -// uint32_t total_input_synapses_per_neuron = 40; //todo should this be fixed? -// uint32_t total_recurrent_synapses_per_neuron = 0; //todo should this be fixed? 
-// uint32_t recurrent_offset = 100; -// -//// neuron->psi = neuron->psi << 10; -// -//// REAL rho = neuron->rho;//expk(-1.k / 1500.k); // adpt -// REAL rho = (accum)decay_s1615(1.k, neuron->e_to_dt_on_tau_a); -//// REAL rho_3 = (accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); -//// io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); -// -// REAL accum_time = (accum)(time%neuron->window_size) * 0.001k; -// if (!accum_time){ -// accum_time += 1.k; -// } -//// io_printf(IO_BUF, "time = %u, mod = %u, accum = %k, /s:%k, rate:%k, accum t:%k\n", time, time%1300, (accum)(time%1300), -//// (accum)(time%1300) * 0.001k, (accum)(time%1300) * 0.001k * (accum)syn_dynamics_neurons_in_partition, -//// accum_time); -// -// if (neuron->V_membrane > neuron->B){ -// v_mem_error = neuron->V_membrane - neuron->B; -//// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); -// } -// else if (neuron->V_membrane < -neuron->B){ -// v_mem_error = neuron->V_membrane + neuron->B; -//// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); -// } -// else{ -// v_mem_error = 0.k; -// } -//// learning_signal += v_mem_error; -// -//// REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; -//// REAL reg_learning_signal = (global_parameters->core_pop_rate // make it work for different ts -////// / ((accum)(time%1300) -////// / (1.225k // 00000!!!!! -//// / (accum_time -//// * (accum)syn_dynamics_neurons_in_partition)) -//// - global_parameters->core_target_rate; -// -// REAL reg_learning_signal = (neuron->core_pop_rate // make it work for different ts -//// / ((accum)(time%1300) -//// / (1.225k // 00000!!!!! 
-// / (accum_time -// * (accum)syn_dynamics_neurons_in_partition)) -// - neuron->core_target_rate; -// -//// io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); -// if (time % neuron->window_size == neuron->window_size - 1 & !printed_value){ //hardcoded time of reset -//// io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\tL:%k, v_mem:%k\n", -//// time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate, -//// learning_signal-v_mem_error, v_mem_error); -//// global_parameters->core_pop_rate = 0.k; -//// REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) -//// / (accum)syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; -//// io_printf(IO_BUF, "2 %u, rate at reset:%k, L:%k, rate:%k\n", time, reg_learning_signal, learning_signal, global_parameters->core_pop_rate); -// printed_value = true; -// } -// if (time % neuron->window_size == 0){ -//// new_learning_signal = 0.k; -//// global_parameters->core_pop_rate = 0.k; -// printed_value = false; -// } -//// neuron->L = learning_signal * neuron->w_fb; -//// learning_signal *= neuron->w_fb; -//// if (learning_signal != 0.k && new_learning_signal != learning_signal){ -//// if (new_learning_signal != learning_signal){// && time%1300 > 1100){ -//// io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); -//// if (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){ -// new_learning_signal = (learning_signal * neuron->w_fb) + v_mem_error; -//// } -//// new_learning_signal = learning_signal; -//// } -//// neuron->L = learning_signal; -// -// uint32_t test_length = (150*neuron->number_of_cues)+1000+150; -// if(neuron->number_of_cues == 0){ -// test_length = neuron->window_size; -// } -// -// if (time % neuron->window_size > test_length * 2){ //todo make this relative to number of cues -// neuron->L = new_learning_signal 
+ (reg_learning_signal);// * 0.1k); -// } -// else{ -// neuron->L = new_learning_signal; -// } -//// neuron->L = learning_signal * neuron->w_fb; // turns of all reg -// neuron->L = new_learning_signal; -// // Copy eta here instead? -// REAL local_eta = neuron->eta; -//// if (time % 99 == 0){ -//// io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); -//// } -// if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ -//// io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); -// neuron->B = neuron->b_0; -// neuron->b = 0.k; -// neuron->V_membrane = neuron->V_rest; -// neuron->refract_timer = 0; -// neuron->z = 0.k; -//// io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); -// } -//// io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); -// // All operations now need doing once per eprop synapse -// for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ -// if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ -// neuron->syn_state[syn_ind].z_bar_inp = 0.k; -// neuron->syn_state[syn_ind].z_bar = 0.k; -// neuron->syn_state[syn_ind].el_a = 0.k; -// neuron->syn_state[syn_ind].e_bar = 0.k; -// } -// // ****************************************************************** -// // Low-pass filter incoming spike train -// // ****************************************************************** -// neuron->syn_state[syn_ind].z_bar = -// neuron->syn_state[syn_ind].z_bar * neuron->exp_TC -// + -// (1 - neuron->exp_TC) * -// neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update -// -// -// // ****************************************************************** -// // Update eligibility vector -// // ****************************************************************** -// neuron->syn_state[syn_ind].el_a = -// (neuron->psi * 
neuron->syn_state[syn_ind].z_bar) + -// (rho - neuron->psi * neuron->beta) * -// neuron->syn_state[syn_ind].el_a; -//// (rho) * neuron->syn_state[syn_ind].el_a; -// -// -// // ****************************************************************** -// // Update eligibility trace -// // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * neuron->syn_state[syn_ind].el_a); -//// 0); -// -// neuron->syn_state[syn_ind].e_bar = -// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -// + (1 - neuron->exp_TC) * temp_elig_trace; -// -// // ****************************************************************** -// // Update cached total weight change -// // ****************************************************************** -// REAL this_dt_weight_change = -// local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; -// neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) -// -//// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -//// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -//// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -//// "L = %k = %k * %k = l * w_fb\n" -//// "this dw = %k \t tot dw %k\n" -//// , -//// total_synapses_per_neuron, -//// syn_ind, -//// neuron->syn_state[syn_ind].z_bar_inp, -//// neuron->syn_state[syn_ind].z_bar, -//// time, -//// neuron->L, learning_signal, neuron -> w_fb, -//// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -//// ); -//// } -// // reset input (can't have more than one spike per timestep -// neuron->syn_state[syn_ind].z_bar_inp = 0; -// -// // decrease timestep counter preventing rapid updates -//// if (neuron->syn_state[syn_ind].update_ready > 0){ -//// io_printf(IO_BUF, "ff reducing %u -- update:%u\n", syn_ind, 
neuron->syn_state[syn_ind].update_ready - 1); -// neuron->syn_state[syn_ind].update_ready -= 1; -//// } -//// else{ -//// io_printf(IO_BUF, "ff not reducing %u\n", syn_ind); -//// } -// -//// io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", -//// local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); -// -// } -// -// -// // All operations now need doing once per recurrent eprop synapse -// for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ -// if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ -// neuron->syn_state[syn_ind].z_bar_inp = 0.k; -// neuron->syn_state[syn_ind].z_bar = 0.k; -// neuron->syn_state[syn_ind].el_a = 0.k; -// neuron->syn_state[syn_ind].e_bar = 0.k; -// } -// // ****************************************************************** -// // Low-pass filter incoming spike train -// // ****************************************************************** -// neuron->syn_state[syn_ind].z_bar = -// neuron->syn_state[syn_ind].z_bar * neuron->exp_TC -// + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update -// -// -// // ****************************************************************** -// // Update eligibility vector -// // ****************************************************************** -// neuron->syn_state[syn_ind].el_a = -// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -// (rho - neuron->psi * neuron->beta) * -// neuron->syn_state[syn_ind].el_a; -//// (rho) * neuron->syn_state[syn_ind].el_a; -// -// -// // ****************************************************************** -// // Update eligibility trace -// // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * 
neuron->syn_state[syn_ind].el_a); -//// 0); -// -// neuron->syn_state[syn_ind].e_bar = -// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -// + (1 - neuron->exp_TC) * temp_elig_trace; -// -// // ****************************************************************** -// // Update cached total weight change -// // ****************************************************************** -// REAL this_dt_weight_change = -// local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; -// neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) -// -//// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -//// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -//// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -//// "L = %k = %k * %k = l * w_fb\n" -//// "this dw = %k \t tot dw %k\n" -//// , -//// total_synapses_per_neuron, -//// syn_ind, -//// neuron->syn_state[syn_ind].z_bar_inp, -//// neuron->syn_state[syn_ind].z_bar, -//// time, -//// neuron->L, learning_signal, neuron -> w_fb, -//// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -//// ); -//// } -// // reset input (can't have more than one spike per timestep -// neuron->syn_state[syn_ind].z_bar_inp = 0; -// -// // decrease timestep counter preventing rapid updates -//// if (neuron->syn_state[syn_ind].update_ready > 0){ -//// io_printf(IO_BUF, "recducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); -// neuron->syn_state[syn_ind].update_ready -= 1; -//// } -//// else{ -//// io_printf(IO_BUF, "not recducing %u\n", syn_ind); -//// } -// -//// io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", -//// local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); -// -// } -// -// return neuron->V_membrane; -//} -// -//void neuron_model_has_spiked(neuron_t 
*restrict neuron) { -// // reset z to zero -// neuron->z = 0; -//// neuron->V_membrane = neuron->V_rest; // Not sure this should be commented out -// // Set refractory timer -// neuron->refract_timer = neuron->T_refract - 1; -// neuron->A = 0; -//} -// -//state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) { -// return neuron->V_membrane; -//} -// diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 9e06c08c7f1..1641472517d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -25,15 +25,10 @@ #define SYNAPSES_PER_NEURON 250 static bool printed_value = false; -//REAL v_mem_error; -//REAL new_learning_signal; extern REAL learning_signal; -//REAL local_eta; -extern uint32_t time; -//extern global_neuron_params_pointer_t global_parameters; +extern uint32_t time; // this is probably unnecessary extern uint32_t syn_dynamics_neurons_in_partition; - typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply REAL z_bar_inp; @@ -72,10 +67,6 @@ struct neuron_params_t { // The time step in milliseconds REAL time_step; - // TODO: double-check that everything above this point is needed - - // TODO: see whether anything below this point should be approached in a similar way - // Neuron spike train REAL z; @@ -91,9 +82,7 @@ struct neuron_params_t { REAL b; // b(t) REAL b_0; // small b^0 uint32_t tau_a; -// decay_t e_to_dt_on_tau_a; // rho REAL beta; -// decay_t adpt; // (1-rho) REAL scalar; REAL L; // learning signal @@ -112,8 +101,6 @@ struct neuron_params_t { //! 
eprop neuron state -//! TODO: work to make this do something like what happens for LIF - struct neuron_t { // membrane voltage [mV] REAL V_membrane; @@ -141,10 +128,6 @@ struct neuron_t { // refractory time of neuron [timesteps] int32_t T_refract; - // TODO: double-check that everything above this point is needed - - // TODO: check approach for values below this (but these should be the same) - // Neuron spike train REAL z; @@ -177,45 +160,22 @@ struct neuron_t { // array of synaptic states - peak fan-in of 250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; - }; -//neuron_t *neuron_array; - -//typedef struct global_neuron_params_t { -// REAL core_pop_rate; -// REAL core_target_rate; -// REAL rate_exp_TC; -// REAL eta; // learning rate -//} global_neuron_params_t; -// TODO: use the threshold type for this instead? +// TODO: use the threshold type for this instead static inline void threshold_type_update_threshold(state_t z, neuron_t *threshold_type){ -// _print_threshold_params(threshold_type); - - + // TODO: Better names for these variables s1615 temp1 = decay_s1615(threshold_type->b, threshold_type->e_to_dt_on_tau_a); s1615 temp2 = decay_s1615(threshold_type->scalar, threshold_type->adpt) * z; - threshold_type->b = temp1 - + temp2; - // io_printf(IO_BUF, "temp1: %k; temp2: %k\n", temp1, temp2); - -// // Evolve threshold dynamics (decay to baseline) and adapt if z=nonzero -// // Update small b (same regardless of spike - uses z from previous timestep) -// threshold_type->b = -// decay_s1615(threshold_type->b, threshold_type->e_to_dt_on_tau_a) -// + decay_s1615(1000k, threshold_type->adpt) // fold scaling into decay to increase precision -// * z; // stored on neuron -// -// io_printf(IO_BUF, "before B = %k, temp1 = %k, temp2 = %k, b = %k, b_0 = %k, beta = %k", -// threshold_type->B, temp1, temp2, threshold_type->b, threshold_type->b_0, threshold_type->beta); + threshold_type->b = temp1 + temp2; + // Update large B threshold_type->B = 
threshold_type->b_0 + threshold_type->beta*threshold_type->b; -// io_printf(IO_BUF, "\nafter B = %k\n", threshold_type->B); } //! \brief Performs a ceil operation on an accum @@ -280,7 +240,6 @@ static inline void neuron_model_initialise( } static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { - // TODO: probably more parameters need copying across at this point, syn_state for a start params->V_init = state->V_membrane; params->refract_timer_init = state->refract_timer; params->z = state->z; @@ -289,22 +248,15 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par params->B = state->B; params->b = state->b; params->b_0 = state->b_0; -// state->e_to_dt_on_tau_a = expk(-kdivk(ts, params->tau_a)); params->beta = state->beta; -// state->adpt = 1 - expk(-kdivk(ts, params->tau_a)); params->scalar = state->scalar; params->L = state->L; params->w_fb = state->w_fb; params->window_size = state->window_size; params->number_of_cues = state->number_of_cues; -// log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", -// state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); - -// state->core_pop_rate = 0.0k; params->pop_rate = state->core_pop_rate; params->target_rate = state->core_target_rate; -// state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); params->eta = state->eta; for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { @@ -320,21 +272,9 @@ static inline void lif_neuron_closed_form( // update membrane voltage neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) - - neuron->z * B_t; // this line achieves reset + - neuron->z * B_t; // this line achieves reset (?) 
} -//void neuron_model_set_global_neuron_params( -// global_neuron_params_pointer_t params) { -// use(params); -// -// local_eta = params->eta; -// io_printf(IO_BUF, "local eta = %k\n", local_eta); -// io_printf(IO_BUF, "core_pop_rate = %k\n", params->core_pop_rate); -// io_printf(IO_BUF, "core_target_rate = %k\n", params->core_target_rate); -// io_printf(IO_BUF, "rate_exp_TC = %k\n\n", params->rate_exp_TC); -// // Does Nothing - no params -//} - state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, @@ -344,24 +284,10 @@ state_t neuron_model_state_update( log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); -// log_info("z_bar_inp (0) %k z_bar_inp (1) %k", -// neuron->syn_state[0].z_bar_inp, neuron->syn_state[1].z_bar_inp); - -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// for (int i=0; i < num_excitatory_inputs; i++) { -// total_exc += exc_input[i]; -// } -// for (int i=0; i< num_inhibitory_inputs; i++) { -// total_inh += inh_input[i]; -// } // Get the input in nA input_t input_this_timestep = exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; -// log_info("exc input 0 %k exc input 1 %k I_offset %k", exc_input[0], exc_input[1], neuron->I_offset); - lif_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep, B_t); @@ -375,7 +301,6 @@ state_t neuron_model_state_update( neuron->refract_timer -= 1; } - // ****************************************************************** // Update Psi (pseudo-derivative) (done once for each postsynaptic neuron) // ****************************************************************** @@ -385,62 +310,37 @@ state_t neuron_model_state_update( (1.0k/neuron->b_0) * 0.3k * //todo why is this commented? 
(1.0k - psi_temp2) : 0.0k; -// if (neuron->refract_timer){ -// neuron->psi = 0.0k; -// } neuron->psi *= neuron->A; -// log_info("check psi %k and A %k psi_temp1 %k psi_temp2 %k", -// neuron->psi, neuron->A, psi_temp1, psi_temp2); - -// This parameter is OK to update, as the actual size of the array is set in the header file, which matches the Python code. -// This should make it possible to do a pause and resume cycle and have reliable unloading of data. + // This parameter is OK to update, as the actual size of the array is set in the + // header file, which matches the Python code. This should make it possible to + // do a pause and resume cycle and have reliable unloading of data. uint32_t total_input_synapses_per_neuron = 40; //todo should this be fixed? uint32_t total_recurrent_synapses_per_neuron = 0; //todo should this be fixed? uint32_t recurrent_offset = 100; -// neuron->psi = neuron->psi << 10; - -// REAL rho = neuron->rho;//expk(-1.k / 1500.k); // adpt - // CHECK: but I think this has already been calculated above... ? + // TODO: check if this has already been calculated above... REAL rho = neuron->e_to_dt_on_tau_a; // decay_s1615(1.k, neuron->e_to_dt_on_tau_a); -// REAL rho_3 = (accum)decay_s1615(1000.k, neuron->e_to_dt_on_tau_a); -// io_printf(IO_BUF, "1:%k, 2:%k, 3:%k, 4:%k\n", rho, rho_2, rho_3, neuron->rho); + // TODO: Is there a better way of doing this? 
REAL accum_time = (accum)(time%neuron->window_size) * 0.001k; if (!accum_time){ accum_time += 1.k; } -// io_printf(IO_BUF, "time = %u, mod = %u, accum = %k, /s:%k, rate:%k, accum t:%k\n", time, time%1300, (accum)(time%1300), -// (accum)(time%1300) * 0.001k, (accum)(time%1300) * 0.001k * (accum)syn_dynamics_neurons_in_partition, -// accum_time); REAL v_mem_error; if (neuron->V_membrane > neuron->B){ v_mem_error = neuron->V_membrane - neuron->B; -// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); } else if (neuron->V_membrane < -neuron->B){ v_mem_error = neuron->V_membrane + neuron->B; -// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); } else{ v_mem_error = 0.k; } -// learning_signal += v_mem_error; - -// REAL reg_error = (global_parameters->core_target_rate - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; -// REAL reg_learning_signal = (global_parameters->core_pop_rate // make it work for different ts -//// / ((accum)(time%1300) -//// / (1.225k // 00000!!!!! -// / (accum_time -// * (accum)syn_dynamics_neurons_in_partition)) -// - global_parameters->core_target_rate; - -// log_info("update learning signal syn_dynamics_neurons_in_partition %u ", -// syn_dynamics_neurons_in_partition); + // Calculate regularised learning signal REAL reg_learning_signal = (neuron->core_pop_rate // make it work for different ts // / ((accum)(time%1300) // / (1.225k // 00000!!!!! 
@@ -448,70 +348,46 @@ state_t neuron_model_state_update( * (accum)syn_dynamics_neurons_in_partition)) - neuron->core_target_rate; -// io_printf(IO_BUF, "rls: %k\n", reg_learning_signal); - if (time % neuron->window_size == neuron->window_size - 1 & !printed_value){ //hardcoded time of reset -// io_printf(IO_BUF, "1 %u, rate err:%k, spikes:%k, target:%k\tL:%k, v_mem:%k\n", -// time, reg_learning_signal, global_parameters->core_pop_rate, global_parameters->core_target_rate, -// learning_signal-v_mem_error, v_mem_error); -// global_parameters->core_pop_rate = 0.k; -// REAL reg_learning_signal = ((global_parameters->core_pop_rate / 1.225k)//(accum)(time%1300)) -// / (accum)syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; -// io_printf(IO_BUF, "2 %u, rate at reset:%k, L:%k, rate:%k\n", time, reg_learning_signal, learning_signal, global_parameters->core_pop_rate); + // hardcoded reset + if (time % neuron->window_size == neuron->window_size - 1 & !printed_value) { printed_value = true; } if (time % neuron->window_size == 0){ -// new_learning_signal = 0.k; + // TODO: does this need editing to be done for all neurons? 
// global_parameters->core_pop_rate = 0.k; printed_value = false; } -// neuron->L = learning_signal * neuron->w_fb; -// learning_signal *= neuron->w_fb; -// if (learning_signal != 0.k && new_learning_signal != learning_signal){ -// if (new_learning_signal != learning_signal){// && time%1300 > 1100){ -// io_printf(IO_BUF, "L:%k, rL:%k, cL:%k, nL:%k\n", learning_signal, reg_learning_signal, learning_signal + reg_learning_signal, new_learning_signal); -// if (reg_learning_signal > 0.5k || reg_learning_signal < -0.5k){ + + // Calculate new learning signal REAL new_learning_signal = (learning_signal * neuron->w_fb) + v_mem_error; -// } -// new_learning_signal = learning_signal; -// } -// neuron->L = learning_signal; uint32_t test_length = (150*neuron->number_of_cues)+1000+150; - if(neuron->number_of_cues == 0){ + if(neuron->number_of_cues == 0) { test_length = neuron->window_size; } - if (time % neuron->window_size > test_length * 2){ //todo make this relative to number of cues + // TODO make this relative to number of cues? + if (time % neuron->window_size > test_length * 2) { neuron->L = new_learning_signal + (reg_learning_signal);// * 0.1k); } else{ neuron->L = new_learning_signal; } -// neuron->L = learning_signal * neuron->w_fb; // turns of all reg + neuron->L = new_learning_signal; - // Copy eta here instead? 
- REAL local_eta = neuron->eta; - -// log_info("neuron L %k local_eta %k learning_signal %k w_fb %k v_mem_error %k", -// neuron->L, local_eta, learning_signal, neuron->w_fb, v_mem_error); -// if (time % 99 == 0){ -// io_printf(IO_BUF, "during B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); -// } + // eta used to be a global parameter, but now just copy from neuron + REAL local_eta = neuron->eta; + + // Reset parameter check if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ -// io_printf(IO_BUF, "before B = %k, b = %k\n", neuron->B, neuron->b); neuron->B = neuron->b_0; neuron->b = 0.k; neuron->V_membrane = neuron->V_rest; neuron->refract_timer = 0; neuron->z = 0.k; -// io_printf(IO_BUF, "reset B = %k, b = %k\n", neuron->B, neuron->b); } -// log_info("Before eprop synapse update z_bar_inp (0) %k z_bar_inp (1) %k time %u", -// neuron->syn_state[0].z_bar_inp, neuron->syn_state[1].z_bar_inp, time); - - // io_printf(IO_BUF, "check B = %k, b = %k, time = %u\n", neuron->B, neuron->b, time); - // All operations now need doing once per eprop synapse + // All subsequent operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; @@ -528,10 +404,6 @@ state_t neuron_model_state_update( (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update -// if (syn_ind < 13) { -// log_info("z_bar %k syn_ind %u", neuron->syn_state[syn_ind].z_bar, syn_ind); -// } - // ****************************************************************** // Update eligibility vector @@ -540,28 +412,17 @@ state_t neuron_model_state_update( (neuron->psi * neuron->syn_state[syn_ind].z_bar) + (rho - neuron->psi * neuron->beta) * neuron->syn_state[syn_ind].el_a; -// (rho) * 
neuron->syn_state[syn_ind].el_a; - -// if (syn_ind < 13) { -// log_info("el_a %k syn_ind %u", neuron->syn_state[syn_ind].el_a, syn_ind); -// } - // ****************************************************************** // Update eligibility trace // ****************************************************************** REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - neuron->beta * neuron->syn_state[syn_ind].el_a); -// 0); neuron->syn_state[syn_ind].e_bar = neuron->exp_TC * neuron->syn_state[syn_ind].e_bar + (1 - neuron->exp_TC) * temp_elig_trace; -// if (syn_ind < 13) { -// log_info("e_bar %k syn_ind %u", neuron->syn_state[syn_ind].e_bar, syn_ind); -// } - // ****************************************************************** // Update cached total weight change // ****************************************************************** @@ -569,44 +430,14 @@ state_t neuron_model_state_update( local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) -// if (syn_ind < 13) { -// log_info("delta_w %k syn_ind %u", neuron->syn_state[syn_ind].delta_w, syn_ind); -// } - -// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -// "L = %k = %k * %k = l * w_fb\n" -// "this dw = %k \t tot dw %k\n" -// , -// total_synapses_per_neuron, -// syn_ind, -// neuron->syn_state[syn_ind].z_bar_inp, -// neuron->syn_state[syn_ind].z_bar, -// time, -// neuron->L, learning_signal, neuron -> w_fb, -// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -// ); -// } // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; // decrease timestep counter preventing rapid updates -// if 
(neuron->syn_state[syn_ind].update_ready > 0){ -// io_printf(IO_BUF, "ff reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); neuron->syn_state[syn_ind].update_ready -= 1; -// } -// else{ -// io_printf(IO_BUF, "ff not reducing %u\n", syn_ind); -// } - -// io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", -// local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); - } - - // All operations now need doing once per recurrent eprop synapse + // All further operations now need doing once per recurrent eprop synapse for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ neuron->syn_state[syn_ind].z_bar_inp = 0.k; @@ -617,10 +448,10 @@ state_t neuron_model_state_update( // ****************************************************************** // Low-pass filter incoming spike train // ****************************************************************** + // updating z_bar is problematic, if spike could come and interrupt neuron update neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - + + (1 - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // ****************************************************************** // Update eligibility vector @@ -629,15 +460,12 @@ state_t neuron_model_state_update( (neuron->psi * neuron->syn_state[syn_ind].z_bar) + (rho - neuron->psi * neuron->beta) * neuron->syn_state[syn_ind].el_a; -// (rho) * neuron->syn_state[syn_ind].el_a; - // ****************************************************************** // Update eligibility trace // ****************************************************************** REAL 
temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - neuron->beta * neuron->syn_state[syn_ind].el_a); -// 0); neuron->syn_state[syn_ind].e_bar = neuron->exp_TC * neuron->syn_state[syn_ind].e_bar @@ -650,36 +478,11 @@ state_t neuron_model_state_update( local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) -// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -// "L = %k = %k * %k = l * w_fb\n" -// "this dw = %k \t tot dw %k\n" -// , -// total_synapses_per_neuron, -// syn_ind, -// neuron->syn_state[syn_ind].z_bar_inp, -// neuron->syn_state[syn_ind].z_bar, -// time, -// neuron->L, learning_signal, neuron -> w_fb, -// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -// ); -// } // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; // decrease timestep counter preventing rapid updates -// if (neuron->syn_state[syn_ind].update_ready > 0){ -// io_printf(IO_BUF, "recducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); neuron->syn_state[syn_ind].update_ready -= 1; -// } -// else{ -// io_printf(IO_BUF, "not recducing %u\n", syn_ind); -// } - -// io_printf(IO_BUF, "eta: %k, l: %k, ebar: %k, delta_w: %k, this dt: %k\n", -// local_eta, neuron->L, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].delta_w, this_dt_weight_change); - } return neuron->V_membrane; @@ -688,7 +491,8 @@ state_t neuron_model_state_update( void neuron_model_has_spiked(neuron_t *restrict neuron) { // reset z to zero neuron->z = 0; -// neuron->V_membrane = neuron->V_rest; // Not sure this should be commented out + // TODO: Not sure this should be commented out +// 
neuron->V_membrane = neuron->V_rest; // Set refractory timer neuron->refract_timer = neuron->T_refract - 1; neuron->A = 0; @@ -698,9 +502,6 @@ state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) { return neuron->V_membrane; } - - - void neuron_model_print_state_variables(const neuron_t *neuron) { log_debug("V membrane = %11.4k mv", neuron->V_membrane); log_debug("learning = %k ", neuron->L); @@ -715,25 +516,17 @@ void neuron_model_print_state_variables(const neuron_t *neuron) { } void neuron_model_print_parameters(const neuron_t *neuron) { -// io_printf(IO_BUF, "V reset = %11.4k mv\n\n", neuron->V_reset); -// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); -// -// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); -// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); -// -// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); -// -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// -// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); -// -// io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); -// -// io_printf(IO_BUF, "window size = %u ts\n", neuron->window_size); -// -// io_printf(IO_BUF, "beta = %k n/a\n", neuron->beta); -// -// io_printf(IO_BUF, "adpt = %k n/a\n", neuron->adpt); + log_debug("V reset = %11.4k mv\n\n", neuron->V_reset); + log_debug("V rest = %11.4k mv\n", neuron->V_rest); + log_debug("I offset = %11.4k nA\n", neuron->I_offset); + log_debug("R membrane = %11.4k Mohm\n", neuron->R_membrane); + log_debug("exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); + log_debug("T refract = %u timesteps\n", neuron->T_refract); + log_debug("learning = %k n/a\n", neuron->L); + log_debug("feedback w = %k n/a\n\n", neuron->w_fb); + log_debug("window size = %u ts\n", neuron->window_size); + log_debug("beta = %k n/a\n", neuron->beta); + log_debug("adpt = %k n/a\n", neuron->adpt); } #endif // _NEURON_MODEL_EPROP_ADAPTIVE_IMPL_H_ diff --git 
a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c deleted file mode 100644 index 9ddb65d370a..00000000000 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.c +++ /dev/null @@ -1,214 +0,0 @@ -//#include "neuron_model_left_right_readout_impl.h" -// -//#include -// -//extern uint32_t time; -//extern REAL learning_signal; -//REAL local_eta; -//REAL v_mem_error; -// -//// simple Leaky I&F ODE -//static inline void _lif_neuron_closed_form( -// neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { -// -// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; -// -// // update membrane voltage -// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); -//} -// -//void neuron_model_set_global_neuron_params( -// global_neuron_params_pointer_t params) { -// use(params); -// -// local_eta = params->eta; -// -//// io_printf(IO_BUF, "local eta = %k\n", local_eta); -//// io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); -//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -//// io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); -//// io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); -//// io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); -//// io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); -//// io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); -//// io_printf(IO_BUF, "p_key = %u\n", params->p_key); -//// io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); -//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -//// io_printf(IO_BUF, "local eta = %k\n", params->); -// -// // Does Nothing - no params -//} -// -//state_t neuron_model_state_update( -// uint16_t num_excitatory_inputs, input_t* exc_input, -// uint16_t num_inhibitory_inputs, input_t* inh_input, -// 
input_t external_bias, neuron_pointer_t neuron, REAL dummy) { -// -// log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); -// log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); -//// io_printf(IO_BUF, "Exc 1: %12.6k, Exc 2: %12.6k - ", exc_input[0], exc_input[1]); -//// io_printf(IO_BUF, "Inh 1: %12.6k, Inh 2: %12.6k - %u\n", inh_input[0], inh_input[1], time); -// use(dummy); -// -// // If outside of the refractory period -// if (neuron->refract_timer <= 0) { -//// REAL total_exc = 0; -//// REAL total_inh = 0; -//// -//// total_exc += exc_input[0]; -//// total_inh += inh_input[0]; -//// for (int i=0; i < num_excitatory_inputs; i++){ -//// total_exc += exc_input[i]; -//// } -//// for (int i=0; i< num_inhibitory_inputs; i++){ -//// total_inh += inh_input[i]; -//// } -// // Get the input in nA -// input_t input_this_timestep = -// exc_input[0] + exc_input[1] + neuron->I_offset; -// -// _lif_neuron_closed_form( -// neuron, neuron->V_membrane, input_this_timestep); -// } else { -// -// // countdown refractory timer -// neuron->refract_timer -= 1; -// } -// -// uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
-// -//// if(learning_signal){ -//// io_printf(IO_BUF, "learning signal = %k\n", learning_signal); -//// } -//// if (neuron->V_membrane > 10.k){ -//// v_mem_error = neuron->V_membrane - 10.k; -////// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); -//// } -//// else if (neuron->V_membrane < -10.k){ -//// v_mem_error = neuron->V_membrane + 10.k; -////// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); -//// } -//// else{ -//// v_mem_error = 0.k; -//// } -//// learning_signal += v_mem_error * 0.1; -// -// neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); -//// REAL tau_decay = expk(-1.k / 1500.k); -// // All operations now need doing once per eprop synapse -// for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ -// // ****************************************************************** -// // Low-pass filter incoming spike train -// // ****************************************************************** -// neuron->syn_state[syn_ind].z_bar = -// neuron->syn_state[syn_ind].z_bar * neuron->exp_TC -// + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update -// -// -// // ****************************************************************** -// // Update eligibility vector -// // ****************************************************************** -//// neuron->syn_state[syn_ind].el_a = -//// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -//// (rho - neuron->psi * neuron->beta) * -//// neuron->syn_state[syn_ind].el_a; -// -// -// // ****************************************************************** -// // Update eligibility trace -// // ****************************************************************** -//// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -//// neuron->beta * neuron->syn_state[syn_ind].el_a); -//// -//// 
neuron->syn_state[syn_ind].e_bar = -//// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -//// + (1 - neuron->exp_TC) * temp_elig_trace; -// -// // ****************************************************************** -// // Update cached total weight change -// // ****************************************************************** -// -// REAL this_dt_weight_change = -//// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; -// local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; -// -// neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; -//// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -//// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -//// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -//// "L = %k = %k * %k = l * w_fb\n" -//// "this dw = %k \t tot dw %k\n" -//// , -//// total_synapses_per_neuron, -//// syn_ind, -//// neuron->syn_state[syn_ind].z_bar_inp, -//// neuron->syn_state[syn_ind].z_bar, -//// time, -//// neuron->L, learning_signal, neuron -> w_fb, -//// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -//// ); -//// } -// // reset input (can't have more than one spike per timestep -// neuron->syn_state[syn_ind].z_bar_inp = 0; -// -// // decrease timestep counter preventing rapid updates -//// if (neuron->syn_state[syn_ind].update_ready > 0){ -//// io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); -// neuron->syn_state[syn_ind].update_ready -= 1; -//// } -//// else{ -//// io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); -//// } -// -// } -// -// return neuron->V_membrane; -//} -// -//void neuron_model_has_spiked(neuron_pointer_t neuron) { -// -// // reset membrane voltage -// neuron->V_membrane = neuron->V_reset; -// -// // reset refractory timer -// neuron->refract_timer = neuron->T_refract; -//} -// -//state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { -// return neuron->V_membrane; 
-//} -// -//void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { -// log_debug("V membrane = %11.4k mv", neuron->V_membrane); -//} -// -//void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { -//// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); -//// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); -//// -//// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); -//// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); -//// -//// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); -//// -//// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -//// -//// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); -//// -//// io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); -//// -//// io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); -// -//// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -//// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); -//// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", -//// neuron->time_to_spike_ticks); -// -//// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -//// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -//// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -//// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -////// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -//// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); -//} diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index d3257b66990..505aca03c1a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -1,21 +1,17 @@ -#ifndef 
_NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ -#define _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ +#ifndef _NEURON_MODEL_LIF_CURR_LEFT_RIGHT_READOUT_IMPL_H_ +#define _NEURON_MODEL_LIF_CURR_LEFT_RIGHT_READOUT_IMPL_H_ #include "neuron_model.h" #include "random.h" #define SYNAPSES_PER_NEURON 250 -//extern uint32_t time; extern REAL learning_signal; - typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply REAL z_bar_inp; REAL z_bar; // low-pass filtered spike train -// REAL el_a; // adaptive component of eligibility vector -// REAL e_bar; // low-pass filtered eligibility trace int32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). }eprop_syn_state_t; @@ -49,25 +45,15 @@ typedef struct neuron_params_t { // The time step in milliseconds REAL time_step; - - // Poisson compartment params -// REAL mean_isi_ticks; -// REAL time_to_spike_ticks; -// -// int32_t time_since_last_spike; -// REAL rate_at_last_setting; -// REAL rate_update_threshold; - REAL L; // learning signal REAL w_fb; // feedback weight uint32_t window_size; // globals here - mars_kiss64_seed_t kiss_seed; // array of 4 values (?) 
+ mars_kiss64_seed_t kiss_seed; // array of 4 values REAL ticks_per_second; REAL readout_V_0; REAL readout_V_1; -// REAL prob_command; REAL rate_on; REAL rate_off; REAL mean_0; @@ -78,25 +64,8 @@ typedef struct neuron_params_t { REAL eta; uint32_t number_of_cues; - // array of synaptic states - peak fan-in of >250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; - - - // Poisson compartment params -// REAL mean_isi_ticks; -// REAL time_to_spike_ticks; -// -// int32_t time_since_last_spike; -// REAL rate_at_last_setting; -// REAL rate_update_threshold; - - -// // Should be in global params -// mars_kiss64_seed_t spike_source_seed; // array of 4 values -//// UFRACT seconds_per_tick; -// REAL ticks_per_second; - }; ///////////////////////////////////////////////////////////// @@ -128,25 +97,15 @@ typedef struct neuron_t { // refractory time of neuron [timesteps] int32_t T_refract; - - // Poisson compartment params -// REAL mean_isi_ticks; -// REAL time_to_spike_ticks; -// -// int32_t time_since_last_spike; -// REAL rate_at_last_setting; -// REAL rate_update_threshold; - REAL L; // learning signal REAL w_fb; // feedback weight uint32_t window_size; // former globals - mars_kiss64_seed_t kiss_seed; // array of 4 values (?) 
+ mars_kiss64_seed_t kiss_seed; // array of 4 values REAL ticks_per_second; REAL readout_V_0; REAL readout_V_1; -// REAL prob_command; REAL rate_on; REAL rate_off; REAL mean_0; @@ -160,39 +119,8 @@ typedef struct neuron_t { // array of synaptic states - peak fan-in of >250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; - // Poisson compartment params -// REAL mean_isi_ticks; -// REAL time_to_spike_ticks; -// -// int32_t time_since_last_spike; -// REAL rate_at_last_setting; -// REAL rate_update_threshold; - - -// // Should be in global params -// mars_kiss64_seed_t spike_source_seed; // array of 4 values -//// UFRACT seconds_per_tick; -// REAL ticks_per_second; - } neuron_t; -//typedef struct global_neuron_params_t { -// mars_kiss64_seed_t kiss_seed; // array of 4 values -// REAL ticks_per_second; -// REAL readout_V_0; -// REAL readout_V_1; -//// REAL prob_command; -// REAL rate_on; -// REAL rate_off; -// REAL mean_0; -// REAL mean_1; -// REAL cross_entropy; -// uint32_t p_key; -// uint32_t p_pop_size; -// REAL eta; -// uint32_t number_of_cues; -//} global_neuron_params_t; - //! \brief Performs a ceil operation on an accum //! \param[in] value The value to ceil //! \return The ceil of the value @@ -226,8 +154,9 @@ static inline void neuron_model_initialise( // former globals for (uint32_t n_seed = 0; n_seed < 4; n_seed++) { - state->kiss_seed[n_seed] = params->kiss_seed[n_seed]; // array of 4 values (?) 
+ state->kiss_seed[n_seed] = params->kiss_seed[n_seed]; // array of 4 values } + state->ticks_per_second = params->ticks_per_second; state->readout_V_0 = params->readout_V_0; state->readout_V_1 = params->readout_V_1; @@ -241,7 +170,6 @@ static inline void neuron_model_initialise( state->p_pop_size = params->p_pop_size; state->eta = params->eta; state->number_of_cues = params->number_of_cues; -// local_eta = params->eta; // log_info("Check p_key %u p_pop_size %u", params->p_key, params->p_pop_size); // log_info("Check number_of_cues %u eta %k", params->number_of_cues, params->eta); @@ -252,7 +180,6 @@ static inline void neuron_model_initialise( for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; } - } static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { @@ -278,29 +205,6 @@ static inline void lif_neuron_closed_form( neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); } -//void neuron_model_set_global_neuron_params( -// global_neuron_params_pointer_t params) { -// use(params); -// -// local_eta = params->eta; -// -//// io_printf(IO_BUF, "local eta = %k\n", local_eta); -//// io_printf(IO_BUF, "readout_V_0 = %k\n", params->readout_V_0); -//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -//// io_printf(IO_BUF, "rate_on = %k\n", params->rate_on); -//// io_printf(IO_BUF, "rate_off = %k\n", params->rate_off); -//// io_printf(IO_BUF, "mean_0 = %k\n", params->mean_0); -//// io_printf(IO_BUF, "mean_1 = %k\n", params->mean_1); -//// io_printf(IO_BUF, "cross_entropy = %k\n", params->cross_entropy); -//// io_printf(IO_BUF, "p_key = %u\n", params->p_key); -//// io_printf(IO_BUF, "p_pop_size = %u\n", params->p_pop_size); -//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -//// io_printf(IO_BUF, "readout_V_1 = %k\n", params->readout_V_1); -//// io_printf(IO_BUF, "local eta = %k\n", params->); -// -// // Does Nothing - no params -//} - 
state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, @@ -309,23 +213,10 @@ state_t neuron_model_state_update( log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); -// io_printf(IO_BUF, "Exc 1: %12.6k, Exc 2: %12.6k - ", exc_input[0], exc_input[1]); -// io_printf(IO_BUF, "Inh 1: %12.6k, Inh 2: %12.6k - %u\n", inh_input[0], inh_input[1], time); use(B_t); // If outside of the refractory period if (neuron->refract_timer <= 0) { -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// total_exc += exc_input[0]; -// total_inh += inh_input[0]; -// for (int i=0; i < num_excitatory_inputs; i++){ -// total_exc += exc_input[i]; -// } -// for (int i=0; i< num_inhibitory_inputs; i++){ -// total_inh += inh_input[i]; -// } // Get the input in nA input_t input_this_timestep = exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; @@ -340,27 +231,10 @@ state_t neuron_model_state_update( uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
-// if(learning_signal){ -// io_printf(IO_BUF, "learning signal = %k\n", learning_signal); -// } -// if (neuron->V_membrane > 10.k){ -// v_mem_error = neuron->V_membrane - 10.k; -//// io_printf(IO_BUF, "> %k = %k - %k\n", v_mem_error, neuron->V_membrane, neuron->B); -// } -// else if (neuron->V_membrane < -10.k){ -// v_mem_error = neuron->V_membrane + 10.k; -//// io_printf(IO_BUF, "< %k = %k - %k\n", v_mem_error, -neuron->V_membrane, neuron->B); -// } -// else{ -// v_mem_error = 0.k; -// } -// learning_signal += v_mem_error * 0.1; - neuron->L = learning_signal * neuron->w_fb; //* ((accum)syn_ind * -1.k); REAL local_eta = neuron->eta; -// REAL tau_decay = expk(-1.k / 1500.k); - // All operations now need doing once per eprop synapse + // All subsequent operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ // ****************************************************************** // Low-pass filter incoming spike train @@ -369,62 +243,19 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - - // ****************************************************************** - // Update eligibility vector - // ****************************************************************** -// neuron->syn_state[syn_ind].el_a = -// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -// (rho - neuron->psi * neuron->beta) * -// neuron->syn_state[syn_ind].el_a; - - - // ****************************************************************** - // Update eligibility trace - // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * neuron->syn_state[syn_ind].el_a); -// -// neuron->syn_state[syn_ind].e_bar = -// neuron->exp_TC * 
neuron->syn_state[syn_ind].e_bar -// + (1 - neuron->exp_TC) * temp_elig_trace; - // ****************************************************************** // Update cached total weight change // ****************************************************************** - REAL this_dt_weight_change = -// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; -// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -// "L = %k = %k * %k = l * w_fb\n" -// "this dw = %k \t tot dw %k\n" -// , -// total_synapses_per_neuron, -// syn_ind, -// neuron->syn_state[syn_ind].z_bar_inp, -// neuron->syn_state[syn_ind].z_bar, -// time, -// neuron->L, learning_signal, neuron -> w_fb, -// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -// ); -// } + // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; // decrease timestep counter preventing rapid updates -// if (neuron->syn_state[syn_ind].update_ready > 0){ -// io_printf(IO_BUF, "lr reducing %u -- update:%u\n", syn_ind, neuron->syn_state[syn_ind].update_ready - 1); neuron->syn_state[syn_ind].update_ready -= 1; -// } -// else{ -// io_printf(IO_BUF, "lr not reducing %u\n", syn_ind); -// } - } return neuron->V_membrane; @@ -448,33 +279,16 @@ void neuron_model_print_state_variables(const neuron_t *neuron) { } void neuron_model_print_parameters(const neuron_t *neuron) { -// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); -// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); -// -// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); -// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); -// -// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); -// -// 
io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// -// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); -// -// io_printf(IO_BUF, "feedback w = %k n/a\n", neuron->w_fb); -// -// io_printf(IO_BUF, "window size = %u n/a\n", neuron->window_size); - -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); -// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", -// neuron->time_to_spike_ticks); - -// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); + log_debug("V reset = %11.4k mv\n", neuron->V_reset); + log_debug("V rest = %11.4k mv\n", neuron->V_rest); + log_debug("I offset = %11.4k nA\n", neuron->I_offset); + log_debug("R membrane = %11.4k Mohm\n", neuron->R_membrane); + log_debug("exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); + log_debug("T refract = %u timesteps\n", neuron->T_refract); + log_debug("learning = %k n/a\n", neuron->L); + log_debug("feedback w = %k n/a\n", neuron->w_fb); + log_debug("window size = %u n/a\n", neuron->window_size); + log_debug("T refract = %u timesteps\n", neuron->T_refract); } -#endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ +#endif // _NEURON_MODEL_LIF_CURR_LEFT_RIGHT_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c deleted file mode 100644 index 2e3eacfbd1d..00000000000 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.c +++ /dev/null @@ -1,178 +0,0 @@ -//#include 
"neuron_model_sinusoid_readout_impl.h" -// -//#include -// -//extern uint32_t time; -//extern REAL learning_signal; -//REAL local_eta; -// -//// simple Leaky I&F ODE -//static inline void _lif_neuron_closed_form( -// neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { -// -// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; -// -// // update membrane voltage -// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); -//} -// -//void neuron_model_set_global_neuron_params( -// global_neuron_params_pointer_t params) { -// use(params); -// -// local_eta = params->eta; -// io_printf(IO_BUF, "local eta = %k\n", local_eta); -// -// // Does Nothing - no params -//} -// -//state_t neuron_model_state_update( -// uint16_t num_excitatory_inputs, input_t* exc_input, -// uint16_t num_inhibitory_inputs, input_t* inh_input, -// input_t external_bias, neuron_pointer_t neuron, REAL dummy) { -// -// log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); -// log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); -// use(dummy); -// -// // If outside of the refractory period -// if (neuron->refract_timer <= 0) { -//// REAL total_exc = 0; -//// REAL total_inh = 0; -//// -//// total_exc += exc_input[0]; -//// total_inh += inh_input[0]; -//// for (int i=0; i < num_excitatory_inputs; i++){ -//// total_exc += exc_input[i]; -//// } -//// for (int i=0; i< num_inhibitory_inputs; i++){ -//// total_inh += inh_input[i]; -//// } -// // Get the input in nA -// input_t input_this_timestep = -// exc_input[0] + exc_input[1] + neuron->I_offset; -// -// _lif_neuron_closed_form( -// neuron, neuron->V_membrane, input_this_timestep); -// } else { -// -// // countdown refractory timer -// neuron->refract_timer -= 1; -// } -// -// uint32_t total_synapses_per_neuron = 100; //todo should this be fixed? 
-// -// neuron->L = learning_signal * neuron->w_fb; -// -// // All operations now need doing once per eprop synapse -// for (uint32_t syn_ind=0; syn_ind < total_synapses_per_neuron; syn_ind++){ -// // ****************************************************************** -// // Low-pass filter incoming spike train -// // ****************************************************************** -// neuron->syn_state[syn_ind].z_bar = -// neuron->syn_state[syn_ind].z_bar * neuron->exp_TC -//// + (1 - neuron->exp_TC) * -// + -// neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update -// -// -// // ****************************************************************** -// // Update eligibility vector -// // ****************************************************************** -//// neuron->syn_state[syn_ind].el_a = -//// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -//// (rho - neuron->psi * neuron->beta) * -//// neuron->syn_state[syn_ind].el_a; -// -// -// // ****************************************************************** -// // Update eligibility trace -// // ****************************************************************** -//// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -//// neuron->beta * neuron->syn_state[syn_ind].el_a); -//// -//// neuron->syn_state[syn_ind].e_bar = -//// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -//// + (1 - neuron->exp_TC) * temp_elig_trace; -// -// // ****************************************************************** -// // Update cached total weight change -// // ****************************************************************** -// REAL this_dt_weight_change = -//// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; -// local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; -// -// neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; -//// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || 
neuron->syn_state[syn_ind].z_bar_inp){ -//// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -//// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -//// "L = %k = %k * %k = l * w_fb\n" -//// "this dw = %k \t tot dw %k\n" -//// , -//// total_synapses_per_neuron, -//// syn_ind, -//// neuron->syn_state[syn_ind].z_bar_inp, -//// neuron->syn_state[syn_ind].z_bar, -//// time, -//// neuron->L, learning_signal, neuron -> w_fb, -//// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -//// ); -//// } -// // reset input (can't have more than one spike per timestep -// neuron->syn_state[syn_ind].z_bar_inp = 0; -// -// -// // decrease timestep counter preventing rapid updates -// if (neuron->syn_state[syn_ind].update_ready > 0){ -// neuron->syn_state[syn_ind].update_ready -= 1; -// } -// -// } -// -// return neuron->V_membrane; -//} -// -//void neuron_model_has_spiked(neuron_pointer_t neuron) { -// -// // reset membrane voltage -// neuron->V_membrane = neuron->V_reset; -// -// // reset refractory timer -// neuron->refract_timer = neuron->T_refract; -//} -// -//state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { -// return neuron->V_membrane; -//} -// -//void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { -// log_debug("V membrane = %11.4k mv", neuron->V_membrane); -//} -// -//void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { -// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); -// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); -// -// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); -// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); -// -// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); -// -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// -// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); -// -// io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); -// -//// io_printf(IO_BUF, "T 
refract = %u timesteps\n", neuron->T_refract); -//// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); -//// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", -//// neuron->time_to_spike_ticks); -// -//// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -//// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -//// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -//// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -////// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -//// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); -//} diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index a9775f7bb7d..b6907bb7c1b 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -7,17 +7,12 @@ #define SYNAPSES_PER_NEURON 250 -//extern uint32_t time; extern REAL learning_signal; -//extern REAL local_eta; - typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply REAL z_bar_inp; REAL z_bar; // low-pass filtered spike train -// REAL el_a; // adaptive component of eligibility vector -// REAL e_bar; // low-pass filtered eligibility trace uint32_t update_ready; // counter to enable batch update (i.e. don't perform on every spike). 
}eprop_syn_state_t; @@ -51,8 +46,6 @@ struct neuron_params_t { // The time step in milliseconds REAL time_step; - // TODO: double-check that everything above this point is needed - REAL L; // learning signal REAL w_fb; // feedback weight @@ -102,29 +95,8 @@ typedef struct neuron_t { // array of synaptic states - peak fan-in of >250 for this case eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; - // Poisson compartment params -// REAL mean_isi_ticks; -// REAL time_to_spike_ticks; -// -// int32_t time_since_last_spike; -// REAL rate_at_last_setting; -// REAL rate_update_threshold; - -// // Should be in global params -// mars_kiss64_seed_t spike_source_seed; // array of 4 values -//// UFRACT seconds_per_tick; -// REAL ticks_per_second; - } neuron_t; -//typedef struct global_neuron_params_t { -//// mars_kiss64_seed_t spike_source_seed; // array of 4 values -//// REAL ticks_per_second; -//// REAL readout_V; -// REAL target_V[1024]; -// REAL eta; -//} global_neuron_params_t; - //! \brief Performs a ceil operation on an accum //! \param[in] value The value to ceil //! 
\return The ceil of the value @@ -158,16 +130,13 @@ static inline void neuron_model_initialise( state->target_V[n_v] = params->target_V[n_v]; } state->eta = params->eta; -// local_eta = params->eta; for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; } - } static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { - // TODO: probably more parameters need copying across at this point, syn_state for a start params->V_init = state->V_membrane; params->refract_timer_init = state->refract_timer; params->L = state->L; @@ -188,16 +157,6 @@ static inline void lif_neuron_closed_form( neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); } -//void neuron_model_set_global_neuron_params( -// global_neuron_params_pointer_t params) { -// use(params); -// -// local_eta = params->eta; -// io_printf(IO_BUF, "local eta = %k\n", local_eta); -// -// // Does Nothing - no params -//} - state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, @@ -210,17 +169,6 @@ state_t neuron_model_state_update( // If outside of the refractory period if (neuron->refract_timer <= 0) { -// REAL total_exc = 0; -// REAL total_inh = 0; -// -// total_exc += exc_input[0]; -// total_inh += inh_input[0]; -// for (int i=0; i < num_excitatory_inputs; i++){ -// total_exc += exc_input[i]; -// } -// for (int i=0; i< num_inhibitory_inputs; i++){ -// total_inh += inh_input[i]; -// } // Get the input in nA input_t input_this_timestep = exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; @@ -249,58 +197,21 @@ state_t neuron_model_state_update( + neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update - - // ****************************************************************** - // Update eligibility vector - // 
****************************************************************** -// neuron->syn_state[syn_ind].el_a = -// (neuron->psi * neuron->syn_state[syn_ind].z_bar) + -// (rho - neuron->psi * neuron->beta) * -// neuron->syn_state[syn_ind].el_a; - - - // ****************************************************************** - // Update eligibility trace - // ****************************************************************** -// REAL temp_elig_trace = neuron->psi * (neuron->syn_state[syn_ind].z_bar - -// neuron->beta * neuron->syn_state[syn_ind].el_a); -// -// neuron->syn_state[syn_ind].e_bar = -// neuron->exp_TC * neuron->syn_state[syn_ind].e_bar -// + (1 - neuron->exp_TC) * temp_elig_trace; - // ****************************************************************** // Update cached total weight change // ****************************************************************** REAL this_dt_weight_change = -// -local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; local_eta * neuron->L * neuron->syn_state[syn_ind].z_bar; neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; -// if (!syn_ind || neuron->syn_state[syn_ind].z_bar){// || neuron->syn_state[syn_ind].z_bar_inp){ -// io_printf(IO_BUF, "total synapses = %u \t syn_ind = %u \t " -// "z_bar_inp = %k \t z_bar = %k \t time:%u\n" -// "L = %k = %k * %k = l * w_fb\n" -// "this dw = %k \t tot dw %k\n" -// , -// total_synapses_per_neuron, -// syn_ind, -// neuron->syn_state[syn_ind].z_bar_inp, -// neuron->syn_state[syn_ind].z_bar, -// time, -// neuron->L, learning_signal, neuron -> w_fb, -// this_dt_weight_change, neuron->syn_state[syn_ind].delta_w -// ); -// } + // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; - // decrease timestep counter preventing rapid updates if (neuron->syn_state[syn_ind].update_ready > 0){ neuron->syn_state[syn_ind].update_ready -= 1; } - } return neuron->V_membrane; @@ -324,31 +235,15 @@ void neuron_model_print_state_variables(const neuron_t *neuron) { 
} void neuron_model_print_parameters(const neuron_t *neuron) { -// io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); -// io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); -// -// io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); -// io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); -// -// io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); -// -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// -// io_printf(IO_BUF, "learning = %k n/a\n", neuron->L); -// -// io_printf(IO_BUF, "feedback w = %k n/a\n\n", neuron->w_fb); -// -// io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); -// io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); -// io_printf(IO_BUF, "time_to_spike_ticks = %k \n", -// neuron->time_to_spike_ticks); - -// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); + log_debug("V reset = %11.4k mv\n", neuron->V_reset); + log_debug("V rest = %11.4k mv\n", neuron->V_rest); + log_debug("I offset = %11.4k nA\n", neuron->I_offset); + log_debug("R membrane = %11.4k Mohm\n", neuron->R_membrane); + log_debug("exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); + log_debug("T refract = %u timesteps\n", neuron->T_refract); + log_debug("learning = %k n/a\n", neuron->L); + log_debug("feedback w = %k n/a\n\n", neuron->w_fb); + log_debug("T refract = %u timesteps\n", neuron->T_refract); } #endif // _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ From 48fd0ccc8669376d7dd8825936a48fc80717ed63 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 25 May 2023 12:02:59 +0100 Subject: [PATCH 098/123] 
Fixes for incremental_learning script and tidying up --- .../neuron_impl_eprop_adaptive.h | 26 ++++++-- .../neuron_impl_left_right_readout.h | 63 +++++++------------ .../models/neuron_model_eprop_adaptive_impl.h | 52 ++++++++------- .../neuron_model_left_right_readout_impl.h | 13 ++-- .../synapse_dynamics_eprop_adaptive_impl.c | 4 +- .../poisson/spike_source_poisson.c | 7 ++- .../neuron_model_left_right_readout.py | 8 +-- 7 files changed, 82 insertions(+), 91 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 1e4e9cb302d..e9f9cfe07bb 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -224,6 +224,16 @@ static void neuron_impl_load_neuron_parameters( spin1_memcpy(save_initial_state, address, next * sizeof(uint32_t)); } + log_info("neuron_impl_load_neuron_parameters n_neurons %u", n_neurons); + log_info("local_eta %k", neuron_array[0].eta); + log_info("core pop rate %k", neuron_array[0].core_pop_rate); + log_info("core target rate %k", neuron_array[0].core_target_rate); + log_info("rate exp TC %k", neuron_array[0].rate_exp_TC); + for (index_t n = 0; n < n_neurons; n++) { + log_info("neuron index %u voltage %k", n, neuron_array[n].V_membrane); + log_info("neuron index %u core_pop_rate %k", n, neuron_array[n].core_pop_rate); + } + #if LOG_LEVEL >= LOG_DEBUG log_debug("-------------------------------------\n"); for (index_t n = 0; n < n_neurons; n++) { @@ -237,18 +247,24 @@ static void neuron_impl_load_neuron_parameters( static void neuron_impl_do_timestep_update( uint32_t timer_count, uint32_t time, uint32_t n_neurons) { + // Decay the "global" rate trace (done once per timestep) + for (uint32_t n_ind=0; n_ind < n_neurons; n_ind++) { + neuron_t *global_neuron = &neuron_array[n_ind]; + global_neuron->core_pop_rate = + 
global_neuron->core_pop_rate * global_neuron->rate_exp_TC; + } + for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; - // Decay the "global" rate trace - neuron->core_pop_rate = neuron->core_pop_rate * neuron->rate_exp_TC; - // Get the input_type parameters and voltage for this neuron input_type_t *input_type = &input_type_array[neuron_index]; // Get threshold and additional input parameters for this neuron + // TODO: for some reason threshold is incorporated into neuron_model + // for this neuron; investigate if it can be separated // threshold_type_pointer_t threshold_type = // &threshold_type_array[neuron_index]; additional_input_t *additional_input = @@ -256,7 +272,7 @@ static void neuron_impl_do_timestep_update( synapse_types_t *synapse_type = &synapse_types_array[neuron_index]; - // This would be where a steps per timestep loop begins if desired + // TODO: This would be where a steps per timestep loop begins if desired // Get the voltage state_t voltage = neuron_model_get_membrane_voltage(neuron); @@ -430,7 +446,7 @@ static void neuron_impl_store_neuron_parameters( } log_debug("****** STORING COMPLETE ******"); - log_debug("neuron 0 'global' parameters, core_target_rate, core_pop_rate %k %k", + log_info("neuron 0 'global' parameters, core_target_rate, core_pop_rate %k %k", &neuron_array[0].core_target_rate, &neuron_array[0].core_pop_rate); } diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 56d0bfa51bd..572f9f074eb 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -90,6 +90,9 @@ typedef enum } left_right_state_t; // Left right parameters +// TODO: should these be set as parameters elsewhere or remain as constants +// 
that are just in the C code? +// (Conversely, should any current parameters become constants?) left_right_state_t current_state = 0; uint32_t current_time = 0; uint32_t cue_number = 0; @@ -100,8 +103,8 @@ uint32_t duration_of_cue = 100; // ms uint32_t wait_before_result = 1000; // ms but should be a range between 500-1500 uint32_t prompt_duration = 150; //ms bool start_prompt = false; -accum softmax_0 = 0k; -accum softmax_1 = 0k; +REAL softmax_0 = 0k; +REAL softmax_1 = 0k; bool completed_broadcast = true; @@ -258,7 +261,6 @@ static void neuron_impl_do_timestep_update( // Get the neuron itself neuron_t *neuron = &neuron_array[neuron_index]; -// bool spike = false; // Get the input_type parameters and voltage for this neuron input_type_t *input_type = &input_type_array[neuron_index]; @@ -342,11 +344,6 @@ static void neuron_impl_do_timestep_update( // This sends a "completed" signal send_spike_mc_payload( neuron_keys[neuron_index], bitsk(neuron->cross_entropy)); -// while (!spin1_send_mc_packet( -// neuron_keys[neuron_index], -// bitsk(neuron->cross_entropy), 1)) { -// spin1_delay_us(1); -// } } } @@ -362,11 +359,16 @@ static void neuron_impl_do_timestep_update( if ((time - current_time) % (wait_between_cues + duration_of_cue) == wait_between_cues){ // pick new value and broadcast -// REAL random_value = kdivui( -// (REAL)(mars_kiss64_seed(neuron->kiss_seed)), UINT32_MAX); // 0-1 - REAL random_value = ( - (REAL)mars_kiss64_seed(neuron->kiss_seed) / (REAL)UINT32_MAX); // 0-1 - if (random_value < 0.5k) { +// REAL random_value = ( +// REAL)(mars_kiss64_seed(neuron->kiss_seed) / (REAL)0xffffffff); // 0-1 + // The above does not actually give a REAL between 0 and 1 + // since (REAL)0xffffffff = -1.0k; however it's pretty much + // what we're looking for as it converts an uint32_t to a + // (signed) REAL, so the test just needs to be positive vs + // negative, removing the need for any dividing + REAL random_value = kbits( + mars_kiss64_seed(neuron->kiss_seed)); + if 
(random_value < ZERO) { current_cue_direction = 0; } else{ @@ -378,8 +380,6 @@ static void neuron_impl_do_timestep_update( for (int j = current_cue_direction*neuron->p_pop_size; j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++){ send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); -// spin1_send_mc_packet( -// neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); } } } @@ -392,8 +392,6 @@ static void neuron_impl_do_timestep_update( for (int j = current_cue_direction*neuron->p_pop_size; j < current_cue_direction*neuron->p_pop_size + neuron->p_pop_size; j++) { send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); -// spin1_send_mc_packet( -// neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); } if (cue_number >= neuron->number_of_cues) { current_state = (current_state + 1) % 3; @@ -422,8 +420,6 @@ static void neuron_impl_do_timestep_update( for (int j = 2*neuron->p_pop_size; j < 2*neuron->p_pop_size + neuron->p_pop_size; j++){ send_spike_mc_payload(neuron->p_key | j, bitsk(payload)); -// spin1_send_mc_packet( -// neuron->p_key | j, bitsk(payload), WITH_PAYLOAD); } } } @@ -432,10 +428,9 @@ static void neuron_impl_do_timestep_update( if (neuron_index == 2) { // Switched to always broadcasting error but with packet start_prompt = false; - accum exp_0 = expk(neuron->readout_V_0);// * 0.1k); - accum exp_1 = expk(neuron->readout_V_1);// * 0.1k); + REAL exp_0 = expk(neuron->readout_V_0);// * 0.1k); + REAL exp_1 = expk(neuron->readout_V_1);// * 0.1k); - // TODO: I'm not sure how an exponential can be zero? 
// Set up softmax calculation if (exp_0 == 0k && exp_1 == 0k) { if (neuron->readout_V_0 > neuron->readout_V_1) { @@ -448,9 +443,11 @@ static void neuron_impl_do_timestep_update( } } else { + softmax_0 = exp_0 / (exp_1 + exp_0); + softmax_1 = exp_1 / (exp_1 + exp_0); // These divides are okay in kdivk because exp is always positive - softmax_0 = kdivk(exp_0, (exp_1 + exp_0)); - softmax_1 = kdivk(exp_1, (exp_1 + exp_0)); +// softmax_0 = kdivk(exp_0, (exp_1 + exp_0)); +// softmax_1 = kdivk(exp_1, (exp_1 + exp_0)); } // What to do if log(0)? @@ -461,7 +458,6 @@ static void neuron_impl_do_timestep_update( glob_neuron->cross_entropy = -logk(softmax_1); } learning_signal = softmax_0; -// is_it_right = 1; } else{ for (uint32_t glob_n = 0; glob_n < n_neurons; glob_n++) { @@ -470,14 +466,9 @@ static void neuron_impl_do_timestep_update( glob_neuron->cross_entropy = -logk(softmax_0); } learning_signal = softmax_0 - 1.k; -// is_it_right = 0; } if (use_key) { send_spike_mc_payload(neuron_keys[neuron_index], bitsk(learning_signal)); -// while (!spin1_send_mc_packet( -// neuron_keys[neuron_index], bitsk(learning_signal), 1 )) { -// spin1_delay_us(1); -// } } } @@ -491,8 +482,6 @@ static void neuron_impl_do_timestep_update( for (int j = 2*neuron->p_pop_size; j < 2*neuron->p_pop_size + neuron->p_pop_size; j++){ send_spike_mc_payload(neuron->p_key | j, payload); -// spin1_send_mc_packet( -// neuron->p_key | j, payload, WITH_PAYLOAD); } } } @@ -521,16 +510,6 @@ static void neuron_impl_do_timestep_update( neuron->syn_state[0].delta_w); } - // This model doesn't spike so this can be commented out -// if (spike) { -// // Call relevant model-based functions -// // Tell the neuron model -// // neuron_model_has_spiked(neuron); -// -// // Tell the additional input -// additional_input_has_spiked(additional_input); -// } - // Shape the existing input according to the included rule synapse_types_shape_input(synapse_type); diff --git 
a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 1641472517d..ff6cc5426c0 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -27,7 +27,7 @@ static bool printed_value = false; extern REAL learning_signal; extern uint32_t time; // this is probably unnecessary -extern uint32_t syn_dynamics_neurons_in_partition; +extern uint32_t neuron_impl_neurons_in_partition; typedef struct eprop_syn_state_t { REAL delta_w; // weight change to apply @@ -99,7 +99,6 @@ struct neuron_params_t { eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; }; - //! eprop neuron state struct neuron_t { // membrane voltage [mV] @@ -162,8 +161,7 @@ struct neuron_t { eprop_syn_state_t syn_state[SYNAPSES_PER_NEURON]; }; - -// TODO: use the threshold type for this instead +// TODO: Can we use the threshold type for this instead? 
static inline void threshold_type_update_threshold(state_t z, neuron_t *threshold_type){ @@ -203,9 +201,9 @@ static inline void neuron_model_initialise( state->V_reset = params->V_reset; state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); -// log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract_ms %k T_refract %d", -// state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, -// state->refract_timer, state->V_reset, params->T_refract_ms, state->T_refract); + log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract_ms %k T_refract %d", + state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, + state->refract_timer, state->V_reset, params->T_refract_ms, state->T_refract); // for everything else just copy across for now state->z = params->z; @@ -214,25 +212,26 @@ static inline void neuron_model_initialise( state->B = params->B; state->b = params->b; state->b_0 = params->b_0; - state->e_to_dt_on_tau_a = expk(-kdivk(ts, params->tau_a)); + REAL exp_tau_a = expk(-kdivk(ts, params->tau_a)); + state->e_to_dt_on_tau_a = (UFRACT) exp_tau_a; state->beta = params->beta; - state->adpt = 1 - expk(-kdivk(ts, params->tau_a)); + state->adpt = (UFRACT) (1.0k - exp_tau_a); state->scalar = params->scalar; state->L = params->L; state->w_fb = params->w_fb; state->window_size = params->window_size; state->number_of_cues = params->number_of_cues; -// log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", -// state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); + log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", + state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); state->core_pop_rate = params->pop_rate; state->core_target_rate = params->target_rate; state->rate_exp_TC = expk(-kdivk(ts, params->tau_err)); state->eta = 
params->eta; -// log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", -// state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); + log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", + state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; @@ -265,20 +264,20 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par } // simple Leaky I&F ODE -static inline void lif_neuron_closed_form( +static inline void lif_eprop_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep, REAL B_t) { REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; // update membrane voltage neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) - - neuron->z * B_t; // this line achieves reset (?) + - neuron->z * B_t; // this line achieves reset (Comment not needed?) } state_t neuron_model_state_update( uint16_t num_excitatory_inputs, input_t* exc_input, uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, REAL current_offset, neuron_t *restrict neuron, // this has a *restrict on it in LIF? 
+ input_t external_bias, REAL current_offset, neuron_t *restrict neuron, REAL B_t) { log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); @@ -288,13 +287,13 @@ state_t neuron_model_state_update( input_t input_this_timestep = exc_input[0] + exc_input[1] + neuron->I_offset + external_bias + current_offset; - lif_neuron_closed_form( + lif_eprop_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep, B_t); // If outside of the refractory period if (neuron->refract_timer <= 0) { // Allow spiking again - neuron->A = 1; + neuron->A = 1.0k; } else { // Neuron cannot fire, as neuron->A=0; // countdown refractory timer @@ -308,7 +307,7 @@ state_t neuron_model_state_update( REAL psi_temp2 = ((absk(psi_temp1))); neuron->psi = ((1.0k - psi_temp2) > 0.0k)? (1.0k/neuron->b_0) * - 0.3k * //todo why is this commented? + 0.3k * (1.0k - psi_temp2) : 0.0k; neuron->psi *= neuron->A; @@ -319,8 +318,7 @@ state_t neuron_model_state_update( uint32_t total_recurrent_synapses_per_neuron = 0; //todo should this be fixed? uint32_t recurrent_offset = 100; - // TODO: check if this has already been calculated above... - REAL rho = neuron->e_to_dt_on_tau_a; // decay_s1615(1.k, neuron->e_to_dt_on_tau_a); + REAL rho = decay_s1615(1.0k, neuron->e_to_dt_on_tau_a); // TODO: Is there a better way of doing this? REAL accum_time = (accum)(time%neuron->window_size) * 0.001k; @@ -328,8 +326,8 @@ state_t neuron_model_state_update( accum_time += 1.k; } + // Calculate the membrane error REAL v_mem_error; - if (neuron->V_membrane > neuron->B){ v_mem_error = neuron->V_membrane - neuron->B; } @@ -345,7 +343,7 @@ state_t neuron_model_state_update( // / ((accum)(time%1300) // / (1.225k // 00000!!!!! 
/ (accum_time - * (accum)syn_dynamics_neurons_in_partition)) + * (accum)neuron_impl_neurons_in_partition)) - neuron->core_target_rate; // hardcoded reset @@ -353,14 +351,13 @@ state_t neuron_model_state_update( printed_value = true; } if (time % neuron->window_size == 0){ - // TODO: does this need editing to be done for all neurons? -// global_parameters->core_pop_rate = 0.k; printed_value = false; } // Calculate new learning signal REAL new_learning_signal = (learning_signal * neuron->w_fb) + v_mem_error; + // TODO: magic constants need naming at least (and passing in?) uint32_t test_length = (150*neuron->number_of_cues)+1000+150; if(neuron->number_of_cues == 0) { test_length = neuron->window_size; @@ -378,7 +375,7 @@ state_t neuron_model_state_update( // eta used to be a global parameter, but now just copy from neuron REAL local_eta = neuron->eta; - // Reset parameter check + // Reset relevant parameters ahead of filtering if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ neuron->B = neuron->b_0; neuron->b = 0.k; @@ -491,7 +488,8 @@ state_t neuron_model_state_update( void neuron_model_has_spiked(neuron_t *restrict neuron) { // reset z to zero neuron->z = 0; - // TODO: Not sure this should be commented out + // TODO: Not sure this should be commented out but I think the change + // in the lif_eprop_neuron_closed_form(...) 
function possibly makes it redundant // neuron->V_membrane = neuron->V_rest; // Set refractory timer neuron->refract_timer = neuron->T_refract - 1; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 505aca03c1a..77617b0142f 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -160,7 +160,6 @@ static inline void neuron_model_initialise( state->ticks_per_second = params->ticks_per_second; state->readout_V_0 = params->readout_V_0; state->readout_V_1 = params->readout_V_1; -// REAL prob_command; state->rate_on = params->rate_on; state->rate_off = params->rate_off; state->mean_0 = params->mean_0; @@ -171,11 +170,11 @@ static inline void neuron_model_initialise( state->eta = params->eta; state->number_of_cues = params->number_of_cues; -// log_info("Check p_key %u p_pop_size %u", params->p_key, params->p_pop_size); -// log_info("Check number_of_cues %u eta %k", params->number_of_cues, params->eta); -// log_info("mean_0 %k mean_1 %k rate_on %k rate_off %k readout_V_0 %k readout_V_1 %k", -// params->mean_0, params->mean_1, params->rate_on, params->rate_off, -// params->readout_V_0, params->readout_V_1); + log_info("Check p_key %u p_pop_size %u", params->p_key, params->p_pop_size); + log_info("Check number_of_cues %u eta %k", params->number_of_cues, params->eta); + log_info("mean_0 %k mean_1 %k rate_on %k rate_off %k readout_V_0 %k readout_V_1 %k", + params->mean_0, params->mean_1, params->rate_on, params->rate_off, + params->readout_V_0, params->readout_V_1); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; @@ -183,7 +182,6 @@ static inline void neuron_model_initialise( } static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *params) { - // TODO: probably 
more parameters need copying across at this point, syn_state for a start params->V_init = state->V_membrane; params->refract_timer_init = state->refract_timer; params->L = state->L; @@ -194,7 +192,6 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par } } - // simple Leaky I&F ODE static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 140a693ffa0..5fb5a4abb51 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -73,7 +73,7 @@ extern neuron_t *neuron_array; //uint32_t num_plastic_pre_synaptic_events = 0; //uint32_t plastic_saturation_count = 0; -uint32_t syn_dynamics_neurons_in_partition; +extern uint32_t neuron_impl_neurons_in_partition; //--------------------------------------- // Macros @@ -336,7 +336,7 @@ static inline final_state_t eprop_plasticity_update( // Calculate regularisation error // REAL reg_error = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike - REAL reg_error = neuron_array[0].core_target_rate - (neuron_array[0].core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike + REAL reg_error = neuron_array[0].core_target_rate - (neuron_array[0].core_pop_rate / neuron_impl_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / 
syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike // REAL reg_error = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; diff --git a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c index 6f38130cb7a..501bb2f63c0 100644 --- a/neural_modelling/src/spike_source/poisson/spike_source_poisson.c +++ b/neural_modelling/src/spike_source/poisson/spike_source_poisson.c @@ -364,8 +364,8 @@ void set_spike_source_rate(uint32_t sub_id, unsigned long accum rate) { REAL rate_per_tick = kbits( (__U64(bitsk(rate)) * __U64(bitsulr(ssp_params.seconds_per_tick))) >> 32); - log_debug("Setting rate of %u to %kHz (%k per tick)", - sub_id, rate, rate_per_tick); + log_debug("At time %u setting rate of %u to %k Hz (%k per tick)", + time, sub_id, (accum) rate, rate_per_tick); spike_source_t *spike_source = &source[sub_id]; if (rate_per_tick >= ssp_params.slow_rate_per_tick_cutoff) { @@ -417,7 +417,8 @@ static inline void set_spike_source_details(uint32_t id, bool rate_changed) { log_debug("Source %u is at index %u", id, index); source_details details = source_data[id]->details[index]; if (rate_changed) { - log_debug("Setting rate of %u to %k at %u", id, (s1615) details.rate, time); + log_debug("At time %u setting rate of %u to %k", + time, id, (s1615) details.rate); set_spike_source_rate(id, details.rate); } spike_source_t *p = &(source[id]); diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 151975a200b..642a4fca0f8 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -222,10 +222,10 @@ def add_parameters(self, parameters): 
parameters[W_FB] = self.__w_fb parameters[WINDOW_SIZE] = self.__window_size # These should probably have defaults earlier than this - parameters[SEED1] = 10065 - parameters[SEED2] = 232 - parameters[SEED3] = 3634 - parameters[SEED4] = 4877 + parameters[SEED1] = 1 # 10065 + parameters[SEED2] = 2 # 232 + parameters[SEED3] = 3 # 3634 + parameters[SEED4] = 4 # 4877 # parameters[PROB_COMMAND] = self._prob_command parameters[RATE_ON] = self.__rate_on From 888e3d636adb73ba9f56352efaf3671c85873499 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 26 May 2023 08:43:14 +0100 Subject: [PATCH 099/123] Turn non-eprop models back on again --- neural_modelling/Makefile | 4 ++-- neural_modelling/makefiles/neuron/Makefile | 4 ++-- .../implementations/neuron_impl_external_devices.h | 2 +- .../src/neuron/implementations/neuron_impl_standard.h | 2 +- neural_modelling/src/neuron/models/neuron_model.h | 4 ++-- .../neuron/models/neuron_model_eprop_adaptive_impl.h | 4 ++-- .../src/neuron/models/neuron_model_izh_impl.h | 10 +++++++++- .../models/neuron_model_left_right_readout_impl.h | 4 ++-- .../src/neuron/models/neuron_model_lif_impl.h | 9 ++++++++- .../neuron/models/neuron_model_sinusoid_readout_impl.h | 4 ++-- .../synapse_dynamics_stdp_izhikevich_neuromodulation.c | 4 ++-- .../plasticity/stdp/synapse_dynamics_stdp_mad_impl.c | 4 ++-- .../synapse_structure_weight_accumulator_impl.h | 4 ++-- .../synapse_structure/synapse_structure_weight_impl.h | 4 ++-- .../synapse_structure_weight_state_accumulator_impl.h | 4 ++-- ...se_structure_weight_state_accumulator_window_impl.h | 4 ++-- .../weight_dependence/weight_additive_one_term_impl.h | 2 +- .../weight_dependence/weight_additive_two_term_impl.h | 2 +- .../weight_dependence/weight_multiplicative_impl.h | 2 +- 19 files changed, 46 insertions(+), 31 deletions(-) diff --git a/neural_modelling/Makefile b/neural_modelling/Makefile index c79e197e95e..07ad399fe0f 100644 --- a/neural_modelling/Makefile +++ b/neural_modelling/Makefile @@ -16,10 
+16,10 @@ BUILDS = synapse_expander \ spike_source/poisson \ delay_extension \ robot_motor_control \ + neuron_only \ + synapse_only \ neuron \ local_only_combined -# neuron_only \ - synapse_only \ DIRS = $(BUILDS:%=makefiles/%) diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index dc60011f35e..5c690cf40a7 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -17,7 +17,7 @@ MODELS = eprop_adaptive \ sinusoid_readout \ sinusoid_readout_stdp_mad_eprop_reg \ left_right_readout_stdp_mad_eprop_reg \ - # IF_curr_exp \ + IF_curr_exp \ IF_cond_exp \ IZK_curr_exp \ IZK_cond_exp \ @@ -26,7 +26,7 @@ MODELS = eprop_adaptive \ IF_curr_exp_ca2_adaptive ifneq ($(SPYNNAKER_DEBUG), DEBUG) -# MODELS += external_device_lif_control \ + MODELS += external_device_lif_control \ IF_curr_alpha \ IF_cond_exp_stoc \ IF_curr_exp_sEMD \ diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h b/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h index 5c10b1a927e..a3c65a1dd29 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_external_devices.h @@ -312,7 +312,7 @@ static void neuron_impl_do_timestep_update( state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, - 0, current_offset, this_neuron); + 0, current_offset, this_neuron, 0.0k); // determine if a packet should fly will_fire = _test_will_fire(the_packet_firing); diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index c94bd250277..ce59a02a7b6 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -310,7 +310,7 @@ 
static void neuron_impl_do_timestep_update( state_t result = neuron_model_state_update( NUM_EXCITATORY_RECEPTORS, exc_input_values, NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, current_offset, this_neuron); + external_bias, current_offset, this_neuron, 0.0k); // determine if a spike should occur bool spike_now = diff --git a/neural_modelling/src/neuron/models/neuron_model.h b/neural_modelling/src/neuron/models/neuron_model.h index f484821996a..47f3bff4e7d 100644 --- a/neural_modelling/src/neuron/models/neuron_model.h +++ b/neural_modelling/src/neuron/models/neuron_model.h @@ -70,8 +70,8 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! \return state_t which is the value to be compared with a threshold value //! to determine if the neuron has spiked static state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, + uint16_t num_excitatory_inputs, const input_t* exc_input, + uint16_t num_inhibitory_inputs, const input_t* inh_input, input_t external_bias, REAL current_offset, neuron_t *restrict neuron, REAL B_t); diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index ff6cc5426c0..9974183bcde 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -275,8 +275,8 @@ static inline void lif_eprop_neuron_closed_form( } state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, + uint16_t num_excitatory_inputs, const input_t* exc_input, + uint16_t num_inhibitory_inputs, const input_t* inh_input, input_t external_bias, REAL current_offset, neuron_t *restrict neuron, REAL B_t) { diff --git a/neural_modelling/src/neuron/models/neuron_model_izh_impl.h 
b/neural_modelling/src/neuron/models/neuron_model_izh_impl.h index 5b04806179e..5ffb1edcd91 100644 --- a/neural_modelling/src/neuron/models/neuron_model_izh_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_izh_impl.h @@ -65,6 +65,11 @@ struct neuron_t { REAL reset_h; }; +// Mark a value as possibly unused while not using any instructions, guaranteed +#ifndef __use +#define __use(x) do { (void) (x); } while (0) +#endif + static inline void neuron_model_initialise(neuron_t *state, neuron_params_t *params, uint32_t n_steps_per_timestep) { state->A = params->A; @@ -166,7 +171,10 @@ static inline void rk2_kernel_midpoint( static inline state_t neuron_model_state_update( uint16_t num_excitatory_inputs, const input_t *exc_input, uint16_t num_inhibitory_inputs, const input_t *inh_input, - input_t external_bias, REAL current_offset, neuron_t *restrict neuron) { + input_t external_bias, REAL current_offset, neuron_t *restrict neuron, + REAL B_t) { + __use(B_t); + REAL total_exc = ZERO; REAL total_inh = ZERO; diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 77617b0142f..1d873f7cad8 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -203,8 +203,8 @@ static inline void lif_neuron_closed_form( } state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, + uint16_t num_excitatory_inputs, const input_t* exc_input, + uint16_t num_inhibitory_inputs, const input_t* inh_input, input_t external_bias, REAL current_offset, neuron_t *restrict neuron, REAL B_t) { diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index c44b1a0decc..0e88bb31be6 100644 --- 
a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -81,6 +81,11 @@ struct neuron_t { int32_t T_refract; }; +// Mark a value as possibly unused while not using any instructions, guaranteed +#ifndef __use +#define __use(x) do { (void) (x); } while (0) +#endif + //! \brief Performs a ceil operation on an accum //! \param[in] value The value to ceil //! \return The ceil of the value @@ -142,7 +147,9 @@ static inline void lif_neuron_closed_form( static inline state_t neuron_model_state_update( uint16_t num_excitatory_inputs, const input_t *exc_input, uint16_t num_inhibitory_inputs, const input_t *inh_input, - input_t external_bias, REAL current_offset, neuron_t *restrict neuron) { + input_t external_bias, REAL current_offset, neuron_t *restrict neuron, + REAL B_t) { + __use(B_t); // If outside of the refractory period if (neuron->refract_timer <= 0) { diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index b6907bb7c1b..4220f6ebbfc 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -158,8 +158,8 @@ static inline void lif_neuron_closed_form( } state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, + uint16_t num_excitatory_inputs, const input_t* exc_input, + uint16_t num_inhibitory_inputs, const input_t* inh_input, input_t external_bias, REAL current_offset, neuron_t *restrict neuron, REAL B_t) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c index 92e083e4767..b6361c91626 100644 --- 
a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c @@ -104,7 +104,7 @@ static inline nm_final_state_t get_nm_final_state( nm_final_state_t final_state = { .weight=(weight_t) (bitsk(update_state.weight) >> update_state.weight_shift), .final_state=synapse_structure_get_final_state( - update_state.eligibility_state) + update_state.eligibility_state, 0.0k) }; return final_state; } @@ -340,7 +340,7 @@ void synapse_dynamics_print_plastic_synapses( update_state_t update_state = synapse_structure_get_update_state( *plastic_words++, synapse_type); final_state_t final_state = synapse_structure_get_final_state( - update_state); + update_state, 0.0k); weight_t weight = synapse_structure_get_final_weight(final_state); log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index 845c7274b3f..a30f7043aa6 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -99,7 +99,7 @@ static inline final_state_t plasticity_update_synapse( } // Return final synaptic word and weight - return synapse_structure_get_final_state(current_state); + return synapse_structure_get_final_state(current_state, 0.0k); } bool synapse_dynamics_initialise( @@ -150,7 +150,7 @@ void synapse_dynamics_print_plastic_synapses( update_state_t update_state = synapse_structure_get_update_state( *plastic_words++, synapse_type); final_state_t final_state = synapse_structure_get_final_state( - update_state); + update_state, 0.0k); weight_t weight = synapse_structure_get_final_weight(final_state); log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); diff --git 
a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_accumulator_impl.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_accumulator_impl.h index 175cb04fbca..2945a463da0 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_accumulator_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_accumulator_impl.h @@ -66,9 +66,9 @@ static inline update_state_t synapse_structure_get_update_state( //! \param[in] state: the update state //! \return the final state static inline final_state_t synapse_structure_get_final_state( - update_state_t state) { + update_state_t state, REAL reg_error) { // Get weight from state - weight_t weight = weight_get_final(state.weight_state); + weight_t weight = weight_get_final(state.weight_state, reg_error); // Build this into synaptic word along with updated accumulator and state return (final_state_t) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h index c2155e2c885..96fe9775314 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_impl.h @@ -51,8 +51,8 @@ static inline update_state_t synapse_structure_get_update_state( //! \param[in] state: the update state //! 
\return the final state static inline final_state_t synapse_structure_get_final_state( - update_state_t state) { - return weight_get_final(state); + update_state_t state, REAL reg_error) { + return weight_get_final(state, reg_error); } //--------------------------------------- diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_impl.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_impl.h index 621a5e0251e..112decda95f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_impl.h @@ -66,9 +66,9 @@ static inline update_state_t synapse_structure_get_update_state( //! \param[in] state: the update state //! \return the final state static inline final_state_t synapse_structure_get_final_state( - update_state_t state) { + update_state_t state, REAL reg_error) { // Get weight from state - weight_t weight = weight_get_final(state.weight_state); + weight_t weight = weight_get_final(state.weight_state, reg_error); // Build this into synaptic word along with updated accumulator and state return (final_state_t) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_window_impl.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_window_impl.h index a13a76b8f7e..7818b9c774a 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_window_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_structure/synapse_structure_weight_state_accumulator_window_impl.h @@ -67,9 +67,9 @@ static inline update_state_t synapse_structure_get_update_state( //! \param[in] state: the update state //! 
\return the final state static inline final_state_t synapse_structure_get_final_state( - update_state_t state) { + update_state_t state, REAL reg_error) { // Get weight from state - weight_t weight = weight_get_final(state.weight_state); + weight_t weight = weight_get_final(state.weight_state, reg_error); // Build this into synaptic word along with updated accumulator and state return (final_state_t) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h index fd58a786276..ef3426615b3 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h @@ -102,7 +102,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \param[in] state: The updated weight state * \return The new weight. */ -static inline weight_t weight_get_final(weight_state_t state) { +static inline weight_t weight_get_final(weight_state_t state, REAL reg_error) { return (weight_t) (bitsk(state.weight) >> state.weight_shift); } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h index 6e865b96d01..ac1474b022f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h @@ -108,7 +108,7 @@ static inline weight_state_t weight_two_term_apply_potentiation( * \param[in] state: The updated weight state * \return The new weight. 
*/ -static inline weight_t weight_get_final(weight_state_t state) { +static inline weight_t weight_get_final(weight_state_t state, REAL reg_error) { return (weight_t) (bitsk(state.weight) >> state.weight_shift); } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h index 6d147aa37be..65bae5c1370 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h @@ -109,7 +109,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \param[in] state: The updated weight state * \return The new weight. */ -static inline weight_t weight_get_final(weight_state_t state) { +static inline weight_t weight_get_final(weight_state_t state, REAL reg_error) { return (weight_t) (bitsk(state.weight) >> state.weight_shift); } From 8f6f9a1d9fd0d6982c96f2340839793c5a3fec7b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 26 May 2023 17:03:06 +0100 Subject: [PATCH 100/123] Tidy up code, vera line lengths, delete unused model --- .../neuron_impl_left_right_readout.h | 3 +- .../neuron_impl_store_recall_readout.h | 480 ------------------ .../models/neuron_model_eprop_adaptive_impl.h | 41 +- .../neuron_model_left_right_readout_impl.h | 3 +- .../neuron_model_sinusoid_readout_impl.h | 5 +- .../neuron_model_store_recall_readout_impl.c | 98 ---- .../neuron_model_store_recall_readout_impl.h | 68 --- .../synapse_dynamics_eprop_adaptive_impl.c | 252 +-------- ...synapse_dynamics_left_right_readout_impl.c | 214 +------- .../synapse_dynamics_sinusoid_readout_impl.c | 200 +------- .../weight_dependence/weight_eprop_reg_impl.h | 50 +- .../synapse_type_eprop_adaptive.h | 52 +- 12 files changed, 106 insertions(+), 1360 deletions(-) delete mode 100644 
neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h delete mode 100644 neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c delete mode 100644 neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 572f9f074eb..4198bf7f972 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -82,8 +82,7 @@ extern REAL learning_signal; //uint32_t choice = 0; // Left right state parameters -typedef enum -{ +typedef enum { STATE_CUE, STATE_WAITING, STATE_PROMPT, diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h deleted file mode 100644 index 20037f875c6..00000000000 --- a/neural_modelling/src/neuron/implementations/neuron_impl_store_recall_readout.h +++ /dev/null @@ -1,480 +0,0 @@ -#ifndef _NEURON_IMPL_STANDARD_H_ -#define _NEURON_IMPL_STANDARD_H_ - -#include "neuron_impl.h" - -// Includes for model parts used in this implementation -#include -#include -#include -#include -#include - -// Further includes -#include -#include -#include -#include -#include -#include - -#define V_RECORDING_INDEX 0 -#define GSYN_EXCITATORY_RECORDING_INDEX 1 -#define GSYN_INHIBITORY_RECORDING_INDEX 2 - -#ifndef NUM_EXCITATORY_RECEPTORS -#define NUM_EXCITATORY_RECEPTORS 1 -#error NUM_EXCITATORY_RECEPTORS was undefined. It should be defined by a synapse\ - shaping include -#endif - -#ifndef NUM_INHIBITORY_RECEPTORS -#define NUM_INHIBITORY_RECEPTORS 1 -#error NUM_INHIBITORY_RECEPTORS was undefined. It should be defined by a synapse\ - shaping include -#endif - -//! 
Array of neuron states -static neuron_pointer_t neuron_array; - -//! Input states array -static input_type_pointer_t input_type_array; - -//! Additional input array -static additional_input_pointer_t additional_input_array; - -//! Threshold states array -static threshold_type_pointer_t threshold_type_array; - -//! Global parameters for the neurons -static global_neuron_params_pointer_t global_parameters; - -// The synapse shaping parameters -static synapse_param_t *neuron_synapse_shaping_params; - -static REAL next_spike_time = 0; -static uint32_t timer = 0; -static uint32_t target_ind = 0; - -// Store recall parameters -typedef enum -{ - STATE_IDLE, - STATE_STORING, - STATE_STORED, - STATE_RECALL, - STATE_SHIFT, -} current_state_t; - -uint32_t store_recall_state = STATE_IDLE; // 0: idle, 1: storing, 2:stored, 3:recall -uint32_t stored_value = 0; -uint32_t broacast_value = 0; -REAL ticks_for_mean = 0; - -static bool neuron_impl_initialise(uint32_t n_neurons) { - - // allocate DTCM for the global parameter details - if (sizeof(global_neuron_params_t) > 0) { - global_parameters = (global_neuron_params_t *) spin1_malloc( - sizeof(global_neuron_params_t)); - if (global_parameters == NULL) { - log_error("Unable to allocate global neuron parameters" - "- Out of DTCM"); - return false; - } - } - - // Allocate DTCM for neuron array - if (sizeof(neuron_t) != 0) { - neuron_array = (neuron_t *) spin1_malloc(n_neurons * sizeof(neuron_t)); - if (neuron_array == NULL) { - log_error("Unable to allocate neuron array - Out of DTCM"); - return false; - } - } - - // Allocate DTCM for input type array and copy block of data - if (sizeof(input_type_t) != 0) { - input_type_array = (input_type_t *) spin1_malloc( - n_neurons * sizeof(input_type_t)); - if (input_type_array == NULL) { - log_error("Unable to allocate input type array - Out of DTCM"); - return false; - } - } - - // Allocate DTCM for additional input array and copy block of data - if (sizeof(additional_input_t) != 0) { - 
additional_input_array = (additional_input_pointer_t) spin1_malloc( - n_neurons * sizeof(additional_input_t)); - if (additional_input_array == NULL) { - log_error("Unable to allocate additional input array" - " - Out of DTCM"); - return false; - } - } - - // Allocate DTCM for threshold type array and copy block of data - if (sizeof(threshold_type_t) != 0) { - threshold_type_array = (threshold_type_t *) spin1_malloc( - n_neurons * sizeof(threshold_type_t)); - if (threshold_type_array == NULL) { - log_error("Unable to allocate threshold type array - Out of DTCM"); - return false; - } - } - - // Allocate DTCM for synapse shaping parameters - if (sizeof(synapse_param_t) != 0) { - neuron_synapse_shaping_params = (synapse_param_t *) spin1_malloc( - n_neurons * sizeof(synapse_param_t)); - if (neuron_synapse_shaping_params == NULL) { - log_error("Unable to allocate synapse parameters array" - " - Out of DTCM"); - return false; - } - } - - // Seed the random input - validate_mars_kiss64_seed(global_parameters->kiss_seed); - - // Initialise pointers to Neuron parameters in STDP code -// synapse_dynamics_set_neuron_array(neuron_array); - log_info("set pointer to neuron array in stdp code"); - - return true; -} - -static void neuron_impl_add_inputs( - index_t synapse_type_index, index_t neuron_index, - input_t weights_this_timestep) { - // simple wrapper to synapse type input function - synapse_param_pointer_t parameters = - &(neuron_synapse_shaping_params[neuron_index]); - synapse_types_add_neuron_input(synapse_type_index, - parameters, weights_this_timestep); -} - -static void neuron_impl_load_neuron_parameters( - address_t address, uint32_t next, uint32_t n_neurons) { - log_debug("reading parameters, next is %u, n_neurons is %u ", - next, n_neurons); - - //log_debug("writing neuron global parameters"); - spin1_memcpy(global_parameters, &address[next], - sizeof(global_neuron_params_t)); - next += (sizeof(global_neuron_params_t) + 3) / 4; - - log_debug("reading neuron local 
parameters"); - spin1_memcpy(neuron_array, &address[next], n_neurons * sizeof(neuron_t)); - next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; - - log_debug("reading input type parameters"); - spin1_memcpy(input_type_array, &address[next], - n_neurons * sizeof(input_type_t)); - next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; - - log_debug("reading threshold type parameters"); - spin1_memcpy(threshold_type_array, &address[next], - n_neurons * sizeof(threshold_type_t)); - next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; - - log_debug("reading synapse parameters"); - spin1_memcpy(neuron_synapse_shaping_params, &address[next], - n_neurons * sizeof(synapse_param_t)); - next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; - - log_debug("reading additional input type parameters"); - spin1_memcpy(additional_input_array, &address[next], - n_neurons * sizeof(additional_input_t)); - next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; - - neuron_model_set_global_neuron_params(global_parameters); - - io_printf(IO_BUF, "\nPrinting global params\n"); - io_printf(IO_BUF, "seed 1: %u \n", global_parameters->kiss_seed[0]); - io_printf(IO_BUF, "seed 2: %u \n", global_parameters->kiss_seed[1]); - io_printf(IO_BUF, "seed 3: %u \n", global_parameters->kiss_seed[2]); - io_printf(IO_BUF, "seed 4: %u \n", global_parameters->kiss_seed[3]); - io_printf(IO_BUF, "ticks_per_second: %k \n\n", global_parameters->ticks_per_second); - io_printf(IO_BUF, "prob_command: %k \n\n", global_parameters->prob_command); - io_printf(IO_BUF, "rate on: %k \n\n", global_parameters->rate_on); - io_printf(IO_BUF, "rate off: %k \n\n", global_parameters->rate_off); - io_printf(IO_BUF, "mean 0: %k \n\n", global_parameters->mean_0); - io_printf(IO_BUF, "mean 1: %k \n\n", global_parameters->mean_1); - io_printf(IO_BUF, "poisson key: %k \n\n", global_parameters->p_key); - io_printf(IO_BUF, "poisson pop size: %k \n\n", global_parameters->p_pop_size); - - - for (index_t n = 0; n < n_neurons; n++) 
{ - neuron_model_print_parameters(&neuron_array[n]); - } - - io_printf(IO_BUF, "size of global params: %u", - sizeof(global_neuron_params_t)); - - - - #if LOG_LEVEL >= LOG_DEBUG - log_debug("-------------------------------------\n"); - for (index_t n = 0; n < n_neurons; n++) { - neuron_model_print_parameters(&neuron_array[n]); - } - log_debug("-------------------------------------\n"); - //} - #endif // LOG_LEVEL >= LOG_DEBUG -} - -static bool neuron_impl_do_timestep_update(index_t neuron_index, - input_t external_bias, state_t *recorded_variable_values) { - - // Get the neuron itself - neuron_pointer_t neuron = &neuron_array[neuron_index]; - - // Change broadcasted value and state with probability - // State - 0: idle, 1: storing, 2:stored-idle, 3:recall - if (timer % 200 == 0 && neuron_index == 2){ //todo check this isn't changing for every neuron - if (store_recall_state == STATE_RECALL || store_recall_state == STATE_STORING){ - store_recall_state = (store_recall_state + 1) % STATE_SHIFT; - } - else{ - REAL random_number = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); - if (random_number < global_parameters->prob_command){ - store_recall_state = (store_recall_state + 1) % STATE_SHIFT; - } - } - REAL switch_value = (REAL)(mars_kiss64_seed(global_parameters->kiss_seed) / (REAL)0xffffffff); - if (switch_value < 0.5){ - broacast_value = (broacast_value + 1) % 2; - } - if (store_recall_state == STATE_STORING){ - stored_value = broacast_value; - } - // send packets to the variable poissons with the updated states - for (int i = 0; i < 4; i++){ - REAL payload = 10; - if ((broacast_value == i && i < 2) || - (i == 2 && store_recall_state == STATE_STORING) || - (i == 3 && store_recall_state == STATE_RECALL)){ - payload = global_parameters->rate_on; - } - else { - payload = global_parameters->rate_off; - } - for (int j = i*global_parameters->p_pop_size; - j < i*global_parameters->p_pop_size + global_parameters->p_pop_size; j++){ - 
spin1_send_mc_packet(global_parameters->p_key | j, payload, WITH_PAYLOAD); - } - } - } - - // Get the input_type parameters and voltage for this neuron - input_type_pointer_t input_type = &input_type_array[neuron_index]; - - // Get threshold and additional input parameters for this neuron - threshold_type_pointer_t threshold_type = - &threshold_type_array[neuron_index]; - additional_input_pointer_t additional_input = - &additional_input_array[neuron_index]; - synapse_param_pointer_t synapse_type = - &neuron_synapse_shaping_params[neuron_index]; - - // Get the voltage - state_t voltage = neuron_model_get_membrane_voltage(neuron); - - - // Get the exc and inh values from the synapses - input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); - input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); - - // Call functions to obtain exc_input and inh_input - input_t* exc_input_values = input_type_get_input_value( - exc_value, input_type, NUM_EXCITATORY_RECEPTORS); - input_t* inh_input_values = input_type_get_input_value( - inh_value, input_type, NUM_INHIBITORY_RECEPTORS); - - // Sum g_syn contributions from all receptors for recording - REAL total_exc = 0; - REAL total_inh = 0; - - for (int i = 0; i < NUM_EXCITATORY_RECEPTORS-1; i++){ - total_exc += exc_input_values[i]; - } - for (int i = 0; i < NUM_INHIBITORY_RECEPTORS-1; i++){ - total_inh += inh_input_values[i]; - } - - // Call functions to get the input values to be recorded - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - - // Call functions to convert exc_input and inh_input to current - input_type_convert_excitatory_input_to_current( - exc_input_values, input_type, voltage); - input_type_convert_inhibitory_input_to_current( - inh_input_values, input_type, voltage); - - external_bias += additional_input_get_input_value_as_current( - additional_input, voltage); - - // Reset values after 
recall - if (store_recall_state == STATE_IDLE){ - ticks_for_mean = 0; - global_parameters->mean_0 == 0; - global_parameters->mean_1 == 0; - //todo check if readout_V_0/1 need resetting too - } - - if (neuron_index == 0){ - recorded_variable_values[V_RECORDING_INDEX] = voltage; - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, -50k); - // Finally, set global membrane potential to updated value - global_parameters->readout_V_0 = result; - - } else if (neuron_index == 1){ - recorded_variable_values[V_RECORDING_INDEX] = voltage; - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron, -50k); - - // Finally, set global membrane potential to updated value - global_parameters->readout_V_1 = result; - //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// - // maybe sign of the error isn't important anymore? // - //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// - } else if (neuron_index == 2){ // this is the error source - - recorded_variable_values[V_RECORDING_INDEX] = stored_value; - // Switched to always broadcasting error but with packet - if (store_recall_state == STATE_RECALL){ //todo ensure this neuron id is correct - ticks_for_mean += 1; //todo is it a running error like this over recall? 
- // Softmax of the exc and inh inputs representing 1 and 0 respectively - // may need to scale to stop huge numbers going in the exp - global_parameters->mean_0 += global_parameters->readout_V_0; - global_parameters->mean_1 += global_parameters->readout_V_1; - accum exp_0 = expk(global_parameters->mean_0 / ticks_for_mean); - accum exp_1 = expk(global_parameters->mean_1 / ticks_for_mean); - accum softmax_0 = exp_0 / (exp_1 + exp_0); - accum softmax_1 = exp_1 / (exp_1 + exp_0); - // What to do if log(0)? - if (stored_value){ - global_parameters->cross_entropy = -logk(softmax_1); - } - else{ - global_parameters->cross_entropy = -logk(softmax_0); - } - while (!spin1_send_mc_packet( - key | neuron_index, bitsk(error), 1 )) { - spin1_delay_us(1); - } - } - timer++; - } -// else if (neuron_index == 3){ // this is the deprecated -// -// // Boundary of -0.7 because ln(0.5) =~= -0.7 representing random choice point, > -0.7 is more correct than not -// if (global_parameters->cross_entropy < -0.7){ -// // it's incorrect so change doing what you're doing or suppress synapses? -// } -// timer++; // update this here, as needs to be done once per iteration over all the neurons -// } - - // Shape the existing input according to the included rule - synapse_types_shape_input(synapse_type); - - #if LOG_LEVEL >= LOG_DEBUG - neuron_model_print_state_variables(neuron); - #endif // LOG_LEVEL >= LOG_DEBUG - - // Return the boolean to the model timestep update - return false; -} - -//! \brief stores neuron parameter back into sdram -//! 
\param[in] address: the address in sdram to start the store -static void neuron_impl_store_neuron_parameters( - address_t address, uint32_t next, uint32_t n_neurons) { - log_debug("writing parameters"); - - //log_debug("writing neuron global parameters"); - spin1_memcpy(&address[next], global_parameters, - sizeof(global_neuron_params_t)); - next += (sizeof(global_neuron_params_t) + 3) / 4; - - log_debug("writing neuron local parameters"); - spin1_memcpy(&address[next], neuron_array, - n_neurons * sizeof(neuron_t)); - next += ((n_neurons * sizeof(neuron_t)) + 3) / 4; - - log_debug("writing input type parameters"); - spin1_memcpy(&address[next], input_type_array, - n_neurons * sizeof(input_type_t)); - next += ((n_neurons * sizeof(input_type_t)) + 3) / 4; - - log_debug("writing threshold type parameters"); - spin1_memcpy(&address[next], threshold_type_array, - n_neurons * sizeof(threshold_type_t)); - next += ((n_neurons * sizeof(threshold_type_t)) + 3) / 4; - - log_debug("writing synapse parameters"); - spin1_memcpy(&address[next], neuron_synapse_shaping_params, - n_neurons * sizeof(synapse_param_t)); - next += ((n_neurons * sizeof(synapse_param_t)) + 3) / 4; - - log_debug("writing additional input type parameters"); - spin1_memcpy(&address[next], additional_input_array, - n_neurons * sizeof(additional_input_t)); - next += ((n_neurons * sizeof(additional_input_t)) + 3) / 4; -} - -#if LOG_LEVEL >= LOG_DEBUG -void neuron_impl_print_inputs(uint32_t n_neurons) { - bool empty = true; - for (index_t i = 0; i < n_neurons; i++) { - empty = empty - && (bitsk(synapse_types_get_excitatory_input( - &(neuron_synapse_shaping_params[i])) - - synapse_types_get_inhibitory_input( - &(neuron_synapse_shaping_params[i]))) == 0); - } - - if (!empty) { - log_debug("-------------------------------------\n"); - - for (index_t i = 0; i < n_neurons; i++) { - input_t input = - synapse_types_get_excitatory_input( - &(neuron_synapse_shaping_params[i])) - - synapse_types_get_inhibitory_input( - 
&(neuron_synapse_shaping_params[i])); - if (bitsk(input) != 0) { - log_debug("%3u: %12.6k (= ", i, input); - synapse_types_print_input( - &(neuron_synapse_shaping_params[i])); - log_debug(")\n"); - } - } - log_debug("-------------------------------------\n"); - } -} - -void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { - log_debug("-------------------------------------\n"); - for (index_t n = 0; n < n_neurons; n++) { - synapse_types_print_parameters(&(neuron_synapse_shaping_params[n])); - } - log_debug("-------------------------------------\n"); -} - -const char *neuron_impl_get_synapse_type_char(uint32_t synapse_type) { - return synapse_types_get_type_char(synapse_type); -} -#endif // LOG_LEVEL >= LOG_DEBUG - -#endif // _NEURON_IMPL_STANDARD_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index 9974183bcde..fd6aa8e2ce1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -201,9 +201,11 @@ static inline void neuron_model_initialise( state->V_reset = params->V_reset; state->T_refract = lif_ceil_accum(kdivk(params->T_refract_ms, ts)); - log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k refract_timer %k V_reset %k T_refract_ms %k T_refract %d", - state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, state->I_offset, - state->refract_timer, state->V_reset, params->T_refract_ms, state->T_refract); + log_info("V_membrane %k V_rest %k R_membrane %k exp_TC %k I_offset %k" + "refract_timer %k V_reset %k T_refract_ms %k T_refract %d", + state->V_membrane, state->V_rest, state->R_membrane, state->exp_TC, + state->I_offset, state->refract_timer, state->V_reset, + params->T_refract_ms, state->T_refract); // for everything else just copy across for now state->z = params->z; @@ -223,7 +225,8 @@ static inline void 
neuron_model_initialise( state->number_of_cues = params->number_of_cues; log_info("Check: z %k A %k psi %k B %k b %k b_0 %k window_size %u", - state->z, state->A, state->psi, state->B, state->b, state->b_0, state->window_size); + state->z, state->A, state->psi, state->B, state->b, state->b_0, + state->window_size); state->core_pop_rate = params->pop_rate; state->core_target_rate = params->target_rate; @@ -231,7 +234,8 @@ static inline void neuron_model_initialise( state->eta = params->eta; log_info("Check: core_pop_rate %k core_target_rate %k rate_exp_TC %k eta %k", - state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, state->eta); + state->core_pop_rate, state->core_target_rate, state->rate_exp_TC, + state->eta); for (uint32_t n_syn = 0; n_syn < SYNAPSES_PER_NEURON; n_syn++) { state->syn_state[n_syn] = params->syn_state[n_syn]; @@ -271,7 +275,7 @@ static inline void lif_eprop_neuron_closed_form( // update membrane voltage neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)) - - neuron->z * B_t; // this line achieves reset (Comment not needed?) + - neuron->z * B_t; // this line achieves reset } state_t neuron_model_state_update( @@ -359,7 +363,7 @@ state_t neuron_model_state_update( // TODO: magic constants need naming at least (and passing in?) 
uint32_t test_length = (150*neuron->number_of_cues)+1000+150; - if(neuron->number_of_cues == 0) { + if (neuron->number_of_cues == 0) { test_length = neuron->window_size; } @@ -376,7 +380,7 @@ state_t neuron_model_state_update( REAL local_eta = neuron->eta; // Reset relevant parameters ahead of filtering - if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ + if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues) { neuron->B = neuron->b_0; neuron->b = 0.k; neuron->V_membrane = neuron->V_rest; @@ -386,7 +390,7 @@ state_t neuron_model_state_update( // All subsequent operations now need doing once per eprop synapse for (uint32_t syn_ind=0; syn_ind < total_input_synapses_per_neuron; syn_ind++){ - if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ + if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues) { neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 0.k; neuron->syn_state[syn_ind].el_a = 0.k; @@ -399,7 +403,8 @@ state_t neuron_model_state_update( neuron->syn_state[syn_ind].z_bar * neuron->exp_TC + (1 - neuron->exp_TC) * - neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + neuron->syn_state[syn_ind].z_bar_inp; + // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** @@ -425,7 +430,9 @@ state_t neuron_model_state_update( // ****************************************************************** REAL this_dt_weight_change = local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; + // -= here to enable compiler to handle 
previous line (can crash when + // -ve is at beginning of previous line) // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; @@ -435,8 +442,9 @@ state_t neuron_model_state_update( } // All further operations now need doing once per recurrent eprop synapse - for (uint32_t syn_ind=recurrent_offset; syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++){ - if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues){ + for (uint32_t syn_ind=recurrent_offset; + syn_ind < total_recurrent_synapses_per_neuron+recurrent_offset; syn_ind++) { + if ((time % test_length == 0 || time % test_length == 1) && neuron->number_of_cues) { neuron->syn_state[syn_ind].z_bar_inp = 0.k; neuron->syn_state[syn_ind].z_bar = 0.k; neuron->syn_state[syn_ind].el_a = 0.k; @@ -473,7 +481,9 @@ state_t neuron_model_state_update( // ****************************************************************** REAL this_dt_weight_change = local_eta * neuron->L * neuron->syn_state[syn_ind].e_bar; - neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; // -= here to enable compiler to handle previous line (can crash when -ve is at beginning of previous line) + neuron->syn_state[syn_ind].delta_w -= this_dt_weight_change; + // -= here to enable compiler to handle previous line (can crash when + // -ve is at beginning of previous line) // reset input (can't have more than one spike per timestep neuron->syn_state[syn_ind].z_bar_inp = 0; @@ -506,7 +516,8 @@ void neuron_model_print_state_variables(const neuron_t *neuron) { log_debug("Printing synapse state values:"); for (uint32_t syn_ind=0; syn_ind < 100; syn_ind++){ - log_debug("synapse number %u delta_w, z_bar, z_bar_inp, e_bar, el_a %11.4k %11.4k %11.4k %11.4k %11.4k", + log_debug("synapse number %u delta_w, z_bar, z_bar_inp, e_bar, el_a " + "%11.4k %11.4k %11.4k %11.4k %11.4k", syn_ind, neuron->syn_state[syn_ind].delta_w, neuron->syn_state[syn_ind].z_bar, 
neuron->syn_state[syn_ind].z_bar_inp, neuron->syn_state[syn_ind].e_bar, neuron->syn_state[syn_ind].el_a); diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index 1d873f7cad8..f6f35970976 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -238,7 +238,8 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC - + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + (1.k - neuron->exp_TC) * neuron->syn_state[syn_ind].z_bar_inp; + // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** // Update cached total weight change diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index 4220f6ebbfc..0bc9506b753 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -193,9 +193,8 @@ state_t neuron_model_state_update( // ****************************************************************** neuron->syn_state[syn_ind].z_bar = neuron->syn_state[syn_ind].z_bar * neuron->exp_TC -// + (1 - neuron->exp_TC) * - + - neuron->syn_state[syn_ind].z_bar_inp; // updating z_bar is problematic, if spike could come and interrupt neuron update + + neuron->syn_state[syn_ind].z_bar_inp; + // updating z_bar is problematic, if spike could come and interrupt neuron update // ****************************************************************** 
// Update cached total weight change diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c deleted file mode 100644 index a3c424bd4cb..00000000000 --- a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.c +++ /dev/null @@ -1,98 +0,0 @@ -#include "neuron_model_store_recall_readout_impl.h" - -#include - -// simple Leaky I&F ODE -static inline void _lif_neuron_closed_form( - neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { - - REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; - - // update membrane voltage - neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); -} - -void neuron_model_set_global_neuron_params( - global_neuron_params_pointer_t params) { - use(params); - - // Does Nothing - no params -} - -state_t neuron_model_state_update( - uint16_t num_excitatory_inputs, input_t* exc_input, - uint16_t num_inhibitory_inputs, input_t* inh_input, - input_t external_bias, neuron_pointer_t neuron, input_t B_t) { - - log_debug("Exc 1: %12.6k, Exc 2: %12.6k", exc_input[0], exc_input[1]); - log_debug("Inh 1: %12.6k, Inh 2: %12.6k", inh_input[0], inh_input[1]); - - - // If outside of the refractory period - if (neuron->refract_timer <= 0) { - REAL total_exc = 0; - REAL total_inh = 0; - - total_exc += exc_input[0]; - total_inh += inh_input[0]; -// for (int i=0; i < num_excitatory_inputs; i++){ -// total_exc += exc_input[i]; -// } -// for (int i=0; i< num_inhibitory_inputs; i++){ -// total_inh += inh_input[i]; -// } - // Get the input in nA - input_t input_this_timestep = - total_exc - total_inh + external_bias + neuron->I_offset; - - _lif_neuron_closed_form( - neuron, neuron->V_membrane, input_this_timestep); - } else { - - // countdown refractory timer - neuron->refract_timer -= 1; - } - return neuron->V_membrane; -} - -void neuron_model_has_spiked(neuron_pointer_t neuron) { - - // reset 
membrane voltage - neuron->V_membrane = neuron->V_reset; - - // reset refractory timer - neuron->refract_timer = neuron->T_refract; -} - -state_t neuron_model_get_membrane_voltage(neuron_pointer_t neuron) { - return neuron->V_membrane; -} - -void neuron_model_print_state_variables(restrict neuron_pointer_t neuron) { - log_debug("V membrane = %11.4k mv", neuron->V_membrane); -} - -void neuron_model_print_parameters(restrict neuron_pointer_t neuron) { - io_printf(IO_BUF, "V reset = %11.4k mv\n", neuron->V_reset); - io_printf(IO_BUF, "V rest = %11.4k mv\n", neuron->V_rest); - - io_printf(IO_BUF, "I offset = %11.4k nA\n", neuron->I_offset); - io_printf(IO_BUF, "R membrane = %11.4k Mohm\n", neuron->R_membrane); - - io_printf(IO_BUF, "exp(-ms/(RC)) = %11.4k [.]\n", neuron->exp_TC); - - io_printf(IO_BUF, "T refract = %u timesteps\n", neuron->T_refract); - io_printf(IO_BUF, "mean_isi_ticks = %k\n", neuron->mean_isi_ticks); - io_printf(IO_BUF, "time_to_spike_ticks = %k \n", - neuron->time_to_spike_ticks); - -// io_printf(IO_BUF, "Seed 1: %u\n", neuron->spike_source_seed[0]); -// io_printf(IO_BUF, "Seed 2: %u\n", neuron->spike_source_seed[1]); -// io_printf(IO_BUF, "Seed 3: %u\n", neuron->spike_source_seed[2]); -// io_printf(IO_BUF, "Seed 4: %u\n", neuron->spike_source_seed[3]); -//// io_printf(IO_BUF, "seconds per tick: %u\n", neuron->seconds_per_tick); -// io_printf(IO_BUF, "ticks per second: %k\n", neuron->ticks_per_second); -} - - - diff --git a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h deleted file mode 100644 index 08c1e8ee1f6..00000000000 --- a/neural_modelling/src/neuron/models/neuron_model_store_recall_readout_impl.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ -#define _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ - -#include "neuron_model.h" -#include "random.h" - 
-///////////////////////////////////////////////////////////// -// definition for LIF neuron parameters -typedef struct neuron_t { - // membrane voltage [mV] - REAL V_membrane; - - // membrane resting voltage [mV] - REAL V_rest; - - // membrane resistance [MOhm] - REAL R_membrane; - - // 'fixed' computation parameter - time constant multiplier for - // closed-form solution - // exp(-(machine time step in ms)/(R * C)) [.] - REAL exp_TC; - - // offset current [nA] - REAL I_offset; - - // countdown to end of next refractory period [timesteps] - int32_t refract_timer; - - // post-spike reset membrane voltage [mV] - REAL V_reset; - - // refractory time of neuron [timesteps] - int32_t T_refract; - - - // Poisson compartment params - REAL mean_isi_ticks; - REAL time_to_spike_ticks; - - int32_t time_since_last_spike; - REAL rate_at_last_setting; - REAL rate_update_threshold; - - -// // Should be in global params -// mars_kiss64_seed_t spike_source_seed; // array of 4 values -//// UFRACT seconds_per_tick; -// REAL ticks_per_second; - -} neuron_t; - -typedef struct global_neuron_params_t { - mars_kiss64_seed_t kiss_seed; // array of 4 values - REAL ticks_per_second; - REAL readout_V_0; - REAL readout_V_1; - REAL prob_command; - REAL rate_on; - REAL rate_off; - REAL mean_0; - REAL mean_1; - REAL cross_entropy; - uint32_t p_key; - REAL p_pop_size; -} global_neuron_params_t; - -#endif // _NEURON_MODEL_LIF_CURR_POISSON_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 5fb5a4abb51..1711746e27f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -29,16 +29,6 @@ //--------------------------------------- // Structures //--------------------------------------- -//! 
\brief The type of history data of pre-events -//! -//! This data is stored in SDRAM in the plastic part of the synaptic matrix -//typedef struct { -// //! The event time -// uint32_t prev_time; -// //! The event trace -// pre_trace_t prev_trace; -//} pre_event_history_t; - //! The format of the plastic data region of a synaptic row struct synapse_row_plastic_data_t { //! The pre-event history @@ -47,31 +37,11 @@ struct synapse_row_plastic_data_t { plastic_synapse_t synapses[]; }; -//#include "weight_dependence/weight.h" -//#include "timing_dependence/timing.h" -//#include -//#include -//#include - // TODO: make work with stdp common? (is this even really STDP?) -//#include #include -//#include extern neuron_t *neuron_array; -//extern global_neuron_params_pointer_t global_parameters; - -// These are now defined earlier -//static uint32_t synapse_type_index_bits; -//static uint32_t synapse_index_bits; -//static uint32_t synapse_index_mask; -//static uint32_t synapse_type_index_mask; -//static uint32_t synapse_delay_index_type_bits; -//static uint32_t synapse_type_mask; - -//uint32_t num_plastic_pre_synaptic_events = 0; -//uint32_t plastic_saturation_count = 0; extern uint32_t neuron_impl_neurons_in_partition; @@ -104,86 +74,8 @@ extern uint32_t neuron_impl_neurons_in_partition; #define SYNAPSE_AXONAL_DELAY_MASK \ ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1) - uint32_t RECURRENT_SYNAPSE_OFFSET = 100; -////--------------------------------------- -//// Structures -////--------------------------------------- -//typedef struct { -// pre_trace_t prev_trace; -// uint32_t prev_time; -//} pre_event_history_t; - -//post_event_history_t *post_event_history; - -/* PRIVATE FUNCTIONS */ - -//// Mark a value as possibly unused while not using any instructions, guaranteed -//#ifndef __use -//#define __use(x) do { (void) (x); } while (0) -//#endif - -//--------------------------------------- -// Synapse update loop -//--------------------------------------- -//static inline final_state_t 
plasticity_update_synapse( -// uint32_t time, -// const uint32_t last_pre_time, const pre_trace_t last_pre_trace, -// const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, -// const uint32_t delay_axonal, update_state_t current_state, -// const post_event_history_t *post_event_history) { -// // Apply axonal delay to time of last presynaptic spike -// const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; -// -// // Get the post-synaptic window of events to be processed -// const uint32_t window_begin_time = -// (delayed_last_pre_time >= delay_dendritic) -// ? (delayed_last_pre_time - delay_dendritic) : 0; -// const uint32_t window_end_time = time + delay_axonal - delay_dendritic; -// post_event_window_t post_window = post_events_get_window_delayed( -// post_event_history, window_begin_time, window_end_time); -// -// log_debug("\tPerforming deferred synapse update at time:%u", time); -// log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", -// window_begin_time, window_end_time, post_window.prev_time, -// post_window.num_events); -// -// // print_event_history(post_event_history); -// // print_delayed_window_events(post_event_history, window_begin_time, -// // window_end_time, delay_dendritic); -// -// // Process events in post-synaptic window -// while (post_window.num_events > 0) { -// const uint32_t delayed_post_time = -// *post_window.next_time + delay_dendritic; -// log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", -// delayed_post_time); -// -// // Apply spike to state -// current_state = timing_apply_post_spike( -// delayed_post_time, *post_window.next_trace, delayed_last_pre_time, -// last_pre_trace, post_window.prev_time, post_window.prev_trace, -// current_state); -// -// // Go onto next event -// post_window = post_events_next_delayed(post_window, delayed_post_time); -// } -// -// const uint32_t delayed_pre_time = time + delay_axonal; -// log_debug("\t\tApplying pre-synaptic event at time:%u 
last post time:%u\n", -// delayed_pre_time, post_window.prev_time); -// -// // Apply spike to state -// // **NOTE** dendritic delay is subtracted -// current_state = timing_apply_pre_spike( -// delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, -// post_window.prev_time, post_window.prev_trace, current_state); -// -// // Return final synaptic word and weight -// return synapse_structure_get_final_state(current_state); -//} - //--------------------------------------- // Synaptic row plastic-region implementation //--------------------------------------- @@ -241,7 +133,8 @@ void synapse_dynamics_print_plastic_synapses( synapses_print_weight( weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", - synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), + synapse_row_sparse_delay( + control_word, synapse_type_index_bits, synapse_delay_mask), synapse_types_get_type_char(synapse_type), synapse_row_sparse_index(control_word, synapse_index_mask), synapse_delay_mask, synapse_type_index_bits); @@ -262,26 +155,6 @@ static inline index_t sparse_axonal_delay(uint32_t x) { bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { -// // Load timing dependence data -// address_t weight_region_address = timing_initialise(address); -// if (address == NULL) { -// return NULL; -// } -// -// syn_dynamics_neurons_in_partition = n_neurons; -// -// // Load weight dependence data -// address_t weight_result = weight_initialise( -// weight_region_address, n_synapse_types, -// ring_buffer_to_input_buffer_left_shifts); -// if (weight_result == NULL) { -// return NULL; -// } -// -// post_event_history = post_events_init_buffers(n_neurons); -// if (post_event_history == NULL) { -// return NULL; -// } if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, 
ring_buffer_to_input_buffer_left_shifts)) { @@ -293,32 +166,16 @@ bool synapse_dynamics_initialise( return false; } - return true; // weight_result; + return true; } - static inline final_state_t eprop_plasticity_update( update_state_t current_state, REAL delta_w) { - // Test weight change - // delta_w = -0.1k; - - int32_t delta_w_int = (int32_t) roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - -// log_info("delta_w_int %d", delta_w_int); + int32_t delta_w_int = (int32_t) roundk(delta_w, 15); + // TODO: THIS NEEDS UPDATING TO APPROPRIATE SCALING (?) if (delta_w){ // TODO: This should probably be delta_w_int -// if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" -//// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" -// "\n", -// delta_w, delta_w_int -//// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) -// ); -//// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", -//// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); -// } - if (delta_w_int < 0){ current_state = weight_one_term_apply_depression( current_state, delta_w_int << 3); @@ -327,30 +184,17 @@ static inline final_state_t eprop_plasticity_update( current_state, delta_w_int << 3); } } - else { -// if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "delta_w: %k\n", delta_w); -// } -// current_state = current_state; // ?? what? 
- } // Calculate regularisation error -// REAL reg_error = global_parameters->core_target_rate - (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike - REAL reg_error = neuron_array[0].core_target_rate - (neuron_array[0].core_pop_rate / neuron_impl_neurons_in_partition); // this needs swapping for an inverse multiply - too expensive to do divide on every spike -// REAL reg_error = ((global_parameters->core_target_rate + ((neuron_array->w_fb - 0.5) * 20)) - global_parameters->core_pop_rate) / syn_dynamics_neurons_in_partition; // this needs swapping for an inverse multiply - too expensive to do divide on every spike - -// REAL reg_error = (global_parameters->core_pop_rate / syn_dynamics_neurons_in_partition) - global_parameters->core_target_rate; -// io_printf(IO_BUF, "core_pop_rate = %k, target = %k, error = %k\n", global_parameters->core_pop_rate, global_parameters->core_target_rate, reg_error); - -// io_printf(IO_BUF, "reg_error before: %k\n", reg_error); + REAL reg_error = neuron_array[0].core_target_rate - ( + neuron_array[0].core_pop_rate / neuron_impl_neurons_in_partition); + // this needs swapping for an inverse multiply - too expensive to do divide + // on every spike // Return final synaptic word and weight return synapse_structure_get_final_state(current_state, reg_error); } - - - bool synapse_dynamics_process_plastic_synapses( synapse_row_plastic_data_t *plastic_region_address, synapse_row_fixed_part_t *fixed_region, @@ -375,14 +219,10 @@ bool synapse_dynamics_process_plastic_synapses( // Extract control-word components // **NOTE** cunningly, control word is just the same as lower // 16-bits of 32-bit fixed synapse so same functions can be used -// uint32_t delay_axonal = sparse_axonal_delay(control_word); - - uint32_t delay = 1; // why was this 1.0k? 
+ uint32_t delay = 1; uint32_t syn_ind_from_delay = synapse_row_sparse_delay( control_word, synapse_type_index_bits, synapse_delay_mask); -// uint32_t delay_dendritic = synapse_row_sparse_delay( -// control_word, synapse_type_index_bits); uint32_t type = synapse_row_sparse_type( control_word, synapse_index_bits, synapse_type_mask); uint32_t index = @@ -407,32 +247,8 @@ bool synapse_dynamics_process_plastic_synapses( synapse_structure_get_update_state(*plastic_words, type); neuron_t *neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; // !!!! Check what units this is in - same as weight? !!!! -// log_info("plastic update of z_bar_inp for neuron_ind %u syn_ind %u", neuron_ind, syn_ind_from_delay); - -// io_printf(IO_BUF, "initial_weight: d%d, k%k, u%u - ", current_state.initial_weight, current_state.initial_weight, current_state.initial_weight); -// if (current_state.initial_weight > 0){ -// io_printf(IO_BUF, "+ve\n"); -// } -// else if(current_state.initial_weight < 0){ -// io_printf(IO_BUF, "-ve\n"); -// neuron->syn_state[syn_ind_from_delay].z_bar_inp *= -1k; -// } -// else{ -// io_printf(IO_BUF, "0\n"); -// } - - -// if (PRINT_PLASTICITY){ -//// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", -//// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); -// -// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", -// neuron_ind, syn_ind_from_delay, type, -// current_state.weight, -// neuron->syn_state[syn_ind_from_delay].delta_w, time); -// } - + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; + // !!!! TODO: Check what units this is in - same as weight? !!!! 
// Perform weight update: only if batch time has elapsed final_state_t final_state; @@ -442,7 +258,6 @@ bool synapse_dynamics_process_plastic_synapses( io_printf(IO_BUF, "update_ready=0\n"); } -// log_info("delta_w is %k", neuron->syn_state[syn_ind_from_delay].delta_w); // Go through typical weight update process to clip to limits final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); @@ -465,35 +280,28 @@ bool synapse_dynamics_process_plastic_synapses( // Don't reset delta_w -> keep this accumulating and apply weight change in future } - // Add contribution to synaptic input // Convert into ring buffer offset uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index_combined( - // delay_axonal + delay_dendritic + time, type_index, synapse_type_index_bits, synapse_delay_mask); - // Check for ring buffer saturation (? - again? - is int16_t correct here now?) + // Check for ring buffer saturation int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); -// log_info("Check: accumulation %d ring_buffer %d time %u", -// accumulation, ring_buffers[ring_buffer_index], time); - -// uint32_t sat_test = accumulation & 0x10000; -// if (sat_test) { -// accumulation = sat_test - 1; -// plastic_saturation_count++; -// } - // overflow check - if (accumulation < ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) - && ring_buffers[ring_buffer_index] > 0 && synapse_structure_get_final_weight(final_state) > 0){ + if (accumulation < ring_buffers[ring_buffer_index] + ( + synapse_structure_get_final_weight(final_state)) + && ring_buffers[ring_buffer_index] > 0 && ( + synapse_structure_get_final_weight(final_state) > 0)) { accumulation = ring_buffers[ring_buffer_index]; } // underflow check - if (accumulation > ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) - && ring_buffers[ring_buffer_index] < 0 && 
synapse_structure_get_final_weight(final_state) < 0){ + if (accumulation > ring_buffers[ring_buffer_index] + ( + synapse_structure_get_final_weight(final_state)) + && ring_buffers[ring_buffer_index] < 0 && ( + synapse_structure_get_final_weight(final_state) < 0)) { accumulation = ring_buffers[ring_buffer_index]; } @@ -520,22 +328,8 @@ void synapse_dynamics_process_post_synaptic_event( timing_add_post_spike(time, last_post_time, last_post_trace)); } -//input_t synapse_dynamics_get_intrinsic_bias( -// uint32_t time, index_t neuron_index) { -// use(time); -// use(neuron_index); -// return 0.0k; -//} -// -//uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { -// return num_plastic_pre_synaptic_events; -//} -// -//uint32_t synapse_dynamics_get_plastic_saturation_count(void) { -// return plastic_saturation_count; -//} - -// TODO: fix below to match other dynamics impls +// TODO: fix below to match other dynamics impls? (Do we want structural plasticity +// mixed with eprop??) #if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index 80e5c90dfe9..daa1c4bdbf7 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -26,29 +26,9 @@ #include "post_events.h" #include "synapse_dynamics_stdp_common.h" -//#include "weight_dependence/weight.h" -//#include "timing_dependence/timing.h" -//#include -//#include -//#include - - -//#include -//#include #include extern neuron_t *neuron_array; -//extern global_neuron_params_pointer_t global_parameters; - -//static uint32_t synapse_type_index_bits; -//static uint32_t synapse_index_bits; -//static uint32_t synapse_index_mask; -//static uint32_t synapse_type_index_mask; -//static uint32_t synapse_delay_index_type_bits; -//static 
uint32_t synapse_type_mask; - -//uint32_t num_plastic_pre_synaptic_events = 0; -//uint32_t plastic_saturation_count = 0; //--------------------------------------- // Macros @@ -85,13 +65,6 @@ uint32_t RECURRENT_SYNAPSE_OFFSET = 100; ////--------------------------------------- //// Structures ////--------------------------------------- -//typedef struct { -// pre_trace_t prev_trace; -// uint32_t prev_time; -//} pre_event_history_t; -// -//post_event_history_t *post_event_history; - //! The format of the plastic data region of a synaptic row struct synapse_row_plastic_data_t { //! The pre-event history @@ -100,68 +73,6 @@ struct synapse_row_plastic_data_t { plastic_synapse_t synapses[]; }; -/* PRIVATE FUNCTIONS */ - -//--------------------------------------- -// Synapse update loop -//--------------------------------------- -//static inline final_state_t plasticity_update_synapse( -// uint32_t time, -// const uint32_t last_pre_time, const pre_trace_t last_pre_trace, -// const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, -// const uint32_t delay_axonal, update_state_t current_state, -// const post_event_history_t *post_event_history) { -// // Apply axonal delay to time of last presynaptic spike -// const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; -// -// // Get the post-synaptic window of events to be processed -// const uint32_t window_begin_time = -// (delayed_last_pre_time >= delay_dendritic) -// ? 
(delayed_last_pre_time - delay_dendritic) : 0; -// const uint32_t window_end_time = time + delay_axonal - delay_dendritic; -// post_event_window_t post_window = post_events_get_window_delayed( -// post_event_history, window_begin_time, window_end_time); -// -// log_debug("\tPerforming deferred synapse update at time:%u", time); -// log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", -// window_begin_time, window_end_time, post_window.prev_time, -// post_window.num_events); -// -// // print_event_history(post_event_history); -// // print_delayed_window_events(post_event_history, window_begin_time, -// // window_end_time, delay_dendritic); -// -// // Process events in post-synaptic window -// while (post_window.num_events > 0) { -// const uint32_t delayed_post_time = -// *post_window.next_time + delay_dendritic; -// log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", -// delayed_post_time); -// -// // Apply spike to state -// current_state = timing_apply_post_spike( -// delayed_post_time, *post_window.next_trace, delayed_last_pre_time, -// last_pre_trace, post_window.prev_time, post_window.prev_trace, -// current_state); -// -// // Go onto next event -// post_window = post_events_next_delayed(post_window, delayed_post_time); -// } -// -// const uint32_t delayed_pre_time = time + delay_axonal; -// log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", -// delayed_pre_time, post_window.prev_time); -// -// // Apply spike to state -// // **NOTE** dendritic delay is subtracted -// current_state = timing_apply_pre_spike( -// delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, -// post_window.prev_time, post_window.prev_trace, current_state); -// -// // Return final synaptic word and weight -// return synapse_structure_get_final_state(current_state); -//} - //--------------------------------------- // Synaptic row plastic-region implementation //--------------------------------------- @@ -240,24 
+151,6 @@ static inline index_t sparse_axonal_delay(uint32_t x) { bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { -// // Load timing dependence data -// address_t weight_region_address = timing_initialise(address); -// if (address == NULL) { -// return NULL; -// } -// -// // Load weight dependence data -// address_t weight_result = weight_initialise( -// weight_region_address, n_synapse_types, -// ring_buffer_to_input_buffer_left_shifts); -// if (weight_result == NULL) { -// return NULL; -// } -// -// post_event_history = post_events_init_buffers(n_neurons); -// if (post_event_history == NULL) { -// return NULL; -// } if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, ring_buffer_to_input_buffer_left_shifts)) { @@ -269,61 +162,39 @@ bool synapse_dynamics_initialise( return false; } - return true; // weight_result; + return true; } static inline final_state_t eprop_plasticity_update( update_state_t current_state, REAL delta_w){ - // Test weight change - // delta_w = -0.1k; - - // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? -// int32_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING -// int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING -// int16_t delta_w_int = (int) delta_w; // >> 15; + int32_t delta_w_int = (int32_t)roundk(delta_w, 15); + // TODO: THIS NEEDS UPDATING TO APPROPRIATE SCALING (?) if (delta_w){ // TODO: delta_w_int instead? 
if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" -// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" - "\n", - delta_w, delta_w_int -// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) - ); -// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", -// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d\n", + delta_w, delta_w_int); } if (delta_w_int < 0){ current_state = weight_one_term_apply_depression( current_state, delta_w_int << 3); -// current_state, (int16_t)(delta_w_int << 0)); } else { current_state = weight_one_term_apply_potentiation( current_state, delta_w_int << 3); -// current_state, (int16_t)(delta_w_int << 0)); } } - else { -// if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "delta_w: %k\n", delta_w); -// } -// current_state = current_state; - } - - // Calculate regularisation error - REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; + // Regularisation error isn't used in this case + REAL reg_error = 0.0; // Return final synaptic word and weight return synapse_structure_get_final_state(current_state, reg_error); } - bool synapse_dynamics_process_plastic_synapses( synapse_row_plastic_data_t *plastic_region_address, synapse_row_fixed_part_t *fixed_region, @@ -337,8 +208,8 @@ bool synapse_dynamics_process_plastic_synapses( num_plastic_pre_synaptic_events += plastic_synapse; - // Could maybe have a single z_bar for the entire synaptic row - // and update it once here for all synaptic words? + // TODO: Could maybe have a single z_bar for the entire synaptic row + // and update it once here for all synaptic words? 
// Loop through plastic synapses for (; plastic_synapse > 0; plastic_synapse--) { @@ -348,14 +219,10 @@ bool synapse_dynamics_process_plastic_synapses( // Extract control-word components // **NOTE** cunningly, control word is just the same as lower // 16-bits of 32-bit fixed synapse so same functions can be used -// uint32_t delay_axonal = sparse_axonal_delay(control_word); - - uint32_t delay = 1; // 1.0k; ?? + uint32_t delay = 1; uint32_t syn_ind_from_delay = synapse_row_sparse_delay( control_word, synapse_type_index_bits, synapse_delay_mask); -// uint32_t delay_dendritic = synapse_row_sparse_delay( -// control_word, synapse_type_index_bits); uint32_t type = synapse_row_sparse_type( control_word, synapse_index_bits, synapse_type_mask); uint32_t index = @@ -369,34 +236,23 @@ bool synapse_dynamics_process_plastic_synapses( // For low pass filter of incoming spike train on this synapse // Use postsynaptic neuron index to access neuron struct, - if (type==1){ + if (type==1) { // this is a recurrent synapse: add 100 to index to correct array location - syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; + syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } - neuron_t *neuron = &neuron_array[neuron_ind]; - neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; // !!!! Check what units this is in - same as weight? !!!! + neuron->syn_state[syn_ind_from_delay].z_bar_inp = 1024.0k; + // !!!! TODO: Check what units this is in - same as weight? !!!! 
// Create update state from the plastic synaptic word update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); -// if (PRINT_PLASTICITY){ -//// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", -//// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); -// -// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: %k, time: %u\n", -// neuron_ind, syn_ind_from_delay, type, -// current_state.initial_weight, -// neuron->syn_state[syn_ind_from_delay].delta_w, time); -// } - // Perform weight update: only if batch time has elapsed final_state_t final_state; if (neuron->syn_state[syn_ind_from_delay].update_ready <= 0){ - // enough time has elapsed - perform weight update if (PRINT_PLASTICITY){ io_printf(IO_BUF, "update_ready=0\n"); @@ -427,41 +283,38 @@ bool synapse_dynamics_process_plastic_synapses( // Add contribution to synaptic input // Convert into ring buffer offset uint32_t ring_buffer_index = synapse_row_get_ring_buffer_index_combined( - // delay_axonal + delay_dendritic + time, type_index, synapse_type_index_bits, synapse_delay_mask); // Check for ring buffer saturation int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); -// io_printf(IO_BUF, "d acc:%d, rb:%d, syn:%d\n", accumulation, ring_buffers[ring_buffer_index], synapse_structure_get_final_weight(final_state)); -// io_printf(IO_BUF, "u acc:%u, rb:%u, syn:%u\n", accumulation, ring_buffers[ring_buffer_index], synapse_structure_get_final_weight(final_state)); -// io_printf(IO_BUF, "k acc:%k, rb:%k, syn:%k\n", accumulation, ring_buffers[ring_buffer_index], synapse_structure_get_final_weight(final_state)); + // overflow check - if (accumulation < ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) - && ring_buffers[ring_buffer_index] > 0 && synapse_structure_get_final_weight(final_state) > 0){ + if 
(accumulation < ring_buffers[ring_buffer_index] + ( + synapse_structure_get_final_weight(final_state)) + && ring_buffers[ring_buffer_index] > 0 && ( + synapse_structure_get_final_weight(final_state) > 0)){ accumulation = ring_buffers[ring_buffer_index]; plastic_saturation_count++; } // underflow check - if (accumulation > ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state) - && ring_buffers[ring_buffer_index] < 0 && synapse_structure_get_final_weight(final_state) < 0){ + if (accumulation > ring_buffers[ring_buffer_index] + ( + synapse_structure_get_final_weight(final_state)) + && ring_buffers[ring_buffer_index] < 0 && ( + synapse_structure_get_final_weight(final_state) < 0)) { accumulation = ring_buffers[ring_buffer_index]; plastic_saturation_count++; } -// uint32_t sat_test = accumulation & 0x20000; -// if (sat_test) { -// accumulation = 0x10000 - 1; -// plastic_saturation_count++; -// } - ring_buffers[ring_buffer_index] = accumulation; // Write back updated synaptic word to plastic region *plastic_words++ = synapse_structure_get_final_synaptic_word(final_state); } + // TODO: set write_back here? + *write_back = true; return true; } @@ -478,23 +331,8 @@ void synapse_dynamics_process_post_synaptic_event( timing_add_post_spike(time, last_post_time, last_post_trace)); } -//input_t synapse_dynamics_get_intrinsic_bias( -// uint32_t time, index_t neuron_index) { -// use(time); -// use(neuron_index); -// return 0.0k; -//} -// -//uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { -// return num_plastic_pre_synaptic_events; -//} -// -//uint32_t synapse_dynamics_get_plastic_saturation_count(void) { -// return plastic_saturation_count; -//} - // TODO: fix below to match other dynamics impls so that structural -// plasticity can be used +// plasticity can be used if desired? 
#if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index 193d186a9cf..a243a4fd344 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -26,29 +26,9 @@ #include "post_events.h" #include "synapse_dynamics_stdp_common.h" -//#include "weight_dependence/weight.h" -//#include "timing_dependence/timing.h" -//#include -//#include -//#include - - -//#include -//#include #include extern neuron_t *neuron_array; -//extern global_neuron_params_pointer_t global_parameters; - -//static uint32_t synapse_type_index_bits; -//static uint32_t synapse_index_bits; -//static uint32_t synapse_index_mask; -//static uint32_t synapse_type_index_mask; -//static uint32_t synapse_delay_index_type_bits; -//static uint32_t synapse_type_mask; - -//uint32_t num_plastic_pre_synaptic_events = 0; -//uint32_t plastic_saturation_count = 0; //--------------------------------------- // Macros @@ -85,13 +65,6 @@ uint32_t RECURRENT_SYNAPSE_OFFSET = 100; //--------------------------------------- // Structures //--------------------------------------- -//typedef struct { -// pre_trace_t prev_trace; -// uint32_t prev_time; -//} pre_event_history_t; - -//post_event_history_t *post_event_history; - //! The format of the plastic data region of a synaptic row struct synapse_row_plastic_data_t { //! 
The pre-event history @@ -100,68 +73,6 @@ struct synapse_row_plastic_data_t { plastic_synapse_t synapses[]; }; -/* PRIVATE FUNCTIONS */ - -//--------------------------------------- -// Synapse update loop -//--------------------------------------- -//static inline final_state_t plasticity_update_synapse( -// uint32_t time, -// const uint32_t last_pre_time, const pre_trace_t last_pre_trace, -// const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, -// const uint32_t delay_axonal, update_state_t current_state, -// const post_event_history_t *post_event_history) { -// // Apply axonal delay to time of last presynaptic spike -// const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; -// -// // Get the post-synaptic window of events to be processed -// const uint32_t window_begin_time = -// (delayed_last_pre_time >= delay_dendritic) -// ? (delayed_last_pre_time - delay_dendritic) : 0; -// const uint32_t window_end_time = time + delay_axonal - delay_dendritic; -// post_event_window_t post_window = post_events_get_window_delayed( -// post_event_history, window_begin_time, window_end_time); -// -// log_debug("\tPerforming deferred synapse update at time:%u", time); -// log_debug("\t\tbegin_time:%u, end_time:%u - prev_time:%u, num_events:%u", -// window_begin_time, window_end_time, post_window.prev_time, -// post_window.num_events); -// -// // print_event_history(post_event_history); -// // print_delayed_window_events(post_event_history, window_begin_time, -// // window_end_time, delay_dendritic); -// -// // Process events in post-synaptic window -// while (post_window.num_events > 0) { -// const uint32_t delayed_post_time = -// *post_window.next_time + delay_dendritic; -// log_debug("\t\tApplying post-synaptic event at delayed time:%u\n", -// delayed_post_time); -// -// // Apply spike to state -// current_state = timing_apply_post_spike( -// delayed_post_time, *post_window.next_trace, delayed_last_pre_time, -// last_pre_trace, post_window.prev_time, 
post_window.prev_trace, -// current_state); -// -// // Go onto next event -// post_window = post_events_next_delayed(post_window, delayed_post_time); -// } -// -// const uint32_t delayed_pre_time = time + delay_axonal; -// log_debug("\t\tApplying pre-synaptic event at time:%u last post time:%u\n", -// delayed_pre_time, post_window.prev_time); -// -// // Apply spike to state -// // **NOTE** dendritic delay is subtracted -// current_state = timing_apply_pre_spike( -// delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, -// post_window.prev_time, post_window.prev_trace, current_state); -// -// // Return final synaptic word and weight -// return synapse_structure_get_final_state(current_state); -//} - //--------------------------------------- // Synaptic row plastic-region implementation //--------------------------------------- @@ -219,7 +130,8 @@ void synapse_dynamics_print_plastic_synapses( synapses_print_weight( weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); log_debug("nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", - synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), + synapse_row_sparse_delay( + control_word, synapse_type_index_bits, synapse_delay_mask), synapse_types_get_type_char(synapse_type), synapse_row_sparse_index(control_word, synapse_index_mask), synapse_delay_mask, synapse_type_index_bits); @@ -240,24 +152,6 @@ static inline index_t sparse_axonal_delay(uint32_t x) { bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { -// // Load timing dependence data -// address_t weight_region_address = timing_initialise(address); -// if (address == NULL) { -// return NULL; -// } -// -// // Load weight dependence data -// address_t weight_result = weight_initialise( -// weight_region_address, n_synapse_types, -// ring_buffer_to_input_buffer_left_shifts); -// if (weight_result == NULL) { -// return NULL; 
-// } -// -// post_event_history = post_events_init_buffers(n_neurons); -// if (post_event_history == NULL) { -// return NULL; -// } if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, ring_buffer_to_input_buffer_left_shifts)) { @@ -269,33 +163,19 @@ bool synapse_dynamics_initialise( return false; } - return true; // weight_result; + return true; } - static inline final_state_t eprop_plasticity_update( update_state_t current_state, REAL delta_w){ - - // Test weight change - // delta_w = -0.1k; - - // Convert delta_w to int16_t (same as weight) - take only integer bits from REAL? -// int32_t delta_w_int = bitsk(delta_w); // THIS NEEDS UPDATING TO APPROPRIATE SCALING - int32_t delta_w_int = (int32_t)roundk(delta_w, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING -// int32_t delta_w_int_shift = (int32_t)roundk(delta_w << 3, 15); // THIS NEEDS UPDATING TO APPROPRIATE SCALING -// int16_t delta_w_int = (int) delta_w; // >> 15; + int32_t delta_w_int = (int32_t)roundk(delta_w, 15); + // THIS NEEDS UPDATING TO APPROPRIATE SCALING if (delta_w){ // TODO: This should probably be delta_w_int if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d" -// ", 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d" - "\n", - delta_w, delta_w_int -// , (int16_t)delta_w_int, (int16_t)(delta_w_int << 7), (int16_t)(delta_w_int << 9), (int16_t)(delta_w_int << 11) - ); -// io_printf(IO_BUF, "shift delta_w_int: %d, 16b delta_w_int: %d, delta << 7: %d, delta << 9: %d, delta << 11: %d\n", -// delta_w_int_shift, (int16_t)delta_w_int_shift, (int16_t)(delta_w_int_shift << 1), (int16_t)(delta_w_int_shift << 2), (int16_t)(delta_w_int_shift << 4)); + io_printf(IO_BUF, "delta_w: %k, delta_w_int: %d \n", + delta_w, delta_w_int); } if (delta_w_int < 0){ @@ -306,21 +186,14 @@ static inline final_state_t eprop_plasticity_update( current_state, delta_w_int << 3); } } - else { -// if (PRINT_PLASTICITY){ -// io_printf(IO_BUF, "delta_w: %k\n", delta_w); -// } 
-// current_state = current_state; what? - } - // Calculate regularisation error - REAL reg_error = 0.0; //global_parameters->core_target_rate - global_parameters->core_pop_rate; + // Regularisation error isn't used in this case + REAL reg_error = 0.0; // Return final synaptic word and weight return synapse_structure_get_final_state(current_state, reg_error); } - bool synapse_dynamics_process_plastic_synapses( synapse_row_plastic_data_t *plastic_region_address, synapse_row_fixed_part_t *fixed_region, @@ -345,14 +218,10 @@ bool synapse_dynamics_process_plastic_synapses( // Extract control-word components // **NOTE** cunningly, control word is just the same as lower // 16-bits of 32-bit fixed synapse so same functions can be used -// uint32_t delay_axonal = sparse_axonal_delay(control_word); - - uint32_t delay = 1; // 1.0k; ?? + uint32_t delay = 1; uint32_t syn_ind_from_delay = synapse_row_sparse_delay( control_word, synapse_type_index_bits, synapse_delay_mask); -// uint32_t delay_dendritic = synapse_row_sparse_delay( -// control_word, synapse_type_index_bits); uint32_t type = synapse_row_sparse_type( control_word, synapse_index_bits, synapse_type_mask); uint32_t index = @@ -367,8 +236,7 @@ bool synapse_dynamics_process_plastic_synapses( // Use postsynaptic neuron index to access neuron struct, if (type==1){ - // this is a recurrent synapse: add 100 to index to - // correct array location + // this is a recurrent synapse: add 100 to index to correct array location syn_ind_from_delay += RECURRENT_SYNAPSE_OFFSET; } @@ -380,29 +248,15 @@ bool synapse_dynamics_process_plastic_synapses( update_state_t current_state = synapse_structure_get_update_state(*plastic_words, type); -// if (PRINT_PLASTICITY){ -//// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, type: %u, zbar: %k\n", -//// neuron_ind, syn_ind_from_delay, type, neuron->syn_state[syn_ind_from_delay].z_bar_inp); -// -// io_printf(IO_BUF, "neuron ind: %u, synapse ind: %u, %type: %u init w (plas): %d, summed_dw: 
%k, time: %u\n", -// neuron_ind, syn_ind_from_delay, type, -// current_state.initial_weight, -// neuron->syn_state[syn_ind_from_delay].delta_w, time); -// } - // Perform weight update: only if batch time has elapsed final_state_t final_state; - if (neuron->syn_state[syn_ind_from_delay].update_ready <= 0){ - + if (neuron->syn_state[syn_ind_from_delay].update_ready <= 0) { // enough time has elapsed - perform weight update if (PRINT_PLASTICITY){ io_printf(IO_BUF, "update_ready=0\n"); } -// log_info("Check: eprop plasticity update, delta_w %k syn_ind %u time %u", -// neuron->syn_state[syn_ind_from_delay].delta_w, syn_ind_from_delay, time); - // Go through typical weight update process to clip to limits final_state = eprop_plasticity_update(current_state, neuron->syn_state[syn_ind_from_delay].delta_w); @@ -432,22 +286,13 @@ bool synapse_dynamics_process_plastic_synapses( time, type_index, synapse_type_index_bits, synapse_delay_mask); - // Check for ring buffer saturation + // Check for ring buffer saturation (?) int16_t accumulation = ring_buffers[ring_buffer_index] + synapse_structure_get_final_weight(final_state); -// log_info("Check: accumulation %d ring_buffer %d time %u", -// accumulation, ring_buffers[ring_buffer_index], time); - -// uint32_t sat_test = accumulation & 0x10000; -// if (sat_test) { -// accumulation = sat_test - 1; -// plastic_saturation_count++; -// } - ring_buffers[ring_buffer_index] = accumulation; - // no overflow or underflow checking? + // TODO: no overflow or underflow checking? 
// Write back updated synaptic word to plastic region *plastic_words++ = @@ -470,23 +315,8 @@ void synapse_dynamics_process_post_synaptic_event( timing_add_post_spike(time, last_post_time, last_post_trace)); } -//input_t synapse_dynamics_get_intrinsic_bias( -// uint32_t time, index_t neuron_index) { -// use(time); -// use(neuron_index); -// return 0.0k; -//} -// -//uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { -// return num_plastic_pre_synaptic_events; -//} -// -//uint32_t synapse_dynamics_get_plastic_saturation_count(void) { -// return plastic_saturation_count; -//} - // TODO: fix below to match other dynamics impls so that structural -// plasticity can be used +// plasticity can be used if desired? #if SYNGEN_ENABLED == 1 diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index d715f531f38..6ed3a9456ea 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -73,16 +73,12 @@ static inline weight_state_t weight_get_initial( static inline weight_state_t weight_one_term_apply_depression( weight_state_t state, int32_t a2_minus) { - if (PRINT_PLASTICITY){ io_printf(IO_BUF, "depressing: %d\n", a2_minus); } -// state.weight -= mul_accum_fixed(state.weight_region->a2_minus, a2_minus); state.weight -= kbits(a2_minus); state.weight = kbits(MAX(bitsk(state.weight), bitsk(state.weight_region->min_weight))); return state; -// state.a2_minus += a2_minus; -// return state; } //--------------------------------------- @@ -92,67 +88,41 @@ static inline weight_state_t weight_one_term_apply_potentiation( if (PRINT_PLASTICITY){ io_printf(IO_BUF, "potentiating: %d\n", a2_plus); } -// log_info("weight %k a2_plus %d converted to %k bitsk(weight) %d", -// state.weight, a2_plus, kbits(a2_plus), 
bitsk(state.weight)); -// state.weight += mul_accum_fixed(state.weight_region->a2_plus, a2_plus); state.weight += kbits(a2_plus); state.weight = kbits(MIN(bitsk(state.weight), bitsk(state.weight_region->max_weight))); -// log_info("weight after min of max %k", state.weight); return state; -// state.a2_plus += a2_plus; -// return state; } //--------------------------------------- static inline weight_t weight_get_final(weight_state_t new_state, REAL reg_error) { - // Scale potentiation and depression - // **NOTE** A2+ and A2- are pre-scaled into weight format -// int32_t scaled_a2_plus = STDP_FIXED_MUL_16X16( -// new_state.a2_plus, new_state.weight_region->a2_plus); -// int32_t scaled_a2_minus = STDP_FIXED_MUL_16X16( -// new_state.a2_minus, new_state.weight_region->a2_minus); - // Apply eprop plasticity updates to initial weight -// accum new_weight = bitsk(new_state.weight) >> new_state.weight_shift; accum new_weight = new_state.weight; -// int32_t new_weight = -// new_state.initial_weight + new_state.a2_plus + new_state.a2_minus; accum reg_weight = new_weight; accum reg_change = 0.0k; REAL reg_boundary = 1.0k; // Calculate regularisation - if (new_state.weight_region->reg_rate > 0.0k && (reg_error > reg_boundary || reg_error < -reg_boundary)){ // if reg rate is zero or error small, regularisation is turned off -// reg_change = new_weight * new_state.weight_region->reg_rate * reg_error; -// if (reg_error > 0){ -// reg_error -= reg_boundary; -// } else if (reg_error < 0){ -// reg_error += reg_boundary; -// } - reg_change = new_state.weight_region->max_weight * new_state.weight_region->reg_rate * reg_error; + if (new_state.weight_region->reg_rate > 0.0k && ( + reg_error > reg_boundary || reg_error < -reg_boundary)) { + // if reg rate is zero or error small, regularisation is turned off + reg_change = new_state.weight_region->max_weight * ( + new_state.weight_region->reg_rate * reg_error); reg_weight = new_weight + reg_change; -// io_printf(IO_BUF, "\tw:%d + 
reg_shift:%d = reg_w:%d -- err:%k\n", new_weight, reg_change, reg_weight, reg_error); } if (PRINT_PLASTICITY){ - io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, max:%d", reg_weight, reg_change, new_state.weight_region->max_weight); + io_printf(IO_BUF, "\tbefore minmax reg_w:%d, reg_shift:%d, max:%d", + reg_weight, reg_change, new_state.weight_region->max_weight); } - // Clamp new weight to bounds (not sure this is needed now?) -// reg_weight = MIN(new_state.weight_region->max_weight, -// MAX(reg_weight, new_state.weight_region->min_weight)); if (PRINT_PLASTICITY){ io_printf(IO_BUF, "\told_weight:%d, a2+:%d, a2-:%d, " - // "scaled a2+:%d, scaled a2-:%d," " new_weight:%d, reg_weight:%d, reg_l_rate:%k, reg_error:%k\n", - new_state.weight, new_state.weight_region->a2_plus, new_state.weight_region->a2_minus, - // scaled_a2_plus, scaled_a2_minus, - new_weight, reg_weight, new_state.weight_region->reg_rate, reg_error); + new_state.weight, new_state.weight_region->a2_plus, + new_state.weight_region->a2_minus, new_weight, reg_weight, + new_state.weight_region->reg_rate, reg_error); } -// log_info("reg_weight %k new_weight %k reg_error %k reg_change %k reg_boundary %k", -// reg_weight, new_weight, reg_error, reg_change, reg_boundary); - return (weight_t) (bitsk(reg_weight) >> new_state.weight_shift); } diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h index 6c4bda48f3c..9429d0b8ecb 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h @@ -11,7 +11,6 @@ #ifndef _SYNAPSE_TYPES_EPROP_ADPATIVE_IMPL_H_ #define _SYNAPSE_TYPES_EPROP_ADAPTIVE_IMPL_H_ - //--------------------------------------- // Macros //--------------------------------------- @@ -29,15 +28,12 @@ //--------------------------------------- // Synapse parameters 
//--------------------------------------- -//input_t excitatory_response[NUM_EXCITATORY_RECEPTORS]; -//input_t inhibitory_response[NUM_INHIBITORY_RECEPTORS]; struct synapse_types_params_t { input_t exc; input_t exc2; input_t inh; input_t inh2; -// REAL time_step_ms; }; struct synapse_types_t { @@ -47,23 +43,9 @@ struct synapse_types_t { input_t inh2; //!< Second inhibitory synaptic input }; -//typedef struct synapse_param_t { -// decay_t exc_decay; -// decay_t exc_init; -// decay_t exc2_decay; -// decay_t exc2_init; -// decay_t inh_decay; -// decay_t inh_init; -// decay_t inh2_decay; -// decay_t inh2_init; -// input_t input_buffer_excitatory_value; -// input_t input_buffer_excitatory2_value; -// input_t input_buffer_inhibitory_value; -// input_t input_buffer_inhibitory2_value; -//} synapse_param_t; - //! human readable definition for the positions in the input regions for the //! different synapse types. +// TODO: these have different names on the python side... typedef enum input_buffer_regions { EXCITATORY_ONE, EXCITATORY_TWO, INHIBITORY_ONE, INHIBITORY_TWO } input_buffer_regions; @@ -71,7 +53,6 @@ typedef enum input_buffer_regions { //--------------------------------------- // Synapse shaping inline implementation //--------------------------------------- - static inline void synapse_types_initialise(synapse_types_t *state, synapse_types_params_t *params, UNUSED uint32_t n_steps_per_timestep) { state->exc = params->exc; @@ -97,23 +78,10 @@ static inline void synapse_types_save_state(synapse_types_t *state, //! 
\return nothing static inline void synapse_types_shape_input( synapse_types_t *parameter) { - parameter->exc = 0; -// decay_s1615( -// parameter->input_buffer_excitatory_value, -// parameter->exc_decay); parameter->exc2 = 0; -// decay_s1615( -// parameter->input_buffer_excitatory2_value, -// parameter->exc2_decay); parameter->inh = 0; -// decay_s1615( -// parameter->input_buffer_inhibitory_value, -// parameter->inh_decay); parameter->inh2 = 0; -// decay_s1615( -// parameter->input_buffer_inhibitory2_value, -// parameter->inh2_decay); } //! \brief adds the inputs for a give timer period to a given neuron that is @@ -126,32 +94,14 @@ static inline void synapse_types_shape_input( static inline void synapse_types_add_neuron_input( index_t synapse_type_index, synapse_types_t *parameter, input_t input) { -// if (input){ -// io_printf(IO_BUF, "index = %u, %d \t input = %u, %d\t%u\n", synapse_type_index, synapse_type_index, input, input, input>>3); -// } if (synapse_type_index == EXCITATORY_ONE) { parameter->exc += input; -// = -// parameter->input_buffer_excitatory_value + -// decay_s1615(input, parameter->exc_init); - } else if (synapse_type_index == EXCITATORY_TWO) { parameter->exc2 += input; -// = -// parameter->input_buffer_excitatory2_value + -// decay_s1615(input, parameter->exc2_init); - } else if (synapse_type_index == INHIBITORY_ONE) { parameter->inh += input; -// = -// parameter->input_buffer_inhibitory_value + -// decay_s1615(input, parameter->inh_init); - } else if (synapse_type_index == INHIBITORY_TWO) { parameter->inh2 += input; -// = -// parameter->input_buffer_inhibitory2_value + -// decay_s1615(input, parameter->inh2_init); } } From 42eabce6c058eb8020e09691ef54c9bf0b64160d Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 26 May 2023 17:15:57 +0100 Subject: [PATCH 101/123] vera line length --- neural_modelling/src/neuron/synapses.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/synapses.c 
b/neural_modelling/src/neuron/synapses.c index d18a242e994..87718f4195b 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -237,7 +237,9 @@ static inline bool process_fixed_synapses( int32_t weight = synapse_row_sparse_weight(synaptic_word); // Add weight to current ring buffer value - int32_t accumulation = ring_buffers[ring_buffer_index] + weight; // switch to saturated arithmetic to avoid complicated saturation check, will it check saturation at both ends? + int32_t accumulation = ring_buffers[ring_buffer_index] + weight; + // TODO: switch to saturated arithmetic to avoid complicated saturation check, + // will it check saturation at both ends? // If 17th bit is set, saturate accumulator at UINT16_MAX (0xFFFF) // **NOTE** 0x10000 can be expressed as an ARM literal, From fc149dfbdeef8c37500159f426e541c3286db122 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 26 May 2023 17:27:35 +0100 Subject: [PATCH 102/123] Try using ubuntu 22 (to update compiler version) --- .github/workflows/c_actions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/c_actions.yml b/.github/workflows/c_actions.yml index f6b8e70d297..dd105ac1e27 100644 --- a/.github/workflows/c_actions.yml +++ b/.github/workflows/c_actions.yml @@ -19,7 +19,7 @@ name: C Actions on: [push] jobs: build: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 10 steps: - name: Checkout From c15512daa895ddcb10afcd852b0a044d1f196633 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 6 Jun 2023 17:23:10 +0100 Subject: [PATCH 103/123] Reduce ITCM further --- neural_modelling/src/neuron/current_sources/current_source.h | 2 +- .../plasticity/stdp/timing_dependence/timing_eprop_impl.c | 4 ++-- .../plasticity/stdp/timing_dependence/timing_eprop_impl.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/neural_modelling/src/neuron/current_sources/current_source.h 
b/neural_modelling/src/neuron/current_sources/current_source.h index f1fd058dd5c..3d60ccdaaed 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -150,7 +150,7 @@ static bool current_source_load_parameters(address_t cs_address) { // Avoid the loops if no current sources #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) - io_printf(IO_BUF, "no current sources defined \n"); +// io_printf(IO_BUF, "no current sources defined \n"); return true; #else diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c index 34756b9afd9..cfe30b615f5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c @@ -28,8 +28,8 @@ int16_t tau_minus_lookup[TAU_MINUS_SIZE]; // Functions //--------------------------------------- address_t timing_initialise(address_t address) { - io_printf(IO_BUF, "timing_initialise: starting\n"); - io_printf(IO_BUF, "\t Nothing to be done\n"); +// io_printf(IO_BUF, "timing_initialise: starting\n"); +// io_printf(IO_BUF, "\t Nothing to be done\n"); return address; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h index bb2c5550662..aa63cab92be 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h @@ -29,7 +29,7 @@ typedef int16_t pre_trace_t; #include // Include debug header for log_info etc -#include +//#include // Include generic plasticity maths functions #include From 
5de2d6dc25d8e58410549a6d8fd2332c03b4765b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 6 Jun 2023 17:28:45 +0100 Subject: [PATCH 104/123] Don't build eprop binaries in debug mode --- neural_modelling/makefiles/neuron/Makefile | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index 5c690cf40a7..b8fc299eb80 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -12,12 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -MODELS = eprop_adaptive \ - eprop_adaptive_stdp_mad_eprop_reg \ - sinusoid_readout \ - sinusoid_readout_stdp_mad_eprop_reg \ - left_right_readout_stdp_mad_eprop_reg \ - IF_curr_exp \ +MODELS = IF_curr_exp \ IF_cond_exp \ IZK_curr_exp \ IZK_cond_exp \ @@ -43,7 +38,12 @@ ifneq ($(SPYNNAKER_DEBUG), DEBUG) IZK_cond_exp_stdp_mad_pair_additive \ IF_curr_alpha_stdp_mad_pair_additive \ IF_curr_exp_stdp_mad_recurrent_pre_stochastic_multiplicative \ - IF_curr_exp_stdp_mad_pfister_triplet_additive + IF_curr_exp_stdp_mad_pfister_triplet_additive \ + eprop_adaptive \ + eprop_adaptive_stdp_mad_eprop_reg \ + sinusoid_readout \ + sinusoid_readout_stdp_mad_eprop_reg \ + left_right_readout_stdp_mad_eprop_reg endif all: From 80cd906d2dd9e984e45f92cfaf3dd0493fc27a18 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 7 Jun 2023 11:26:21 +0100 Subject: [PATCH 105/123] Not using StoreRecallReadout any more --- .../pyNN/models/neuron/builds/__init__.py | 3 +- .../neuron/builds/store_recall_readout.py | 42 --- .../models/neuron/neuron_models/__init__.py | 2 - .../neuron_model_store_recall_readout.py | 347 ------------------ 4 files changed, 1 insertion(+), 393 deletions(-) delete mode 100644 spynnaker/pyNN/models/neuron/builds/store_recall_readout.py delete mode 100644 
spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py diff --git a/spynnaker/pyNN/models/neuron/builds/__init__.py b/spynnaker/pyNN/models/neuron/builds/__init__.py index 2eb0699920b..0e578161379 100644 --- a/spynnaker/pyNN/models/neuron/builds/__init__.py +++ b/spynnaker/pyNN/models/neuron/builds/__init__.py @@ -27,7 +27,6 @@ from .if_curr_exp_ca2_adaptive import IFCurrExpCa2Adaptive from .if_curr_exp_semd_base import IFCurrExpSEMDBase from .eprop_adaptive import EPropAdaptive -# from .store_recall_readout import StoreRecallReadout from .sinusoid_readout import SinusoidReadout from .left_right_readout import LeftRightReadout @@ -36,4 +35,4 @@ "IFCurrExpBase", "IFFacetsConductancePopulation", "IzkCondExpBase", "IzkCurrExpBase", "IFCondExpStoc", "IFCurrDelta", "IFCurrExpCa2Adaptive", "IFCurrExpSEMDBase", - "EPropAdaptive", "SinusoidReadout", "LeftRightReadout"] # , "StoreRecallReadout" + "EPropAdaptive", "SinusoidReadout", "LeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py b/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py deleted file mode 100644 index 9861dc433b4..00000000000 --- a/spynnaker/pyNN/models/neuron/builds/store_recall_readout.py +++ /dev/null @@ -1,42 +0,0 @@ -from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard -from spynnaker.pyNN.models.defaults import default_initial_values -from spynnaker.pyNN.models.neuron.neuron_models import ( - NeuronModelStoreRecallReadout) -from spynnaker.pyNN.models.neuron.synapse_types import ( - SynapseTypeEPropAdaptive) -from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent -from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeStatic - - -class StoreRecallReadout(AbstractPyNNNeuronModelStandard): - """ Leaky integrate and fire neuron which fires Poisson spikes with rate - set by the neurons membrane potential - """ - - @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - 
"isyn_inh2", "mean_isi_ticks", - "time_to_spike_ticks"}) - def __init__( - self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, - v_thresh=100, tau_refrac=0.1, - isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, - mean_isi_ticks=65000, time_to_spike_ticks=65000, - i_offset=0.0, v=50, rate_update_threshold=0.25, - prob_command=1./6., rate_on=50, rate_off=0, poisson_pop_size=25): - # pylint: disable=too-many-arguments, too-many-locals - neuron_model = NeuronModelStoreRecallReadout( - v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, - mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, - prob_command, rate_on, rate_off, poisson_pop_size) - synapse_type = SynapseTypeEPropAdaptive( - tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, - isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) - input_type = InputTypeCurrent() - threshold_type = ThresholdTypeStatic(v_thresh) - - super(StoreRecallReadout, self).__init__( - model_name="store_recall_readout_neuron", - binary="store_recall_readout_neuron.aplx", - neuron_model=neuron_model, input_type=input_type, - synapse_type=synapse_type, threshold_type=threshold_type) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index a19597a895e..59dcf402f19 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -16,7 +16,6 @@ from .neuron_model_leaky_integrate_and_fire import ( NeuronModelLeakyIntegrateAndFire) from .neuron_model_eprop_adaptive import NeuronModelEPropAdaptive -# from .neuron_model_store_recall_readout import NeuronModelStoreRecallReadout from .neuron_model_sinusoid_readout import ( NeuronModelLeakyIntegrateAndFireSinusoidReadout) from .neuron_model_left_right_readout import NeuronModelLeftRightReadout @@ -25,4 +24,3 @@ "NeuronModelEPropAdaptive", "NeuronModelLeakyIntegrateAndFireSinusoidReadout", 
"NeuronModelLeftRightReadout"] - # "NeuronModelStoreRecallReadout", diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py deleted file mode 100644 index c0cd3d8cc6b..00000000000 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_store_recall_readout.py +++ /dev/null @@ -1,347 +0,0 @@ -import numpy -from spinn_utilities.overrides import overrides -from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items -from .abstract_neuron_model import AbstractNeuronModel - -MICROSECONDS_PER_SECOND = 1000000.0 -MICROSECONDS_PER_MILLISECOND = 1000.0 -V = "v" -V_REST = "v_rest" -TAU_M = "tau_m" -CM = "cm" -I_OFFSET = "i_offset" -V_RESET = "v_reset" -TAU_REFRAC = "tau_refrac" -COUNT_REFRAC = "count_refrac" -MEAN_ISI_TICKS = "mean_isi_ticks" -TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" -SEED1 = "seed1" -SEED2 = "seed2" -SEED3 = "seed3" -SEED4 = "seed4" -TICKS_PER_SECOND = "ticks_per_second" -TIME_SINCE_LAST_SPIKE = "time_since_last_spike" -RATE_AT_LAST_SETTING = "rate_at_last_setting" -RATE_UPDATE_THRESHOLD = "rate_update_threshold" -PROB_COMMAND = "prob_command" -RATE_ON = "rate_on" -RATE_OFF = "rate_off" - -UNITS = { - V: 'mV', - V_REST: 'mV', - TAU_M: 'ms', - CM: 'nF', - I_OFFSET: 'nA', - V_RESET: 'mV', - TAU_REFRAC: 'ms' -} - - -class NeuronModelStoreRecallReadout(AbstractNeuronModel): - __slots__ = [ - "_v_init", - "_v_rest", - "_tau_m", - "_cm", - "_i_offset", - "_v_reset", - "_tau_refrac", - "_mean_isi_ticks", - "_time_to_spike_ticks", - "_time_since_last_spike", - "_rate_at_last_setting", - "_rate_update_threshold", - "_prob_command", - "_rate_on", - "_rate_off", - "_mean_0", - "_mean_1", - "_cross_entropy", - "_poisson_key", - "_poisson_pop_size" - ] - - def __init__( - self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, - mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, - 
prob_command, rate_on, rate_off, poisson_pop_size): - - global_data_types=[ - DataType.UINT32, # MARS KISS seed - DataType.UINT32, # MARS KISS seed - DataType.UINT32, # MARS KISS seed - DataType.UINT32, # MARS KISS seed - DataType.S1615, # ticks_per_second - DataType.S1615, # global mem pot - DataType.S1615, # global mem pot 2 - DataType.S1615, # rate on - DataType.S1615, # rate off - DataType.S1615, # mean 0 activation - DataType.S1615, # mean 0 activation - DataType.S1615, # cross entropy - DataType.UINT32 # poisson key - ] - global_data_types.extend([DataType.S1615 for i in range(1024)]) - - super(NeuronModelStoreRecallReadout, self).__init__( - data_types= [ - DataType.S1615, # v - DataType.S1615, # v_rest - DataType.S1615, # r_membrane (= tau_m / cm) - DataType.S1615, # exp_tc (= e^(-ts / tau_m)) - DataType.S1615, # i_offset - DataType.INT32, # count_refrac - DataType.S1615, # v_reset - DataType.INT32, # tau_refrac - #### Poisson Compartment Params #### - DataType.S1615, # REAL mean_isi_ticks - DataType.S1615, # REAL time_to_spike_ticks - DataType.INT32, # int32_t time_since_last_spike s - DataType.S1615, # REAL rate_at_last_setting; s - DataType.S1615 # REAL rate_update_threshold; p - ], - - global_data_types=global_data_types - ) - - if v_init is None: - v_init = v_rest - - self._v_init = v_init - self._v_rest = v_rest - self._tau_m = tau_m - self._cm = cm - self._i_offset = i_offset - self._v_reset = v_reset - self._tau_refrac = tau_refrac - self._mean_isi_ticks = mean_isi_ticks - self._time_to_spike_ticks = time_to_spike_ticks - self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation - self._rate_at_last_setting = 0 - self._rate_update_threshold = 2 - self._prob_command = prob_command - self._rate_off = rate_off - self._rate_on = rate_on - self._mean_0 = 0.0 - self._mean_1 = 0.0 - self._cross_entropy = 0.0 - self._poisson_key = None - self._poisson_pop_size = poisson_pop_size - - def 
set_poisson_key(self, p_key): - self._poisson_key = p_key - - @overrides(AbstractNeuronModel.get_n_cpu_cycles) - def get_n_cpu_cycles(self, n_neurons): - # A bit of a guess - return 100 * n_neurons - - @overrides(AbstractNeuronModel.add_parameters) - def add_parameters(self, parameters): - parameters[V_REST] = self._v_rest - parameters[TAU_M] = self._tau_m - parameters[CM] = self._cm - parameters[I_OFFSET] = self._i_offset - parameters[V_RESET] = self._v_reset - parameters[TAU_REFRAC] = self._tau_refrac - parameters[SEED1] = 10065 - parameters[SEED2] = 232 - parameters[SEED3] = 3634 - parameters[SEED4] = 4877 - - parameters[PROB_COMMAND] = self._prob_command - parameters[RATE_ON] = self._rate_on - parameters[RATE_OFF] = self._rate_off - - parameters[TICKS_PER_SECOND] = 0 # set in get_valuers() - parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold -# parameters[TARGET_DATA] = self._target_data - - @overrides(AbstractNeuronModel.add_state_variables) - def add_state_variables(self, state_variables): - state_variables[V] = self._v_init - state_variables[COUNT_REFRAC] = 0 - state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks - state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential - state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike - state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting - - - @overrides(AbstractNeuronModel.get_units) - def get_units(self, variable): - return UNITS[variable] - - @overrides(AbstractNeuronModel.has_variable) - def has_variable(self, variable): - return variable in UNITS - - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) - def get_values(self, parameters, state_variables, vertex_slice, ts): - - # Add the rest of the data - return [state_variables[V], - parameters[V_REST], - parameters[TAU_M] / parameters[CM], - parameters[TAU_M].apply_operation( - operation=lambda x: 
numpy.exp(float(-ts) / (1000.0 * x))), - parameters[I_OFFSET], state_variables[COUNT_REFRAC], - parameters[V_RESET], - parameters[TAU_REFRAC].apply_operation( - operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), - state_variables[MEAN_ISI_TICKS], - state_variables[TIME_TO_SPIKE_TICKS], - state_variables[TIME_SINCE_LAST_SPIKE], - state_variables[RATE_AT_LAST_SETTING], - parameters[RATE_UPDATE_THRESHOLD] - ] - - @overrides(AbstractNeuronModel.update_values) - def update_values(self, values, parameters, state_variables): - - # Read the data - (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - _v_reset, _tau_refrac, - mean_isi_ticks, time_to_spike_ticks, time_since_last_spike, - rate_at_last_setting, _rate_update_threshold -# _seed1, _seed2, _seed3, _seed4, _ticks_per_second - ) = values - - # Copy the changed data only - state_variables[V] = v - state_variables[COUNT_REFRAC] = count_refrac - state_variables[MEAN_ISI_TICKS] = mean_isi_ticks - state_variables[TIME_TO_SPIKE_TICKS] = time_to_spike_ticks - state_variables[TIME_SINCE_LAST_SPIKE] = time_since_last_spike - state_variables[RATE_AT_LAST_SETTING] = rate_at_last_setting - - # Global params - @inject_items({"machine_time_step": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_global_values, - additional_arguments={'machine_time_step'}) - def get_global_values(self, machine_time_step): - vals = [ - 1, # seed 1 - 2, # seed 2 - 3, # seed 3 - 4, # seed 4 - MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second - 0.0, # set to 0, as will be set in first timestep of model anyway - 0.0, # set to 0, as will be set in first timestep of model anyway - ] - -# target_data = [] -# -# for i in range(1024): -# target_data.append( -# # 4 -# 5 + 2 * numpy.sin(2 * i * 2* numpy.pi / 1024) \ -# + 5 * numpy.sin((4 * i * 2* numpy.pi / 1024)) -# ) - vals.extend(self._prob_command) - vals.extend(self._rate_on) - vals.extend(self._rate_off) - vals.extend(self._mean_0) - vals.extend(self._mean_1) - 
vals.extend(self._cross_entropy) - vals.extend(self._poisson_key) - vals.extend(self._poisson_pop_size) - return vals - - @property - def prob_command(self): - return self._prob_command - - @prob_command.setter - def prob_command(self, prob_command): - self._prob_command = prob_command - - @property - def rate_on(self): - return self._rate_on - - @rate_on.setter - def rate_on(self, rate_on): - self._rate_on = rate_on - - @property - def rate_off(self): - return self._rate_off - - @rate_on.setter - def rate_on(self, rate_off): - self._rate_off = rate_off - - @property - def v_init(self): - return self._v - - @v_init.setter - def v_init(self, v_init): - self._v = v_init - - @property - def v_rest(self): - return self._v_rest - - @v_rest.setter - def v_rest(self, v_rest): - self._v_rest = v_rest - - @property - def tau_m(self): - return self._tau_m - - @tau_m.setter - def tau_m(self, tau_m): - self._tau_m = tau_m - - @property - def cm(self): - return self._cm - - @cm.setter - def cm(self, cm): - self._cm = cm - - @property - def i_offset(self): - return self._i_offset - - @i_offset.setter - def i_offset(self, i_offset): - self._i_offset = i_offset - - @property - def v_reset(self): - return self._v_reset - - @v_reset.setter - def v_reset(self, v_reset): - self._v_reset = v_reset - - @property - def tau_refrac(self): - return self._tau_refrac - - @tau_refrac.setter - def tau_refrac(self, tau_refrac): - self._tau_refrac = tau_refrac - - @property - def mean_isi_ticks(self): - return self._mean_isi_ticks - - @mean_isi_ticks.setter - def mean_isi_ticks(self, new_mean_isi_ticks): - self._mean_isi_ticks = new_mean_isi_ticks - - @property - def time_to_spike_ticks(self): - return self._time_to_spike_ticks - - @mean_isi_ticks.setter - def time_to_spike_ticks(self, new_time_to_spike_ticks): - self._time_to_spike_ticks = new_time_to_spike_ticks \ No newline at end of file From 8143e7cc32d139a6f1fe4cebb6fa7d39c5c9cb98 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 7 Jun 
2023 13:07:22 +0100 Subject: [PATCH 106/123] Update is_same_as in eprop stdp --- .../timing_dependence_eprop.py | 26 +------------------ .../synapse_dynamics/synapse_dynamics_stdp.py | 4 +-- 2 files changed, 3 insertions(+), 27 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py index e80a9c4e8ff..2b348f8b4bb 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py @@ -23,11 +23,6 @@ logger = logging.getLogger(__name__) -# LOOKUP_TAU_PLUS_SIZE = 256 -# LOOKUP_TAU_PLUS_SHIFT = 0 -# LOOKUP_TAU_MINUS_SIZE = 256 -# LOOKUP_TAU_MINUS_SHIFT = 0 - class TimingDependenceEprop(AbstractTimingDependence): __slots__ = [ @@ -42,10 +37,6 @@ def __init__(self, A_plus=0.01, A_minus=0.01): self.__synapse_structure = SynapseStructureWeightOnly() -# # provenance data -# self.__tau_plus_last_entry = None -# self.__tau_minus_last_entry = None - @property def A_plus(self): return self.__a_plus @@ -66,8 +57,7 @@ def A_minus(self, new_value): def is_same_as(self, timing_dependence): if not isinstance(timing_dependence, TimingDependenceEprop): return False - return (self.__tau_plus == timing_dependence.tau_plus and - self.__tau_minus == timing_dependence.tau_minus) + return True @property def vertex_executable_suffix(self): @@ -75,7 +65,6 @@ def vertex_executable_suffix(self): @property def pre_trace_n_bytes(self): - # Pair rule requires no pre-synaptic trace when only the nearest # Neighbours are considered and, a single 16-bit R1 trace return 2 @@ -94,19 +83,6 @@ def write_parameters( # There are currently no parameters to write for this rule pass - # Check timestep is valid -# if machine_time_step != 1000: -# raise NotImplementedError( -# "STDP LUT generation currently only supports 1ms 
timesteps") - -# # Write lookup tables -# self.__tau_plus_last_entry = plasticity_helpers.write_exp_lut( -# spec, self.__tau_plus, LOOKUP_TAU_PLUS_SIZE, -# LOOKUP_TAU_PLUS_SHIFT) -# self.__tau_minus_last_entry = plasticity_helpers.write_exp_lut( -# spec, self.__tau_minus, LOOKUP_TAU_MINUS_SIZE, -# LOOKUP_TAU_MINUS_SHIFT) - @property def synaptic_structure(self): return self.__synapse_structure diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 4fe214cff3d..f4e3b420121 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -228,8 +228,8 @@ def is_same_as(self, synapse_dynamics): if not isinstance(synapse_dynamics, SynapseDynamicsSTDP): return False return ( - # self.__timing_dependence.is_same_as( - # synapse_dynamics.timing_dependence) and + self.__timing_dependence.is_same_as( + synapse_dynamics.timing_dependence) and self.__weight_dependence.is_same_as( synapse_dynamics.weight_dependence) and (self.__dendritic_delay_fraction == From 2ce486a1dccbd9d0b9c674f3b8ffa283c8a39af7 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 10:44:43 +0100 Subject: [PATCH 107/123] Flake8 and general tidying up --- spynnaker/pyNN/extra_models/__init__.py | 2 +- .../connectors/abstract_connector.py | 8 +- .../neuron/abstract_population_vertex.py | 5 +- .../models/neuron/builds/eprop_adaptive.py | 27 +- .../neuron/builds/left_right_readout.py | 27 +- .../models/neuron/builds/sinusoid_readout.py | 17 +- .../models/neuron/neuron_models/__init__.py | 2 +- .../neuron_model_eprop_adaptive.py | 205 ++-------------- .../neuron_model_left_right_readout.py | 231 +++--------------- .../neuron_model_sinusoid_readout.py | 140 ++--------- .../timing_dependence_eprop.py | 36 +-- .../weight_dependence_eprop_reg.py | 24 +- .../neuron/population_machine_vertex.py | 9 +- 
.../synapse_dynamics/synapse_dynamics_stdp.py | 2 +- .../synapse_type_eprop_adaptive.py | 128 ++-------- .../threshold_type_adaptive.py | 74 ++---- .../threshold_types/threshold_type_none.py | 31 +-- 17 files changed, 207 insertions(+), 761 deletions(-) diff --git a/spynnaker/pyNN/extra_models/__init__.py b/spynnaker/pyNN/extra_models/__init__.py index faf7bc87153..61ef344fb6b 100644 --- a/spynnaker/pyNN/extra_models/__init__.py +++ b/spynnaker/pyNN/extra_models/__init__.py @@ -55,7 +55,7 @@ 'RecurrentRule', 'Vogels2011Rule', # eprop plastic stuff 'TimingDependenceEprop', - 'WeightDependenceEprop', + 'WeightDependenceEpropReg', # Variable rate Poisson 'SpikeSourcePoissonVariable'] diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 5c32db72084..a3bba01fbf6 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -99,8 +99,7 @@ def set_projection_information(self, synapse_info): :param SynapseInformation synapse_info: the synapse info """ self._rng = (self._rng or NumpyRNG()) - # TODO: this was set to zero but I'm unconvinced that it needs to be - self.__min_delay = 0 # SpynnakerDataView.get_simulation_time_step_ms() + self.__min_delay = SpynnakerDataView.get_simulation_time_step_ms() def _get_delay_minimum(self, delays, n_connections, synapse_info): """ @@ -466,14 +465,15 @@ def _generate_weights( if self.__safe: if not weights.size: warn_once(logger, "No connection in " + str(self)) - # TODO: I think this is allowed for local_only so investigate what's going on there + # TODO: I think this is allowed for local_only so investigate what's + # going on there # elif numpy.amin(weights) < 0 < numpy.amax(weights): # raise SpynnakerException( # "Weights must be either all positive or all negative in " # f"projection 
{synapse_info.pre_population.label}->" # f"{synapse_info.post_population.label}") # return numpy.abs(weights) - return weights # numpy.abs(weights) + return weights # numpy.abs(weights) def _clip_delays(self, delays): """ diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 1974fba3620..d71814990d6 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1051,8 +1051,9 @@ def get_ring_buffer_shifts(self): w + 1 if (2 ** w) <= a else w for w, a in zip(max_weight_powers, max_weights)) - # EPROP (from synaptic_manager) - # fix weight shift so we can scale eligibility trace calculations accordingly. + # TODO: check this EPROP (from synaptic_manager) + # fix weight shift so we can scale eligibility trace calculations + # accordingly. max_weight_powers = (2 #if w >= 1 else w for w in max_weight_powers) diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index aece63d8fd0..a4c9179cbd4 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -1,17 +1,16 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard from spynnaker.pyNN.models.defaults import default_initial_values @@ -22,6 +21,7 @@ from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeNone + class EPropAdaptive(AbstractPyNNNeuronModelStandard): """ Adaptive threshold neuron with eprop support """ @@ -38,12 +38,12 @@ def __init__( tau_m=20.0, cm=1.0, v_rest=0, v_reset=0, tau_refrac=5.0, i_offset=0.0, v=0.0, psi=0.0, - #synapse type params + # synapse type params # tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, # Regularisation params - target_rate=10.0, tau_err=1000.0, # fits with 1 ms timestep + target_rate=10.0, tau_err=1000.0, # fits with 1 ms timestep # Threshold parameters B=10.0, small_b=0.0, small_b_0=10.0, tau_a=500.0, beta=1.8, @@ -73,7 +73,6 @@ def __init__( ) synapse_type = SynapseTypeEPropAdaptive( - # tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) input_type = InputTypeCurrent() diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py 
b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index 1f8f5ec5384..6950ef5ef20 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -1,6 +1,21 @@ +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard from spynnaker.pyNN.models.defaults import default_initial_values -from spynnaker.pyNN.models.neuron.neuron_models import (NeuronModelLeftRightReadout) +from spynnaker.pyNN.models.neuron.neuron_models import ( + NeuronModelLeftRightReadout) from spynnaker.pyNN.models.neuron.synapse_types import ( SynapseTypeEPropAdaptive) from spynnaker.pyNN.models.neuron.input_types import InputTypeCurrent @@ -19,8 +34,6 @@ def __init__( v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - # tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, -# mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, rate_on=40, rate_off=0, poisson_pop_size=10, @@ -29,18 +42,12 @@ def __init__( # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeftRightReadout( - v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, #target_data, + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, # Learning signal params - # l, - - # mean_isi_ticks, time_to_spike_ticks, - # 
rate_update_threshold, - # prob_command, rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, number_of_cues) synapse_type = SynapseTypeEPropAdaptive( - # tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) input_type = InputTypeCurrent() diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index ac720ce4d42..a0d7d7ef7d0 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -1,3 +1,17 @@ +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard from spynnaker.pyNN.models.defaults import default_initial_values from spynnaker.pyNN.models.neuron.neuron_models import ( @@ -20,8 +34,6 @@ def __init__( v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - # tau_syn_E=5.0, tau_syn_E2=5.0, tau_syn_I=5.0, tau_syn_I2=5.0, -# mean_isi_ticks=65000, time_to_spike_ticks=65000, rate_update_threshold=0.25, target_data=[], @@ -35,7 +47,6 @@ def __init__( l, w_fb, eta, update_ready) synapse_type = SynapseTypeEPropAdaptive( - # tau_syn_E, tau_syn_E2, tau_syn_I, tau_syn_I2, isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) input_type = InputTypeCurrent() diff --git a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py index 59dcf402f19..e5c6112f5da 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/__init__.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/__init__.py @@ -23,4 +23,4 @@ __all__ = ["NeuronModelIzh", "NeuronModelLeakyIntegrateAndFire", "NeuronModelEPropAdaptive", "NeuronModelLeakyIntegrateAndFireSinusoidReadout", - "NeuronModelLeftRightReadout"] + "NeuronModelLeftRightReadout"] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 209fc9785b8..c0e71de67b3 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -1,19 +1,17 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType from spynnaker.pyNN.models.neuron.implementations import ( @@ -67,26 +65,6 @@ E_BAR = "e_bar" UPDATE_READY = "update_ready" -# UNITS = { -# V: 'mV', -# V_REST: 'mV', -# TAU_M: 'ms', -# CM: 'nF', -# I_OFFSET: 'nA', -# V_RESET: 'mV', -# TAU_REFRAC: 'ms', -# Z: 'N/A', -# A: 'N/A', -# PSI: 'N/A', -# BIG_B: "mV", -# SMALL_B: "mV", -# SMALL_B_0: "mV", -# TAU_A: "ms", -# BETA: "N/A", -# # ADPT: "mV" -# SCALAR: "dimensionless" -# } - class NeuronModelEPropAdaptive(AbstractStandardNeuronComponent): __slots__ = [ @@ -100,27 +78,21 @@ class NeuronModelEPropAdaptive(AbstractStandardNeuronComponent): "__z", "__a", "__psi", - # threshold params "__B", "__small_b", "__small_b_0", "__tau_a", "__beta", - # "_adpt" "__scalar", - # reg params "__target_rate", "__tau_err", - # learning signal "__l", "__w_fb", - # "__eta", "__window_size", "__number_of_cues", - # eprop "global" "__core_pop_rate", "__core_target_rate", @@ -155,7 +127,7 @@ def __init__( # eprop "global" eta ): - # TODO: 
documentation + # TODO: documentation of parameters struct_neuron_vals = [ # neuron params @@ -168,33 +140,32 @@ def __init__( (DataType.S1615, TAU_REFRAC), (DataType.INT32, REFRACT_TIMER), (DataType.S1615, TIMESTEP), - (DataType.S1615, Z), # Z - (DataType.S1615, A), # A + (DataType.S1615, Z), + (DataType.S1615, A), (DataType.S1615, PSI), # psi, pseuo_derivative (DataType.S1615, BIG_B), (DataType.S1615, SMALL_B), (DataType.S1615, SMALL_B_0), (DataType.UINT32, TAU_A), (DataType.S1615, BETA), - # (DataType.UINT32, ADPT), (DataType.S1615, SCALAR), (DataType.S1615, L), (DataType.S1615, W_FB), (DataType.UINT32, WINDOW_SIZE), (DataType.UINT32, NUMBER_OF_CUES), - (DataType.S1615, CORE_POP_RATE), # core_pop_rate - (DataType.S1615, TARGET_RATE), # core_target_rate - (DataType.S1615, TAU_ERR), # rate_exp_TC - (DataType.S1615, ETA)] # eta (learning rate) + (DataType.S1615, CORE_POP_RATE), # core_pop_rate + (DataType.S1615, TARGET_RATE), # core_target_rate + (DataType.S1615, TAU_ERR), # rate_exp_TC + (DataType.S1615, ETA)] # eta (learning rate) for n in range(SYNAPSES_PER_NEURON): struct_neuron_vals.extend( # eprop syn state - [(DataType.S1615, DELTA_W+str(n)), # delta_w - (DataType.S1615, Z_BAR_OLD+str(n)), # z_bar_old - (DataType.S1615, Z_BAR+str(n)), # z_bar - (DataType.S1615, EP_A+str(n)), # ep_a - (DataType.S1615, E_BAR+str(n)), # e_bar + [(DataType.S1615, DELTA_W+str(n)), # delta_w + (DataType.S1615, Z_BAR_OLD+str(n)), # z_bar_old + (DataType.S1615, Z_BAR+str(n)), # z_bar + (DataType.S1615, EP_A+str(n)), # ep_a + (DataType.S1615, E_BAR+str(n)), # e_bar (DataType.INT32, UPDATE_READY+str(n))]) super().__init__( @@ -213,7 +184,7 @@ def __init__( self.__i_offset = i_offset self.__v_reset = v_reset self.__tau_refrac = tau_refrac - self.__psi = psi # calculate from v and v_thresh (but will probably end up zero) + self.__psi = psi # threshold params self.__B = B @@ -235,16 +206,8 @@ def __init__( self.__number_of_cues = number_of_cues # eprop "global" - # self.__core_pop_rate = 
target_rate - # self.__core_target_rate = target_rate - # self.__rate_exp_TC = numpy.exp(-float(ts/1000)/self.__tau_err) self.__eta = eta - # @overrides(AbstractStandardNeuronComponent.get_n_cpu_cycles) - # def get_n_cpu_cycles(self, n_neurons): - # # A bit of a guess - # return 100 * n_neurons - @overrides(AbstractStandardNeuronComponent.add_parameters) def add_parameters(self, parameters): parameters[V_REST] = self.__v_rest @@ -265,8 +228,6 @@ def add_parameters(self, parameters): # Are these parameters or variables? parameters[CORE_POP_RATE] = 0.0 # initialise here, not in C - # parameters[CORE_TARGET_RATE] = self.__core_target_rate - # parameters[RATE_EXP_TC] = self.__rate_exp_TC parameters[TARGET_RATE] = self.__target_rate parameters[TAU_ERR] = self.__tau_err parameters[ETA] = self.__eta @@ -292,124 +253,6 @@ def add_state_variables(self, state_variables): state_variables[E_BAR+str(n)] = 0 state_variables[UPDATE_READY+str(n)] = self.__window_size - # @overrides(AbstractNeuronModel.get_units) - # def get_units(self, variable): - # return UNITS[variable] - # - # @overrides(AbstractNeuronModel.has_variable) - # def has_variable(self, variable): - # return variable in UNITS - # - # @inject_items({"ts": "MachineTimeStep"}) - # @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) - # def get_values(self, parameters, state_variables, vertex_slice, ts): - # - # ulfract = pow(2, 32) - # - # # Add the rest of the data - # values = [state_variables[V], - # parameters[V_REST], - # parameters[TAU_M] / parameters[CM], - # parameters[TAU_M].apply_operation( - # operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - # parameters[I_OFFSET], - # state_variables[COUNT_REFRAC], - # parameters[V_RESET], - # parameters[TAU_REFRAC].apply_operation( - # operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), - # state_variables[Z], - # state_variables[A], - # state_variables[PSI], - # - # state_variables[BIG_B], - # state_variables[SMALL_B], - # 
parameters[SMALL_B_0], - # parameters[TAU_A].apply_operation( - # operation=lambda - # x: numpy.exp(float(-ts) / (1000.0 * x)) * ulfract), - # parameters[BETA], - # parameters[TAU_A].apply_operation( - # operation=lambda x: (1 - numpy.exp( - # float(-ts) / (1000.0 * x))) * ulfract), # ADPT - # parameters[SCALAR], - # - # state_variables[L], - # parameters[W_FB], - # parameters[WINDOW_SIZE], - # parameters[NUMBER_OF_CUES] - # ] - # - # # create synaptic state - init all state to zero - # for n in range(SYNAPSES_PER_NEURON): - # eprop_syn_init = [state_variables[DELTA_W+str(n)], - # state_variables[Z_BAR_OLD+str(n)], - # state_variables[Z_BAR+str(n)], - # state_variables[EP_A+str(n)], - # state_variables[E_BAR+str(n)], - # state_variables[UPDATE_READY+str(n)] - # ] - # # extend to appropriate fan-in - # values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) - # - # return values - # - # @inject_items({"ts": "MachineTimeStep"}) - # @overrides(AbstractNeuronModel.get_global_values, - # additional_arguments={'ts'}) - # def get_global_values(self, ts): - # glob_vals = [ - # self.__target_rate, # initialise global pop rate to the target - # self.__target_rate, # set target rate - # numpy.exp(-float(ts/1000)/self.__tau_err), - # self.__eta # learning rate - # ] - # - # print("\n ") - # print(glob_vals) - # print(ts) - # print("\n") - # return glob_vals - # - # - # @overrides(AbstractNeuronModel.update_values) - # def update_values(self, values, parameters, state_variables): - # - # delta_w = [0] * SYNAPSES_PER_NEURON - # z_bar_old = [0] * SYNAPSES_PER_NEURON - # z_bar = [0] * SYNAPSES_PER_NEURON - # ep_a = [0] * SYNAPSES_PER_NEURON - # e_bar = [0] * SYNAPSES_PER_NEURON - # update_ready = [0] * SYNAPSES_PER_NEURON - # # Read the data - # (v, _v_rest, _r_membrane, _exp_tc, _i_offset, count_refrac, - # _v_reset, _tau_refrac, psi, - # big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar, - # l, __w_fb, window_size, number_of_cues, delta_w, z_bar_old, z_bar, ep_a, 
e_bar, update_ready) = values - # - # # Not sure this will work with the new array of synapse!!! - # # (Note that this function is only called if you do e.g. run(), set(), - # # run() i.e. it's not used by auto-pause and resume, so this is - # # untested) - # # todo check alignment on this - # - # # Copy the changed data only - # state_variables[V] = v - # state_variables[COUNT_REFRAC] = count_refrac - # state_variables[PSI] = psi - # - # state_variables[BIG_B] = big_b - # state_variables[SMALL_B] = small_b - # - # state_variables[L] = l - # - # for n in range(SYNAPSES_PER_NEURON): - # state_variables[DELTA_W+str(n)] = delta_w[n] - # state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] - # state_variables[Z_BAR+str(n)] = z_bar[n] - # state_variables[EP_A+str(n)] = ep_a[n] - # state_variables[E_BAR+str(n)] = e_bar[n] - # state_variables[UPDATE_READY] = update_ready[n] - @property def v_init(self): return self.__v_init @@ -530,4 +373,4 @@ def number_of_cues(self): def window_size(self, new_value): self.__number_of_cues = new_value - # setter for "globals" like target rate etc. ? + # TODO: setters for "globals" like target rate, eta, etc. diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 642a4fca0f8..0a24a34b088 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -1,4 +1,17 @@ -import numpy +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from spinn_utilities.overrides import overrides from data_specification.enums import DataType from spynnaker.pyNN.models.neuron.implementations import ( @@ -7,10 +20,8 @@ from spynnaker.pyNN.data import SpynnakerDataView # constants -SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state +SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state (?) -# MICROSECONDS_PER_SECOND = 1000000.0 -# MICROSECONDS_PER_MILLISECOND = 1000.0 V = "v" V_REST = "v_rest" TAU_M = "tau_m" @@ -27,8 +38,6 @@ W_FB = "feedback_weight" WINDOW_SIZE = "window_size" -MEAN_ISI_TICKS = "mean_isi_ticks" -TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" SEED1 = "seed1" SEED2 = "seed2" SEED3 = "seed3" @@ -36,8 +45,6 @@ TICKS_PER_SECOND = "ticks_per_second" TIME_SINCE_LAST_SPIKE = "time_since_last_spike" RATE_AT_LAST_SETTING = "rate_at_last_setting" -# RATE_UPDATE_THRESHOLD = "rate_update_threshold" -# PROB_COMMAND = "prob_command" MEAN_L = "mean_l" MEAN_R = "mean_r" RATE_ON = "rate_on" @@ -51,20 +58,8 @@ DELTA_W = "delta_w" Z_BAR_OLD = "z_bar_old" Z_BAR = "z_bar" -# EP_A = "ep_a" -# E_BAR = "e_bar" UPDATE_READY = "update_ready" -# UNITS = { -# V: 'mV', -# V_REST: 'mV', -# TAU_M: 'ms', -# CM: 'nF', -# I_OFFSET: 'nA', -# V_RESET: 'mV', -# TAU_REFRAC: 'ms' -# } - class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): __slots__ = [ @@ -75,12 +70,6 @@ class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): "__i_offset", "__v_reset", "__tau_refrac", - # "_mean_isi_ticks", - # "_time_to_spike_ticks", - # "_time_since_last_spike", - # "_rate_at_last_setting", 
- # "_rate_update_threshold", - # "_prob_command", "__rate_off", "__rate_on", "__l", @@ -98,52 +87,31 @@ class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): def __init__( self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, - # mean_isi_ticks, time_to_spike_ticks, - # rate_update_threshold, - # prob_command, rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, number_of_cues): - # global_data_types = [ - # DataType.UINT32, # MARS KISS seed - # DataType.UINT32, # MARS KISS seed - # DataType.UINT32, # MARS KISS seed - # DataType.UINT32, # MARS KISS seed - # DataType.S1615, # ticks_per_second - # DataType.S1615, # global mem pot - # DataType.S1615, # global mem pot 2 - # DataType.S1615, # rate on - # DataType.S1615, # rate off - # DataType.S1615, # mean left activation - # DataType.S1615, # mean right activation - # DataType.S1615, # cross entropy - # DataType.UINT32, # poisson key - # DataType.UINT32, # poisson pop size - # DataType.S1615, # eta - # DataType.UINT32, # number of cues - # ] struct_neuron_vals = [ (DataType.S1615, V), # v (DataType.S1615, V_REST), # v_rest - (DataType.S1615, CM), # r_membrane (= tau_m / cm) - (DataType.S1615, TAU_M), # exp_tc (= e^(-ts / tau_m)) - (DataType.S1615, I_OFFSET), # i_offset - (DataType.S1615, V_RESET), # v_reset - (DataType.S1615, TAU_REFRAC), # tau_refrac - (DataType.INT32, REFRACT_TIMER), # count_refrac - (DataType.S1615, TIMESTEP), # timestep + (DataType.S1615, CM), # r_membrane (= tau_m / cm) + (DataType.S1615, TAU_M), # exp_tc (= e^(-ts / tau_m)) + (DataType.S1615, I_OFFSET), # i_offset + (DataType.S1615, V_RESET), # v_reset + (DataType.S1615, TAU_REFRAC), # tau_refrac + (DataType.INT32, REFRACT_TIMER), # count_refrac + (DataType.S1615, TIMESTEP), # timestep # Learning signal - (DataType.S1615, L), # L - (DataType.S1615, W_FB), # w_fb - (DataType.UINT32, WINDOW_SIZE), # window_size + (DataType.S1615, L), # L + (DataType.S1615, W_FB), # w_fb + (DataType.UINT32, WINDOW_SIZE), # 
window_size # former global parameters (DataType.UINT32, SEED1), (DataType.UINT32, SEED2), (DataType.UINT32, SEED3), - (DataType.UINT32, SEED4), # + (DataType.UINT32, SEED4), (DataType.S1615, TICKS_PER_SECOND), - (DataType.S1615, TIME_SINCE_LAST_SPIKE), # apparently set to 0.0 on first timestep - (DataType.S1615, RATE_AT_LAST_SETTING), # apparently set to 0.0 on first timestep + (DataType.S1615, TIME_SINCE_LAST_SPIKE), + (DataType.S1615, RATE_AT_LAST_SETTING), (DataType.S1615, RATE_ON), (DataType.S1615, RATE_OFF), (DataType.S1615, MEAN_L), @@ -155,7 +123,6 @@ def __init__( (DataType.UINT32, NUMBER_OF_CUES) ] - # Synapse states - always initialise to zero for n in range(SYNAPSES_PER_NEURON): struct_neuron_vals.extend( @@ -179,18 +146,12 @@ def __init__( self.__i_offset = i_offset self.__v_reset = v_reset self.__tau_refrac = tau_refrac - # self._mean_isi_ticks = mean_isi_ticks - # self._time_to_spike_ticks = time_to_spike_ticks - # self._time_since_last_spike = 0 # this should be initialised to zero - we know nothing about before the simulation - # self._rate_at_last_setting = 0 - # self._rate_update_threshold = 2 - # self._prob_command = prob_command self.__rate_off = rate_off self.__rate_on = rate_on self.__mean_l = 0.0 self.__mean_r = 0.0 self.__cross_entropy = 0.0 - self.__poisson_key = 0 # None TODO: work out how to pass this in + self.__poisson_key = 0 # None TODO: work out how to pass this in self.__poisson_pop_size = poisson_pop_size self.__l = l self.__w_fb = w_fb @@ -203,11 +164,6 @@ def __init__( def set_poisson_key(self, p_key): self.__poisson_key = p_key - # @overrides(AbstractNeuronModel.get_n_cpu_cycles) - # def get_n_cpu_cycles(self, n_neurons): - # # A bit of a guess - # return 100 * n_neurons - @overrides(AbstractStandardNeuronComponent.add_parameters) def add_parameters(self, parameters): parameters[V_REST] = self.__v_rest @@ -222,26 +178,23 @@ def add_parameters(self, parameters): parameters[W_FB] = self.__w_fb parameters[WINDOW_SIZE] = 
self.__window_size # These should probably have defaults earlier than this + # TODO: some confusion as to which values were actually being used? parameters[SEED1] = 1 # 10065 parameters[SEED2] = 2 # 232 parameters[SEED3] = 3 # 3634 parameters[SEED4] = 4 # 4877 - # parameters[PROB_COMMAND] = self._prob_command parameters[RATE_ON] = self.__rate_on parameters[RATE_OFF] = self.__rate_off - parameters[TICKS_PER_SECOND] = 0.0 # set in get_valuers() + parameters[TICKS_PER_SECOND] = 0.0 parameters[TIME_SINCE_LAST_SPIKE] = 0.0 parameters[RATE_AT_LAST_SETTING] = 0.0 parameters[POISSON_POP_SIZE] = self.__poisson_pop_size - # parameters[RATE_UPDATE_THRESHOLD] = self._rate_update_threshold -# parameters[TARGET_DATA] = self._target_data parameters[MEAN_L] = self.__mean_l parameters[MEAN_R] = self.__mean_r parameters[CROSS_ENTROPY] = self.__cross_entropy - parameters[POISSON_KEY] = self.__poisson_key # not sure this is needed here - print("in add_parameters, poisson key is ", self.__poisson_key) + parameters[POISSON_KEY] = self.__poisson_key parameters[POISSON_POP_SIZE] = self.__poisson_pop_size parameters[ETA] = self.__eta parameters[NUMBER_OF_CUES] = self.__number_of_cues @@ -253,117 +206,13 @@ def add_state_variables(self, state_variables): #learning params state_variables[L] = self.__l - # state_variables[MEAN_ISI_TICKS] = self._mean_isi_ticks - # state_variables[TIME_TO_SPIKE_TICKS] = self._time_to_spike_ticks # could eventually be set from membrane potential - # state_variables[TIME_SINCE_LAST_SPIKE] = self._time_since_last_spike - # state_variables[RATE_AT_LAST_SETTING] = self._rate_at_last_setting for n in range(SYNAPSES_PER_NEURON): state_variables[DELTA_W+str(n)] = 0 state_variables[Z_BAR_OLD+str(n)] = 0 state_variables[Z_BAR+str(n)] = 0 - # state_variables[EP_A+str(n)] = 0 - # state_variables[E_BAR+str(n)] = 0 state_variables[UPDATE_READY+str(n)] = self.__window_size - - # @overrides(AbstractNeuronModel.get_units) - # def get_units(self, variable): - # return 
UNITS[variable] - # - # @overrides(AbstractNeuronModel.has_variable) - # def has_variable(self, variable): - # return variable in UNITS - # - # @inject_items({"ts": "MachineTimeStep"}) - # @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) - # def get_values(self, parameters, state_variables, vertex_slice, ts): - # - # # Add the rest of the data - # values = [state_variables[V], - # parameters[V_REST], - # parameters[TAU_M] / parameters[CM], - # parameters[TAU_M].apply_operation( - # operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - # parameters[I_OFFSET], state_variables[COUNT_REFRAC], - # parameters[V_RESET], - # parameters[TAU_REFRAC].apply_operation( - # operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), - # - # state_variables[L], - # parameters[W_FB], - # parameters[WINDOW_SIZE] - # ] - # - # # create synaptic state - init all state to zero - # eprop_syn_init = [0, # delta w - # 0, # z_bar_inp - # 0,#, # z_bar - # # 0, # el_a - # # 0] # e_bar - # self._window_size, #int(numpy.random.rand()*1024) # update_ready - # ] - # # extend to appropriate fan-in - # values.extend(eprop_syn_init * SYNAPSES_PER_NEURON) - # - # return values - # - # @overrides(AbstractNeuronModel.update_values) - # def update_values(self, values, parameters, state_variables): - # - # # Read the data - # (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, - # _v_reset, _tau_refrac, - # _l, _w_fb, window_size, delta_w, z_bar_old, z_bar, update_ready) = values # Not sure this will work with the new array of synapse!!! 
- # # todo check alignment on this - # - # # Copy the changed data only - # state_variables[V] = _v - # - # state_variables[L] = _l - # - # for n in range(SYNAPSES_PER_NEURON): - # state_variables[DELTA_W+str(n)] = delta_w[n] - # state_variables[Z_BAR_OLD+str(n)] = z_bar_old[n] - # state_variables[Z_BAR+str(n)] = z_bar[n] - # # state_variables[EP_A+str(n)] = ep_a[n] - # # state_variables[E_BAR+str(n)] = e_bar[n] - # state_variables[UPDATE_READY] = update_ready[n] - # - # # Global params - # @inject_items({"machine_time_step": "MachineTimeStep"}) - # @overrides(AbstractNeuronModel.get_global_values, - # additional_arguments={'machine_time_step'}) - # def get_global_values(self, machine_time_step): - # vals = [ - # 1, # seed 1 - # 2, # seed 2 - # 3, # seed 3 - # 4, # seed 4 - # MICROSECONDS_PER_SECOND / float(machine_time_step), # ticks_per_second - # 0.0, # set to 0, as will be set in first timestep of model anyway - # 0.0, # set to 0, as will be set in first timestep of model anyway - # self._rate_on, - # self._rate_off, - # self._mean_l, - # self._mean_r, - # self._cross_entropy, - # self._poisson_key, - # self._poisson_pop_size, - # self._eta, - # self._number_of_cues - # ] - # - # return vals - - # @property - # def prob_command(self): - # return self.__prob_command - - # @prob_command.setter - # def prob_command(self, prob_command): - # self._prob_command = prob_command - @property def rate_on(self): return self.__rate_on @@ -452,18 +301,4 @@ def window_size(self): def window_size(self, new_value): self.__window_size = new_value - # @property - # def mean_isi_ticks(self): - # return self._mean_isi_ticks - # - # @mean_isi_ticks.setter - # def mean_isi_ticks(self, new_mean_isi_ticks): - # self._mean_isi_ticks = new_mean_isi_ticks - # - # @property - # def time_to_spike_ticks(self): - # return self._time_to_spike_ticks - # - # @mean_isi_ticks.setter - # def time_to_spike_ticks(self, new_time_to_spike_ticks): - # self._time_to_spike_ticks = new_time_to_spike_ticks 
+ # TODO: Check setters for all parameters \ No newline at end of file diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 0db78bff775..43639a1d24f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -1,4 +1,17 @@ -import numpy +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from spinn_utilities.overrides import overrides from data_specification.enums import DataType from spynnaker.pyNN.models.neuron.implementations import ( @@ -9,9 +22,6 @@ # constants SYNAPSES_PER_NEURON = 250 # around 415 with only 3 in syn_state -# MICROSECONDS_PER_SECOND = 1000000.0 -# MICROSECONDS_PER_MILLISECOND = 1000.0 - V = "v" V_REST = "v_rest" TAU_M = "tau_m" @@ -19,19 +29,8 @@ I_OFFSET = "i_offset" V_RESET = "v_reset" TAU_REFRAC = "tau_refrac" -# COUNT_REFRAC = "count_refrac" TIMESTEP = "timestep" REFRACT_TIMER = "refract_timer" -# MEAN_ISI_TICKS = "mean_isi_ticks" -# TIME_TO_SPIKE_TICKS = "time_to_spike_ticks" -# SEED1 = "seed1" -# SEED2 = "seed2" -# SEED3 = "seed3" -# SEED4 = "seed4" -# TICKS_PER_SECOND = "ticks_per_second" -# TIME_SINCE_LAST_SPIKE = "time_since_last_spike" -# RATE_AT_LAST_SETTING = "rate_at_last_setting" -# RATE_UPDATE_THRESHOLD = "rate_update_threshold" TARGET_DATA = "target_data" # Learning signal L = "learning_signal" @@ -44,18 +43,9 @@ Z_BAR = "z_bar" UPDATE_READY = "update_ready" -# UNITS = { -# V: 'mV', -# V_REST: 'mV', -# TAU_M: 'ms', -# CM: 'nF', -# I_OFFSET: 'nA', -# V_RESET: 'mV', -# TAU_REFRAC: 'ms' -# } - -class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractStandardNeuronComponent): +class NeuronModelLeakyIntegrateAndFireSinusoidReadout( + AbstractStandardNeuronComponent): __slots__ = [ "__v_init", "__v_rest", @@ -64,7 +54,6 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractStandardNeuronComp "__i_offset", "__v_reset", "__tau_refrac", - "__target_data", # learning signal @@ -76,21 +65,16 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout(AbstractStandardNeuronComp def __init__( self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, -# mean_isi_ticks, time_to_spike_ticks, rate_update_threshold, - target_data, - l, - w_fb, - eta, - update_ready): + target_data, l, w_fb, eta, update_ready): struct_neuron_vals = [ (DataType.S1615, V), # v - (DataType.S1615, V_REST), # v_rest - 
(DataType.S1615, CM), # r_membrane (= tau_m / cm) + (DataType.S1615, V_REST), # v_rest + (DataType.S1615, CM), # r_membrane (= tau_m / cm) (DataType.S1615, TAU_M), # exp_tc (= e^(-ts / tau_m)) (DataType.S1615, I_OFFSET), # i_offset (DataType.S1615, V_RESET), # v_reset - (DataType.S1615, TAU_REFRAC), # tau_refrac + (DataType.S1615, TAU_REFRAC), # tau_refrac (DataType.INT32, REFRACT_TIMER), # count_refrac (DataType.S1615, TIMESTEP), # timestep # Learning signal @@ -108,10 +92,10 @@ def __init__( for n in range(SYNAPSES_PER_NEURON): struct_neuron_vals.extend( # eprop_syn_state - [(DataType.S1615, DELTA_W+str(n)), # delta_w - (DataType.S1615, Z_BAR_OLD+str(n)), # z_bar_old - (DataType.S1615, Z_BAR+str(n)), # z_bar - (DataType.UINT32, UPDATE_READY+str(n))]) # update_ready + [(DataType.S1615, DELTA_W+str(n)), # delta_w + (DataType.S1615, Z_BAR_OLD+str(n)), # z_bar_old + (DataType.S1615, Z_BAR+str(n)), # z_bar + (DataType.UINT32, UPDATE_READY+str(n))]) # update_ready super().__init__( [Struct(struct_neuron_vals)], @@ -128,7 +112,6 @@ def __init__( self.__i_offset = i_offset self.__v_reset = v_reset self.__tau_refrac = tau_refrac - self.__target_data = target_data # learning signal @@ -139,11 +122,6 @@ def __init__( self.__update_ready = update_ready - # @overrides(AbstractNeuronModel.get_n_cpu_cycles) - # def get_n_cpu_cycles(self, n_neurons): - # # A bit of a guess - # return 100 * n_neurons - @overrides(AbstractStandardNeuronComponent.add_parameters) def add_parameters(self, parameters): parameters[V_REST] = self.__v_rest @@ -178,74 +156,6 @@ def add_state_variables(self, state_variables): state_variables[Z_BAR+str(n)] = 0 state_variables[UPDATE_READY+str(n)] = self.__update_ready - - # @overrides(AbstractNeuronModel.get_units) - # def get_units(self, variable): - # return UNITS[variable] - # - # @overrides(AbstractNeuronModel.has_variable) - # def has_variable(self, variable): - # return variable in UNITS - # - # @inject_items({"ts": "MachineTimeStep"}) - # 
@overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) - # def get_values(self, parameters, state_variables, vertex_slice, ts): - # - # # Add the rest of the data - # values = [state_variables[V], - # parameters[V_REST], - # parameters[TAU_M] / parameters[CM], - # parameters[TAU_M].apply_operation( - # operation=lambda x: numpy.exp(float(-ts) / (1000.0 * x))), - # parameters[I_OFFSET], state_variables[COUNT_REFRAC], - # parameters[V_RESET], - # parameters[TAU_REFRAC].apply_operation( - # operation=lambda x: int(numpy.ceil(x / (ts / 1000.0)))), - # - # state_variables[L], - # parameters[W_FB] - # ] - # - # # create synaptic state - init all state to zero - # for n in range(SYNAPSES_PER_NEURON): - # eprop_syn_init = [0, # delta w - # 0, # z_bar_inp - # 0,#, # z_bar - # # 0, # el_a - # # 0] # e_bar - # self._update_ready, #int(numpy.random.rand()*1024) # update_ready - # ] - # # extend to appropriate fan-in - # values.extend(eprop_syn_init) # * SYNAPSES_PER_NEURON) - # - # return values - # - # @overrides(AbstractNeuronModel.update_values) - # def update_values(self, values, parameters, state_variables): - # - # # Read the data - # (_v, _v_rest, _r_membrane, _exp_tc, _i_offset, _count_refrac, - # _v_reset, _tau_refrac, - # _l, _w_fb) = values # Not sure this will work with the new array of synapse!!! 
- # # todo check alignment on this - # - # # Copy the changed data only - # state_variables[V] = _v - # - # state_variables[L] = _l - # - # - # # Global params - # @inject_items({"machine_time_step": "MachineTimeStep"}) - # @overrides(AbstractNeuronModel.get_global_values, - # additional_arguments={'machine_time_step'}) - # def get_global_values(self, machine_time_step): - # vals = [] - # - # vals.extend(self._target_data) - # vals.extend([self._eta]) - # return vals - @property def target_data(self): return self.__target_data @@ -317,3 +227,5 @@ def w_fb(self): @w_fb.setter def w_fb(self, w_fb): self.__w_fb = w_fb + + # TODO: check whether any further parmeters need setters \ No newline at end of file diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py index 2b348f8b4bb..8c57674e571 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_eprop.py @@ -1,22 +1,19 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
+# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import logging from spinn_utilities.overrides import overrides -# from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( -# plasticity_helpers) from .abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) @@ -65,8 +62,6 @@ def vertex_executable_suffix(self): @property def pre_trace_n_bytes(self): - # Pair rule requires no pre-synaptic trace when only the nearest - # Neighbours are considered and, a single 16-bit R1 trace return 2 @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes) @@ -87,17 +82,6 @@ def write_parameters( def synaptic_structure(self): return self.__synapse_structure -# @overrides(AbstractTimingDependence.get_provenance_data) -# def get_provenance_data(self, pre_population_label, post_population_label): -# prov_data = list() -# prov_data.append(plasticity_helpers.get_lut_provenance( -# pre_population_label, post_population_label, "SpikePairRule", -# "tau_plus_last_entry", "tau_plus", self.__tau_plus_last_entry)) -# prov_data.append(plasticity_helpers.get_lut_provenance( -# pre_population_label, post_population_label, "SpikePairRule", -# "tau_minus_last_entry", "tau_minus", self.__tau_minus_last_entry)) -# return prov_data - @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return [] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py index 6f066ab193f..dcc63141a63 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py @@ -1,17 +1,16 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from spinn_utilities.overrides import overrides from data_specification.enums import DataType @@ -19,7 +18,6 @@ from .abstract_weight_dependence import AbstractWeightDependence -# TODO: this doesn't have Aplus and Aminus? 
class WeightDependenceEpropReg( AbstractHasAPlusAMinus, AbstractWeightDependence): __slots__ = [ @@ -102,4 +100,4 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): - return ['w_min', 'w_max'] #, 'A_plus', 'A_minus'] + return ['w_min', 'w_max'] diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 9e63721d716..6162f09f4bc 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from enum import Enum import os import ctypes @@ -419,10 +420,12 @@ def get_n_keys_for_partition(self, partition_id): n_keys = 0 # Seems like overkill, there should be a simpler way to do this partitions = ( - SpynnakerDataView.get_outgoing_edge_partitions_starting_at_vertex( - self._app_vertex)) + SpynnakerDataView.\ + get_outgoing_edge_partitions_starting_at_vertex( + self._app_vertex)) for partition in partitions: - if partition.identifier == constants.LIVE_POISSON_CONTROL_PARTITION_ID: + if partition.identifier == ( + constants.LIVE_POISSON_CONTROL_PARTITION_ID): for edge in partition.edges: n_keys += edge.post_vertex.n_atoms return n_keys * n_colours diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index f4e3b420121..b250f2aa9cf 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -355,7 +355,7 @@ def get_plastic_synaptic_data( fixed_plastic = ( ((dendritic_delays.astype("uint16") & 0xFF) << # master code commented out - # 
(connections["delay"].astype("uint16") << + # (connections["delay"].astype("uint16") << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype("uint16") << n_neuron_id_bits) | diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index 5e72e6724cf..2635972a3fd 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -1,37 +1,31 @@ - # import numpy +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from spinn_utilities.overrides import overrides -# from pacman.executor.injection_decorator import inject_items from data_specification.enums import DataType from .abstract_synapse_type import AbstractSynapseType from spynnaker.pyNN.utilities.struct import Struct from spynnaker.pyNN.data import SpynnakerDataView -# TAU_SYN_E = 'tau_syn_E' -# TAU_SYN_E2 = 'tau_syn_E2' -# TAU_SYN_I = 'tau_syn_I' -# TAU_SYN_I2 = 'tau_syn_I2' ISYN_EXC = "isyn_exc" ISYN_EXC2 = "isyn_exc2" ISYN_INH = "isyn_inh" ISYN_INH2 = "isyn_inh2" -# TIMESTEP_MS = "timestep_ms" - -# UNITS = { -# TAU_SYN_E: "mV", -# TAU_SYN_E2: "mV", -# TAU_SYN_I: 'mV', -# ISYN_EXC: "", -# ISYN_EXC2: "", -# ISYN_INH: "", -# } class SynapseTypeEPropAdaptive(AbstractSynapseType): __slots__ = [ - # "_tau_syn_E", - # "_tau_syn_E2", - # "_tau_syn_I", - # "_tau_syn_I2", "_isyn_exc", "_isyn_exc2", "_isyn_inh", @@ -42,33 +36,18 @@ def __init__( ): super().__init__( [Struct([ - # (DataType.S1615, TAU_SYN_E), (DataType.S1615, ISYN_EXC), - # (DataType.S1615, TAU_SYN_E2), (DataType.S1615, ISYN_EXC2), - # (DataType.S1615, TAU_SYN_I), (DataType.S1615, ISYN_INH), - # (DataType.S1615, TAU_SYN_I2), (DataType.S1615, ISYN_INH2)])], {ISYN_EXC: "", ISYN_EXC2: "", ISYN_INH: "", ISYN_INH2: ""}) - # {TAU_SYN_E: "mV", TAU_SYN_E2: "mV", TAU_SYN_I: "mV", - # TAU_SYN_I2: "mV", ISYN_EXC: "", ISYN_EXC2: "", - # ISYN_INH: "", ISYN_INH2: ""}) - - # self._tau_syn_E = tau_syn_E - # self._tau_syn_E2 = tau_syn_E2 - # self._tau_syn_I = tau_syn_I - # self._tau_syn_I2 = tau_syn_I2 + self._isyn_exc = isyn_exc self._isyn_exc2 = isyn_exc2 self._isyn_inh = isyn_inh self._isyn_inh2 = isyn_inh2 - # @overrides(AbstractSynapseType.get_n_cpu_cycles) - # def get_n_cpu_cycles(self, n_neurons): - # return 100 * n_neurons - @overrides(AbstractSynapseType.add_parameters) def add_parameters(self, parameters): pass @@ -80,48 +59,6 @@ def add_state_variables(self, state_variables): state_variables[ISYN_INH] = self._isyn_inh state_variables[ISYN_INH2] = self._isyn_inh2 - 
# @overrides(AbstractSynapseType.get_units) - # def get_units(self, variable): - # return UNITS[variable] - # - # @overrides(AbstractSynapseType.has_variable) - # def has_variable(self, variable): - # return variable in UNITS - - # @inject_items({"ts": "MachineTimeStep"}) - # @overrides(AbstractSynapseType.get_values, additional_arguments={'ts'}) - # def get_values(self, parameters, state_variables, vertex_slice, ts): - # - # tsfloat = float(ts) / 1000.0 - # decay = lambda x: numpy.exp(-tsfloat / x) # noqa E731 - # init = lambda x: (x / tsfloat) * (1.0 - numpy.exp(-tsfloat / x)) # noqa E731 - # - # # Add the rest of the data - # return [parameters[TAU_SYN_E].apply_operation(decay), - # parameters[TAU_SYN_E].apply_operation(init), - # parameters[TAU_SYN_E2].apply_operation(decay), - # parameters[TAU_SYN_E2].apply_operation(init), - # parameters[TAU_SYN_I].apply_operation(decay), - # parameters[TAU_SYN_I].apply_operation(init), - # parameters[TAU_SYN_I2].apply_operation(decay), - # parameters[TAU_SYN_I2].apply_operation(init), - # state_variables[ISYN_EXC], - # state_variables[ISYN_EXC2], - # state_variables[ISYN_INH], - # state_variables[ISYN_INH2]] - # - # @overrides(AbstractSynapseType.update_values) - # def update_values(self, values, parameters, state_variables): - # - # # Read the data - # (_decay_E, _init_E, _decay_E2, _init_E2, _decay_I, _init_I, _decay_I2, _init_I2, - # isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) = values - # - # state_variables[ISYN_EXC] = isyn_exc - # state_variables[ISYN_EXC2] = isyn_exc2 - # state_variables[ISYN_INH] = isyn_inh - # state_variables[ISYN_INH2] = isyn_inh2 - @overrides(AbstractSynapseType.get_n_synapse_types) def get_n_synapse_types(self): return 4 @@ -140,39 +77,8 @@ def get_synapse_id_by_target(self, target): @overrides(AbstractSynapseType.get_synapse_targets) def get_synapse_targets(self): - return "input_connections", "recurrent_connections", "learning_signal", "unused" - - # @property - # def tau_syn_E(self): - # return 
self._tau_syn_E - # - # @tau_syn_E.setter - # def tau_syn_E(self, tau_syn_E): - # self._tau_syn_E = tau_syn_E - # - # @property - # def tau_syn_E2(self): - # return self._tau_syn_E2 - # - # @tau_syn_E2.setter - # def tau_syn_E2(self, tau_syn_E2): - # self._tau_syn_E2 = tau_syn_E2 - # - # @property - # def tau_syn_I(self): - # return self._tau_syn_I - # - # @tau_syn_I.setter - # def tau_syn_I(self, tau_syn_I): - # self._tau_syn_I = tau_syn_I - # - # @property - # def tau_syn_I2(self): - # return self._tau_syn_I2 - # - # @tau_syn_I2.setter - # def tau_syn_I2(self, tau_syn_I2): - # self._tau_syn_I2 = tau_syn_I2 + return ["input_connections", "recurrent_connections", + "learning_signal", "unused"] @property def isyn_exc(self): diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index a3912b28191..5f9149cce78 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -1,25 +1,23 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester +# Copyright (c) 2017 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
+# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from spinn_utilities.overrides import overrides from data_specification.enums import DataType from .abstract_threshold_type import AbstractThresholdType from spynnaker.pyNN.utilities.struct import Struct -# import numpy - BIG_B = "big_b" SMALL_B = "small_b" SMALL_B_0 = "small_b_0" @@ -30,7 +28,7 @@ class ThresholdTypeAdaptive(AbstractThresholdType): - """ A threshold that is a static value + """ A threshold that is adaptive """ __slots__ = [ "__B", @@ -38,7 +36,6 @@ class ThresholdTypeAdaptive(AbstractThresholdType): "__small_b_0", "__tau_a", "__beta", -# "_adpt" "__scalar" ] @@ -52,8 +49,8 @@ def __init__(self, B, small_b, small_b_0, tau_a, beta): (DataType.S1615, BETA), (DataType.UINT32, SCALAR), (DataType.S1615, TIMESTEP_MS)])], - {BIG_B: "mV", SMALL_B: "mV", SMALL_B_0: "mV", TAU_A: "ms", BETA: "", - SCALAR: ""}) + {BIG_B: "mV", SMALL_B: "mV", SMALL_B_0: "mV", TAU_A: "ms", + BETA: "", SCALAR: ""}) self._B = B self._small_b = small_b self._small_b_0 = small_b_0 @@ -61,11 +58,6 @@ def __init__(self, B, small_b, small_b_0, tau_a, beta): self._beta = beta self._scalar = 1000 - # @overrides(AbstractThresholdType.get_n_cpu_cycles) - # def get_n_cpu_cycles(self, n_neurons): - # # Just a comparison, but 2 just in case! 
- # return 2 * n_neurons - @overrides(AbstractThresholdType.add_parameters) def add_parameters(self, parameters): parameters[SMALL_B_0] = self._small_b_0 @@ -78,44 +70,6 @@ def add_state_variables(self, state_variables): state_variables[BIG_B] = self._B state_variables[SMALL_B] = self._small_b - # @overrides(AbstractThresholdType.get_units) - # def get_units(self, variable): - # return UNITS[variable] - # - # @overrides(AbstractThresholdType.has_variable) - # def has_variable(self, variable): - # return variable in UNITS - # - # @inject_items({"ts": "MachineTimeStep"}) - # @overrides(AbstractThresholdType.get_values, additional_arguments={'ts'}) - # def get_values(self, parameters, state_variables, vertex_slice, ts): - # - # ulfract = pow(2, 32) - # - # # Add the rest of the data - # return [ - # state_variables[BIG_B], - # state_variables[SMALL_B], - # parameters[SMALL_B_0], - # parameters[TAU_A].apply_operation( - # operation=lambda - # x: numpy.exp(float(-ts) / (1000.0 * x)) * ulfract), - # parameters[BETA], - # parameters[TAU_A].apply_operation( - # operation=lambda x: (1 - numpy.exp( - # float(-ts) / (1000.0 * x))) * ulfract), # ADPT - # parameters[SCALAR] - # ] - # - # @overrides(AbstractThresholdType.update_values) - # def update_values(self, values, parameters, state_variables): - # - # # Read the data - # (big_b, small_b, _small_b_0, _e_to_dt_on_tau_a, _beta, adpt, scalar) = values - # - # state_variables[BIG_B] = big_b - # state_variables[SMALL_B] = small_b - @property def B(self): return self._B diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py index 1a2bf235e64..668eb7f1c62 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_none.py @@ -1,20 +1,18 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# 
This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from spinn_utilities.overrides import overrides -# from data_specification.enums import DataType from .abstract_threshold_type import AbstractThresholdType from spynnaker.pyNN.utilities.struct import Struct @@ -26,13 +24,8 @@ class ThresholdTypeNone(AbstractThresholdType): def __init__(self): super().__init__( - [Struct([])], # no params - {}) # no units - - # @overrides(AbstractThresholdType.get_n_cpu_cycles) - # def get_n_cpu_cycles(self, n_neurons): - # # Just a comparison, but 2 just in case! 
- # return 2 * n_neurons + [Struct([])], # no params + {}) # no units @overrides(AbstractThresholdType.add_parameters) def add_parameters(self, parameters): From 7e18daf52d0334014d8a29c3eabbc34a54dc7ff1 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 11:16:05 +0100 Subject: [PATCH 108/123] Flake8 / rename ambiguous parameter --- .../connectors/abstract_connector.py | 4 +-- .../neuron/abstract_population_vertex.py | 2 +- .../models/neuron/builds/eprop_adaptive.py | 23 +++++------- .../neuron/builds/left_right_readout.py | 14 ++++---- .../models/neuron/builds/sinusoid_readout.py | 11 +++--- .../neuron_model_eprop_adaptive.py | 35 +++++-------------- .../neuron_model_left_right_readout.py | 16 ++++----- .../neuron_model_sinusoid_readout.py | 6 ++-- .../neuron/population_machine_vertex.py | 2 +- .../models/neuron/synapse_types/__init__.py | 1 - .../synapse_type_eprop_adaptive.py | 3 +- .../threshold_type_adaptive.py | 3 +- 12 files changed, 44 insertions(+), 76 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index a3bba01fbf6..846f1d80443 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -465,8 +465,8 @@ def _generate_weights( if self.__safe: if not weights.size: warn_once(logger, "No connection in " + str(self)) - # TODO: I think this is allowed for local_only so investigate what's - # going on there + # TODO: I think this is allowed for local_only so investigate + # what's going on there # elif numpy.amin(weights) < 0 < numpy.amax(weights): # raise SpynnakerException( # "Weights must be either all positive or all negative in " diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index d71814990d6..462c51872aa 100644 --- 
a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1054,7 +1054,7 @@ def get_ring_buffer_shifts(self): # TODO: check this EPROP (from synaptic_manager) # fix weight shift so we can scale eligibility trace calculations # accordingly. - max_weight_powers = (2 #if w >= 1 else w + max_weight_powers = (2 # if w >= 1 else w for w in max_weight_powers) # Not entirely sure why but eprop also had signed weights stuff... diff --git a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py index a4c9179cbd4..e6fe33acaa8 100644 --- a/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/builds/eprop_adaptive.py @@ -27,11 +27,9 @@ class EPropAdaptive(AbstractPyNNNeuronModelStandard): """ @default_initial_values({"v", "isyn_exc", "isyn_exc2", - "isyn_inh", "isyn_inh2", - "psi", "target_rate", "tau_err", - "B", "small_b", - "l", "w_fb", "window_size", "number_of_cues", - "eta"}) + "isyn_inh", "isyn_inh2", "psi", "target_rate", + "tau_err", "B", "small_b", "learning_signal", + "w_fb", "window_size", "number_of_cues", "eta"}) def __init__( self, # neuron model params @@ -49,28 +47,23 @@ def __init__( B=10.0, small_b=0.0, small_b_0=10.0, tau_a=500.0, beta=1.8, # Learning signal and weight update constants - l=0.0, w_fb=0.5, window_size=13000, number_of_cues=0, + learning_signal=0.0, w_fb=0.5, window_size=13000, + number_of_cues=0, # eprop "global" eta=1.0 - ): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelEPropAdaptive( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi, # threshold params - B, - small_b, - small_b_0, - tau_a, - beta, + B, small_b, small_b_0, tau_a, beta, # Regularisation params target_rate, tau_err, # Learning signal params - l, w_fb, window_size, number_of_cues, + learning_signal, w_fb, window_size, number_of_cues, # eprop global - eta - ) + eta) synapse_type 
= SynapseTypeEPropAdaptive( isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) diff --git a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py index 6950ef5ef20..418c0f63739 100644 --- a/spynnaker/pyNN/models/neuron/builds/left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/left_right_readout.py @@ -27,25 +27,23 @@ class LeftRightReadout(AbstractPyNNNeuronModelStandard): """ @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - "isyn_inh2", - "l", "w_fb", "eta", "number_of_cues"}) + "isyn_inh2", "learning_signal", "w_fb", "eta", + "number_of_cues"}) def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, - isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - rate_on=40, rate_off=0, poisson_pop_size=10, - # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0, window_size=13000, number_of_cues=1): + learning_signal=0, w_fb=0.5, eta=1.0, window_size=13000, + number_of_cues=1): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeftRightReadout( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, # Learning signal params - rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, - number_of_cues) + rate_on, rate_off, poisson_pop_size, learning_signal, w_fb, eta, + window_size, number_of_cues) synapse_type = SynapseTypeEPropAdaptive( isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index a0d7d7ef7d0..6e9bd367a97 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -27,24 +27,21 @@ class SinusoidReadout(AbstractPyNNNeuronModelStandard): """ @default_initial_values({"v", "isyn_exc", "isyn_exc2", "isyn_inh", - "isyn_inh2", "target_data", - "l", "w_fb", "eta", 
"update_ready"}) + "isyn_inh2", "target_data", "learning_signal", + "w_fb", "eta", "update_ready"}) def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, - isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - target_data=[], - # Learning signal and weight update constants - l=0, w_fb=0.5, eta=1.0, update_ready=1024): + learning_signal=0, w_fb=0.5, eta=1.0, update_ready=1024): # pylint: disable=too-many-arguments, too-many-locals neuron_model = NeuronModelLeakyIntegrateAndFireSinusoidReadout( v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, target_data, # Learning signal params - l, w_fb, eta, update_ready) + learning_signal, w_fb, eta, update_ready) synapse_type = SynapseTypeEPropAdaptive( isyn_exc, isyn_exc2, isyn_inh, isyn_inh2) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index c0e71de67b3..f77fb56f217 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -29,7 +29,6 @@ I_OFFSET = "i_offset" V_RESET = "v_reset" TAU_REFRAC = "tau_refrac" -# COUNT_REFRAC = "count_refrac" TIMESTEP = "timestep" REFRACT_TIMER = "refract_timer" @@ -37,14 +36,15 @@ PSI = "psi" Z = "z" A = "a" + # Threshold BIG_B = "big_b" SMALL_B = "small_b" SMALL_B_0 = "small_b_0" TAU_A = "tau_a" BETA = "beta" -# ADPT = "adpt" SCALAR = "scalar" + # Learning signal L = "learning_signal" W_FB = "feedback_weight" @@ -89,7 +89,7 @@ class NeuronModelEPropAdaptive(AbstractStandardNeuronComponent): "__target_rate", "__tau_err", # learning signal - "__l", + "__learning_signal", "__w_fb", "__window_size", "__number_of_cues", @@ -101,28 +101,12 @@ class NeuronModelEPropAdaptive(AbstractStandardNeuronComponent): ] def __init__( - self, - v_init, - v_rest, - tau_m, - cm, - i_offset, - v_reset, - tau_refrac, + 
self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, psi, # threshold params - B, - small_b, - small_b_0, - tau_a, - beta, + B, small_b, small_b_0, tau_a, beta, # regularisation params - target_rate, - tau_err, - l, - w_fb, - # eta, - window_size, + target_rate, tau_err, learning_signal, w_fb, window_size, number_of_cues, # eprop "global" eta @@ -142,7 +126,7 @@ def __init__( (DataType.S1615, TIMESTEP), (DataType.S1615, Z), (DataType.S1615, A), - (DataType.S1615, PSI), # psi, pseuo_derivative + (DataType.S1615, PSI), # psi, pseuo_derivative (DataType.S1615, BIG_B), (DataType.S1615, SMALL_B), (DataType.S1615, SMALL_B_0), @@ -199,9 +183,8 @@ def __init__( self.__tau_err = tau_err # learning signal - self.__l = l + self.__learning_signal = learning_signal self.__w_fb = w_fb - # self.__eta = eta self.__window_size = window_size self.__number_of_cues = number_of_cues @@ -243,7 +226,7 @@ def add_state_variables(self, state_variables): state_variables[BIG_B] = self.__B state_variables[SMALL_B] = self.__small_b - state_variables[L] = self.__l + state_variables[L] = self.__learning_signal for n in range(SYNAPSES_PER_NEURON): state_variables[DELTA_W+str(n)] = 0 diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 0a24a34b088..a9e2dd76ea3 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -72,7 +72,7 @@ class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): "__tau_refrac", "__rate_off", "__rate_on", - "__l", + "__learning_signal", "__w_fb", "__window_size", "__eta", @@ -87,8 +87,8 @@ class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): def __init__( self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, - rate_on, rate_off, poisson_pop_size, l, w_fb, eta, window_size, - 
number_of_cues): + rate_on, rate_off, poisson_pop_size, learning_signal, w_fb, eta, + window_size, number_of_cues): struct_neuron_vals = [ (DataType.S1615, V), # v @@ -101,7 +101,7 @@ def __init__( (DataType.INT32, REFRACT_TIMER), # count_refrac (DataType.S1615, TIMESTEP), # timestep # Learning signal - (DataType.S1615, L), # L + (DataType.S1615, L), # Learning_signal (DataType.S1615, W_FB), # w_fb (DataType.UINT32, WINDOW_SIZE), # window_size # former global parameters @@ -153,7 +153,7 @@ def __init__( self.__cross_entropy = 0.0 self.__poisson_key = 0 # None TODO: work out how to pass this in self.__poisson_pop_size = poisson_pop_size - self.__l = l + self.__learning_signal = learning_signal self.__w_fb = w_fb self.__eta = eta self.__window_size = window_size @@ -174,7 +174,7 @@ def add_parameters(self, parameters): parameters[TAU_REFRAC] = self.__tau_refrac parameters[TIMESTEP] = SpynnakerDataView.get_simulation_time_step_ms() - parameters[L] = self.__l + parameters[L] = self.__learning_signal parameters[W_FB] = self.__w_fb parameters[WINDOW_SIZE] = self.__window_size # These should probably have defaults earlier than this @@ -204,8 +204,8 @@ def add_state_variables(self, state_variables): state_variables[V] = self.__v_init state_variables[REFRACT_TIMER] = 0 - #learning params - state_variables[L] = self.__l + # learning params + state_variables[L] = self.__learning_signal for n in range(SYNAPSES_PER_NEURON): state_variables[DELTA_W+str(n)] = 0 diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index 43639a1d24f..ff291982b0f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -57,7 +57,7 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout( "__target_data", # learning signal - "__l", + "__learning_signal", "__w_fb", "__eta", 
"__update_ready" @@ -115,7 +115,7 @@ def __init__( self.__target_data = target_data # learning signal - self.__l = l + self.__learning_signal = learning_signal self.__w_fb = w_fb self.__eta = eta @@ -148,7 +148,7 @@ def add_state_variables(self, state_variables): state_variables[REFRACT_TIMER] = 0 # learning params - state_variables[L] = self.__l + state_variables[L] = self.__learning_signal for n in range(SYNAPSES_PER_NEURON): state_variables[DELTA_W+str(n)] = 0 diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 6162f09f4bc..f243f0a8f9c 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -420,7 +420,7 @@ def get_n_keys_for_partition(self, partition_id): n_keys = 0 # Seems like overkill, there should be a simpler way to do this partitions = ( - SpynnakerDataView.\ + SpynnakerDataView. get_outgoing_edge_partitions_starting_at_vertex( self._app_vertex)) for partition in partitions: diff --git a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py index 5369eda07b7..45e59343dd8 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py @@ -17,7 +17,6 @@ from .synapse_type_exponential import SynapseTypeExponential from .synapse_type_delta import SynapseTypeDelta from .synapse_type_alpha import SynapseTypeAlpha -from .synapse_type_eprop_adaptive import SynapseTypeEPropAdaptive from .synapse_type_semd import SynapseTypeSEMD from .synapse_type_eprop_adaptive import SynapseTypeEPropAdaptive diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index 2635972a3fd..e47a4359adb 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py +++ 
b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -16,7 +16,6 @@ from data_specification.enums import DataType from .abstract_synapse_type import AbstractSynapseType from spynnaker.pyNN.utilities.struct import Struct -from spynnaker.pyNN.data import SpynnakerDataView ISYN_EXC = "isyn_exc" ISYN_EXC2 = "isyn_exc2" @@ -110,4 +109,4 @@ def isyn_exc2(self): @isyn_exc2.setter def isyn_exc2(self, isyn_exc2): - self._isyn_exc2 = isyn_exc2 \ No newline at end of file + self._isyn_exc2 = isyn_exc2 diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index 5f9149cce78..675df6f01f1 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -47,8 +47,7 @@ def __init__(self, B, small_b, small_b_0, tau_a, beta): (DataType.S1615, SMALL_B_0), (DataType.UINT32, TAU_A), (DataType.S1615, BETA), - (DataType.UINT32, SCALAR), - (DataType.S1615, TIMESTEP_MS)])], + (DataType.UINT32, SCALAR)])], {BIG_B: "mV", SMALL_B: "mV", SMALL_B_0: "mV", TAU_A: "ms", BETA: "", SCALAR: ""}) self._B = B From 3910aff7cab4c6edd646cc9ef70bb5e636f56b40 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 11:25:34 +0100 Subject: [PATCH 109/123] more flake8 --- .../neuron/neuron_models/neuron_model_left_right_readout.py | 2 +- .../neuron/neuron_models/neuron_model_sinusoid_readout.py | 5 ++--- spynnaker/pyNN/models/neuron/synapse_types/__init__.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index a9e2dd76ea3..a0cf680cb76 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ 
b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -301,4 +301,4 @@ def window_size(self): def window_size(self, new_value): self.__window_size = new_value - # TODO: Check setters for all parameters \ No newline at end of file + # TODO: Check setters for all parameters diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index ff291982b0f..f948d63f7fc 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -65,7 +65,7 @@ class NeuronModelLeakyIntegrateAndFireSinusoidReadout( def __init__( self, v_init, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac, - target_data, l, w_fb, eta, update_ready): + target_data, learning_signal, w_fb, eta, update_ready): struct_neuron_vals = [ (DataType.S1615, V), # v @@ -141,7 +141,6 @@ def add_parameters(self, parameters): parameters[ETA] = self.__eta - @overrides(AbstractStandardNeuronComponent.add_state_variables) def add_state_variables(self, state_variables): state_variables[V] = self.__v_init @@ -228,4 +227,4 @@ def w_fb(self): def w_fb(self, w_fb): self.__w_fb = w_fb - # TODO: check whether any further parmeters need setters \ No newline at end of file + # TODO: check whether any further parmeters need setters diff --git a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py index 45e59343dd8..29d2d0230cc 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/__init__.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/__init__.py @@ -22,4 +22,4 @@ __all__ = ["AbstractSynapseType", "SynapseTypeDualExponential", "SynapseTypeExponential", "SynapseTypeDelta", "SynapseTypeAlpha", - "SynapseTypeSEMD", "SynapseTypeEpropAdaptive"] + "SynapseTypeSEMD", "SynapseTypeEPropAdaptive"] From 
55e56a5c7d45a545c17f6d717707abc5daff477e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 11:36:06 +0100 Subject: [PATCH 110/123] pylint: match to slots properly --- .../threshold_type_adaptive.py | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index 675df6f01f1..ef7fdf15c0b 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -50,32 +50,32 @@ def __init__(self, B, small_b, small_b_0, tau_a, beta): (DataType.UINT32, SCALAR)])], {BIG_B: "mV", SMALL_B: "mV", SMALL_B_0: "mV", TAU_A: "ms", BETA: "", SCALAR: ""}) - self._B = B - self._small_b = small_b - self._small_b_0 = small_b_0 - self._tau_a = tau_a - self._beta = beta - self._scalar = 1000 + self.__B = B + self.__small_b = small_b + self.__small_b_0 = small_b_0 + self.__tau_a = tau_a + self.__beta = beta + self.__scalar = 1000 @overrides(AbstractThresholdType.add_parameters) def add_parameters(self, parameters): - parameters[SMALL_B_0] = self._small_b_0 - parameters[TAU_A] = self._tau_a - parameters[BETA] = self._beta - parameters[SCALAR] = self._scalar + parameters[SMALL_B_0] = self.__small_b_0 + parameters[TAU_A] = self.__tau_a + parameters[BETA] = self.__beta + parameters[SCALAR] = self.__scalar @overrides(AbstractThresholdType.add_state_variables) def add_state_variables(self, state_variables): - state_variables[BIG_B] = self._B - state_variables[SMALL_B] = self._small_b + state_variables[BIG_B] = self.__B + state_variables[SMALL_B] = self.__small_b @property def B(self): - return self._B + return self.__B @B.setter def B(self, new_value): - self._B = new_value + self.__B = new_value @property def small_b(self): @@ -83,28 +83,28 @@ def small_b(self): @small_b.setter def small_b(self, new_value): - 
self._small_b = new_value + self.__small_b = new_value @property def small_b_0(self): - return self._small_b_0 + return self.__small_b_0 @small_b_0.setter def small_b_0(self, new_value): - self._small_b_0 = new_value + self.__small_b_0 = new_value @property def tau_a(self): - return self._tau_a + return self.__tau_a @tau_a.setter def tau_a(self, new_value): - self._tau_a = new_value + self.__tau_a = new_value @property def beta(self): - return self._beta + return self.__beta @beta.setter def beta(self, new_value): - self._beta = new_value + self.__beta = new_value From 699148f1932060457d6c3e24134ae23f6e9b7fa2 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 11:41:30 +0100 Subject: [PATCH 111/123] missed one --- .../models/neuron/threshold_types/threshold_type_adaptive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index ef7fdf15c0b..acb9c1d39a4 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -79,7 +79,7 @@ def B(self, new_value): @property def small_b(self): - return self._small_b + return self.__small_b @small_b.setter def small_b(self, new_value): From f8bd3a3bbad691c76df3868794fd10a3d8060735 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 11:54:40 +0100 Subject: [PATCH 112/123] pylint unused variables, access --- spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py | 2 +- .../neuron/neuron_models/neuron_model_left_right_readout.py | 3 --- .../stdp/weight_dependence/weight_dependence_eprop_reg.py | 2 +- spynnaker/pyNN/models/neuron/population_machine_vertex.py | 2 ++ 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index 
6e9bd367a97..08efbb18ffe 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -33,7 +33,7 @@ def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - target_data=[], + target_data=[0], # Learning signal and weight update constants learning_signal=0, w_fb=0.5, eta=1.0, update_ready=1024): diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index a0cf680cb76..f6b422b82ed 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -81,7 +81,6 @@ class NeuronModelLeftRightReadout(AbstractStandardNeuronComponent): "__cross_entropy", "__poisson_key", "__poisson_pop_size", - "__n_keys_in_target", "__number_of_cues" ] @@ -159,8 +158,6 @@ def __init__( self.__window_size = window_size self.__number_of_cues = number_of_cues - self.__n_keys_in_target = poisson_pop_size * 4 - def set_poisson_key(self, p_key): self.__poisson_key = p_key diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py index dcc63141a63..4d4194bd8f7 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py @@ -77,7 +77,7 @@ def write_parameters( "Eprop_reg weight dependence only supports single terms") # Loop through each synapse type's weight scale - for w in synapse_weight_scales: + for _ in synapse_weight_scales: spec.write_value( data=self.__w_min * global_weight_scale, 
data_type=DataType.S1615) diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index f243f0a8f9c..16e2fea5ccb 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -303,10 +303,12 @@ def generate_data_specification(self, spec, placement): # Set the poisson key for eprop left-right routing_info = SpynnakerDataView.get_routing_infos() + # pylint: disable=protected-access if isinstance(self._app_vertex._pynn_model._model.neuron_model, NeuronModelLeftRightReadout): poisson_key = routing_info.get_first_key_from_pre_vertex( placement.vertex, constants.LIVE_POISSON_CONTROL_PARTITION_ID) + # pylint: disable=protected-access self._app_vertex._pynn_model._model.neuron_model.set_poisson_key( poisson_key) From 40e235a275b7af8ac0ab96f63481e756538a6753 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 12:08:45 +0100 Subject: [PATCH 113/123] Update licenses and rename file to match other naming --- .../makefiles/neuron/eprop_adaptive/Makefile | 23 ++++++++--------- .../Makefile | 23 ++++++++--------- .../Makefile | 21 ++++++++++------ .../neuron/sinusoid_readout/Makefile | 25 +++++++++++-------- .../Makefile | 21 ++++++++++------ .../neuron/store_recall_readout/Makefile | 13 ---------- .../neuron_impl_eprop_adaptive.h | 24 ++++++++---------- .../neuron_impl_left_right_readout.h | 18 ++++++++++++- .../neuron_impl_sinusoid_readout.h | 18 ++++++++++++- .../models/neuron_model_eprop_adaptive_impl.h | 19 +++++++------- .../neuron_model_left_right_readout_impl.h | 16 ++++++++++++ .../neuron_model_sinusoid_readout_impl.h | 16 ++++++++++++ .../synapse_dynamics_eprop_adaptive_impl.c | 21 ++++++++-------- ...synapse_dynamics_left_right_readout_impl.c | 21 ++++++++-------- .../synapse_dynamics_sinusoid_readout_impl.c | 21 ++++++++-------- .../timing_dependence/timing_eprop_impl.c | 21 ++++++++-------- 
.../timing_dependence/timing_eprop_impl.h | 21 ++++++++-------- .../weight_dependence/weight_eprop_reg_impl.c | 21 ++++++++-------- .../weight_dependence/weight_eprop_reg_impl.h | 21 ++++++++-------- ....h => synapse_types_eprop_adaptive_impl.h} | 24 +++++++++++------- .../threshold_types/threshold_type_adaptive.h | 16 ++++++++++++ 21 files changed, 251 insertions(+), 173 deletions(-) delete mode 100644 neural_modelling/makefiles/neuron/store_recall_readout/Makefile rename neural_modelling/src/neuron/synapse_types/{synapse_type_eprop_adaptive.h => synapse_types_eprop_adaptive_impl.h} (90%) diff --git a/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile b/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile index 0e5a2b16908..6b0485e786a 100644 --- a/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile +++ b/neural_modelling/makefiles/neuron/eprop_adaptive/Makefile @@ -1,21 +1,20 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. APP = $(notdir $(CURDIR)) NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_eprop_adaptive.h SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c -include ../neural_build.mk \ No newline at end of file +include ../neural_build.mk diff --git a/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile index acb735ba52d..df5d8d7e0f7 100644 --- a/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile +++ b/neural_modelling/makefiles/neuron/eprop_adaptive_stdp_mad_eprop_reg/Makefile @@ -1,17 +1,16 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2019 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. APP = $(notdir $(CURDIR)) @@ -22,4 +21,4 @@ TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/tim WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h -include ../neural_build.mk \ No newline at end of file +include ../neural_build.mk diff --git a/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile index 5d69049c0aa..e4e26a7e930 100644 --- a/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile +++ b/neural_modelling/makefiles/neuron/left_right_readout_stdp_mad_eprop_reg/Makefile @@ -1,17 +1,24 @@ +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ APP = $(notdir $(CURDIR)) -#OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_left_right_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_left_right_readout.h - -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c - TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h include ../neural_build.mk - - diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile index fbd1c7cb43d..f001d2cdd86 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile +++ b/neural_modelling/makefiles/neuron/sinusoid_readout/Makefile @@ -1,17 +1,20 @@ +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ APP = $(notdir $(CURDIR)) -#OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h - SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c - -#TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c -#TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h -#WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c -#WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h include ../neural_build.mk - - diff --git a/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile b/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile index 8841fa7ca31..c17c28711be 100644 --- a/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile +++ b/neural_modelling/makefiles/neuron/sinusoid_readout_stdp_mad_eprop_reg/Makefile @@ -1,17 +1,24 @@ +# Copyright (c) 2019 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ APP = $(notdir $(CURDIR)) -#OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_sinusoid_readout_impl.c NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_sinusoid_readout.h - -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c -#SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c - TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h include ../neural_build.mk - - diff --git a/neural_modelling/makefiles/neuron/store_recall_readout/Makefile b/neural_modelling/makefiles/neuron/store_recall_readout/Makefile deleted file mode 100644 index 9863615e594..00000000000 --- a/neural_modelling/makefiles/neuron/store_recall_readout/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -APP = $(notdir $(CURDIR)) - -OTHER_SOURCES = $(NEURON_DIR)/neuron/models/neuron_model_store_recall_readout_impl.c -NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_store_recall_readout.h -SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c - -#TIMING_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.c -#TIMING_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/timing_dependence/timing_erbp_impl.h -#WEIGHT_DEPENDENCE = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.c -#WEIGHT_DEPENDENCE_H = $(NEURON_DIR)/neuron/plasticity/stdp/weight_dependence/weight_erbp_impl.h - - -include ../neural_build.mk \ No newline at end of file diff --git 
a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index e9f9cfe07bb..00a91e000bb 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ #ifndef _NEURON_IMPL_EPROP_ADAPTIVE_H_ @@ -21,16 +20,15 @@ #include "neuron_impl.h" // Includes for model parts used in this implementation -#include #include #include #include #include +#include #include // Further includes -//#include #include #include diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index 4198bf7f972..a783b85d2dc 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -1,14 +1,30 @@ +/* + * Copyright (c) 2019 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + #ifndef _NEURON_IMPL_LEFT_RIGHT_READOUT_H_ #define _NEURON_IMPL_LEFT_RIGHT_READOUT_H_ #include "neuron_impl.h" // Includes for model parts used in this implementation -#include #include #include #include #include +#include #include diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 6d20952d245..49e9be3a7e7 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -1,14 +1,30 @@ +/* + * Copyright (c) 2019 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + #ifndef _NEURON_IMPL_SINUSOID_READOUT_H_ #define _NEURON_IMPL_SINUSOID_READOUT_H_ #include "neuron_impl.h" // Includes for model parts used in this implementation -#include #include #include #include #include +#include #include diff --git a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h index fd6aa8e2ce1..7a9f8d34a7c 100644 --- a/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_eprop_adaptive_impl.h @@ -1,18 +1,17 @@ /* * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ #ifndef _NEURON_MODEL_EPROP_ADAPTIVE_IMPL_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h index f6f35970976..3409059d99d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_left_right_readout_impl.h @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2019 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #ifndef _NEURON_MODEL_LIF_CURR_LEFT_RIGHT_READOUT_IMPL_H_ #define _NEURON_MODEL_LIF_CURR_LEFT_RIGHT_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h index 0bc9506b753..9f06dd49b4d 100644 --- a/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_sinusoid_readout_impl.h @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2019 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #ifndef _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ #define _NEURON_MODEL_SINUSOID_READOUT_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c index 1711746e27f..75c598cea31 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_eprop_adaptive_impl.c @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ // Spinn_common includes diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c index daa1c4bdbf7..91447305117 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_left_right_readout_impl.c @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ // Spinn_common includes diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c index a243a4fd344..ff1edd97eb9 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_sinusoid_readout_impl.c @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ // Spinn_common includes diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c index cfe30b615f5..2fa2ff7abe1 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.c @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ #include "timing_eprop_impl.h" diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h index aa63cab92be..08866b5032d 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_eprop_impl.h @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ #ifndef _TIMING_EPROP_IMPL_H_ diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c index a74165befd5..38c13aa4cf4 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.c @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ #include "weight_eprop_reg_impl.h" diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h index 6ed3a9456ea..0a274926b75 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_eprop_reg_impl.h @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2019 The University of Manchester + * Copyright (c) 2019 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ #ifndef _WEIGHT_EPROPREG_ONE_TERM_IMPL_H_ diff --git a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h b/neural_modelling/src/neuron/synapse_types/synapse_types_eprop_adaptive_impl.h similarity index 90% rename from neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h rename to neural_modelling/src/neuron/synapse_types/synapse_types_eprop_adaptive_impl.h index 9429d0b8ecb..0588f045116 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_type_eprop_adaptive.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_eprop_adaptive_impl.h @@ -1,12 +1,18 @@ -/*! \file -* -* \brief implementation of synapse_types.h for a simple duel exponential decay -* to synapses. -* -* \details If we have combined excitatory_one/excitatory_two/inhibitory -* synapses it will be because both excitatory and inhibitory synaptic -* time-constants (and thus propogators) are identical. -*/ +/* + * Copyright (c) 2019 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef _SYNAPSE_TYPES_EPROP_ADPATIVE_IMPL_H_ #define _SYNAPSE_TYPES_EPROP_ADAPTIVE_IMPL_H_ diff --git a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h index 90fd8ab71e2..6da49c3542a 100644 --- a/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h +++ b/neural_modelling/src/neuron/threshold_types/threshold_type_adaptive.h @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2019 The University of Manchester + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + #ifndef _THRESHOLD_TYPE_ADAPTIVE_H_ #define _THRESHOLD_TYPE_ADAPTIVE_H_ From a1e1faceb21e2b74fc09cc0ff3dbd3246818a190 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 12:21:29 +0100 Subject: [PATCH 114/123] Put include files in the correct order --- .../src/neuron/implementations/neuron_impl_eprop_adaptive.h | 2 +- .../src/neuron/implementations/neuron_impl_left_right_readout.h | 2 +- .../src/neuron/implementations/neuron_impl_sinusoid_readout.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h index 00a91e000bb..2150b2744a5 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_eprop_adaptive.h @@ -22,9 +22,9 @@ // Includes for model parts used in this implementation #include #include +#include #include #include -#include #include diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h index a783b85d2dc..f9bf028a456 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_left_right_readout.h @@ -21,10 +21,10 @@ // Includes for model parts used in this implementation #include +#include #include #include #include -#include #include diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h index 49e9be3a7e7..0a0805d9b39 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_sinusoid_readout.h @@ -21,10 +21,10 @@ // Includes for model parts used in this implementation #include +#include 
#include #include #include -#include #include From 39d1ce4740aed0665dd5a027977510cf95029f07 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 8 Jun 2023 12:27:59 +0100 Subject: [PATCH 115/123] set default for target_data --- spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py index 08efbb18ffe..63ed6c7b9d8 100644 --- a/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/builds/sinusoid_readout.py @@ -33,7 +33,7 @@ def __init__( self, tau_m=20.0, cm=1.0, v_rest=0.0, v_reset=0.0, v_thresh=100, tau_refrac=0.1, i_offset=0.0, v=50, isyn_exc=0.0, isyn_exc2=0.0, isyn_inh=0.0, isyn_inh2=0.0, - target_data=[0], + target_data=0, # Learning signal and weight update constants learning_signal=0, w_fb=0.5, eta=1.0, update_ready=1024): From 966fedb052e4b4465db3225bf14e7eecf78d1a06 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 16 Jun 2023 10:17:29 +0100 Subject: [PATCH 116/123] rename setter to correct name --- .../neuron/neuron_models/neuron_model_eprop_adaptive.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index f77fb56f217..5cd0bec35f6 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -352,8 +352,8 @@ def window_size(self, new_value): def number_of_cues(self): return self.__number_of_cues - @window_size.setter - def window_size(self, new_value): + @number_of_cues.setter + def number_of_cues(self, new_value): self.__number_of_cues = new_value # TODO: setters for "globals" like target rate, eta, etc. 
From 1f2e8088af94b704a9ae8d35663717fd19cfcd4a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 6 Jul 2023 16:52:59 +0100 Subject: [PATCH 117/123] Add use_eprop to pass correct value into synapses --- .../abstract_standard_neuron_component.py | 10 ++++++++ .../neuron_model_eprop_adaptive.py | 4 +++ .../neuron_model_left_right_readout.py | 4 +++ .../neuron_model_sinusoid_readout.py | 4 +++ .../neuron/population_machine_synapses.py | 5 +++- .../synapse_dynamics_static.py | 25 +++++++++++++------ .../synapse_dynamics/synapse_dynamics_stdp.py | 20 ++++++++++++--- 7 files changed, 61 insertions(+), 11 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py index f5376318111..53915e01eb9 100644 --- a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py +++ b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py @@ -79,3 +79,13 @@ def get_units(self, variable): :param str variable: The name of the variable """ return self.__units[variable] + + @property + def uses_eprop(self): + """ + Says whether the component is an eprop model (default false) + + :rtype: bool + + """ + return False \ No newline at end of file diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 5cd0bec35f6..4bcbb48a1cb 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -357,3 +357,7 @@ def number_of_cues(self, new_value): self.__number_of_cues = new_value # TODO: setters for "globals" like target rate, eta, etc. 
+ + @overrides(AbstractStandardNeuronComponent.uses_eprop) + def uses_eprop(self): + return True diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index f6b422b82ed..4033e465bfc 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -299,3 +299,7 @@ def window_size(self, new_value): self.__window_size = new_value # TODO: Check setters for all parameters + + @overrides(AbstractStandardNeuronComponent.uses_eprop) + def uses_eprop(self): + return True diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index f948d63f7fc..eb2467bf54f 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -228,3 +228,7 @@ def w_fb(self, w_fb): self.__w_fb = w_fb # TODO: check whether any further parmeters need setters + + @overrides(AbstractStandardNeuronComponent.uses_eprop) + def uses_eprop(self): + return True diff --git a/spynnaker/pyNN/models/neuron/population_machine_synapses.py b/spynnaker/pyNN/models/neuron/population_machine_synapses.py index f589ab23eb4..2420386474e 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_synapses.py +++ b/spynnaker/pyNN/models/neuron/population_machine_synapses.py @@ -206,7 +206,10 @@ def _write_synapse_parameters(self, spec, ring_buffer_shifts): spec.write_value(n_synapse_types) spec.write_value(get_n_bits(n_neurons)) spec.write_value(get_n_bits(n_synapse_types)) - spec.write_value(get_n_bits(max_delay)) + if self._app_vertex.neuron_impl.neuron_model.uses_eprop: + spec.write_value(1) + else: + spec.write_value(get_n_bits(max_delay)) 
spec.write_value(int(self._app_vertex.drop_late_spikes)) spec.write_value(self._app_vertex.incoming_spike_buffer_size) spec.write_array(ring_buffer_shifts) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index c6f6c8004fe..3fe1d864f2d 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -103,19 +103,28 @@ def get_static_synaptic_data( neuron_id_mask = (1 << n_neuron_id_bits) - 1 n_synapse_type_bits = get_n_bits(n_synapse_types) + # Branch + # fixed_fixed = ( + # ((numpy.rint(connections["weight"]).astype("uint16") & + # 0xFFFF).astype("uint32") << 16) | + # ((connections["delay"].astype("uint32") & 0xFF) << + # (n_neuron_id_bits + n_synapse_type_bits)) | + # (connections["synapse_type"].astype( + # "uint32") << n_neuron_id_bits) | + # ((connections["target"] - post_vertex_slice.lo_atom) & + # neuron_id_mask)) + + # Master fixed_fixed = ( - ((numpy.rint(connections["weight"]).astype("uint16") & - 0xFFFF).astype("uint32") << 16) | - # ((connections["delay"].astype("uint32") & 0xFF) << - # master is commented bit below (branch commented above) - # ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & - # 0xFFFF) << 16) | + ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & + 0xFFFF) << 16) | (connections["delay"].astype("uint32") << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype( "uint32") << n_neuron_id_bits) | ((connections["target"] - post_vertex_slice.lo_atom) & neuron_id_mask)) + fixed_fixed_rows = self.convert_per_connection_data_to_rows( connection_row_indices, n_rows, fixed_fixed.view(dtype="uint8").reshape((-1, BYTES_PER_WORD)), @@ -172,11 +181,13 @@ def read_static_synaptic_data( [numpy.repeat(i, ff_size[i]) for i in range(len(ff_size))]) connections["target"] = ( (data & 
neuron_id_mask) + post_vertex_slice.lo_atom) + # branch # connections["weight"] = ((data >> 16) & 0xFFFF).astype("int16") # connections["delay"] = (data >> (n_neuron_id_bits + # n_synapse_type_bits)) & 0xFF # connections["delay"][connections["delay"] == 0] = 16 - # master code is commented out below, branch code above here + + # master connections["weight"] = (data >> 16) & 0xFFFF connections["delay"] = (data & 0xFFFF) >> ( n_neuron_id_bits + n_synapse_type_bits) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index b250f2aa9cf..d6da735abe0 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -348,19 +348,33 @@ def get_plastic_synaptic_data( n_neuron_id_bits = get_n_bits(max_atoms_per_core) neuron_id_mask = (1 << n_neuron_id_bits) - 1 + # Get the fixed data + # Old branch dendritic_delays = ( connections["delay"] * self.__dendritic_delay_fraction) + axonal_delays = ( + connections["delay"] * (1.0 - self.__dendritic_delay_fraction)) # Get the fixed data fixed_plastic = ( ((dendritic_delays.astype("uint16") & 0xFF) << - # master code commented out - # (connections["delay"].astype("uint16") << (n_neuron_id_bits + n_synapse_type_bits)) | + ((axonal_delays.astype("uint16") & 0xF) << + (4 + n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype("uint16") << n_neuron_id_bits) | ((connections["target"].astype("uint16") - post_vertex_slice.lo_atom) & neuron_id_mask)) + + # Master + # fixed_plastic = ( + # (connections["delay"].astype("uint16") << + # (n_neuron_id_bits + n_synapse_type_bits)) | + # (connections["synapse_type"].astype("uint16") + # << n_neuron_id_bits) | + # ((connections["target"].astype("uint16") - + # post_vertex_slice.lo_atom) & neuron_id_mask)) + fixed_plastic_rows = self.convert_per_connection_data_to_rows( 
connection_row_indices, n_rows, fixed_plastic.view(dtype="uint8").reshape((-1, 2)), @@ -469,7 +483,7 @@ def read_plastic_synaptic_data( n_half_words += 1 half_word = 0 pp_half_words = numpy.concatenate([ - pp[:size * n_half_words * BYTES_PER_SHORT].view("uint16")[ + pp[:size * n_half_words * BYTES_PER_SHORT].view("int16")[ half_word::n_half_words] for pp, size in zip(pp_without_headers, fp_size)]) From 40383efb0423ac6fe98c62d0cb15e1029701f3ad Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 8 Aug 2023 16:33:10 +0100 Subject: [PATCH 118/123] DataType has moved --- .../models/neuron/neuron_models/neuron_model_eprop_adaptive.py | 2 +- .../neuron/neuron_models/neuron_model_left_right_readout.py | 2 +- .../neuron/neuron_models/neuron_model_sinusoid_readout.py | 2 +- .../stdp/weight_dependence/weight_dependence_eprop_reg.py | 2 +- .../models/neuron/synapse_types/synapse_type_eprop_adaptive.py | 2 +- .../models/neuron/threshold_types/threshold_type_adaptive.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 4bcbb48a1cb..2c5e8cc8ec6 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -13,7 +13,7 @@ # limitations under the License. 
from spinn_utilities.overrides import overrides -from data_specification.enums import DataType +from spinn_front_end_common.interface.ds import DataType from spynnaker.pyNN.models.neuron.implementations import ( AbstractStandardNeuronComponent) from spynnaker.pyNN.utilities.struct import Struct diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 4033e465bfc..74965571ece 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -13,7 +13,7 @@ # limitations under the License. from spinn_utilities.overrides import overrides -from data_specification.enums import DataType +from spinn_front_end_common.interface.ds import DataType from spynnaker.pyNN.models.neuron.implementations import ( AbstractStandardNeuronComponent) from spynnaker.pyNN.utilities.struct import Struct diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index eb2467bf54f..b8e9fed11d1 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -13,7 +13,7 @@ # limitations under the License. 
from spinn_utilities.overrides import overrides -from data_specification.enums import DataType +from spinn_front_end_common.interface.ds import DataType from spynnaker.pyNN.models.neuron.implementations import ( AbstractStandardNeuronComponent) from spynnaker.pyNN.utilities.struct import Struct diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py index 4d4194bd8f7..e77e23943c1 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_eprop_reg.py @@ -13,7 +13,7 @@ # limitations under the License. from spinn_utilities.overrides import overrides -from data_specification.enums import DataType +from spinn_front_end_common.interface.ds import DataType from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus from .abstract_weight_dependence import AbstractWeightDependence diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py index e47a4359adb..ea53fbe4e5c 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_eprop_adaptive.py @@ -13,7 +13,7 @@ # limitations under the License. 
from spinn_utilities.overrides import overrides -from data_specification.enums import DataType +from spinn_front_end_common.interface.ds import DataType from .abstract_synapse_type import AbstractSynapseType from spynnaker.pyNN.utilities.struct import Struct diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py index acb9c1d39a4..9ef8424dd98 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_adaptive.py @@ -14,7 +14,7 @@ # limitations under the License. from spinn_utilities.overrides import overrides -from data_specification.enums import DataType +from spinn_front_end_common.interface.ds import DataType from .abstract_threshold_type import AbstractThresholdType from spynnaker.pyNN.utilities.struct import Struct From 039764790be06ac64eea2262ac63289d785f63fa Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 8 Aug 2023 17:00:11 +0100 Subject: [PATCH 119/123] flake8 missing line --- .../implementations/abstract_standard_neuron_component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py index 53915e01eb9..c984e48f9d2 100644 --- a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py +++ b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py @@ -88,4 +88,4 @@ def uses_eprop(self): :rtype: bool """ - return False \ No newline at end of file + return False From c9be8b3478b6e121bc6e06a27b05dbaec8c2820b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 8 Aug 2023 17:05:48 +0100 Subject: [PATCH 120/123] pylint spelling --- .../implementations/abstract_standard_neuron_component.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py index c984e48f9d2..92dc4fc1f50 100644 --- a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py +++ b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py @@ -83,7 +83,7 @@ def get_units(self, variable): @property def uses_eprop(self): """ - Says whether the component is an eprop model (default false) + Says whether the component is an e-prop model (default false) :rtype: bool From bfdee449fa95fc9d99c586966bfb2834df981a54 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 8 Aug 2023 17:25:30 +0100 Subject: [PATCH 121/123] pylint property --- .../models/neuron/neuron_models/neuron_model_eprop_adaptive.py | 3 +-- .../neuron/neuron_models/neuron_model_left_right_readout.py | 3 +-- .../neuron/neuron_models/neuron_model_sinusoid_readout.py | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index 2c5e8cc8ec6..add8a4d86be 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -356,8 +356,7 @@ def number_of_cues(self): def number_of_cues(self, new_value): self.__number_of_cues = new_value - # TODO: setters for "globals" like target rate, eta, etc. 
- @overrides(AbstractStandardNeuronComponent.uses_eprop) + @property def uses_eprop(self): return True diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 74965571ece..87f82a6459e 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -298,8 +298,7 @@ def window_size(self): def window_size(self, new_value): self.__window_size = new_value - # TODO: Check setters for all parameters - @overrides(AbstractStandardNeuronComponent.uses_eprop) + @property def uses_eprop(self): return True diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index b8e9fed11d1..f314f153393 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -227,8 +227,7 @@ def w_fb(self): def w_fb(self, w_fb): self.__w_fb = w_fb - # TODO: check whether any further parmeters need setters - @overrides(AbstractStandardNeuronComponent.uses_eprop) + @property def uses_eprop(self): return True From e7375d9cc38ed02e888f87cec0a26c149e8cb960 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 8 Aug 2023 17:31:48 +0100 Subject: [PATCH 122/123] overrides goes last --- .../models/neuron/neuron_models/neuron_model_eprop_adaptive.py | 2 +- .../neuron/neuron_models/neuron_model_left_right_readout.py | 2 +- .../neuron/neuron_models/neuron_model_sinusoid_readout.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py index add8a4d86be..270351a3738 100644 --- 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_eprop_adaptive.py @@ -356,7 +356,7 @@ def number_of_cues(self): def number_of_cues(self, new_value): self.__number_of_cues = new_value - @overrides(AbstractStandardNeuronComponent.uses_eprop) @property + @overrides(AbstractStandardNeuronComponent.uses_eprop) def uses_eprop(self): return True diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py index 87f82a6459e..d024b70dd1d 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_left_right_readout.py @@ -298,7 +298,7 @@ def window_size(self): def window_size(self, new_value): self.__window_size = new_value - @overrides(AbstractStandardNeuronComponent.uses_eprop) @property + @overrides(AbstractStandardNeuronComponent.uses_eprop) def uses_eprop(self): return True diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py index f314f153393..c5e8cbd6e59 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_sinusoid_readout.py @@ -227,7 +227,7 @@ def w_fb(self): def w_fb(self, w_fb): self.__w_fb = w_fb - @overrides(AbstractStandardNeuronComponent.uses_eprop) @property + @overrides(AbstractStandardNeuronComponent.uses_eprop) def uses_eprop(self): return True From 012194f42416c106492b1132223a9c6a736fc618 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 28 Sep 2023 11:07:38 +0100 Subject: [PATCH 123/123] Use the correct structure for the data on the branch --- .../synapse_dynamics_static.py | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff 
--git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index 3fe1d864f2d..b3d30330481 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -104,27 +104,27 @@ def get_static_synaptic_data( n_synapse_type_bits = get_n_bits(n_synapse_types) # Branch - # fixed_fixed = ( - # ((numpy.rint(connections["weight"]).astype("uint16") & - # 0xFFFF).astype("uint32") << 16) | - # ((connections["delay"].astype("uint32") & 0xFF) << - # (n_neuron_id_bits + n_synapse_type_bits)) | - # (connections["synapse_type"].astype( - # "uint32") << n_neuron_id_bits) | - # ((connections["target"] - post_vertex_slice.lo_atom) & - # neuron_id_mask)) - - # Master fixed_fixed = ( - ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & - 0xFFFF) << 16) | - (connections["delay"].astype("uint32") << + ((numpy.rint(connections["weight"]).astype("uint16") & + 0xFFFF).astype("uint32") << 16) | + ((connections["delay"].astype("uint32") & 0xFF) << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype( "uint32") << n_neuron_id_bits) | ((connections["target"] - post_vertex_slice.lo_atom) & neuron_id_mask)) + # Master + # fixed_fixed = ( + # ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & + # 0xFFFF) << 16) | + # (connections["delay"].astype("uint32") << + # (n_neuron_id_bits + n_synapse_type_bits)) | + # (connections["synapse_type"].astype( + # "uint32") << n_neuron_id_bits) | + # ((connections["target"] - post_vertex_slice.lo_atom) & + # neuron_id_mask)) + fixed_fixed_rows = self.convert_per_connection_data_to_rows( connection_row_indices, n_rows, fixed_fixed.view(dtype="uint8").reshape((-1, BYTES_PER_WORD)), @@ -182,15 +182,15 @@ def read_static_synaptic_data( connections["target"] = ( (data & neuron_id_mask) + post_vertex_slice.lo_atom) # 
branch - # connections["weight"] = ((data >> 16) & 0xFFFF).astype("int16") - # connections["delay"] = (data >> (n_neuron_id_bits + - # n_synapse_type_bits)) & 0xFF - # connections["delay"][connections["delay"] == 0] = 16 + connections["weight"] = ((data >> 16) & 0xFFFF).astype("int16") + connections["delay"] = (data >> (n_neuron_id_bits + + n_synapse_type_bits)) & 0xFF + connections["delay"][connections["delay"] == 0] = 16 # master - connections["weight"] = (data >> 16) & 0xFFFF - connections["delay"] = (data & 0xFFFF) >> ( - n_neuron_id_bits + n_synapse_type_bits) + # connections["weight"] = (data >> 16) & 0xFFFF + # connections["delay"] = (data & 0xFFFF) >> ( + # n_neuron_id_bits + n_synapse_type_bits) return connections