diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 1e49abb5627..6ee33a3c852 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -16,3 +16,13 @@ updates: include: "scope" labels: - "CI" + + - package-ecosystem: "pip" + directory: "/" # use top dir + schedule: + interval: "daily" + target-branch: "devel" + commit-message: + prefix: "requirements" + labels: + - "deps" diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 4b1f9dffa44..bb117f4055a 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -47,7 +47,7 @@ jobs: restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2.1.2 + uses: actions/setup-python@v2.1.3 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.python-architecture }} @@ -102,7 +102,7 @@ jobs: run: pytest --junit-xml=tests_out.xml --verbosity=0 -n auto --maxprocesses=2 - name: Upload test results - uses: actions/upload-artifact@v2.1.4 + uses: actions/upload-artifact@v2.2.0 with: name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: tests_out.xml @@ -114,7 +114,7 @@ jobs: python setup.py sdist bdist_wheel if: contains(github.ref, 'tags') - name: Upload dist packages - uses: actions/upload-artifact@v2.1.4 + uses: actions/upload-artifact@v2.2.0 with: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: dist/ diff --git a/dev_requirements.txt b/dev_requirements.txt index a0352c6e221..9ce4f32f0e1 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,14 +1,12 @@ -ipykernel -ipython -jupyter -psyneulink-sphinx-theme -pytest -pytest-benchmark -pytest-cov -pytest-helpers-namespace -pytest-profiling -pytest-pycodestyle -pytest-pydocstyle -pytest-xdist -sphinx -sphinx_autodoc_typehints +jupyter<=1.0.0 
+psyneulink-sphinx-theme<=1.2.1.7 +pytest<6.1.1 +pytest-benchmark<=3.2.3 +pytest-cov<=2.10.1 +pytest-helpers-namespace<=2019.1.8 +pytest-profiling<=1.7.0 +pytest-pycodestyle<=2.2.0 +pytest-pydocstyle<=2.2.0 +pytest-xdist<=2.1.0 +sphinx<=3.2.1 +sphinx_autodoc_typehints<=1.11.0 diff --git a/docs/source/BasicsAndPrimer.rst b/docs/source/BasicsAndPrimer.rst index 1c37fac22ce..213bd914d6a 100644 --- a/docs/source/BasicsAndPrimer.rst +++ b/docs/source/BasicsAndPrimer.rst @@ -619,7 +619,7 @@ Example use-cases for dot notation >>> comp1.run(inputs={m:1}) >>> # returns: [array([1.])] >>> # set slope of m1's function to 2 for the most recent context (which is now comp1) - >>> m.function.slope = 2 + >>> m.function.slope.base = 2 >>> comp1.run(inputs={m:1}) >>> # returns: [array([2.])] >>> # note that changing the slope of m's function produced a different result @@ -640,7 +640,7 @@ Example use-cases for dot notation >>> m.execute([1]) >>> # returns: [array([1.])] >>> # executing m outside of a Composition uses its default context - >>> m.function.slope = 2 + >>> m.function.slope.base = 2 >>> m.execute([1]) >>> # returns: [array([2.])] >>> comp1.run(inputs={m:1}) @@ -711,7 +711,7 @@ parameters. For example, the ``output`` Mechanism was assigned the `Logistic` ` specification. For example, the current value of the `gain ` parameter of the ``output``\'s Logistic Function can be accessed in either of the following ways:: - >>> output.function.gain + >>> output.function.gain.base 1.0 >>> output.function.parameters.gain.get() 1.0 @@ -731,12 +731,20 @@ complete description of modulation. The current *modulated* value of a paramete ` of the corresponding ParameterPort. For instance, the print statement in the example above used ``task.parameter_ports[GAIN].value`` to report the modulated value of the `gain ` parameter of the ``task`` Mechanism's `Logistic` function when the simulation was run. 
For convenience, it is also possible to -access the value of a modulable parameter by adding the prefix ``mod_`` to the name of the parameter; this returns -the `value ` of the ParameterPort for the parameter:: +access the value of a modulable parameter via dot notation. Dot notation for modulable parameters is slightly different +than for non-modulable parameters to provide easy access to both base and modulated values:: + + >>> task.function.gain + (Logistic Logistic Function-5): + gain.base: 1.0 + gain.modulated: [0.55] + +Instead of just returning a value, the dot notation returns an object with `base` and `modulated` attributes. +`modulated` refers to the `value ` of the ParameterPort for the parameter:: >>> task.parameter_ports[GAIN].value [0.62] - >>> task.mod_gain + >>> task.gain.modulated [0.62] This works for any modulable parameters of the Mechanism, its @@ -745,8 +753,8 @@ here, neither the ``parameters`` nor the ``function`` attributes of the Mechanis Note also that, as explained above, the value returned is different from the base value of the function's gain parameter:: - >>> task.function.gain - [1.0] + >>> task.function.gain.base + 1.0 This is because when the Compoistion was run, the ``control`` Mechanism modulated the value of the gain parameter. 
diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 1b2aa3d7931..06da412a161 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -626,12 +626,29 @@ def __init__(self, message, component=None): super().__init__(message) -def make_parameter_property(name): +def _get_parametervalue_attr(param): + return f'_{param.name}' + + +def make_parameter_property(param): def getter(self): - return getattr(self.parameters, name)._get(self.most_recent_context) + p = getattr(self.parameters, param.name) + + if p.modulable: + return getattr(self, _get_parametervalue_attr(p)) + else: + return p._get(self.most_recent_context) def setter(self, value): - getattr(self.parameters, name)._set(value, self.most_recent_context) + p = getattr(self.parameters, param.name) + if p.modulable: + warnings.warn( + 'Setting parameter values directly using dot notation' + ' may be removed in a future release. It is replaced with,' + f' for example, .{param.name}.base = {value}', + FutureWarning, + ) + getattr(self.parameters, p.name)._set(value, self.most_recent_context) return property(getter).setter(setter) @@ -666,7 +683,7 @@ def __init__(self, *args, **kwargs): for param in self.parameters: if not hasattr(self, param.name): - setattr(self, param.name, make_parameter_property(param.name)) + setattr(self, param.name, make_parameter_property(param)) try: if param.default_value.owner is None: @@ -2030,6 +2047,10 @@ def _is_user_specified(parameter): if isinstance(p.default_value, Function): p.default_value.owner = p + for p in self.parameters: + if p.stateful: + setattr(self, _get_parametervalue_attr(p), ParameterValue(self, p)) + def _instantiate_parameter_classes(self, context=None): """ An optional method that will take any Parameter values in @@ -3620,6 +3641,12 @@ def make_property_mod(param_name, parameter_port_name=None): parameter_port_name = param_name def getter(self): + warnings.warn( + 
f'Getting modulated parameter values with .mod_' + ' may be removed in a future release. It is replaced with,' + f' for example, .{param_name}.modulated', + FutureWarning + ) try: return self._parameter_ports[parameter_port_name].value except TypeError: @@ -3647,3 +3674,48 @@ def getter(self, context=None): .format(self.name, param_name)) return getter + + +class ParameterValue: + def __init__(self, owner, parameter): + self._owner = owner + self._parameter = parameter + + def __repr__(self): + return f'{self._owner}:\n\t{self._parameter.name}.base: {self.base}\n\t{self._parameter.name}.modulated: {self.modulated}' + + @property + def modulated(self): + try: + is_modulated = (self._parameter in self._owner.parameter_ports) + except AttributeError: + is_modulated = False + + try: + is_modulated = is_modulated or (self._parameter in self._owner.owner.parameter_ports) + except AttributeError: + pass + + if is_modulated: + return self._owner._get_current_parameter_value( + self._parameter, + self._owner.most_recent_context + ) + else: + warnings.warn(f'{self._parameter.name} is not currently modulated.') + return None + + @modulated.setter + def modulated(self, value): + raise ComponentError( + f"Cannot set {self._owner.name}'s modulated {self._parameter.name}" + ' value directly because it is computed by the ParameterPort.' 
+ ) + + @property + def base(self): + return self._parameter._get(self._owner.most_recent_context) + + @base.setter + def base(self, value): + self._parameter._set(value, self._owner.most_recent_context) diff --git a/psyneulink/core/components/functions/objectivefunctions.py b/psyneulink/core/components/functions/objectivefunctions.py index 52afed17035..518256b7f8a 100644 --- a/psyneulink/core/components/functions/objectivefunctions.py +++ b/psyneulink/core/components/functions/objectivefunctions.py @@ -1000,7 +1000,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, t elif self.metric == MAX_ABS_DIFF: del kwargs['acc'] max_diff_ptr = builder.alloca(ctx.float_ty) - builder.store(ctx.float_ty("NaN"), max_diff_ptr) + builder.store(ctx.float_ty(float("NaN")), max_diff_ptr) kwargs['max_diff_ptr'] = max_diff_ptr inner = functools.partial(self.__gen_llvm_max_diff, **kwargs) elif self.metric == CORRELATION: diff --git a/psyneulink/core/components/functions/optimizationfunctions.py b/psyneulink/core/components/functions/optimizationfunctions.py index 91d88b11fa9..6b286fed3ab 100644 --- a/psyneulink/core/components/functions/optimizationfunctions.py +++ b/psyneulink/core/components/functions/optimizationfunctions.py @@ -1516,7 +1516,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, # Use NaN here. fcmp_unordered below returns true if one of the # operands is a NaN. 
This makes sure we always set min_* # in the first iteration - builder.store(min_value_ptr.type.pointee("NaN"), min_value_ptr) + builder.store(min_value_ptr.type.pointee(float("NaN")), min_value_ptr) b = builder with contextlib.ExitStack() as stack: @@ -1565,14 +1565,13 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, def _run_cuda_grid(self, ocm, variable, context): assert ocm is ocm.agent_rep.controller # Compiled evaluate expects the same variable as mech function - new_variable = [np.asfarray(ip.parameters.value.get(context)) - for ip in ocm.input_ports] - new_variable = np.array(new_variable, dtype=np.object) + new_variable = [ip.parameters.value.get(context) for ip in ocm.input_ports] # Map allocations to values comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, [context.execution_id]) ct_alloc, ct_values = comp_exec.cuda_evaluate(new_variable, self.search_space) + assert len(ct_values) == len(ct_alloc) # Reduce array of values to min/max # select_min params are: # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count @@ -1582,7 +1581,6 @@ def _run_cuda_grid(self, ocm, variable, context): ct_opt_sample = bin_func.byref_arg_types[2](float("NaN")) ct_opt_value = bin_func.byref_arg_types[4]() ct_opt_count = bin_func.byref_arg_types[6](0) - assert len(ct_values) == len(ct_alloc) ct_count = bin_func.c_func.argtypes[7](len(ct_alloc)) bin_func(ct_param, ct_state, ct_opt_sample, ct_alloc, ct_opt_value, diff --git a/psyneulink/core/components/functions/transferfunctions.py b/psyneulink/core/components/functions/transferfunctions.py index 77eb2a49748..daf30ae4e17 100644 --- a/psyneulink/core/components/functions/transferfunctions.py +++ b/psyneulink/core/components/functions/transferfunctions.py @@ -2357,7 +2357,7 @@ def __gen_llvm_apply(self, ctx, builder, params, _, arg_in, arg_out): builder.store(exp_sum_ptr.type.pointee(0), exp_sum_ptr) max_ptr = builder.alloca(ctx.float_ty) - 
builder.store(max_ptr.type.pointee('-inf'), max_ptr) + builder.store(max_ptr.type.pointee(float('-inf')), max_ptr) max_ind_ptr = builder.alloca(ctx.int32_ty) builder.store(max_ind_ptr.type.pointee(-1), max_ind_ptr) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 9621d194de4..a8cd212d2b6 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -921,7 +921,7 @@ class `UserList >> T = pnl.TransferMechanism(function=Linear) - >>> T.function.slope #doctest: +SKIP + >>> T.function.slope.base #doctest: +SKIP 1.0 # Default for slope >>> T.clip #doctest: +SKIP None # Default for clip is None @@ -929,7 +929,7 @@ class `UserList >> T.function.slope #doctest: +SKIP + >>> T.function.slope.base #doctest: +SKIP 1.0 # slope is restored 1.0 >>> T.clip #doctest: +SKIP None # clip is restored to None @@ -942,7 +942,7 @@ class `UserList >> T.function.slope = 10 + >>> T.function.slope.base = 10 >>> T.clip = (0,3) >>> T.function.slope 10 @@ -952,7 +952,7 @@ class `UserList >> T.function.slope #doctest: +SKIP + >>> T.function.slope.base #doctest: +SKIP 10 # slope is restored 10.0, its previously assigned value >>> T.clip #doctest: +SKIP (0, 3) # clip is restored to (0,3), its previously assigned value @@ -2783,10 +2783,17 @@ def _get_output_struct_type(self, ctx): def _get_input_struct_type(self, ctx): # Extract the non-modulation portion of InputPort input struct input_type_list = [ctx.get_input_struct_type(port).elements[0] for port in self.input_ports] + + # Get modulatory inputs - mod_input_type_list = [ctx.get_output_struct_type(proj) for proj in self.mod_afferents] - if len(mod_input_type_list) > 0: + if len(self.mod_afferents) > 0: + mod_input_type_list = (ctx.get_output_struct_type(proj) for proj in self.mod_afferents) input_type_list.append(pnlvm.ir.LiteralStructType(mod_input_type_list)) + # Prefer an array type if there is no modulation. 
+ # This is used to keep ctypes inputs as arrays instead of structs. + elif all(t == input_type_list[0] for t in input_type_list): + return pnlvm.ir.ArrayType(input_type_list[0], len(input_type_list)) + return pnlvm.ir.LiteralStructType(input_type_list) def _get_param_initializer(self, context): diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index afd2406c6ea..ad627bc83b7 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -256,9 +256,9 @@ >>> my_transfer_mechanism = pnl.TransferMechanism( ... noise=5.0, ... function=pnl.Linear(slope=2.0)) - >>> assert my_transfer_mechanism.noise == 5.0 + >>> assert my_transfer_mechanism.noise.base == 5.0 >>> assert my_transfer_mechanism.mod_noise == [5.0] - >>> assert my_transfer_mechanism.function.slope == 2.0 + >>> assert my_transfer_mechanism.function.slope.base == 2.0 >>> assert my_transfer_mechanism.mod_slope == [2.0] Notice that the noise attribute, which stores the base value for the noise ParameterPort of my_transfer_mechanism, is @@ -266,11 +266,11 @@ my_transfer_mechanism, is on my_transfer_mechanism's function. However, mod_noise and mod_slope are both properties on my_transfer_mechanism. 
- >>> my_transfer_mechanism.noise = 4.0 - >>> my_transfer_mechanism.function.slope = 1.0 - >>> assert my_transfer_mechanism.noise == 4.0 + >>> my_transfer_mechanism.noise.base = 4.0 + >>> my_transfer_mechanism.function.slope.base = 1.0 + >>> assert my_transfer_mechanism.noise.base == 4.0 >>> assert my_transfer_mechanism.mod_noise == [5.0] - >>> assert my_transfer_mechanism.function.slope == 1.0 + >>> assert my_transfer_mechanism.function.slope.base == 1.0 >>> assert my_transfer_mechanism.mod_slope == [2.0] When the base values of noise and slope are updated, we can inspect these attributes immediately and observe that they @@ -279,9 +279,9 @@ >>> my_transfer_mechanism.execute([10.0]) array([[14.]]) - >>> assert my_transfer_mechanism.noise == 4.0 + >>> assert my_transfer_mechanism.noise.base == 4.0 >>> assert my_transfer_mechanism.mod_noise == [4.0] - >>> assert my_transfer_mechanism.function.slope == 1.0 + >>> assert my_transfer_mechanism.function.slope.base == 1.0 >>> assert my_transfer_mechanism.mod_slope == 1.0 Now that the mechanism has executed, we can see that each ParameterPort evaluated its function with the base value, @@ -1006,20 +1006,6 @@ def skip_parameter_port(parameter): if isinstance(p, ParameterAlias): port_aliases.add(p.name) - duplicates = [p for p in port_parameters if len(port_parameters[p]) > 1] - if len(duplicates) > 0: - dup_str = '\n\t'.join([f'{name}: {", ".join(port_parameters[name])}' for name in duplicates]) - ex_func_name = next(iter(port_parameters[duplicates[0]])) - ex_port_name = duplicates[0] - warnings.warn( - 'Multiple ParameterPorts will be created for Parameters with the' - f' same name:\n{owner}\n\t{dup_str}' - '\nTo explicitly access the correct Port, you will need to' - " include the function's name as suffix or use the Parameter object." 
- f" For example,\nself.parameter_ports['{ex_port_name}{owner.parameter_ports.separator}{ex_func_name}']\nor\n" - f'self.parameter_ports[self.{ex_func_name}.parameters.{ex_port_name}]' - ) - for parameter_port_name in port_parameters: if ( len(port_parameters[parameter_port_name]) > 1 diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 746cc3321b3..d9f30eb93a6 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -30,8 +30,8 @@ def _convert_ctype_to_python(x): if isinstance(x, ctypes.Structure): return [_convert_ctype_to_python(getattr(x, field_name)) for field_name, _ in x._fields_] if isinstance(x, ctypes.Array): - return [_convert_ctype_to_python(num) for num in x] - if isinstance(x, ctypes.c_double): + return [_convert_ctype_to_python(el) for el in x] + if isinstance(x, (ctypes.c_double, ctypes.c_float)): return x.value if isinstance(x, (float, int)): return x @@ -45,6 +45,13 @@ def _tupleize(x): except TypeError: return x if x is not None else tuple() +def _element_dtype(x): + dt = np.dtype(x) + while dt.subdtype is not None: + dt = dt.subdtype[0] + + return dt + def _pretty_size(size): units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] for u in units: @@ -135,8 +142,8 @@ def _cuda_out(self): return self._buffer_cuda_out def cuda_execute(self, variable): - # Create input parameter - new_var = np.asfarray(variable) + # Create input argument + new_var = np.asfarray(variable, dtype=self._vi_dty) data_in = jit_engine.pycuda.driver.In(new_var) self._uploaded_bytes['input'] += new_var.nbytes @@ -174,6 +181,7 @@ def __init__(self, component, execution_ids=[None], *, tags=frozenset()): self._vo_ty = vo_ty self._ct_vo = vo_ty() self._vi_ty = vi_ty + self._vi_dty = _element_dtype(vi_ty) def _get_compilation_param(self, name, initializer, arg, context): param = getattr(self._component._compilation_data, name) @@ -210,7 +218,10 @@ def _state_struct(self): return 
self._get_compilation_param('state_struct', '_get_state_initializer', 1, self._execution_contexts[0]) def execute(self, variable): - new_variable = np.asfarray(variable) + # Make sure function inputs are 2d. + # Mechanism inputs are already 3d so the first part is nop. + new_variable = np.asfarray(np.atleast_2d(variable), + dtype=self._vi_dty) if len(self._execution_contexts) > 1: # wrap_call casts the arguments so we only need contiguous data @@ -220,7 +231,7 @@ def execute(self, variable): self._state_struct, ct_vi, self._ct_vo, self._ct_len) else: - ct_vi = new_variable.ctypes.data_as(ctypes.POINTER(self._vi_ty)) + ct_vi = np.ctypeslib.as_ctypes(new_variable) self._bin_func(ctypes.byref(self._param_struct), ctypes.byref(self._state_struct), ct_vi, ctypes.byref(self._ct_vo)) @@ -235,7 +246,8 @@ def execute(self, variable): # a) the input is vector of input ports # b) input ports take vector of projection outputs # c) projection output is a vector (even 1 element vector) - new_var = [np.atleast_2d(x) for x in np.atleast_1d(variable)] + new_var = np.atleast_3d(variable) + new_var.shape = (len(self._component.input_ports), 1, -1) return super().execute(new_var) @@ -633,15 +645,25 @@ def cuda_evaluate(self, variable, search_space): # There are 6 arguments to evaluate: # comp_param, comp_state, allocations, results, output, input, comp_data # all but #2 and #3 are shared + + # Directly initialized structures ct_comp_param = bin_func.byref_arg_types[0](*ocm.agent_rep._get_param_initializer(context)) ct_comp_state = bin_func.byref_arg_types[1](*ocm.agent_rep._get_state_initializer(context)) - # Make sure the dtype matches _gen_llvm_evaluate_function - allocations = np.asfarray(np.atleast_2d([*itertools.product(*search_space)])) + ct_comp_data = bin_func.byref_arg_types[5](*ocm.agent_rep._get_data_initializer(context)) + + # Construct the allocations array + alloc_vals = itertools.product(*search_space) + alloc_dty = _element_dtype(bin_func.byref_arg_types[2]) + 
allocations = np.asfarray(np.atleast_2d([*alloc_vals]), dtype=alloc_dty) ct_allocations = allocations.ctypes.data_as(ctypes.POINTER(bin_func.byref_arg_types[2] * len(allocations))) - out_ty = bin_func.byref_arg_types[3] * len(allocations) - ct_in = variable.ctypes.data_as(ctypes.POINTER(bin_func.byref_arg_types[4])) - ct_comp_data = bin_func.byref_arg_types[5](*ocm.agent_rep._get_data_initializer(context)) + # Construct input variable + el_dty = _element_dtype(bin_func.byref_arg_types[4]) + converted_variable = np.array([np.asfarray(x, dtype=el_dty) for x in variable]) + ct_in = converted_variable.ctypes.data_as(ctypes.POINTER(bin_func.byref_arg_types[4])) + + # Output is allocated on device, but we need the ctype. + out_ty = bin_func.byref_arg_types[3] * len(allocations) cuda_args = (self.upload_ctype(ct_comp_param, 'params'), self.upload_ctype(ct_comp_state, 'state'), diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index a498ed5cf49..d3f847f06ea 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -191,6 +191,7 @@ from collections.abc import Iterable from psyneulink.core import llvm as pnlvm +from psyneulink.core.components.component import _get_parametervalue_attr from psyneulink.core.components.functions.function import Function, get_matrix, is_function_type from psyneulink.core.components.functions.learningfunctions import Hebbian from psyneulink.core.components.functions.objectivefunctions import Stability @@ -1005,7 +1006,7 @@ def recurrent_size(self): # single flag to check whether to get matrix from auto and hetero?
@property def matrix(self): - return self.parameters.matrix._get(self.most_recent_context) + return getattr(self, _get_parametervalue_attr(self.parameters.matrix)) @matrix.setter def matrix(self, val): # simplified version of standard setter (in Component.py) @@ -1014,18 +1015,13 @@ def matrix(self, val): # simplified version of standard setter (in Component.py) # KDM 7/1/19: reinstating below if hasattr(self, "recurrent_projection"): self.recurrent_projection.parameter_ports["matrix"].function.previous_value = val + self.recurrent_projection.parameter_ports["matrix"].function.reset = val self.parameters.matrix._set(val, self.most_recent_context) - if hasattr(self, '_parameter_ports') and 'matrix' in self._parameter_ports: - param_port = self._parameter_ports['matrix'] - - if hasattr(param_port.function, 'initializer'): - param_port.function.reset = val - @property def auto(self): - return self.parameters.auto._get(self.most_recent_context) + return getattr(self, _get_parametervalue_attr(self.parameters.auto)) @auto.setter def auto(self, val): @@ -1034,10 +1030,9 @@ def auto(self, val): if hasattr(self, "recurrent_projection") and 'hetero' in self._parameter_ports: self.recurrent_projection.parameter_ports["matrix"].function.previous_value = self.matrix - @property def hetero(self): - return self.parameters.hetero._get(self.most_recent_context) + return getattr(self, _get_parametervalue_attr(self.parameters.hetero)) @hetero.setter def hetero(self, val): diff --git a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py index 237c2bc2cac..5178cafc107 100644 --- a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py +++ b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py @@ -345,23 +345,6 @@ def owner_mech(self): ) ) - # these properties allow the auto and hetero properties to live purely on the 
RecurrentTransferMechanism - @property - def auto(self): - return self.owner_mech.auto - - @auto.setter - def auto(self, setting): - self.owner_mech.auto = setting - - @property - def hetero(self): - return self.owner_mech.hetero - - @hetero.setter - def hetero(self, setting): - self.owner_mech.hetero = setting - @property def matrix(self): owner_mech = self.owner_mech diff --git a/requirements.txt b/requirements.txt index e1e07dc8280..719cd01662d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,14 +1,14 @@ -autograd -dill -elfi -graphviz -grpcio<1.32.0 -grpcio-tools<1.32.0 -llvmlite -matplotlib -networkx==1.11 -numpy -pillow -toposort -torch; sys_platform != 'win32' and platform_machine == 'x86_64' and platform_python_implementation == 'CPython' -typecheck-decorator==1.2 +autograd<=1.3 +dill<=0.32 +elfi<=0.7.6 +graphviz<=0.14.1 +grpcio<=1.31.0 +grpcio-tools<=1.31.0 +llvmlite<=0.34 +matplotlib<=3.3.2 +networkx<=1.11 +numpy<=1.19.2 +pillow<=7.2.0 +toposort<=1.5 +torch<=1.6.0; sys_platform != 'win32' and platform_machine == 'x86_64' and platform_python_implementation == 'CPython' +typecheck-decorator<=1.2 diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 1f4925a2c78..8fbd849b7b3 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -1264,7 +1264,7 @@ def test_and_training_time(self, eps, opt,mode): # SET UP PROJECTIONS FOR SYSTEM and_map_sys = MappingProjection(name='and_map_sys', - matrix=and_map.matrix.copy(), + matrix=and_map.matrix.base.copy(), sender=and_in_sys, receiver=and_out_sys) @@ -1362,12 +1362,12 @@ def test_xor_training_time(self, eps, opt,mode): # SET UP PROJECTIONS FOR SYSTEM hid_map_sys = MappingProjection(name='hid_map_sys', - matrix=hid_map.matrix.copy(), + matrix=hid_map.matrix.base.copy(), sender=xor_in_sys, receiver=xor_hid_sys) out_map_sys = MappingProjection(name='out_map_sys', - matrix=out_map.matrix.copy(), + 
matrix=out_map.matrix.base.copy(), sender=xor_hid_sys, receiver=xor_out_sys) @@ -1541,37 +1541,37 @@ def test_semantic_net_training_time(self, eps, opt): # SET UP PROJECTIONS FOR SYSTEM - map_nouns_h1_sys = MappingProjection(matrix=map_nouns_h1.matrix.copy(), + map_nouns_h1_sys = MappingProjection(matrix=map_nouns_h1.matrix.base.copy(), name="map_nouns_h1_sys", sender=nouns_in_sys, receiver=h1_sys) - map_rels_h2_sys = MappingProjection(matrix=map_rels_h2.matrix.copy(), + map_rels_h2_sys = MappingProjection(matrix=map_rels_h2.matrix.base.copy(), name="map_relh2_sys", sender=rels_in_sys, receiver=h2_sys) - map_h1_h2_sys = MappingProjection(matrix=map_h1_h2.matrix.copy(), + map_h1_h2_sys = MappingProjection(matrix=map_h1_h2.matrix.base.copy(), name="map_h1_h2_sys", sender=h1_sys, receiver=h2_sys) - map_h2_I_sys = MappingProjection(matrix=map_h2_I.matrix.copy(), + map_h2_I_sys = MappingProjection(matrix=map_h2_I.matrix.base.copy(), name="map_h2_I_sys", sender=h2_sys, receiver=out_sig_I_sys) - map_h2_is_sys = MappingProjection(matrix=map_h2_is.matrix.copy(), + map_h2_is_sys = MappingProjection(matrix=map_h2_is.matrix.base.copy(), name="map_h2_is_sys", sender=h2_sys, receiver=out_sig_is_sys) - map_h2_has_sys = MappingProjection(matrix=map_h2_has.matrix.copy(), + map_h2_has_sys = MappingProjection(matrix=map_h2_has.matrix.base.copy(), name="map_h2_has_sys", sender=h2_sys, receiver=out_sig_has_sys) - map_h2_can_sys = MappingProjection(matrix=map_h2_can.matrix.copy(), + map_h2_can_sys = MappingProjection(matrix=map_h2_can.matrix.base.copy(), name="map_h2_can_sys", sender=h2_sys, receiver=out_sig_can_sys) @@ -1808,37 +1808,37 @@ def test_semantic_net_training_identicalness(self, eps, opt): # SET UP PROJECTIONS FOR SYSTEM - map_nouns_h1_sys = MappingProjection(matrix=map_nouns_h1.matrix.copy(), + map_nouns_h1_sys = MappingProjection(matrix=map_nouns_h1.matrix.base.copy(), name="map_nouns_h1_sys", sender=nouns_in_sys, receiver=h1_sys) - map_rels_h2_sys = 
MappingProjection(matrix=map_rels_h2.matrix.copy(), + map_rels_h2_sys = MappingProjection(matrix=map_rels_h2.matrix.base.copy(), name="map_relh2_sys", sender=rels_in_sys, receiver=h2_sys) - map_h1_h2_sys = MappingProjection(matrix=map_h1_h2.matrix.copy(), + map_h1_h2_sys = MappingProjection(matrix=map_h1_h2.matrix.base.copy(), name="map_h1_h2_sys", sender=h1_sys, receiver=h2_sys) - map_h2_I_sys = MappingProjection(matrix=map_h2_I.matrix.copy(), + map_h2_I_sys = MappingProjection(matrix=map_h2_I.matrix.base.copy(), name="map_h2_I_sys", sender=h2_sys, receiver=out_sig_I_sys) - map_h2_is_sys = MappingProjection(matrix=map_h2_is.matrix.copy(), + map_h2_is_sys = MappingProjection(matrix=map_h2_is.matrix.base.copy(), name="map_h2_is_sys", sender=h2_sys, receiver=out_sig_is_sys) - map_h2_has_sys = MappingProjection(matrix=map_h2_has.matrix.copy(), + map_h2_has_sys = MappingProjection(matrix=map_h2_has.matrix.base.copy(), name="map_h2_has_sys", sender=h2_sys, receiver=out_sig_has_sys) - map_h2_can_sys = MappingProjection(matrix=map_h2_can.matrix.copy(), + map_h2_can_sys = MappingProjection(matrix=map_h2_can.matrix.base.copy(), name="map_h2_can_sys", sender=h2_sys, receiver=out_sig_can_sys) @@ -2735,37 +2735,37 @@ def test_semantic_net_nested(self, eps, opt, mode): # SET UP PROJECTIONS FOR SYSTEM - map_nouns_h1_sys = MappingProjection(matrix=map_nouns_h1.matrix.copy(), + map_nouns_h1_sys = MappingProjection(matrix=map_nouns_h1.matrix.base.copy(), name="map_nouns_h1_sys", sender=nouns_in_sys, receiver=h1_sys) - map_rels_h2_sys = MappingProjection(matrix=map_rels_h2.matrix.copy(), + map_rels_h2_sys = MappingProjection(matrix=map_rels_h2.matrix.base.copy(), name="map_relh2_sys", sender=rels_in_sys, receiver=h2_sys) - map_h1_h2_sys = MappingProjection(matrix=map_h1_h2.matrix.copy(), + map_h1_h2_sys = MappingProjection(matrix=map_h1_h2.matrix.base.copy(), name="map_h1_h2_sys", sender=h1_sys, receiver=h2_sys) - map_h2_I_sys = MappingProjection(matrix=map_h2_I.matrix.copy(), + 
map_h2_I_sys = MappingProjection(matrix=map_h2_I.matrix.base.copy(), name="map_h2_I_sys", sender=h2_sys, receiver=out_sig_I_sys) - map_h2_is_sys = MappingProjection(matrix=map_h2_is.matrix.copy(), + map_h2_is_sys = MappingProjection(matrix=map_h2_is.matrix.base.copy(), name="map_h2_is_sys", sender=h2_sys, receiver=out_sig_is_sys) - map_h2_has_sys = MappingProjection(matrix=map_h2_has.matrix.copy(), + map_h2_has_sys = MappingProjection(matrix=map_h2_has.matrix.base.copy(), name="map_h2_has_sys", sender=h2_sys, receiver=out_sig_has_sys) - map_h2_can_sys = MappingProjection(matrix=map_h2_can.matrix.copy(), + map_h2_can_sys = MappingProjection(matrix=map_h2_can.matrix.base.copy(), name="map_h2_can_sys", sender=h2_sys, receiver=out_sig_can_sys) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 98f1516a2db..b67a65b42c3 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -362,7 +362,7 @@ def test_add_proj_weights_only(self): assert np.allclose(A.parameters.value.get(comp), [[1.1, 1.2, 1.3]]) assert np.allclose(B.get_input_values(comp), [[11.2, 14.8]]) assert np.allclose(B.parameters.value.get(comp), [[22.4, 29.6]]) - assert np.allclose(proj.matrix, weights) + assert np.allclose(proj.matrix.base, weights) def test_add_linear_processing_pathway_with_noderole_specified_in_tuple(self): comp = Composition() diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 0b2c4339409..9940c68cfe4 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -39,7 +39,7 @@ def test_add_node_with_control_specified_then_add_controller(self): comp.add_controller(ctl_mech) assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] - assert np.allclose(comp.controller.control[0].allocation_samples(), + assert 
np.allclose(comp.controller.control[0].allocation_samples.base(), [0.1, 0.4, 0.7000000000000001, 1.0000000000000002]) def test_add_controller_in_comp_constructor_then_add_node_with_control_specified(self): @@ -60,7 +60,7 @@ def test_add_controller_in_comp_constructor_then_add_node_with_control_specified comp.add_node(ddm) assert comp.controller.control[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert np.allclose(comp.controller.control[0].allocation_samples(), + assert np.allclose(comp.controller.control[0].allocation_samples.base(), [0.1, 0.4, 0.7000000000000001, 1.0000000000000002]) def test_redundant_control_spec_add_node_with_control_specified_then_controller_in_comp_constructor(self): @@ -77,7 +77,7 @@ def test_redundant_control_spec_add_node_with_control_specified_then_controller_ comp.add_controller(pnl.ControlMechanism(control_signals=("drift_rate", ddm))) assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert comp.controller.control_signals[0].allocation_samples is None + assert comp.controller.control_signals[0].allocation_samples.base is None def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node_with_control_specified(self): # First create Composition with controller that has HAS control specification, @@ -92,7 +92,7 @@ def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node comp.add_node(ddm) assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert comp.controller.control_signals[0].allocation_samples is None + assert comp.controller.control_signals[0].allocation_samples.base is None def 
test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node_with_alloc_samples_specified(self): # First create Composition with controller that has HAS control specification, @@ -108,7 +108,7 @@ def test_redundant_control_spec_add_controller_in_comp_constructor_then_add_node comp.add_node(ddm) assert comp.controller.control_signals[0].efferents[0].receiver == ddm.parameter_ports['drift_rate'] assert ddm.parameter_ports['drift_rate'].mod_afferents[0].sender.owner == comp.controller - assert np.allclose(comp.controller.control[0].allocation_samples(), [0.2, 0.5, 0.8]) + assert np.allclose(comp.controller.control[0].allocation_samples.base(), [0.2, 0.5, 0.8]) def test_deferred_init(self): # Test to insure controller works the same regardless of whether it is added to a composition before or after @@ -569,7 +569,7 @@ def test_lvoc_features_function(self): assert len(lvoc.input_ports) == 5 for i in range(1,5): - assert lvoc.input_ports[i].function.offset == 10.0 + assert lvoc.input_ports[i].function.offset.base == 10.0 @pytest.mark.control @pytest.mark.composition diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index 8d41222b52e..68917007362 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -1724,7 +1724,7 @@ def test_matrix_spec_and_learning_rate(self): learning_pathway = C.add_backpropagation_learning_pathway(pathway=[T1, W, T2]) target = learning_pathway.target inputs = {T1:[1,0], target:[1]} - C.learning_components[2].learning_rate = 0.5 + C.learning_components[2].learning_rate.base = 0.5 result = C.learn(inputs=inputs, num_trials=2) assert np.allclose(result, [[[0.52497919]], [[0.55439853]]]) diff --git a/tests/composition/test_models.py b/tests/composition/test_models.py index ccffd71d057..2fa220d87e4 100644 --- a/tests/composition/test_models.py +++ b/tests/composition/test_models.py @@ -256,7 +256,7 @@ def switch_integrator_mode(mechanisms, mode): def 
switch_noise(mechanisms, noise): for mechanism in mechanisms: - mechanism.noise = noise + mechanism.noise.base = noise def switch_to_initialization_trial(mechanisms): # Turn off accumulation diff --git a/tests/composition/test_runtime_params.py b/tests/composition/test_runtime_params.py index 56031412882..2e0cef09602 100644 --- a/tests/composition/test_runtime_params.py +++ b/tests/composition/test_runtime_params.py @@ -17,7 +17,7 @@ class TestMechanismRuntimeParams: def test_mechanism_runtime_param(self): T = TransferMechanism() - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].value == 0.0 # runtime param used for noise @@ -25,17 +25,17 @@ def test_mechanism_runtime_param(self): assert T.value == 12.0 # defalut values are restored - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].value == 0.0 T.execute(input=2.0) - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].value == 0.0 assert T.value == 2.0 def test_function_runtime_param(self): T = TransferMechanism() - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 # runtime param used for slope @@ -43,10 +43,10 @@ def test_function_runtime_param(self): assert T.value == 20.0 # defalut values are restored - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 T.execute(input=2.0) - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 assert T.value == 2.0 @@ -55,8 +55,8 @@ def test_use_and_reset_not_affect_other_assigned_vals(self): T = TransferMechanism() # Intercept attr assigned - T.function.intercept = 2.0 - assert T.function.intercept == 2.0 + T.function.intercept.base = 2.0 + assert T.function.intercept.base == 2.0 # runtime param used for slope T.execute(runtime_params={"slope": 10.0}, input=2.0) @@ -64,36 +64,36 
@@ def test_use_and_reset_not_affect_other_assigned_vals(self): assert T.value == 22.0 # slope restored to default, but intercept retains assigned value - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 - assert T.function.intercept == 2.0 + assert T.function.intercept.base == 2.0 # previous runtime_param for slope not used again T.execute(input=2.0) assert T.value == 4.0 - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 def test_reset_to_previously_assigned_val(self): T = TransferMechanism() - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 # set slope directly - T.function.slope = 2.0 - assert T.function.slope == 2.0 + T.function.slope.base = 2.0 + assert T.function.slope.base == 2.0 # runtime param used for slope T.execute(runtime_params={"slope": 10.0}, input=2.0) assert T.value == 20.0 # slope restored to previously assigned value - assert T.function.slope == 2.0 + assert T.function.slope.base == 2.0 assert T.parameter_ports['slope'].value == 2.0 T.execute(input=2.0) assert T.value == 4.0 - assert T.function.slope == 2.0 + assert T.function.slope.base == 2.0 def test_runtime_param_error(self): T = TransferMechanism() @@ -119,7 +119,7 @@ def test_mechanism_param_no_condition(self): C = Composition() C.add_node(T) - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].value == 0.0 # runtime param used for noise @@ -127,12 +127,12 @@ def test_mechanism_param_no_condition(self): runtime_params={T: {"noise": 10.0}}) assert T.parameters.value.get(C.default_execution_id) == 12.0 # noise restored to default - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].parameters.value.get(C) == 0.0 # previous runtime_param for noise not used again C.run(inputs={T: 2.0}, ) - assert T.noise == 0.0 + assert 
T.noise.base == 0.0 assert T.parameter_ports['noise'].parameters.value.get(C) == 0.0 assert T.parameters.value.get(C.default_execution_id) == 2.0 @@ -142,20 +142,20 @@ def test_function_param_no_condition(self): C = Composition() C.add_node(T) - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 C.run(inputs={T: 2.0}, runtime_params={T: {"slope": 10.0}}) # runtime param used for slope assert T.parameters.value.get(C.default_execution_id) == 20.0 # slope restored to default - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].parameters.value.get(C) == 1.0 # previous runtime_param for slope not used again C.run(inputs={T: 2.0}) assert T.parameters.value.get(C.default_execution_id) == 2.0 - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].parameters.value.get(C) == 1.0 def test_input_port_param_no_condition(self): @@ -164,8 +164,8 @@ def test_input_port_param_no_condition(self): T2 = TransferMechanism() C = Composition(pathways=[T1,T2]) - T1.function.slope = 5 - T2.input_port.function.scale = 4 + T1.function.slope.base = 5 + T2.input_port.function.scale.base = 4 C.run(inputs={T1: 2.0}, runtime_params={ T1: {'slope': 3}, # Mechanism's function (Linear) parameter @@ -186,18 +186,18 @@ def test_input_port_param_no_condition(self): assert T1.parameter_ports['slope'].parameters.value.get(C) == 5.0 assert T2.parameters.noise.get(C) == 0.0 assert T2.parameter_ports['noise'].parameters.value.get(C) == 0.0 - assert T2.function.intercept == 0.0 + assert T2.function.intercept.base == 0.0 assert T2.function.parameters.intercept.get(C) == 0.0 - assert T2.input_port.weight is None - assert T2.input_port.function.scale == 4.0 + assert T2.input_port.weight.base is None + assert T2.input_port.function.scale.base == 4.0 assert T2.input_port.function.parameters.scale.get(C) == 4.0 - assert 
T2.input_port.function.weights is None + assert T2.input_port.function.weights.base is None assert T2.input_port.function.parameters.weights.get(C) is None C.run(inputs={T1: 2.0}, ) assert C.results == [[[1201.5]], # (2*3*20*10)+1+0.5 [[40.]]] # 2*5*4 - assert T1.function.slope == 5.0 + assert T1.function.slope.base == 5.0 assert T1.parameter_ports['slope'].parameters.value.get(C) == 5.0 assert T2.input_port.function.parameters.scale.get(C.default_execution_id) == 4.0 @@ -209,7 +209,7 @@ def test_mechanism_param_with_AtTrial_condition(self): C = Composition() C.add_node(T) - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].value == 0.0 # run with runtime param used for noise only on trial 1 @@ -218,7 +218,7 @@ def test_mechanism_param_with_AtTrial_condition(self): # scheduler=S, num_trials=4) # noise restored to default - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].parameters.value.get(C) == 0.0 # run again to insure restored default for noise after last run @@ -237,7 +237,7 @@ def test_mechanism_param_with_AfterTrial_condition(self): C = Composition() C.add_node(T) - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].value == 0.0 # run with runtime param used for noise after trial 1 (i.e., trials 2 and 3) @@ -245,7 +245,7 @@ def test_mechanism_param_with_AfterTrial_condition(self): runtime_params={T: {"noise": (10.0, AfterTrial(1))}}, num_trials=4) # noise restored to default - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert T.parameter_ports['noise'].parameters.value.get(C) == 0.0 # run again to insure restored default for noise after last run C.run(inputs={T: 2.0}) @@ -268,7 +268,7 @@ def test_mechanism_param_with_combined_condition(self): runtime_params={T: {"noise": (10.0, Any(AtTrial(1), AfterTrial(2)))}}, num_trials=5) # noise restored to default - assert T.noise == 0.0 + assert T.noise.base == 0.0 assert 
T.parameter_ports['noise'].parameters.value.get(C) == 0.0 # run again to insure restored default for noise after last run @@ -288,7 +288,7 @@ def test_function_param_with_combined_condition(self): C = Composition() C.add_node(T) - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 # run with runtime param used for slope only on trial 1 and after 2 (i.e., 3 and 4) @@ -296,7 +296,7 @@ def test_function_param_with_combined_condition(self): runtime_params={T: {"slope": (10.0, Any(AtTrial(1), AfterTrial(2)))}}, num_trials=5) # slope restored to default - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 # run again to insure restored default for slope after last run @@ -316,7 +316,7 @@ def test_function_params_with_different_but_overlapping_conditions(self): C = Composition() C.add_node(T) - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 # run with runtime param used for slope only on trial 1 and after 2 (i.e., 3 and 4) @@ -325,9 +325,9 @@ def test_function_params_with_different_but_overlapping_conditions(self): "intercept": (1.0, AfterTrial(1))}}, num_trials=4) # slope restored to default - assert T.function.slope == 1.0 + assert T.function.slope.base == 1.0 assert T.parameter_ports['slope'].value == 1.0 - assert T.function.intercept == 0.0 + assert T.function.intercept.base == 0.0 assert T.parameter_ports['intercept'].value == 0.0 # run again to insure restored default for slope after last run @@ -346,8 +346,8 @@ def test_mechanism_params_with_combined_conditions_for_all_INPUT_PORT_PARAMS(sel T2 = TransferMechanism() C = Composition(pathways=[T1,T2]) - T1.function.slope = 5 - T2.input_port.function.scale = 4 + T1.function.slope.base = 5 + T2.input_port.function.scale.base = 4 C.run(inputs={T1: 2.0}, runtime_params={ T1: {'slope': (3, AtTrial(1))}, # Condition on 
Mechanism's function (Linear) parameter @@ -370,12 +370,12 @@ def test_mechanism_params_with_combined_conditions_for_all_INPUT_PORT_PARAMS(sel assert T1.parameter_ports['slope'].parameters.value.get(C) == 5.0 assert T2.parameters.noise.get(C) == 0.0 assert T2.parameter_ports['noise'].parameters.value.get(C) == 0.0 - assert T2.function.intercept == 0.0 + assert T2.function.intercept.base == 0.0 assert T2.function.parameters.intercept.get(C) == 0.0 - assert T2.input_port.weight is None - assert T2.input_port.function.scale == 4.0 + assert T2.input_port.weight.base is None + assert T2.input_port.function.scale.base == 4.0 assert T2.input_port.function.parameters.scale.get(C) == 4.0 - assert T2.input_port.function.weights is None + assert T2.input_port.function.weights.base is None assert T2.input_port.function.parameters.weights.get(C) is None # run again to insure restored default for noise after last run @@ -395,8 +395,8 @@ def test_mechanism_params_with_combined_conditions_for_individual_INPUT_PORT_PAR P = MappingProjection(sender=T1, receiver=T2, name='MY PROJECTION') C = Composition(pathways=[[T1,P,T2]]) - T1.function.slope = 5 - T2.input_port.function.scale = 4 + T1.function.slope.base = 5 + T2.input_port.function.scale.base = 4 # Run 0: Test INPUT_PORT_PARAMS for InputPort function directly (scale) and in FUNCTION_PARAMS dict (weights) C.run(inputs={T1: 2.0}, runtime_params={ @@ -464,12 +464,12 @@ def test_mechanism_params_with_combined_conditions_for_individual_INPUT_PORT_PAR assert T1.parameter_ports['slope'].parameters.value.get(C) == 5.0 assert T2.parameters.noise.get(C) == 0.0 assert T2.parameter_ports['noise'].parameters.value.get(C) == 0.0 - assert T2.function.intercept == 0.0 + assert T2.function.intercept.base == 0.0 assert T2.function.parameters.intercept.get(C) == 0.0 - assert T2.input_port.weight is None - assert T2.input_port.function.scale == 4.0 + assert T2.input_port.weight.base is None + assert T2.input_port.function.scale.base == 4.0 assert 
T2.input_port.function.parameters.scale.get(C) == 4.0 - assert T2.input_port.function.weights is None + assert T2.input_port.function.weights.base is None assert T2.input_port.function.parameters.weights.get(C) is None # Final Run: insure restored default for noise after last run @@ -502,11 +502,11 @@ def test_params_for_input_port_and_projection_variable_and_value(self): P2 = MappingProjection(sender=TARGET_INPUT, receiver=CM.input_ports[TARGET], name='TARGET PROJECTION') C = Composition(nodes=[SAMPLE_INPUT, TARGET_INPUT, CM], projections=[P1,P2]) - SAMPLE_INPUT.function.slope = 3 - CM.input_ports[SAMPLE].function.scale = 2 + SAMPLE_INPUT.function.slope.base = 3 + CM.input_ports[SAMPLE].function.scale.base = 2 - TARGET_INPUT.input_port.function.scale = 4 - CM.input_ports[TARGET].function.scale = 1.5 + TARGET_INPUT.input_port.function.scale.base = 4 + CM.input_ports[TARGET].function.scale.base = 1.5 C.run(inputs={SAMPLE_INPUT: 2.0, TARGET_INPUT: 5.0}, @@ -603,7 +603,7 @@ def test_params_for_output_port_variable_and_value(self): P2 = MappingProjection(sender=T1.output_ports['SECOND'], receiver=T2) C = Composition(nodes=[T1,T2], projections=[P1,P2]) - T1.output_ports['SECOND'].function.slope = 1.5 + T1.output_ports['SECOND'].function.slope.base = 1.5 # Run 0: Test of both OutputPort variables assigned C.run(inputs={T1: 10.0}, @@ -680,8 +680,8 @@ def test_composition_runtime_param_errors(self): P2 = MappingProjection(sender=T2, receiver=CM.input_ports[TARGET], name='TARGET PROJECTION') C = Composition(nodes=[T1,T2,CM], projections=[P1,P2]) - T1.function.slope = 3 - T2.input_port.function.scale = 4 + T1.function.slope.base = 3 + T2.input_port.function.scale.base = 4 # Bad param specified for Mechanism with pytest.raises(ComponentError) as error_text: diff --git a/tests/functions/test_accumulator_integrator.py b/tests/functions/test_accumulator_integrator.py index ab0b4208483..5f73d06bb69 100644 --- a/tests/functions/test_accumulator_integrator.py +++ 
b/tests/functions/test_accumulator_integrator.py @@ -185,9 +185,9 @@ def test_accumulator_as_function_of_matrix_param_of_mapping_projection(self): C = Composition() C.add_linear_processing_pathway([T1, M, T2]) C.run(inputs={T1: [1.0, 1.0, 1.0]}) - assert np.allclose(M.matrix, [[ 1., 0., 0.], [ 0., 1., 0.],[ 0., 0., 1.]]) + assert np.allclose(M.matrix.base, [[ 1., 0., 0.], [ 0., 1., 0.],[ 0., 0., 1.]]) M.parameter_ports[MATRIX].function.parameters.increment.set(2, C) C.run(inputs={T1: [1.0, 1.0, 1.0]}) - assert np.allclose(M.matrix, [[ 3., 2., 2.], [ 2., 3., 2.], [ 2., 2., 3.]]) + assert np.allclose(M.matrix.base, [[ 3., 2., 2.], [ 2., 3., 2.], [ 2., 2., 3.]]) C.run(inputs={T1: [1.0, 1.0, 1.0]}) - assert np.allclose(M.matrix, [[ 5., 4., 4.], [ 4., 5., 4.], [ 4., 4., 5.]]) + assert np.allclose(M.matrix.base, [[ 5., 4., 4.], [ 4., 5., 4.], [ 4., 4., 5.]]) diff --git a/tests/functions/test_distance.py b/tests/functions/test_distance.py index 38b26bdc972..fac3457bb97 100644 --- a/tests/functions/test_distance.py +++ b/tests/functions/test_distance.py @@ -7,7 +7,7 @@ SIZE=1000 # Some metrics (CROSS_ENTROPY) don't like 0s -test_var = [np.random.rand(SIZE) + Function.EPSILON, np.random.rand(SIZE) + Function.EPSILON] +test_var = np.random.rand(2, SIZE) + Function.EPSILON v1 = test_var[0] v2 = test_var[1] norm = len(test_var[0]) @@ -18,25 +18,25 @@ def correlation(v1,v2): return np.sum(v1_norm * v2_norm) / np.sqrt(np.sum(v1_norm**2) * np.sum(v2_norm**2)) test_data = [ - (test_var, kw.MAX_ABS_DIFF, False, None, np.max(abs(v1 - v2))), - (test_var, kw.MAX_ABS_DIFF, True, None, np.max(abs(v1 - v2))), - (test_var, kw.DIFFERENCE, False, None, np.sum(np.abs(v1 - v2))), - (test_var, kw.DIFFERENCE, True, None, np.sum(np.abs(v1 - v2)) / norm), - (test_var, kw.COSINE, False, None, 1 - np.abs(np.sum(v1 * v2) / ( + (kw.MAX_ABS_DIFF, False, None, np.max(abs(v1 - v2))), + (kw.MAX_ABS_DIFF, True, None, np.max(abs(v1 - v2))), + (kw.DIFFERENCE, False, None, np.sum(np.abs(v1 - v2))), + 
(kw.DIFFERENCE, True, None, np.sum(np.abs(v1 - v2)) / norm), + (kw.COSINE, False, None, 1 - np.abs(np.sum(v1 * v2) / ( np.sqrt(np.sum(v1**2)) * np.sqrt(np.sum(v2**2))) )), - (test_var, kw.NORMED_L0_SIMILARITY, False, None, 1 - np.sum(np.abs(v1 - v2) / 4)), - (test_var, kw.NORMED_L0_SIMILARITY, True, None, (1 - np.sum(np.abs(v1 - v2) / 4)) / norm), - (test_var, kw.EUCLIDEAN, False, None, np.linalg.norm(v1 - v2)), - (test_var, kw.EUCLIDEAN, True, None, np.linalg.norm(v1 - v2) / norm), - (test_var, kw.ANGLE, False, "Needs sci-py", 0), - (test_var, kw.ANGLE, True, "Needs sci-py", 0 / norm), - (test_var, kw.CORRELATION, False, None, 1 - np.abs(correlation(v1,v2))), - (test_var, kw.CORRELATION, True, None, 1 - np.abs(correlation(v1,v2))), - (test_var, kw.CROSS_ENTROPY, False, None, -np.sum(v1 * np.log(v2))), - (test_var, kw.CROSS_ENTROPY, True, None, -np.sum(v1 * np.log(v2)) / norm), - (test_var, kw.ENERGY, False, None, -np.sum(v1 * v2) / 2), - (test_var, kw.ENERGY, True, None, (-np.sum(v1 * v2) / 2) / norm**2), + (kw.NORMED_L0_SIMILARITY, False, None, 1 - np.sum(np.abs(v1 - v2) / 4)), + (kw.NORMED_L0_SIMILARITY, True, None, (1 - np.sum(np.abs(v1 - v2) / 4)) / norm), + (kw.EUCLIDEAN, False, None, np.linalg.norm(v1 - v2)), + (kw.EUCLIDEAN, True, None, np.linalg.norm(v1 - v2) / norm), + (kw.ANGLE, False, "Needs sci-py", 0), + (kw.ANGLE, True, "Needs sci-py", 0 / norm), + (kw.CORRELATION, False, None, 1 - np.abs(correlation(v1,v2))), + (kw.CORRELATION, True, None, 1 - np.abs(correlation(v1,v2))), + (kw.CROSS_ENTROPY, False, None, -np.sum(v1 * np.log(v2))), + (kw.CROSS_ENTROPY, True, None, -np.sum(v1 * np.log(v2)) / norm), + (kw.ENERGY, False, None, -np.sum(v1 * v2) / 2), + (kw.ENERGY, True, None, (-np.sum(v1 * v2) / 2) / norm**2), ] # use list, naming function produces ugly names @@ -65,7 +65,8 @@ def correlation(v1,v2): @pytest.mark.function @pytest.mark.distance_function @pytest.mark.benchmark -@pytest.mark.parametrize("variable, metric, normalize, fail, expected", 
test_data, ids=names) +@pytest.mark.parametrize("metric, normalize, fail, expected", test_data, ids=names) +@pytest.mark.parametrize("variable", [test_var, test_var.astype(np.float32), test_var.tolist()], ids=["np.float", "np.float32", "list"]) @pytest.mark.parametrize("mode", ['Python', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.param('PTX', marks=[pytest.mark.llvm, pytest.mark.cuda]) diff --git a/tests/functions/test_stability.py b/tests/functions/test_stability.py index bce867303f7..0407094c005 100644 --- a/tests/functions/test_stability.py +++ b/tests/functions/test_stability.py @@ -11,16 +11,16 @@ SIZE=10 # Some metrics (CROSS_ENTROPY) don't like 0s test_var = np.random.rand(SIZE) + Function.EPSILON -hollow_matrix= Function.get_matrix(kw.HOLLOW_MATRIX, SIZE, SIZE) +hollow_matrix = Function.get_matrix(kw.HOLLOW_MATRIX, SIZE, SIZE) v1 = test_var v2 = np.dot(hollow_matrix * hollow_matrix, v1) norm = len(v1) test_data = [ - (test_var, kw.ENTROPY, False, -np.sum(v1 * np.log(v2))), - (test_var, kw.ENTROPY, True, -np.sum(v1 * np.log(v2)) / norm), - (test_var, kw.ENERGY, False, -np.sum(v1 * v2) / 2), - (test_var, kw.ENERGY, True, (-np.sum(v1 * v2) / 2) / norm**2), + (kw.ENTROPY, False, -np.sum(v1 * np.log(v2))), + (kw.ENTROPY, True, -np.sum(v1 * np.log(v2)) / norm), + (kw.ENERGY, False, -np.sum(v1 * v2) / 2), + (kw.ENERGY, True, (-np.sum(v1 * v2) / 2) / norm**2), ] # use list, naming function produces ugly names @@ -34,7 +34,8 @@ @pytest.mark.function @pytest.mark.stability_function @pytest.mark.benchmark -@pytest.mark.parametrize("variable, metric, normalize, expected", test_data, ids=names) +@pytest.mark.parametrize("metric, normalize, expected", test_data, ids=names) +@pytest.mark.parametrize("variable", [test_var, test_var.astype(np.float32)], ids=["float", "float32"] ) @pytest.mark.parametrize('mode', ['Python', pytest.param('LLVM', marks=pytest.mark.llvm), pytest.param('PTX', marks=[pytest.mark.llvm, pytest.mark.cuda])]) diff --git 
a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index f64d9ec878d..665df1d33c5 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -21,8 +21,7 @@ def test_helper_fclamp(mode): with pnlvm.LLVMBuilderContext() as ctx: - local_vec = copy.deepcopy(VECTOR) - double_ptr_ty = ctx.float_ty.as_pointer() + double_ptr_ty = ir.DoubleType().as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, ctx.int32_ty, double_ptr_ty)) @@ -48,8 +47,9 @@ def test_helper_fclamp(mode): ref = np.clip(VECTOR, TST_MIN, TST_MAX) bounds = np.asfarray([TST_MIN, TST_MAX]) bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + local_vec = copy.deepcopy(VECTOR) if mode == 'CPU': - ct_ty = pnlvm._convert_llvm_ir_to_ctype(double_ptr_ty) + ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) ct_vec = local_vec.ctypes.data_as(ct_ty) ct_bounds = bounds.ctypes.data_as(ct_ty) @@ -66,8 +66,7 @@ def test_helper_fclamp(mode): def test_helper_fclamp_const(mode): with pnlvm.LLVMBuilderContext() as ctx: - local_vec = copy.deepcopy(VECTOR) - double_ptr_ty = ctx.float_ty.as_pointer() + double_ptr_ty = ir.DoubleType().as_pointer() func_ty = ir.FunctionType(ir.VoidType(), (double_ptr_ty, ctx.int32_ty)) # Create clamp function @@ -86,10 +85,11 @@ def test_helper_fclamp_const(mode): builder.ret_void() + local_vec = copy.deepcopy(VECTOR) ref = np.clip(VECTOR, TST_MIN, TST_MAX) bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': - ct_ty = pnlvm._convert_llvm_ir_to_ctype(double_ptr_ty) + ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) ct_vec = local_vec.ctypes.data_as(ct_ty) bin_f(ct_vec, DIM_X) @@ -105,7 +105,7 @@ def test_helper_fclamp_const(mode): def test_helper_is_close(mode): with pnlvm.LLVMBuilderContext() as ctx: - double_ptr_ty = ctx.float_ty.as_pointer() + double_ptr_ty = ir.DoubleType().as_pointer() func_ty = ir.FunctionType(ir.VoidType(), [double_ptr_ty, double_ptr_ty, double_ptr_ty, ctx.int32_ty]) @@ -124,7 +124,9 @@ def test_helper_is_close(mode): 
val2 = b1.load(val2_ptr) close = pnlvm.helpers.is_close(b1, val1, val2) out_ptr = b1.gep(out, [index]) - out_val = b1.select(close, ctx.float_ty(1), ctx.float_ty(0)) + out_val = b1.select(close, val1.type(1), val1.type(0)) + res = b1.select(close, out_ptr.type.pointee(1), + out_ptr.type.pointee(0)) b1.store(out_val, out_ptr) builder.ret_void() @@ -139,7 +141,7 @@ def test_helper_is_close(mode): ref = np.isclose(vec1, vec2) bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': - ct_ty = pnlvm._convert_llvm_ir_to_ctype(double_ptr_ty) + ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) ct_vec1 = vec1.ctypes.data_as(ct_ty) ct_vec2 = vec2.ctypes.data_as(ct_ty) ct_res = res.ctypes.data_as(ct_ty) @@ -157,7 +159,7 @@ def test_helper_is_close(mode): def test_helper_all_close(mode): with pnlvm.LLVMBuilderContext() as ctx: - arr_ptr_ty = ir.ArrayType(ctx.float_ty, DIM_X).as_pointer() + arr_ptr_ty = ir.ArrayType(ir.DoubleType(), DIM_X).as_pointer() func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty, ir.IntType(32).as_pointer()]) @@ -178,7 +180,7 @@ def test_helper_all_close(mode): ref = np.allclose(vec1, vec2) bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': - ct_ty = pnlvm._convert_llvm_ir_to_ctype(arr_ptr_ty) + ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) ct_vec1 = vec1.ctypes.data_as(ct_ty) ct_vec2 = vec2.ctypes.data_as(ct_ty) res = ctypes.c_int32() @@ -580,7 +582,7 @@ def test_helper_elementwise_op(mode, var, expected): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': - ct_vec = var.ctypes.data_as(ctypes.POINTER(bin_f.byref_arg_types[0])) + ct_vec = np.ctypeslib.as_ctypes(var) res = bin_f.byref_arg_types[1]() bin_f(ct_vec, ctypes.byref(res)) else: diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index aa31f6c992a..a04a0cae542 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -51,8 +51,8 @@ def 
report_trial(composition): gain_created_by_LC_output_port_1.append(LC.output_ports[0].parameters.value.get(context)) mod_gain_assigned_to_A.append([A.get_mod_gain(composition)]) mod_gain_assigned_to_B.append([B.get_mod_gain(composition)]) - base_gain_assigned_to_A.append(A.function.gain) - base_gain_assigned_to_B.append(B.function.gain) + base_gain_assigned_to_A.append(A.function.gain.base) + base_gain_assigned_to_B.append(B.function.gain.base) C._analyze_graph() benchmark(C.run, inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]}, diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index 8d8e50eb9c6..6887c8ea603 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -89,10 +89,10 @@ def test_threshold_param(self): D = DDM(name='DDM', function=DriftDiffusionIntegrator(threshold=10.0)) - assert D.function.threshold == 10.0 + assert D.function.threshold.base == 10.0 - D.function.threshold = 5.0 - assert D.function.threshold == 5.0 + D.function.threshold.base = 5.0 + assert D.function.threshold.base == 5.0 def test_threshold_sets_is_finished(self): D = DDM(name='DDM', diff --git a/tests/mechanisms/test_drift_diffusion_analytical.py b/tests/mechanisms/test_drift_diffusion_analytical.py index bf31baa512f..e60615ef8a6 100644 --- a/tests/mechanisms/test_drift_diffusion_analytical.py +++ b/tests/mechanisms/test_drift_diffusion_analytical.py @@ -28,11 +28,11 @@ def check_drift_diffusion_analytical(B, data, degenerate_cases=False): r_stim, r_drift_rate, r_threshold, r_starting_point, r_bias, r_t0, r_noise = data[i, 0:7].tolist() ground_truth = data[i,7:] - B.function.drift_rate = r_drift_rate - B.function.threshold = r_threshold - B.function.starting_point = r_starting_point - B.function.t0 = r_t0 - B.function.noise = r_noise + B.function.drift_rate.base = r_drift_rate + B.function.threshold.base = r_threshold + B.function.starting_point.base = r_starting_point + B.function.t0.base = r_t0 + 
B.function.noise.base = r_noise results_b = B.execute(r_stim) diff --git a/tests/mechanisms/test_input_state_spec.py b/tests/mechanisms/test_input_state_spec.py index 2a440ed71cf..fe7e69634ec 100644 --- a/tests/mechanisms/test_input_state_spec.py +++ b/tests/mechanisms/test_input_state_spec.py @@ -745,21 +745,21 @@ def test_lists_of_mechanisms_and_output_ports(self): assert len(T2.input_ports[0].path_afferents)==2 assert T2.input_ports[0].path_afferents[0].sender.owner.name=='T0' assert T2.input_ports[0].path_afferents[1].sender.owner.name=='T1' - assert T2.input_ports[0].path_afferents[1].matrix.shape == (2,1) + assert T2.input_ports[0].path_afferents[1].matrix.base.shape == (2,1) # Test list of Mechanisms in 4-item tuple specification T3 = TransferMechanism(name='T3', input_ports=[([T0, T1], None, None, InputPort)]) assert len(T3.input_ports[0].path_afferents)==2 assert T3.input_ports[0].path_afferents[0].sender.owner.name=='T0' assert T3.input_ports[0].path_afferents[1].sender.owner.name=='T1' - assert T3.input_ports[0].path_afferents[1].matrix.shape == (2,1) + assert T3.input_ports[0].path_afferents[1].matrix.base.shape == (2,1) # Test "bare" list of OutputPorts T4= TransferMechanism(name='T4', input_ports=[[T0.output_ports[0], T1.output_ports[1]]]) assert len(T4.input_ports[0].path_afferents)==2 assert T4.input_ports[0].path_afferents[0].sender.owner.name=='T0' assert T4.input_ports[0].path_afferents[1].sender.owner.name=='T1' - assert T4.input_ports[0].path_afferents[1].matrix.shape == (3,1) + assert T4.input_ports[0].path_afferents[1].matrix.base.shape == (3,1) # Test list of OutputPorts in 4-item tuple specification T5 = TransferMechanism(name='T5', input_ports=[([T0.output_ports[0], T1.output_ports[1]], @@ -768,7 +768,7 @@ def test_lists_of_mechanisms_and_output_ports(self): assert len(T5.input_ports[0].path_afferents)==2 assert T5.input_ports[0].path_afferents[0].sender.owner.name=='T0' assert T5.input_ports[0].path_afferents[1].sender.owner.name=='T1' - 
assert T5.input_ports[0].path_afferents[1].matrix.shape == (3,1) + assert T5.input_ports[0].path_afferents[1].matrix.base.shape == (3,1) # ------------------------------------------------------------------------------------------------ # TEST 35 diff --git a/tests/mechanisms/test_kwta.py b/tests/mechanisms/test_kwta.py index 2df4ba06043..3aaa34a2710 100644 --- a/tests/mechanisms/test_kwta.py +++ b/tests/mechanisms/test_kwta.py @@ -18,7 +18,7 @@ def test_kwta_empty_spec(self): np.testing.assert_allclose(K.value, K.defaults.value) assert(K.defaults.variable == [[0]]) assert(K.size == [1]) - assert(K.matrix == [[5]]) + assert(K.matrix.base == [[5]]) def test_kwta_check_attrs(self): K = KWTAMechanism( @@ -28,7 +28,7 @@ def test_kwta_check_attrs(self): np.testing.assert_allclose(K.value, K.defaults.value) assert(np.allclose(K.defaults.variable, [[0., 0., 0.]])) assert(K.size == [3]) - assert(np.allclose(K.matrix, [[5, 0, 0], [0, 5, 0], [0, 0, 5]])) + assert(np.allclose(K.matrix.base, [[5, 0, 0], [0, 5, 0], [0, 0, 5]])) assert(K.recurrent_projection.sender is K.output_port) assert(K.recurrent_projection.receiver is K.input_port) @@ -182,7 +182,7 @@ def test_kwta_matrix_auto_hetero_spec(self): auto=3, hetero=2 ) - assert(np.allclose(K.recurrent_projection.matrix, [[3, 2, 2, 2], [2, 3, 2, 2], [2, 2, 3, 2], [2, 2, 2, 3]])) + assert(np.allclose(K.recurrent_projection.matrix.base, [[3, 2, 2, 2], [2, 3, 2, 2], [2, 2, 3, 2], [2, 2, 2, 3]])) def test_kwta_matrix_hetero_spec(self): K = KWTAMechanism( @@ -190,7 +190,7 @@ def test_kwta_matrix_hetero_spec(self): size=3, hetero=-.5, ) - assert(np.allclose(K.recurrent_projection.matrix, [[5, -.5, -.5], [-.5, 5, -.5], [-.5, -.5, 5]])) + assert(np.allclose(K.recurrent_projection.matrix.base, [[5, -.5, -.5], [-.5, 5, -.5], [-.5, -.5, 5]])) def test_kwta_matrix_auto_spec(self): K = KWTAMechanism( @@ -198,7 +198,7 @@ def test_kwta_matrix_auto_spec(self): size=3, auto=-.5, ) - assert(np.allclose(K.recurrent_projection.matrix, [[-.5, 0, 0], 
[0, -.5, 0], [0, 0, -.5]])) + assert(np.allclose(K.recurrent_projection.matrix.base, [[-.5, 0, 0], [0, -.5, 0], [0, 0, -.5]])) class TestKWTARatio: @@ -297,7 +297,7 @@ def test_kwta_k_value_empty_size_4(self): name='K', size=4 ) - assert K.k_value == 0.5 + assert K.k_value.base == 0.5 c = Composition(pathways=[K], prefs=TestKWTARatio.simple_prefs) @@ -311,7 +311,7 @@ def test_kwta_k_value_empty_size_6(self): name='K', size=6 ) - assert K.k_value == 0.5 + assert K.k_value.base == 0.5 c = Composition(pathways=[K], prefs=TestKWTARatio.simple_prefs) @@ -327,7 +327,7 @@ def test_kwta_k_value_int_size_5(self): size=5, k_value=3 ) - assert K.k_value == 3 + assert K.k_value.base == 3 # This is a deprecated test used when the int_k optimization was being used. It's no longer useful since int_k is # dynamically calculated as of 8/9/17 -CW @@ -339,7 +339,7 @@ def test_kwta_k_value_int_size_5(self): # size=size_val, # k_value=0.4 # ) - # assert K.k_value == 0.4 + # assert K.k_value.base == 0.4 # assert K.int_k == expected_int_k # p = Process(pathway=[K], prefs=TestKWTARatio.simple_prefs) # s = System(processes=[p], prefs=TestKWTARatio.simple_prefs) @@ -391,7 +391,7 @@ def test_kwta_threshold_empty(self): name='K', size=4 ) - assert K.threshold == 0 + assert K.threshold.base == 0 def test_kwta_threshold_int(self): K = KWTAMechanism( diff --git a/tests/mechanisms/test_lca.py b/tests/mechanisms/test_lca.py index e5de17c3acc..64dfaecedc4 100644 --- a/tests/mechanisms/test_lca.py +++ b/tests/mechanisms/test_lca.py @@ -158,9 +158,9 @@ def test_equivalance_of_threshold_and_when_finished_condition(self): def test_LCAMechanism_matrix(self): matrix = [[0,-2],[-2,0]] lca1 = LCAMechanism(size=2, leak=0.5, competition=2) - assert np.allclose(lca1.matrix, matrix) + assert np.allclose(lca1.matrix.base, matrix) lca2 = LCAMechanism(size=2, leak=0.5, matrix=matrix) - assert np.allclose(lca1.matrix, lca2.matrix) + assert np.allclose(lca1.matrix.base, lca2.matrix.base) # Note: In the following 
tests, since the LCAMechanism's threshold is specified # it executes until the it reaches threshold. diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index fbf04912747..a4ab38020b1 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import FunctionError from psyneulink.core.components.functions.learningfunctions import Hebbian, Reinforcement, TDLearning from psyneulink.core.components.functions.objectivefunctions import Stability, Distance @@ -18,6 +19,42 @@ class TestProcessingMechanismFunctions: + @pytest.mark.benchmark(group="ProcessingMechanism[DefaultFunction]") + @pytest.mark.parametrize("variable", [[1, 2, 3, 4], + [1., 2., 3., 4.], + np.asarray([1., 2., 3., 4.], dtype=np.int8), + np.asarray([1., 2., 3., 4.], dtype=np.int16), + np.asarray([1., 2., 3., 4.], dtype=np.int32), + np.asarray([1., 2., 3., 4.], dtype=np.int64), + np.asarray([1., 2., 3., 4.], dtype=np.float32), + np.asarray([1., 2., 3., 4.], dtype=np.float64), + [[1, 2, 3, 4]], + [[1., 2., 3., 4.]], + np.asarray([[1., 2., 3., 4.]], dtype=np.int8), + np.asarray([[1., 2., 3., 4.]], dtype=np.int16), + np.asarray([[1., 2., 3., 4.]], dtype=np.int32), + np.asarray([[1., 2., 3., 4.]], dtype=np.int64), + np.asarray([[1., 2., 3., 4.]], dtype=np.float32), + np.asarray([[1., 2., 3., 4.]], dtype=np.float64), + ], + ids=["list.int", "list.float", "np.1d.i8", "np.1d.i16", "np.1d.i32", "np.1d.i64", "np.1d.f32", "np.1d.f64", + "list2d.int", "list2d.float", "np.2d.i8", "np.2d.i16", "np.2d.i32", "np.2d.i64", "np.2d.f32", "np.2d.f64", + ]) + @pytest.mark.parametrize("mode", ["Python", + pytest.param("LLVM", marks=[pytest.mark.llvm]), + pytest.param("PTX", marks=[pytest.mark.llvm, pytest.mark.cuda]), + ]) + def test_processing_mechanism_default_function(self, mode, 
variable, benchmark): + PM = ProcessingMechanism(default_variable=[0, 0, 0, 0]) + if mode == "Python": + ex = PM.execute + elif mode == "LLVM": + ex = pnlvm.MechExecution(PM).execute + elif mode == "PTX": + ex = pnlvm.MechExecution(PM).cuda_execute + res = benchmark(ex, variable) + assert np.allclose(res, [[1., 2., 3., 4.]]) + def test_processing_mechanism_linear_function(self): PM1 = ProcessingMechanism() diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 6291d2a60f8..f7a4521b0e9 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -73,7 +73,7 @@ def test_recurrent_mech_empty_spec(self): R = RecurrentTransferMechanism(auto=1.0) np.testing.assert_allclose(R.value, R.defaults.value) np.testing.assert_allclose(R.defaults.variable, [[0]]) - np.testing.assert_allclose(R.matrix, [[1]]) + np.testing.assert_allclose(R.matrix.base, [[1]]) def test_recurrent_mech_check_attrs(self): R = RecurrentTransferMechanism( @@ -81,19 +81,19 @@ def test_recurrent_mech_check_attrs(self): size=3, auto=1.0 ) - print("matrix = ", R.matrix) + print("matrix = ", R.matrix.base) print("auto = ", R.auto) print("hetero = ", R.hetero) # np.testing.assert_allclose(R.value, R.defaults.value) # np.testing.assert_allclose(R.defaults.variable, [[0., 0., 0.]]) - # np.testing.assert_allclose(R.matrix, [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) + # np.testing.assert_allclose(R.matrix.base, [[1., 1., 1.], [1., 1., 1.], [1., 1., 1.]]) def test_recurrent_mech_check_proj_attrs(self): R = RecurrentTransferMechanism( name='R', size=3 ) - np.testing.assert_allclose(R.recurrent_projection.matrix, R.matrix) + np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base) assert R.recurrent_projection.sender is R.output_port assert R.recurrent_projection.receiver is R.input_port @@ -274,7 +274,7 @@ def 
test_recurrent_mech_matrix_keyword_spec(self, matrix): ) val = R.execute([10, 10, 10, 10]) np.testing.assert_allclose(val, [[10., 10., 10., 10.]]) - np.testing.assert_allclose(R.recurrent_projection.matrix, get_matrix(matrix, R.size[0], R.size[0])) + np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.size[0], R.size[0])) @pytest.mark.parametrize("matrix", [np.matrix('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4']) def test_recurrent_mech_matrix_other_spec(self, matrix): @@ -287,10 +287,10 @@ def test_recurrent_mech_matrix_other_spec(self, matrix): val = R.execute([10, 10]) # np.testing.assert_allclose(val, [[10., 10.]]) - # assert isinstance(R.matrix, np.ndarray) - # np.testing.assert_allclose(R.matrix, [[1, 2], [3, 4]]) - # np.testing.assert_allclose(R.recurrent_projection.matrix, [[1, 2], [3, 4]]) - # assert isinstance(R.recurrent_projection.matrix, np.ndarray) + # assert isinstance(R.matrix.base, np.ndarray) + # np.testing.assert_allclose(R.matrix.base, [[1, 2], [3, 4]]) + # np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[1, 2], [3, 4]]) + # assert isinstance(R.recurrent_projection.matrix.base, np.ndarray) def test_recurrent_mech_matrix_auto_spec(self): R = RecurrentTransferMechanism( @@ -298,8 +298,8 @@ def test_recurrent_mech_matrix_auto_spec(self): size=3, auto=2 ) - assert isinstance(R.matrix, np.ndarray) - np.testing.assert_allclose(R.matrix, [[2, 1, 1], [1, 2, 1], [1, 1, 2]]) + assert isinstance(R.matrix.base, np.ndarray) + np.testing.assert_allclose(R.matrix.base, [[2, 1, 1], [1, 2, 1], [1, 1, 2]]) np.testing.assert_allclose(run_twice_in_composition(R, [1, 2, 3], [10, 11, 12]), [17, 19, 21]) def test_recurrent_mech_matrix_hetero_spec(self): @@ -312,8 +312,8 @@ def test_recurrent_mech_matrix_hetero_spec(self): # the behavior of execute() changes, feel free to change these numbers val = R.execute([-1, -2, -3]) np.testing.assert_allclose(val, [[-1, -2, -3]]) - assert 
isinstance(R.matrix, np.ndarray) - np.testing.assert_allclose(R.matrix, [[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]) + assert isinstance(R.matrix.base, np.ndarray) + np.testing.assert_allclose(R.matrix.base, [[0, -1, -1], [-1, 0, -1], [-1, -1, 0]]) # Execution 1: # Recurrent input = [5, 4, 3] | New input = [1, 2, 3] | Total input = [6, 6, 6] # Output 1 = [6, 6, 6] @@ -331,8 +331,8 @@ def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self): ) val = R.execute([10]) np.testing.assert_allclose(val, [[10.]]) - assert isinstance(R.matrix, np.ndarray) - np.testing.assert_allclose(R.matrix, [[-2]]) + assert isinstance(R.matrix.base, np.ndarray) + np.testing.assert_allclose(R.matrix.base, [[-2]]) def test_recurrent_mech_matrix_auto_hetero_spec_size_4(self): R = RecurrentTransferMechanism( @@ -343,8 +343,8 @@ def test_recurrent_mech_matrix_auto_hetero_spec_size_4(self): ) val = R.execute([10, 10, 10, 10]) np.testing.assert_allclose(val, [[10., 10., 10., 10.]]) - np.testing.assert_allclose(R.matrix, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]]) - assert isinstance(R.matrix, np.ndarray) + np.testing.assert_allclose(R.matrix.base, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]]) + assert isinstance(R.matrix.base, np.ndarray) def test_recurrent_mech_matrix_auto_hetero_matrix_spec(self): # when auto, hetero, and matrix are all specified, auto and hetero should take precedence @@ -357,8 +357,8 @@ def test_recurrent_mech_matrix_auto_hetero_matrix_spec(self): ) val = R.execute([10, 10, 10, 10]) np.testing.assert_allclose(val, [[10., 10., 10., 10.]]) - np.testing.assert_allclose(R.matrix, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]]) - assert isinstance(R.matrix, np.ndarray) + np.testing.assert_allclose(R.matrix.base, [[2.2, -3, -3, -3], [-3, 2.2, -3, -3], [-3, -3, 2.2, -3], [-3, -3, -3, 2.2]]) + assert isinstance(R.matrix.base, np.ndarray) def test_recurrent_mech_auto_matrix_spec(self): # 
auto should override the diagonal only @@ -370,7 +370,7 @@ def test_recurrent_mech_auto_matrix_spec(self): ) val = R.execute([10, 11, 12, 13]) np.testing.assert_allclose(val, [[10., 11., 12., 13.]]) - np.testing.assert_allclose(R.matrix, [[2.2, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 2.2, 4], [1, 2, 3, 2.2]]) + np.testing.assert_allclose(R.matrix.base, [[2.2, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 2.2, 4], [1, 2, 3, 2.2]]) def test_recurrent_mech_auto_array_matrix_spec(self): R = RecurrentTransferMechanism( @@ -381,7 +381,7 @@ def test_recurrent_mech_auto_array_matrix_spec(self): ) val = R.execute([10, 11, 12, 13]) np.testing.assert_allclose(val, [[10., 11., 12., 13.]]) - np.testing.assert_allclose(R.matrix, [[1.1, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 3.3, 4], [1, 2, 3, 4.4]]) + np.testing.assert_allclose(R.matrix.base, [[1.1, 2, 3, 4], [1, 2.2, 3, 4], [1, 2, 3.3, 4], [1, 2, 3, 4.4]]) def test_recurrent_mech_hetero_float_matrix_spec(self): # hetero should override off-diagonal only @@ -394,7 +394,7 @@ def test_recurrent_mech_hetero_float_matrix_spec(self): val = R.execute([1, 2, 3, 4]) np.testing.assert_allclose(val, [[1., 2., 3., 4.]]) np.testing.assert_allclose( - R.matrix, + R.matrix.base, [[1, -2.2, -2.2, -2.2], [-2.2, 2, -2.2, -2.2], [-2.2, -2.2, 3, -2.2], [-2.2, -2.2, -2.2, 4]] ) @@ -408,7 +408,7 @@ def test_recurrent_mech_hetero_matrix_matrix_spec(self): val = R.execute([1, 2, 3, 4]) np.testing.assert_allclose(val, [[1., 2., 3., 4.]]) np.testing.assert_allclose( - R.matrix, + R.matrix.base, [[1, -3, -2, -1], [-4, 2, -2, -1], [-4, -3, 3, -1], [-4, -3, -2, 4]] ) @@ -424,7 +424,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v1(self): val = R.execute([1, 2, 3, 4]) np.testing.assert_allclose(val, [[1., 2., 3., 4.]]) np.testing.assert_allclose( - R.matrix, + R.matrix.base, [[1, -3, -2, -1], [-4, 3, -2, -1], [-4, -3, 5, -1], [-4, -3, -2, 7]] ) @@ -439,7 +439,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v2(self): val = R.execute([1, 2, 3, 4]) 
np.testing.assert_allclose(val, [[1., 2., 3., 4.]]) np.testing.assert_allclose( - R.matrix, + R.matrix.base, [[3, -3, -2, -1], [-4, 3, -2, -1], [-4, -3, 3, -1], [-4, -3, -2, 3]] ) @@ -454,7 +454,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v3(self): val = R.execute([1, 2, 3, 4]) np.testing.assert_allclose(val, [[1., 2., 3., 4.]]) np.testing.assert_allclose( - R.matrix, + R.matrix.base, [[3, 2, 2, 2], [2, 3, 2, 2], [2, 2, 3, 2], [2, 2, 2, 3]] ) @@ -689,14 +689,14 @@ def test_transfer_mech_process_matrix_change(self): function=Linear) c = Composition(pathways=[[T1, proj, T2]]) c.run(inputs={T1: [[1, 2, 3, 4]]}) - proj.matrix = [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]] - assert np.allclose(proj.matrix, [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]) + proj.matrix.base = [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]] + assert np.allclose(proj.matrix.base, [[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]]) # c.run(inputs={T1: [[1, 2, 3, 4]]}) T1.execute([[1, 2, 3, 4]]) proj.execute() # removed this assert, because before the changes of most_recent_execution_id -> most_recent_context - # proj.matrix referred to the 'Process-0' execution_id, even though it was last executed with None - # assert np.allclose(proj.matrix, np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])) + # proj.matrix.base referred to the 'Process-0' execution_id, even though it was last executed with None + # assert np.allclose(proj.matrix.base, np.array([[2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2], [2, 2, 2, 2]])) def test_recurrent_mech_process_matrix_change(self): R = RecurrentTransferMechanism( @@ -713,7 +713,7 @@ def test_recurrent_mech_process_matrix_change(self): np.testing.assert_allclose(T.parameters.value.get(c), [[1, 2, 3, 4]]) np.testing.assert_allclose(R.parameters.value.get(c), [[1, 2, 3, 4]]) c.run(inputs={T: [[1, 3, 2, 5]]}) - np.testing.assert_allclose(R.recurrent_projection.matrix, [[2, 0, 1, 3]] * 4) + 
np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[2, 0, 1, 3]] * 4) np.testing.assert_allclose(T.parameters.value.get(c), [[1, 3, 2, 5]]) np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]]) @@ -732,7 +732,7 @@ def test_recurrent_mech_process_proj_matrix_change(self): np.testing.assert_allclose(T.parameters.value.get(c), [[1, 2, 3, 4]]) np.testing.assert_allclose(R.parameters.value.get(c), [[1, 2, 3, 4]]) c.run(inputs={T: [[1, 3, 2, 5]]}) - np.testing.assert_allclose(R.recurrent_projection.matrix, [[2, 0, 1, 3]] * 4) + np.testing.assert_allclose(R.recurrent_projection.matrix.base, [[2, 0, 1, 3]] * 4) np.testing.assert_allclose(T.parameters.value.get(c), [[1, 3, 2, 5]]) np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]]) @@ -863,8 +863,8 @@ def test_recurrent_mech_with_learning(self): [0.1, 0.1, 0.1, 0.1] ] ) - np.testing.assert_allclose(R.recurrent_projection.matrix, R.matrix) - np.testing.assert_allclose(R.input_port.path_afferents[0].matrix, R.matrix) + np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base) + np.testing.assert_allclose(R.input_port.path_afferents[0].matrix.base, R.matrix.base) # Test that activity is properly computed prior to learning # p = Process(pathway=[R]) @@ -907,9 +907,9 @@ def test_recurrent_mech_change_learning_rate(self): ) c = Composition(pathways=[R]) - assert R.learning_rate == 0.1 - assert R.learning_mechanism.learning_rate == 0.1 - # assert R.learning_mechanism.function.learning_rate == 0.1 + assert R.learning_rate.base == 0.1 + assert R.learning_mechanism.learning_rate.base == 0.1 + # assert R.learning_mechanism.function.learning_rate.base == 0.1 c.learn(inputs={R:[[1.0, 1.0, 1.0, 1.0]]}) matrix_1 = [[0., 1.1, 1.1, 1.1], [1.1, 0., 1.1, 1.1], @@ -917,11 +917,11 @@ def test_recurrent_mech_change_learning_rate(self): [1.1, 1.1, 1.1, 0.]] assert np.allclose(R.recurrent_projection.mod_matrix, matrix_1) print(R.recurrent_projection.mod_matrix) - 
R.learning_rate = 0.9 + R.learning_rate.base = 0.9 - assert R.learning_rate == 0.9 - assert R.learning_mechanism.learning_rate == 0.9 - # assert R.learning_mechanism.function.learning_rate == 0.9 + assert R.learning_rate.base == 0.9 + assert R.learning_mechanism.learning_rate.base == 0.9 + # assert R.learning_mechanism.function.learning_rate.base == 0.9 c.learn(inputs={R:[[1.0, 1.0, 1.0, 1.0]]}) matrix_2 = [[0., 1.911125, 1.911125, 1.911125], [1.911125, 0., 1.911125, 1.911125], @@ -1214,7 +1214,7 @@ def test_defaults(self): print("R.parameters.hetero.get() = ", R.parameters.hetero.get()) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get() = ", R.parameters.matrix.get()) comp = pnl.Composition() @@ -1234,7 +1234,7 @@ def test_defaults(self): print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid)) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid)) def test_auto(self): @@ -1253,7 +1253,7 @@ def test_auto(self): print("R.parameters.hetero.get() = ", R.parameters.hetero.get()) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get() = ", R.parameters.matrix.get()) comp = pnl.Composition() @@ -1273,7 +1273,7 @@ def test_auto(self): print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid)) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid)) def test_hetero(self): @@ -1291,7 +1291,7 @@ def test_hetero(self): print("R.parameters.hetero.get() = ", R.parameters.hetero.get()) print("\n\nMatrix Values 
----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get() = ", R.parameters.matrix.get()) comp = pnl.Composition() @@ -1313,7 +1313,7 @@ def test_hetero(self): print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid)) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid)) def test_auto_and_hetero(self): @@ -1335,7 +1335,7 @@ def test_auto_and_hetero(self): print("R.parameters.hetero.get() = ", R.parameters.hetero.get()) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get() = ", R.parameters.matrix.get()) comp = pnl.Composition() @@ -1355,7 +1355,7 @@ def test_auto_and_hetero(self): print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid)) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid)) def test_matrix(self): @@ -1376,7 +1376,7 @@ def test_matrix(self): print("R.parameters.hetero.get() = ", R.parameters.hetero.get()) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get() = ", R.parameters.matrix.get()) comp = pnl.Composition() @@ -1396,5 +1396,5 @@ def test_matrix(self): print("R.parameters.hetero.get(eid) = ", R.parameters.hetero.get(eid)) print("\n\nMatrix Values ----------------------------------") - print("R.matrix = ", R.matrix) + print("R.matrix = ", R.matrix.base) print("R.parameters.matrix.get(eid) = ", R.parameters.matrix.get(eid)) diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py 
index 7f81227a8bb..d1dd9875762 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -339,7 +339,7 @@ def test_transfer_mech_uniform_to_normal_noise(self): noise=UniformToNormalDist(), integration_rate=1.0 ) - T.noise.parameters.random_state.get(None).seed(22) + T.noise.base.parameters.random_state.get(None).seed(22) val = T.execute([0, 0, 0, 0]) assert np.allclose(val, [[-0.81177443, -0.04593492, -0.20051725, 1.07665147]]) @@ -1147,7 +1147,7 @@ def test_transfer_mech_integration_rate_0_8_initial_0_5(self, mode): val = e.execute([1, 1, 1, 1]) assert np.allclose(val, [[0.9, 0.9, 0.9, 0.9]]) - T.noise = 10 + T.noise.base = 10 if mode == 'Python': val = T.execute([1, 2, -3, 0]) diff --git a/tests/ports/test_parameter_ports.py b/tests/ports/test_parameter_ports.py index a34be007d94..c651737b15a 100644 --- a/tests/ports/test_parameter_ports.py +++ b/tests/ports/test_parameter_ports.py @@ -11,58 +11,58 @@ class TestParameterPorts: def test_inspect_function_params_slope_noise(self): A = TransferMechanism() B = TransferMechanism() - assert A.function.slope == 1.0 - assert B.function.slope == 1.0 - assert A.mod_slope == [1.0] - assert B.mod_slope == [1.0] + assert A.function.slope.base == 1.0 + assert B.function.slope.base == 1.0 + assert A.function.slope.modulated == [1.0] + assert B.function.slope.modulated == [1.0] - assert A.noise == 0.0 - assert B.noise == 0.0 - assert A.mod_noise == 0.0 - assert B.mod_noise == 0.0 + assert A.noise.base == 0.0 + assert B.noise.base == 0.0 + assert A.noise.modulated == 0.0 + assert B.noise.modulated == 0.0 - A.function.slope = 0.2 + A.function.slope.base = 0.2 - assert A.function.slope == 0.2 - assert B.function.slope == 1.0 - assert A.mod_slope == [1.0] - assert B.mod_slope == [1.0] + assert A.function.slope.base == 0.2 + assert B.function.slope.base == 1.0 + assert A.function.slope.modulated == [1.0] + assert B.function.slope.modulated == [1.0] - A.noise = 0.5 + A.noise.base = 
0.5 - assert A.noise == 0.5 - assert B.noise == 0.0 - assert A.mod_noise == 0.0 - assert B.mod_noise == 0.0 + assert A.noise.base == 0.5 + assert B.noise.base == 0.0 + assert A.noise.modulated == 0.0 + assert B.noise.modulated == 0.0 - B.function.slope = 0.7 + B.function.slope.base = 0.7 - assert A.function.slope == 0.2 - assert B.function.slope == 0.7 - assert A.mod_slope == [1.0] - assert B.mod_slope == [1.0] + assert A.function.slope.base == 0.2 + assert B.function.slope.base == 0.7 + assert A.function.slope.modulated == [1.0] + assert B.function.slope.modulated == [1.0] - B.noise = 0.6 + B.noise.base = 0.6 - assert A.noise == 0.5 - assert B.noise == 0.6 - assert A.mod_noise == 0.0 - assert B.mod_noise == 0.0 + assert A.noise.base == 0.5 + assert B.noise.base == 0.6 + assert A.noise.modulated == 0.0 + assert B.noise.modulated == 0.0 A.execute(1.0) - assert A.mod_slope == [0.2] + assert A.function.slope.modulated == [0.2] B.execute(1.0) - assert A.function.slope == 0.2 - assert B.function.slope == 0.7 - assert A.mod_slope == [0.2] - assert B.mod_slope == [0.7] + assert A.function.slope.base == 0.2 + assert B.function.slope.base == 0.7 + assert A.function.slope.modulated == [0.2] + assert B.function.slope.modulated == [0.7] - assert A.noise == 0.5 - assert B.noise == 0.6 - assert A.mod_noise == 0.5 - assert B.mod_noise == 0.6 + assert A.noise.base == 0.5 + assert B.noise.base == 0.6 + assert A.noise.modulated == 0.5 + assert B.noise.modulated == 0.6 def test_direct_call_to_constructor_error(self): from psyneulink.core.components.ports.parameterport import ParameterPort, ParameterPortError @@ -81,67 +81,67 @@ def test_configurable_params(self): # SLOPE - - - - - - - - - assert np.allclose(T.function.slope, old_value) - assert np.allclose(T.mod_slope, old_value) + assert np.allclose(T.function.slope.base, old_value) + assert np.allclose(T.function.slope.modulated, old_value) - T.function.slope = new_value + T.function.slope.base = new_value - assert 
np.allclose(T.function.slope, new_value) - assert np.allclose(T.mod_slope, old_value) + assert np.allclose(T.function.slope.base, new_value) + assert np.allclose(T.function.slope.modulated, old_value) # INTERCEPT - - - - - - - - - assert np.allclose(T.function.intercept, old_value) - assert np.allclose(T.mod_intercept, old_value) + assert np.allclose(T.function.intercept.base, old_value) + assert np.allclose(T.function.intercept.modulated, old_value) - T.function.intercept = new_value + T.function.intercept.base = new_value - assert np.allclose(T.function.intercept, new_value) - assert np.allclose(T.mod_intercept, old_value) + assert np.allclose(T.function.intercept.base, new_value) + assert np.allclose(T.function.intercept.modulated, old_value) # SMOOTHING FACTOR - - - - - - - - - assert np.allclose(T.integration_rate, old_value) - assert np.allclose(T.mod_integration_rate, old_value) + assert np.allclose(T.integration_rate.base, old_value) + assert np.allclose(T.integration_rate.modulated, old_value) - T.integration_rate = new_value + T.integration_rate.base = new_value # KAM changed 3/2/18 -- # function_params looks at ParameterPort value, so this will not update until next execution - assert np.allclose(T.integration_rate, new_value) - assert np.allclose(T.mod_integration_rate, old_value) + assert np.allclose(T.integration_rate.base, new_value) + assert np.allclose(T.integration_rate.modulated, old_value) # NOISE - - - - - - - - - assert np.allclose(T.noise, old_value) - assert np.allclose(T.mod_noise, old_value) + assert np.allclose(T.noise.base, old_value) + assert np.allclose(T.noise.modulated, old_value) - T.noise = new_value + T.noise.base = new_value # KAM changed 3/2/18 -- # function_params looks at ParameterPort value, so this will not update until next execution - assert np.allclose(T.noise, new_value) - assert np.allclose(T.mod_noise, old_value) + assert np.allclose(T.noise.base, new_value) + assert np.allclose(T.noise.modulated, old_value) 
T.execute(1.0) - assert np.allclose(T.function.slope, new_value) - assert np.allclose(T.mod_slope, new_value) + assert np.allclose(T.function.slope.base, new_value) + assert np.allclose(T.function.slope.modulated, new_value) - assert np.allclose(T.function.intercept, new_value) - assert np.allclose(T.mod_intercept, new_value) + assert np.allclose(T.function.intercept.base, new_value) + assert np.allclose(T.function.intercept.modulated, new_value) - assert np.allclose(T.integration_rate, new_value) - assert np.allclose(T.mod_integration_rate, new_value) + assert np.allclose(T.integration_rate.base, new_value) + assert np.allclose(T.integration_rate.modulated, new_value) - assert np.allclose(T.noise, new_value) - assert np.allclose(T.mod_noise, new_value) + assert np.allclose(T.noise.base, new_value) + assert np.allclose(T.noise.modulated, new_value) class TestModParams: def test_mod_param_error(self): T = TransferMechanism() with pytest.raises(ComponentError) as error_text: - T.mod_slope = 20.0 + T.function.slope.modulated = 20.0 assert "directly because it is computed by the ParameterPort" in str(error_text.value) @@ -189,7 +189,3 @@ def test_alias_duplicate_base_access_fails(self): match='Did you want leak-function or rate' ): mech.parameter_ports['leak'] - - def test_multiple_ports_warning(self): - with pytest.warns(UserWarning, match='Multiple ParameterPorts will be created'): - TransferMechanism(function=pnl.Logistic) diff --git a/tests/projections/test_projection_specifications.py b/tests/projections/test_projection_specifications.py index f783354284b..e7bff1857f0 100644 --- a/tests/projections/test_projection_specifications.py +++ b/tests/projections/test_projection_specifications.py @@ -43,9 +43,9 @@ def test_projection_specification_formats(self): M3_M4_matrix_B, M4]) - assert np.allclose(M2_M3_proj.matrix, M2_M3_matrix) + assert np.allclose(M2_M3_proj.matrix.base, M2_M3_matrix) assert M2.efferents[0] is M2_M3_proj - assert 
np.allclose(M3.efferents[0].matrix, M3_M4_matrix_A) + assert np.allclose(M3.efferents[0].matrix.base, M3_M4_matrix_A) # This is if different Projections are allowed between the same sender and receiver in different Compositions: # assert np.allclose(M3.efferents[1].matrix, M3_M4_matrix_B) c.run(inputs={M1:[2, -30]}) @@ -141,9 +141,9 @@ def test_mapping_projection_using_2_item_tuple_with_list_of_port_Names(self): output_ports=[(['InputPort-0','InputPort-1'], T1)]) assert len(T2.output_ports)==1 assert T2.output_ports[0].efferents[0].receiver.name == 'InputPort-0' - assert T2.output_ports[0].efferents[0].matrix.shape == (1,2) + assert T2.output_ports[0].efferents[0].matrix.base.shape == (1,2) assert T2.output_ports[0].efferents[1].receiver.name == 'InputPort-1' - assert T2.output_ports[0].efferents[1].matrix.shape == (1,3) + assert T2.output_ports[0].efferents[1].matrix.base.shape == (1,3) def test_mapping_projection_using_2_item_tuple_and_3_item_tuples_with_index_specs(self): @@ -155,9 +155,9 @@ def test_mapping_projection_using_2_item_tuple_and_3_item_tuples_with_index_spec (['InputPort-0','InputPort-1'], 1, T1)]) assert len(T2.output_ports)==3 assert T2.output_ports[0].efferents[0].receiver.name == 'InputPort-0' - assert T2.output_ports[0].efferents[0].matrix.shape == (1,2) + assert T2.output_ports[0].efferents[0].matrix.base.shape == (1,2) assert T2.output_ports[0].efferents[1].receiver.name == 'InputPort-1' - assert T2.output_ports[0].efferents[1].matrix.shape == (1,3) + assert T2.output_ports[0].efferents[1].matrix.base.shape == (1,3) assert T2.output_ports[1].owner_value_index == 2 assert T2.output_ports[2].owner_value_index == 1 diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt index fefe14cd6d0..4748e19a21b 100644 --- a/tutorial_requirements.txt +++ b/tutorial_requirements.txt @@ -1,4 +1,3 @@ -graphviz -ipython -jupyter -matplotlib +graphviz<=0.14.1 +jupyter<=1.0.0 +matplotlib<=3.3.2