Commit

…Link into refactor/learning_pathways_using_ports
jdcpni committed Nov 3, 2023
2 parents 71821f9 + 87fd1a9 commit 60bbf36
Showing 10 changed files with 163 additions and 104 deletions.
10 changes: 9 additions & 1 deletion .github/workflows/codeql.yml
@@ -2,7 +2,7 @@ name: "CodeQL"

on:
push:
branches: [ "master", "devel" ]
branches: [ "master", "devel", "codeql" ]
paths-ignore:
- 'docs/**'
pull_request:
@@ -34,6 +34,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 100

- name: Initialize CodeQL
uses: github/codeql-action/init@v2
@@ -44,6 +46,12 @@
- name: Autobuild
uses: github/codeql-action/autobuild@v2

- name: Cache cleanup
shell: bash
run: |
$CODEQL_PYTHON -m pip cache info
$CODEQL_PYTHON -m pip cache purge
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
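The new "Cache cleanup" step prints pip's cache details and then empties the cache before the analysis runs. A rough stand-alone equivalent of the two commands, as a Python sketch that assumes the current interpreter stands in for $CODEQL_PYTHON:

import subprocess
import sys

# Same two commands as the workflow step, with sys.executable standing in
# for $CODEQL_PYTHON (an assumption made only for this sketch).
subprocess.run([sys.executable, "-m", "pip", "cache", "info"])
subprocess.run([sys.executable, "-m", "pip", "cache", "purge"])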
19 changes: 18 additions & 1 deletion .github/workflows/pnl-ci.yml
@@ -52,6 +52,7 @@ jobs:
python-architecture: ['x64']
extra-args: ['']
os: [ubuntu, macos, windows]
version-restrict: ['']
include:
# code-coverage build on macos python 3.9
- python-version: '3.9'
@@ -78,11 +79,18 @@
- python-version: '3.10'
os: macos
# pytest needs both '--benchmark-only' and '-m benchmark'
# The former fails the test if benchamrks cannot be enabled
# The former fails the test if benchmarks cannot be enabled
# (e.g. due to --dist setting)
# The latter works around a crash in pytest when collecting tests:
# https://github.com/ionelmc/pytest-benchmark/issues/243
extra-args: '-m benchmark --benchmark-enable --benchmark-only --benchmark-min-rounds=2 --benchmark-max-time=0.001 --benchmark-warmup=off -n0 --dist=no'

# add python 3.7 with deps restricted to min supported version
- python-version: '3.7'
python-architecture: 'x64'
os: ubuntu
version-restrict: 'min'

# add python 3.8 build on macos since 3.7 is broken
# https://github.com/actions/virtual-environments/issues/4230
- python-version: '3.8'
@@ -115,6 +123,15 @@ jobs:
python-version: ${{ matrix.python-version }}
architecture: ${{ matrix.python-architecture }}

- name: Restrict version of direct dependencies
if: ${{ matrix.version-restrict == 'min' }}
shell: bash
run: |
sed -i '/^[^#]/s/>=/==/' *requirements.txt
git config user.name "github actions"
git config user.email "none"
git commit -a -m "Restrict version of direct dependencies to min"
- name: Get pip cache location
shell: bash
id: pip_cache
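The "Restrict version of direct dependencies" step above rewrites every non-comment ">=" specifier in the requirements files into an exact "==" pin, so this job installs and tests the minimum supported versions. A minimal Python sketch of the same transformation (the requirement line is just an example taken from requirements.txt):

import re

def restrict_to_min(line: str) -> str:
    """Mimic sed '/^[^#]/s/>=/==/': on non-comment lines, rewrite the first
    '>=' specifier to an exact '==' pin."""
    if not line or line.startswith("#"):
        return line
    return re.sub(r">=", "==", line, count=1)

print(restrict_to_min("numpy>=1.21.0, <1.24.5"))  # -> numpy==1.21.0, <1.24.5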
2 changes: 1 addition & 1 deletion dev_requirements.txt
@@ -1,6 +1,6 @@
jupyter<1.0.1
packaging<24.0
pytest<7.4.3
pytest<7.4.4
pytest-benchmark<4.0.1
pytest-cov<4.1.1
pytest-forked<1.7.0
6 changes: 5 additions & 1 deletion psyneulink/core/components/component.py
@@ -1317,6 +1317,10 @@ def _get_compilation_state(self):
if getattr(self.parameters, 'has_recurrent_input_port', False):
blacklist.update(['combination_function'])

# Drop previous_value from MemoryFunctions
if hasattr(self.parameters, 'duplicate_keys'):
blacklist.add("previous_value")

def _is_compilation_state(p):
# FIXME: This should use defaults instead of 'p.get'
return p.name not in blacklist and \
@@ -1400,7 +1404,7 @@ def _get_compilation_params(self):
"random_variables", "smoothing_factor", "per_item",
"key_size", "val_size", "max_entries", "random_draw",
"randomization_dimension", "save_values", "save_samples",
"max_iterations",
"max_iterations", "duplicate_keys",
# not used in compiled learning
"learning_results", "learning_signal", "learning_signals",
"error_matrix", "error_signal", "activation_input",
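The component.py change above treats the presence of a duplicate_keys parameter as a proxy for "this is a MemoryFunction" and keeps the potentially large previous_value memory out of the compiled state (and duplicate_keys itself out of the compiled parameters). A toy sketch of that name-based filtering, using a hypothetical stand-in object rather than real PsyNeuLink Parameter machinery:

from types import SimpleNamespace

# Hypothetical parameter container; the real code inspects self.parameters.
params = SimpleNamespace(duplicate_keys=False, previous_value=[[[0.0]], [[0.0]]], rate=1.0)

blacklist = {"combination_function"}
if hasattr(params, "duplicate_keys"):      # proxy for "this is a MemoryFunction"
    blacklist.add("previous_value")        # drop the stored memory from compiled state

compiled_state = {name: value for name, value in vars(params).items()
                  if name not in blacklist}
print(sorted(compiled_state))              # ['duplicate_keys', 'rate']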
47 changes: 35 additions & 12 deletions psyneulink/core/components/functions/stateful/memoryfunctions.py
@@ -2427,8 +2427,8 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out,
store_prob = pnlvm.helpers.load_extract_scalar_array_one(builder, store_prob_ptr)
store_rand = builder.fcmp_ordered('<', store_prob, store_prob.type(1.0))

# The call to random function needs to be behind jump to match python
# code
# The call to random function needs to be behind the check of 'store_rand'
# to match python code semantics
with builder.if_then(store_rand):
rand_ptr = builder.alloca(ctx.float_ty)
builder.call(uniform_f, [rand_struct, rand_ptr])
@@ -2439,6 +2439,27 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out,
# Store
store = builder.load(store_ptr)
with builder.if_then(store, likely=True):
modified_key_ptr = builder.alloca(var_key_ptr.type.pointee)

# Apply noise to key.
# There are 3 types of noise: scalar, vector of length 1, and vector matching variable
noise_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, "noise")
rate_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, "rate")
with pnlvm.helpers.array_ptr_loop(builder, var_key_ptr, "key_apply_rate_noise") as (b, idx):
if pnlvm.helpers.is_2d_matrix(noise_ptr):
noise_elem_ptr = b.gep(noise_ptr, [ctx.int32_ty(0), ctx.int32_ty(0), idx])
noise_val = b.load(noise_elem_ptr)
else:
noise_val = pnlvm.helpers.load_extract_scalar_array_one(b, noise_ptr)

rate_val = pnlvm.helpers.load_extract_scalar_array_one(b, rate_ptr)

modified_key_elem_ptr = b.gep(modified_key_ptr, [ctx.int32_ty(0), idx])
key_elem_ptr = b.gep(var_key_ptr, [ctx.int32_ty(0), idx])
key_elem = b.load(key_elem_ptr)
key_elem = b.fmul(key_elem, rate_val)
key_elem = b.fadd(key_elem, noise_val)
b.store(key_elem, modified_key_elem_ptr)

# Check if such key already exists
is_new_key_ptr = builder.alloca(ctx.bool_ty)
@@ -2451,7 +2472,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out,
key_differs_ptr = b.alloca(ctx.bool_ty)
b.store(key_differs_ptr.type.pointee(0), key_differs_ptr)
with pnlvm.helpers.array_ptr_loop(b, cmp_key_ptr, "key_compare") as (b2, idx2):
var_key_element = b2.gep(var_key_ptr, [ctx.int32_ty(0), idx2])
var_key_element = b2.gep(modified_key_ptr, [ctx.int32_ty(0), idx2])
cmp_key_element = b2.gep(cmp_key_ptr, [ctx.int32_ty(0), idx2])
element_differs = b.fcmp_unordered('!=',
b.load(var_key_element),
@@ -2473,7 +2494,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out,
store_key_ptr = builder.gep(keys_ptr, [ctx.int32_ty(0), write_idx])
store_val_ptr = builder.gep(vals_ptr, [ctx.int32_ty(0), write_idx])

builder.store(builder.load(var_key_ptr), store_key_ptr)
builder.store(builder.load(modified_key_ptr), store_key_ptr)
builder.store(builder.load(var_val_ptr), store_val_ptr)

# Update counters
@@ -2674,18 +2695,20 @@ def _function(self,
# CURRENT PROBLEM WITH LATTER IS THAT IT CAUSES CRASH ON INIT, SINCE NOT OUTPUT_PORT
# SO, WOULD HAVE TO RETURN ZEROS ON INIT AND THEN SUPPRESS AFTERWARDS, AS MOCKED UP BELOW
memory = [[0]* self.parameters.key_size._get(context), [0]* self.parameters.val_size._get(context)]

# Store variable to dict:
rate = self._get_current_parameter_value(RATE, context)
if rate is not None:
key = np.asfarray(key) * np.asfarray(rate)
assert len(key) == len(variable[KEYS]), "{} vs. {}".format(key, variable[KEYS])

if noise is not None:
key = np.asarray(key, dtype=float)
if isinstance(noise, numbers.Number):
key += noise
else:
# assume array with same shape as variable
# TODO: does val need noise?
key += noise[KEYS]
# TODO: does val need noise?
key = np.asfarray(key) + np.asfarray(noise)[KEYS]
assert len(key) == len(variable[KEYS]), "{} vs. {}".format(key, variable[KEYS])

if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.uniform()):
self._store_memory(variable, context)
self._store_memory([key, val], context)

# Return 3d array with keys and vals as lists
# IMPLEMENTATION NOTE: if try to create np.ndarray directly, and keys and vals have same length
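With the changes above, both the compiled (LLVM) path and the Python _function path store a modified key, scaled by rate and shifted by noise, instead of the raw input key. A simplified NumPy sketch of the arithmetic; shapes and values are invented for illustration, and the real code also accepts scalar noise and indexes array noise by KEYS:

import numpy as np

key   = np.asarray([0.1, 0.2, 0.3], dtype=float)     # example key entry
rate  = 2.0                                           # scalar rate
noise = np.asarray([0.01, 0.0, -0.01], dtype=float)  # per-element noise

# The entry written to memory is the transformed key, not the raw one.
modified_key = key * rate + noise
print(modified_key)   # [0.21 0.4  0.59]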
2 changes: 0 additions & 2 deletions psyneulink/core/globals/log.py
@@ -527,8 +527,6 @@ def __init__(self, owner):

# Log to which this dict belongs
self._ownerLog = owner
# Object to which the log belongs
self._owner = owner.owner

# # VERSION THAT USES OWNER'S logPref TO LIST ENTRIES TO BE RECORDED
# # List of entries (in owner's logPrefs) of entries to record
3 changes: 2 additions & 1 deletion psyneulink/core/llvm/helpers.py
@@ -436,7 +436,8 @@ def printf(builder, fmt, *args, override_debug=False):
global_fmt.initializer = fmt_ty(fmt_data)

fmt_ptr = builder.gep(global_fmt, [ir.IntType(32)(0), ir.IntType(32)(0)])
builder.call(printf, [fmt_ptr] + list(args))
conv_args = [builder.fpext(a, ir.DoubleType()) if is_floating_point(a) else a for a in args]
builder.call(printf, [fmt_ptr] + conv_args)


def printf_float_array(builder, array, prefix="", suffix="\n", override_debug=False):
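The helpers.py change above extends single-precision arguments to double before the call, matching C's default argument promotion for variadic functions like printf; passing a 32-bit float directly would be misread. A minimal llvmlite sketch of the same idea, built around a hypothetical demo function rather than the PsyNeuLink helper itself:

import llvmlite.ir as ir

module = ir.Module()

# Declare variadic C printf: i32 printf(i8*, ...)
printf_ty = ir.FunctionType(ir.IntType(32), [ir.IntType(8).as_pointer()], var_arg=True)
printf = ir.Function(module, printf_ty, name="printf")

# A demo function that receives a single-precision float.
demo = ir.Function(module, ir.FunctionType(ir.VoidType(), [ir.FloatType()]), name="demo")
builder = ir.IRBuilder(demo.append_basic_block())

# Global "%f\n" format string, mirroring the pattern used in helpers.printf.
fmt = bytearray("%f\n\0".encode("utf8"))
fmt_ty = ir.ArrayType(ir.IntType(8), len(fmt))
global_fmt = ir.GlobalVariable(module, fmt_ty, name="fmt")
global_fmt.initializer = fmt_ty(fmt)
fmt_ptr = builder.gep(global_fmt, [ir.IntType(32)(0), ir.IntType(32)(0)])

# The fix: promote float -> double before handing it to the variadic call.
promoted = builder.fpext(demo.args[0], ir.DoubleType())
builder.call(printf, [fmt_ptr, promoted])
builder.ret_void()
print(module)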
14 changes: 7 additions & 7 deletions requirements.txt
@@ -1,22 +1,22 @@
autograd<1.7
beartype<0.16.0
dill<0.3.8
fastkde>=1.0.24, <1.0.27
graph-scheduler>=0.2.0, <1.1.3
fastkde>=1.0.24, <1.0.31
graph-scheduler>=1.1.1, <1.1.3
graphviz<0.21.0
grpcio<1.60.0
leabra-psyneulink<0.3.3
llvmlite<0.42
matplotlib<3.7.3
modeci_mdf<0.5, >=0.3.4; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'
networkx<3.2
numpy>=1.19.0, <1.24.5
modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'
networkx<3.3
numpy>=1.21.0, <1.24.5
optuna<3.4.0
packaging<24.0
pandas<2.1.1
pillow<10.1.0
pillow<10.2.0
pint<0.22.0
protobuf<3.20.4
rich>=10.1, <10.13
toposort<1.11
torch>=1.8.0, <2.1.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'
torch>=1.10.0, <2.1.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'