Commit 17fe4b0

Began loop.py refactor. All regression tests currently pass.
braxtoncuneo committed Apr 22, 2024
1 parent 82c3695 commit 17fe4b0
Showing 32 changed files with 1,653 additions and 1,885 deletions.
examples/fixed_source/kobayashi3-TD/input.py (2 changes: 1 addition & 1 deletion)
@@ -68,7 +68,7 @@
)

# Setting
-mcdc.setting(N_particle=1e6)
+mcdc.setting(N_particle=1e3)
mcdc.implicit_capture()

# Run
mcdc/adapt.py (20 changes: 10 additions & 10 deletions)
@@ -101,18 +101,18 @@ def blankout_fn(func):
mod_name = func.__module__
fn_name = func.__name__
id = (mod_name,fn_name)

if id not in blankout_roster:
global do_nothing_id
name = func.__name__
#print(f"do_nothing_{do_nothing_id} for {name}")
arg_count = len(inspect.signature(func).parameters)
blankout_roster[id] = generate_do_nothing(arg_count,crash_on_call=f"blankout fn for {name} should never be called")

blank = blankout_roster[id]

return blank




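For context on the hunk above: blankout_fn caches one generated no-op replacement per (module, function) key, so repeated requests return the same object. A minimal self-contained sketch of that caching pattern, with generate_do_nothing written here as a simplified stand-in rather than MC/DC's actual code-generating helper:

import inspect

blankout_roster = {}  # (module name, function name) -> cached no-op stand-in

def generate_do_nothing(arg_count, crash_on_call=None):
    # Simplified stand-in: accept any arguments and, if requested,
    # raise so accidental calls are caught immediately.
    def do_nothing(*args):
        if crash_on_call is not None:
            raise RuntimeError(crash_on_call)
    return do_nothing

def blankout_fn(func):
    # One generated stand-in per (module, name); later lookups reuse it.
    key = (func.__module__, func.__name__)
    if key not in blankout_roster:
        arg_count = len(inspect.signature(func).parameters)
        blankout_roster[key] = generate_do_nothing(
            arg_count,
            crash_on_call=f"blankout fn for {func.__name__} should never be called",
        )
    return blankout_roster[key]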
@@ -176,7 +176,7 @@ def nopython_mode(is_on):
return
if not isinstance(target_rosters['cpu'],dict):
return

for impl in target_rosters['cpu'].values():
overwrite_func(impl,impl)

@@ -331,7 +331,7 @@ def add_IC(particle, prog):
@for_cpu()
def local_translate():
return np.zeros(1, dtype=type_.translate)[0]

@for_gpu()
def local_translate():
trans = cuda.local.array(1, type_.translate)[0]
@@ -343,7 +343,7 @@ def local_translate():
@for_cpu()
def local_group_array():
return np.zeros(1, dtype=type_.group_array)[0]

@for_gpu()
def local_group_array():
return cuda.local.array(1, type_.group_array)[0]
@@ -352,7 +352,7 @@ def local_group_array():
@for_cpu()
def local_j_array():
return np.zeros(1, dtype=type_.j_array)[0]

@for_gpu()
def local_j_array():
return cuda.local.array(1, type_.j_array)[0]
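The local_* hunks above all follow the same pattern: the same function name is defined twice, once under @for_cpu() (NumPy scratch storage) and once under @for_gpu() (cuda.local.array scratch storage). A simplified sketch of how such target-keyed registration can work; the roster layout below is illustrative only, not MC/DC's actual adapt.py internals:

# Illustrative rosters mapping a target name to {function name: implementation}.
target_rosters = {"cpu": {}, "gpu": {}}

def for_cpu():
    def register(func):
        target_rosters["cpu"][func.__name__] = func
        return func
    return register

def for_gpu():
    def register(func):
        target_rosters["gpu"][func.__name__] = func
        return func
    return register

@for_cpu()
def local_translate():
    return [0.0] * 3          # host-side scratch placeholder

@for_gpu()
def local_translate():
    return [0.0] * 3          # a device variant would use cuda.local.array

# Selecting the active backend's implementation by name:
impl = target_rosters["cpu"]["local_translate"]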
@@ -406,7 +406,7 @@ def global_max(ary,idx,val):
# Program Specifications
# =========================================================================

state_spec = None
state_spec = None
one_event_fns = None
multi_event_fns = None

@@ -420,7 +420,7 @@ def make_spec(target):
global device_gpu, group_gpu, thread_gpu
global iterate_async
if target == 'gpu':
state_spec = (dev_state_type,grp_state_type,thd_state_type)
state_spec = (dev_state_type,grp_state_type,thd_state_type)
one_event_fns = [iterate]
#multi_event_fns = [source,move,scattering,fission,leakage,bcollision]
device_gpu, group_gpu, thread_gpu = harm.RuntimeSpec.access_fns(state_spec)
@@ -443,7 +443,7 @@ def initialize(prog: numba.uintp):
def finalize(prog: numba.uintp):
final_fn(prog)
def step(prog: numba.uintp, arg: arg_type):

step_async()

step_async, = harm.RuntimeSpec.async_dispatch(step)
(Diffs for the remaining 30 changed files are not shown.)