""" Example: compute the limit sets of a Riley group with fixed μ and varying θ, η.
"""
from bella import cayley, riley
import mpmath as mp
import holoviews as hv
import pandas as pd
hv.extension('bokeh')
import multiprocessing
import os
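
# Note on the parameters (an assumption, inferred from how they are used below): in bella's
# riley.RileyGroup(θ, η, μ), θ and η appear to be the angles attached to the two generators and
# μ the remaining complex parameter of the group. This example holds θ and μ fixed (μ = 2j)
# and sweeps η through multiples of π/scale.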
# Output one frame of the animation. See below for an explanation of the parameters.
def one_frame(kk, θ, num_points, first, scale):
    print(f"#{kk}")
    η = (kk/scale) * mp.pi
    G = riley.RileyGroup(θ, η, 2j)  # <- we fix the value of μ.
    df = G.coloured_limit_set_fast(num_points)
    scatter = hv.Scatter(df, kdims = ['x'], vdims = ['y','colour'])\
                .opts(marker = "dot", size = 1, color = 'colour', width=1000, height=1000, data_aspect=1, cmap='Category10')\
                .redim(x=hv.Dimension('x', range=(-1,1)), y=hv.Dimension('y', range=(-1, 1)))
    hv.save(scatter, f'animate_limits_as_slices_vary/frame{-first+kk:05}.png', fmt='png')
    print(f"Done {kk}")
# We use this __main__ pattern since we have to use multiprocessing, see
# the documentation: https://docs.python.org/dev/library/multiprocessing.html#multiprocessing-programming
if __name__=='__main__':
    # θ is fixed. num_points is passed directly into GroupCache.coloured_limit_set_fast().
    θ = 0
    num_points = 20*10**4
    # We compute η as (kk/scale)*π, where kk runs from first to last. Hence, to adjust the number of frames,
    # adjust scale and modify last so that last/scale is the endpoint value of the animation that you want.
    first = 0
    last = 400
    scale = 200
    # Make the output directory if necessary.
    try:
        os.mkdir('animate_limits_as_slices_vary')
    except FileExistsError:
        pass
    # If we don't run all of the one_frame() calls in different processes, then we leak memory in holoviews. (The bokeh renderer keeps a copy of every dataframe
    # passed in, so the garbage collector doesn't throw them away even when the one_frame() function ends; since this is global in the holoviews
    # module, the only way we get out of running out of memory is to start a new process each time.) As a nice side-effect, it's fast.
    multiprocessing.set_start_method('spawn')  # Using "fork" also leaks memory.
    with multiprocessing.Pool(4, maxtasksperchild=1) as pool:
        pool.starmap(one_frame, [ (kk, θ, num_points, first, scale) for kk in range(first, last) ], chunksize=1)
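    # The frames can then be assembled into a video with an external tool of your choice,
    # for example (a suggestion only, not part of this script):
    #   ffmpeg -framerate 25 -i animate_limits_as_slices_vary/frame%05d.png -pix_fmt yuv420p limits.mp4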