Skip to content

Commit

Permalink
ActivityDrivenModel: Reworked constructor
Browse files Browse the repository at this point in the history
The constructor now takes the `ActivityDrivenSettings` struct. This also
made it possible to turn many member variables private. In addition, the
`Model` base class gained a constructor that takes `max_iterations`.

Co-authored-by: Amrita Goswami <[email protected]>
  • Loading branch information
MSallermann and amritagos committed Mar 21, 2024
1 parent 1505bdc commit b822f8e
Show file tree
Hide file tree
Showing 4 changed files with 46 additions and 57 deletions.
3 changes: 3 additions & 0 deletions include/model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ class Model

std::optional<size_t> max_iterations = std::nullopt;

Model() = default;
Model( std::optional<size_t> max_iterations ) : max_iterations( max_iterations ){};

virtual void initialize_iterations()
{
_n_iterations = 0;
Expand Down
35 changes: 17 additions & 18 deletions include/models/ActivityDrivenModel.hpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
#pragma once

#include "agents/activity_agent.hpp"
#include "config_parser.hpp"

#include "model.hpp"
#include "network.hpp"
#include <cstddef>
Expand All @@ -19,12 +21,17 @@ class ActivityDrivenModel : public Model<ActivityAgent>
using AgentT = ActivityAgent;
using NetworkT = Network<AgentT>;

ActivityDrivenModel( NetworkT & network, std::mt19937 & gen );

void get_agents_from_power_law(); // This needs to be called after eps and gamma have been set
ActivityDrivenModel( const Config::ActivityDrivenSettings & settings, NetworkT & network, std::mt19937 & gen );

void iteration() override;

private:
NetworkT & network;
std::vector<std::vector<NetworkT::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
// Random number generation
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};

// Model-specific parameters
double dt = 0.01; // Timestep for the integration of the coupled ODEs
// Various free parameters
Expand All @@ -41,39 +48,31 @@ class ActivityDrivenModel : public Model<ActivityAgent>
bool mean_activities = false;
bool mean_weights = false;

double convergence_tol = 1e-12; // TODO: ??

bool use_reluctances = false;
double reluctance_mean{};
double reluctance_sigma{};
double reluctance_eps{};
double covariance_factor{};

// bot @TODO: less hacky
size_t n_bots = 0; // The first n_bots agents are bots
std::vector<int> bot_m = std::vector<int>( 0 );
std::vector<double> bot_activity = std::vector<double>( 0 );
std::vector<double> bot_opinion = std::vector<double>( 0 );
std::vector<double> bot_homophily = std::vector<double>( 0 );

[[nodiscard]] bool bot_present() const
{
return n_bots > 0;
}

private:
NetworkT & network;
std::vector<std::vector<NetworkT::WeightT>> contact_prob_list; // Probability of choosing i in 1 to m rounds
// Random number generation
std::mt19937 & gen; // reference to simulation Mersenne-Twister engine
std::set<std::pair<size_t, size_t>> reciprocal_edge_buffer{};

// Buffers for RK4 integration
std::vector<double> k1_buffer{};
std::vector<double> k2_buffer{};
std::vector<double> k3_buffer{};
std::vector<double> k4_buffer{};

void get_agents_from_power_law();

[[nodiscard]] bool bot_present() const
{
return n_bots > 0;
}

template<typename Opinion_Callback>
void get_euler_slopes( std::vector<double> & k_buffer, Opinion_Callback opinion )
{
Expand Down
35 changes: 1 addition & 34 deletions include/simulation.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -107,40 +107,7 @@ class Simulation : public SimulationInterface

model = [&]()
{
auto model = std::make_unique<ActivityDrivenModel>( network, gen );
model->dt = activitydriven_settings.dt;
model->m = activitydriven_settings.m;
model->eps = activitydriven_settings.eps;
model->gamma = activitydriven_settings.gamma;
model->homophily = activitydriven_settings.homophily;
model->reciprocity = activitydriven_settings.reciprocity;
model->alpha = activitydriven_settings.alpha;
model->K = activitydriven_settings.K;
model->mean_activities = activitydriven_settings.mean_activities;
model->mean_weights = activitydriven_settings.mean_weights;
model->max_iterations = activitydriven_settings.max_iterations;
// Reluctance
model->use_reluctances = activitydriven_settings.use_reluctances;
model->reluctance_mean = activitydriven_settings.reluctance_mean;
model->reluctance_sigma = activitydriven_settings.reluctance_sigma;
model->reluctance_eps = activitydriven_settings.reluctance_eps;
// Bot
model->n_bots = activitydriven_settings.n_bots;
model->bot_opinion = activitydriven_settings.bot_opinion;
model->bot_m = activitydriven_settings.bot_m;
model->bot_homophily = activitydriven_settings.bot_homophily;
model->bot_activity = activitydriven_settings.bot_activity;
model->get_agents_from_power_law();

// TODO: this is stupid and should be done in the constructor, but right now it cant since we set mean
// weights only later
if( model->mean_weights )
{
auto agents_copy = network.agents;
network = NetworkGeneration::generate_fully_connected<AgentType>( network.n_agents() );
network.agents = agents_copy;
}

auto model = std::make_unique<ActivityDrivenModel>( activitydriven_settings, network, gen );
return model;
}();

Expand Down
30 changes: 25 additions & 5 deletions src/models/ActivityDrivenModel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,33 @@
namespace Seldon
{

ActivityDrivenModel::ActivityDrivenModel( NetworkT & network, std::mt19937 & gen )
: Model<ActivityDrivenModel::AgentT>(),
ActivityDrivenModel::ActivityDrivenModel(
const Config::ActivityDrivenSettings & settings, NetworkT & network, std::mt19937 & gen )
: Model<ActivityDrivenModel::AgentT>( settings.max_iterations ),
network( network ),
contact_prob_list( std::vector<std::vector<NetworkT::WeightT>>( network.n_agents() ) ),
gen( gen )
gen( gen ),
dt( settings.dt ),
m( settings.m ),
eps( settings.eps ),
gamma( settings.gamma ),
alpha( settings.alpha ),
homophily( settings.homophily ),
reciprocity( settings.reciprocity ),
K( settings.K ),
mean_activities( settings.mean_activities ),
mean_weights( settings.mean_weights ),
use_reluctances( settings.use_reluctances ),
reluctance_mean( settings.reluctance_mean ),
reluctance_sigma( settings.reluctance_sigma ),
reluctance_eps( settings.reluctance_eps ),
n_bots( settings.n_bots ),
bot_m( settings.bot_m ),
bot_activity( settings.bot_activity ),
bot_opinion( settings.bot_opinion ),
bot_homophily( settings.bot_homophily )
{
get_agents_from_power_law();

if( mean_weights )
{
Expand Down Expand Up @@ -160,8 +181,7 @@ void ActivityDrivenModel::update_network_mean()
contact_prob_list[idx_agent] = weights; // set to zero
}

auto probability_helper = []( double omega, size_t m )
{
auto probability_helper = []( double omega, size_t m ) {
double p = 0;
for( size_t i = 1; i <= m; i++ )
p += ( std::pow( -omega, i + 1 ) + omega ) / ( omega + 1 );
Expand Down

0 comments on commit b822f8e

Please sign in to comment.