From 6f7cc3e970a8ce07e50ee7f86109c88c13109d2a Mon Sep 17 00:00:00 2001
From: amritagos
Date: Sun, 29 Oct 2023 20:26:49 +0000
Subject: [PATCH] First attempt at mean field theory for the activity driven
 model

---
 include/models/ActivityDrivenModel.hpp | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/include/models/ActivityDrivenModel.hpp b/include/models/ActivityDrivenModel.hpp
index 1e770d2..0522e36 100644
--- a/include/models/ActivityDrivenModel.hpp
+++ b/include/models/ActivityDrivenModel.hpp
@@ -56,6 +56,7 @@ class ActivityAgentModel : public Model>
 {
     // h is the timestep
     auto neighbour_buffer = std::vector();
+    auto weight_buffer = std::vector();
     size_t j_index = 0;
     k_buffer.resize( network.n_agents() );

@@ -63,12 +64,13 @@ class ActivityAgentModel : public Model>
     for( size_t idx_agent = 0; idx_agent < network.n_agents(); ++idx_agent )
     {
         network.get_neighbours( idx_agent, neighbour_buffer ); // Get the incoming neighbours
+        network.get_weights(idx_agent, weight_buffer); // Get incoming weights
         k_buffer[idx_agent] = -opinion( idx_agent );
         // Loop through neighbouring agents
         for( size_t j = 0; j < neighbour_buffer.size(); j++ )
         {
             j_index = neighbour_buffer[j];
-            k_buffer[idx_agent] += K * std::tanh( alpha * opinion( j_index ) );
+            k_buffer[idx_agent] += K * weight_buffer[j] * std::tanh( alpha * opinion( j_index ) );
         }
         // Multiply by the timestep
         k_buffer[idx_agent] *= dt;