Class VoteAgent

Bases: Agent

An agent that has limited knowledge and resources and can decide to use them to participate in elections.

Source code in democracy_sim/participation_agent.py, lines 36-218
class VoteAgent(Agent):
    """An agent that has limited knowledge and resources and
    can decide to use them to participate in elections.
    """

    def __init__(self, unique_id, model, pos, personality, assets=1, add=True):
        """ Create a new agent.

        Args:
            unique_id: The unique identifier of the agent.
            model: The simulation model the agent is part of.
            pos: The position of the agent in the grid.
            personality: Represents the agent's preferences among colors.
            assets: The wealth/assets/motivation of the agent.
            add: Whether to register the agent with the model and its cell.
        """
        super().__init__(unique_id=unique_id, model=model)
        # The "pos" variable in mesa is special, so I avoid it here
        try:
            row, col = pos
        except ValueError:
            raise ValueError("Position must be a tuple of two integers.")
        self._position = row, col
        self._assets = assets
        self._num_elections_participated = 0
        self.personality = personality
        self.cell = model.grid.get_cell_list_contents([(row, col)])[0]
        # ColorCell objects the agent knows (knowledge)
        self.known_cells: List[Optional[ColorCell]] = [None] * model.known_cells
        # Add the agent to the model's agent list and to its cell
        if add:
            model.voting_agents.append(self)
            cell = model.grid.get_cell_list_contents([(row, col)])[0]
            cell.add_agent(self)
        # Election relevant variables
        self.est_real_dist = np.zeros(self.model.num_colors)
        self.confidence = 0.0

    def __str__(self):
        return (f"Agent(id={self.unique_id}, pos={self.position}, "
                f"personality={self.personality}, assets={self.assets})")

    @property
    def position(self):
        """Return the location of the agent."""
        return self._position

    @property
    def row(self):
        """Return the row location of the agent."""
        return self._position[0]

    @property
    def col(self):
        """Return the col location of the agent."""
        return self._position[1]

    @property
    def assets(self):
        """Return the assets of this agent."""
        return self._assets

    @assets.setter
    def assets(self, value):
        self._assets = value

    @assets.deleter
    def assets(self):
        del self._assets

    @property
    def num_elections_participated(self):
        return self._num_elections_participated

    @num_elections_participated.setter
    def num_elections_participated(self, value):
        self._num_elections_participated = value

    def update_known_cells(self, area):
        """
        Update the list of known cells before casting a vote.

        Args:
            area: The area that holds the pool of cells in question
        """
        n_cells = len(area.cells)
        k = len(self.known_cells)
        self.known_cells = (
            self.random.sample(area.cells, k)
            if n_cells >= k
            else area.cells
        )

    def ask_for_participation(self, area):
        """
        The agent decides
        whether to participate in the upcoming election of a given area.

        Args:
            area: The area in which the election takes place.

        Returns:
            True if the agent decides to participate, False otherwise
        """
        #print("Agent", self.unique_id, "decides whether to participate",
        #      "in election of area", area.unique_id)
        # TODO Implement this (to be decided using a learned decision tree)
        return np.random.choice([True, False])

    def decide_altruism_factor(self, area):
        """
        Uses a trained decision tree to decide on the altruism factor.
        """
        # TODO Implement this (to be decided using a learned decision tree)
        # This part is important - also for monitoring - save/plot a_factors
        a_factor = np.random.uniform(0.0, 1.0)
        #print(f"Agent {self.unique_id} has an altruism factor of: {a_factor}")
        return a_factor

    def compute_assumed_opt_dist(self, area):
        """
        Computes a color distribution that the agent assumes to be an optimal
        choice in any election (regardless of whether it exists as a real option
        to vote for or not). It takes "altruistic" concepts into consideration.

        Args:
            area (Area): The area in which the election takes place.

        Returns:
            ass_opt: The assumed optimal color distribution (normalized).
        """
        # Compute the "altruism_factor" via a decision tree
        a_factor = self.decide_altruism_factor(area)  # TODO: Implement this
        # Compute the preference ranking vector as a mix between the agent's own
        #   preferences/personality traits and the estimated real distribution.
        est_dist, conf = self.estimate_real_distribution(area)
        ass_opt = combine_and_normalize(est_dist, self.personality, a_factor)
        return ass_opt

    def vote(self, area):
        """
        The agent votes in the election of a given area,
        i.e., she returns a preference ranking vector over all options.
        (Ranking: `index = option`, `value proportional to rank`)
        The available options are set in the model.

        Args:
            area (Area): The area in which the election takes place.
        """
        # TODO Implement this (to be decided using a learned decision tree)
        # Compute the color distribution that is assumed to be the best choice.
        est_best_dist = self.compute_assumed_opt_dist(area)
        # Make sure the returned ranking is normalized!
        # (all values within [0.0, 1.0], vector sums to 1.0)
        ##############
        if TYPE_CHECKING:  # Type hint for IDEs
            self.model = cast(ParticipationModel, self.model)

        options = self.model.options
        dist_func = self.model.distance_func
        ranking = np.zeros(options.shape[0])
        color_search_pairs = self.model.color_search_pairs
        for i, option in enumerate(options):
            # TODO: is it possible to leave out white?
            ranking[i] = dist_func(self.personality, option, color_search_pairs)
        ranking /= ranking.sum()  # Normalize the preference vector
        return ranking

    def estimate_real_distribution(self, area):
        """
        The agent estimates the real color distribution in the area based on
        her own knowledge (self.known_cells).

        Args:
            area (Area): The area the agent uses to estimate.
        """
        known_colors = np.array([cell.color for cell in self.known_cells])
        # Get the unique color ids present and count their occurrence
        unique, counts = np.unique(known_colors, return_counts=True)
        # Update the est_real_dist and confidence values of the agent
        self.est_real_dist.fill(0)  # To ensure the ones not in unique are 0
        self.est_real_dist[unique] = counts / known_colors.size
        self.confidence = len(self.known_cells) / area.num_cells
        return self.est_real_dist, self.confidence
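
A minimal instantiation sketch (illustrative only): the model construction is elided, and the personality vector over model.num_colors colors is an assumption based on how personality is used in vote and compute_assumed_opt_dist.

import numpy as np

# Hypothetical setup: the ParticipationModel constructor arguments are omitted
# here; the model is assumed to provide grid, voting_agents, known_cells and
# num_colors, as read by VoteAgent.__init__ above.
model = ParticipationModel(...)

# Assumed personality representation: a normalized preference vector over colors.
personality = np.random.dirichlet(np.ones(model.num_colors))

agent = VoteAgent(unique_id=1, model=model, pos=(3, 4),
                  personality=personality, assets=5)
print(agent)                  # Agent(id=1, pos=(3, 4), personality=..., assets=5)
print(agent.row, agent.col)   # 3 4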

assets property (writable, deletable)

Return the assets of this agent.

col property

Return the col location of the agent.

position property

Return the location of the agent.

row property

Return the row location of the agent.

__init__(unique_id, model, pos, personality, assets=1, add=True)

Create a new agent.

Parameters:

    unique_id: The unique identifier of the agent. (required)
    model: The simulation model the agent is part of. (required)
    pos: The position of the agent in the grid. (required)
    personality: Represents the agent's preferences among colors. (required)
    assets: The wealth/assets/motivation of the agent. (default: 1)
    add: Whether to register the agent with the model and its cell. (default: True)

Source code in democracy_sim/participation_agent.py, lines 41-71
def __init__(self, unique_id, model, pos, personality, assets=1, add=True):
    """ Create a new agent.

    Args:
        unique_id: The unique identifier of the agent.
        model: The simulation model the agent is part of.
        pos: The position of the agent in the grid.
        personality: Represents the agent's preferences among colors.
        assets: The wealth/assets/motivation of the agent.
        add: Whether to register the agent with the model and its cell.
    """
    super().__init__(unique_id=unique_id, model=model)
    # The "pos" variable in mesa is special, so I avoid it here
    try:
        row, col = pos
    except ValueError:
        raise ValueError("Position must be a tuple of two integers.")
    self._position = row, col
    self._assets = assets
    self._num_elections_participated = 0
    self.personality = personality
    self.cell = model.grid.get_cell_list_contents([(row, col)])[0]
    # ColorCell objects the agent knows (knowledge)
    self.known_cells: List[Optional[ColorCell]] = [None] * model.known_cells
    # Add the agent to the model's agent list and to its cell
    if add:
        model.voting_agents.append(self)
        cell = model.grid.get_cell_list_contents([(row, col)])[0]
        cell.add_agent(self)
    # Election relevant variables
    self.est_real_dist = np.zeros(self.model.num_colors)
    self.confidence = 0.0

ask_for_participation(area)

The agent decides whether to participate in the upcoming election of a given area.

Parameters:

    area: The area in which the election takes place. (required)

Returns:

    True if the agent decides to participate, False otherwise.

Source code in democracy_sim/participation_agent.py, lines 128-142
def ask_for_participation(self, area):
    """
    The agent decides
    whether to participate in the upcoming election of a given area.

    Args:
        area: The area in which the election takes place.

    Returns:
        True if the agent decides to participate, False otherwise
    """
    #print("Agent", self.unique_id, "decides whether to participate",
    #      "in election of area", area.unique_id)
    # TODO Implement this (to be decided using a learned decision tree)
    return np.random.choice([True, False])
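
A sketch of how a caller might use this decision hook before an election; area.agents is a hypothetical attribute used only for illustration, not a confirmed part of the Area API.

# Hypothetical: collect the agents of an area that choose to take part in its
# upcoming election. area.agents is an assumed attribute, for illustration only.
participants = [agent for agent in area.agents if agent.ask_for_participation(area)]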

compute_assumed_opt_dist(area)

Computes a color distribution that the agent assumes to be an optimal choice in any election (regardless of whether it exists as a real option to vote for or not). It takes "altruistic" concepts into consideration.

Parameters:

    area (Area): The area in which the election takes place. (required)

Returns:

    ass_opt: The assumed optimal color distribution (normalized).

Source code in democracy_sim/participation_agent.py, lines 154-172
def compute_assumed_opt_dist(self, area):
    """
    Computes a color distribution that the agent assumes to be an optimal
    choice in any election (regardless of whether it exists as a real option
    to vote for or not). It takes "altruistic" concepts into consideration.

    Args:
        area (Area): The area in which the election takes place.

    Returns:
        ass_opt: The assumed optimal color distribution (normalized).
    """
    # Compute the "altruism_factor" via a decision tree
    a_factor = self.decide_altruism_factor(area)  # TODO: Implement this
    # Compute the preference ranking vector as a mix between the agent's own
    #   preferences/personality traits and the estimated real distribution.
    est_dist, conf = self.estimate_real_distribution(area)
    ass_opt = combine_and_normalize(est_dist, self.personality, a_factor)
    return ass_opt
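
combine_and_normalize is defined elsewhere in participation_agent.py and its body is not shown here; the following is a plausible sketch, assuming it blends the estimated distribution with the agent's personality using the altruism factor as the mixing weight.

import numpy as np

def combine_and_normalize(est_dist, personality, a_factor):
    # Assumed behavior: convex combination weighted by the altruism factor,
    # renormalized so the result sums to 1. The real implementation may differ.
    mixed = a_factor * est_dist + (1.0 - a_factor) * personality
    total = mixed.sum()
    return mixed / total if total > 0 else mixed

est_dist = np.array([0.5, 0.3, 0.2])     # estimated real color distribution
personality = np.array([0.1, 0.1, 0.8])  # the agent's own preferences
print(combine_and_normalize(est_dist, personality, a_factor=0.25))
# [0.2  0.15 0.65]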

decide_altruism_factor(area)

Uses a trained decision tree to decide on the altruism factor.

Source code in democracy_sim/participation_agent.py, lines 144-152
def decide_altruism_factor(self, area):
    """
    Uses a trained decision tree to decide on the altruism factor.
    """
    # TODO Implement this (to be decided using a learned decision tree)
    # This part is important - also for monitoring - save/plot a_factors
    a_factor = np.random.uniform(0.0, 1.0)
    #print(f"Agent {self.unique_id} has an altruism factor of: {a_factor}")
    return a_factor
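
The TODO above leaves the decision tree unimplemented; a rough sketch of what it could look like, assuming scikit-learn and an arbitrary choice of input features (neither the library nor the feature set is part of democracy_sim).

import numpy as np
from sklearn.tree import DecisionTreeRegressor

# Placeholder training data: features could be e.g. assets, confidence and the
# agent's share of preferred colors in the area; targets are altruism factors.
X_train = np.random.rand(100, 3)
y_train = np.random.rand(100)

tree = DecisionTreeRegressor(max_depth=3).fit(X_train, y_train)

features = np.array([[5.0, 0.2, 0.4]])                 # one agent/area observation
a_factor = float(np.clip(tree.predict(features)[0], 0.0, 1.0))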

estimate_real_distribution(area)

The agent estimates the real color distribution in the area based on her own knowledge (self.known_cells).

Parameters:

    area (Area): The area the agent uses to estimate. (required)
Source code in democracy_sim/participation_agent.py, lines 203-218
def estimate_real_distribution(self, area):
    """
    The agent estimates the real color distribution in the area based on
    her own knowledge (self.known_cells).

    Args:
        area (Area): The area the agent uses to estimate.
    """
    known_colors = np.array([cell.color for cell in self.known_cells])
    # Get the unique color ids present and count their occurrence
    unique, counts = np.unique(known_colors, return_counts=True)
    # Update the est_real_dist and confidence values of the agent
    self.est_real_dist.fill(0)  # To ensure the ones not in unique are 0
    self.est_real_dist[unique] = counts / known_colors.size
    self.confidence = len(self.known_cells) / area.num_cells
    return self.est_real_dist, self.confidence
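
A small worked example of the estimation step above, with hand-picked color ids standing in for ColorCell objects (the numbers are illustrative).

import numpy as np

num_colors = 4
known_colors = np.array([2, 0, 2, 1, 2])          # colors of the known cells
unique, counts = np.unique(known_colors, return_counts=True)

est_real_dist = np.zeros(num_colors)
est_real_dist[unique] = counts / known_colors.size
print(est_real_dist)      # [0.2 0.2 0.6 0. ]  (color 3 was never observed)

area_num_cells = 25                               # assumed area size
confidence = known_colors.size / area_num_cells
print(confidence)         # 0.2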

update_known_cells(area)

Update the list of known cells before casting a vote.

Parameters:

    area: The area that holds the pool of cells in question. (required)
Source code in democracy_sim/participation_agent.py, lines 113-126
def update_known_cells(self, area):
    """
    Update the list of known cells before casting a vote.

    Args:
        area: The area that holds the pool of cells in question
    """
    n_cells = len(area.cells)
    k = len(self.known_cells)
    self.known_cells = (
        self.random.sample(area.cells, k)
        if n_cells >= k
        else area.cells
    )
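
The same sampling rule in isolation, using plain integers as stand-ins for ColorCell objects (illustrative only).

import random

area_cells = list(range(10))   # stand-in for area.cells
k = 4                          # stand-in for the agent's knowledge size

known = random.sample(area_cells, k) if len(area_cells) >= k else area_cells
print(known)  # e.g. [7, 0, 3, 9]
# If the area had fewer than k cells, the agent would simply know all of them.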

vote(area)

The agent votes in the election of a given area, i.e., she returns a preference ranking vector over all options. (Ranking: index = option, value proportional to rank) The available options are set in the model.

Parameters:

    area (Area): The area in which the election takes place. (required)
Source code in democracy_sim/participation_agent.py, lines 174-201
def vote(self, area):
    """
    The agent votes in the election of a given area,
    i.e., she returns a preference ranking vector over all options.
    (Ranking: `index = option`, `value proportional to rank`)
    The available options are set in the model.

    Args:
        area (Area): The area in which the election takes place.
    """
    # TODO Implement this (to be decided using a learned decision tree)
    # Compute the color distribution that is assumed to be the best choice.
    est_best_dist = self.compute_assumed_opt_dist(area)
    # Make sure the returned ranking is normalized!
    # (all values within [0.0, 1.0], vector sums to 1.0)
    ##############
    if TYPE_CHECKING:  # Type hint for IDEs
        self.model = cast(ParticipationModel, self.model)

    options = self.model.options
    dist_func = self.model.distance_func
    ranking = np.zeros(options.shape[0])
    color_search_pairs = self.model.color_search_pairs
    for i, option in enumerate(options):
        # TODO: is it possible to leave out white?
        ranking[i] = dist_func(self.personality, option, color_search_pairs)
    ranking /= ranking.sum()  # Normalize the preference vector
    return ranking
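
A toy version of the ranking step above, with a simple L1 distance standing in for the model's distance_func (which additionally takes color_search_pairs); the concrete options and personality values are made up for illustration.

import numpy as np

def toy_dist(personality, option, color_search_pairs=None):
    # Stand-in for model.distance_func: plain L1 distance between distributions.
    return np.abs(personality - option).sum()

personality = np.array([0.1, 0.2, 0.7])
options = np.array([[0.3, 0.3, 0.4],    # stand-in for model.options
                    [0.1, 0.2, 0.7],
                    [0.6, 0.2, 0.2]])

ranking = np.array([toy_dist(personality, o) for o in options])
ranking /= ranking.sum()
print(ranking)   # [0.375 0.    0.625]
# Smaller values mark options closer to the agent's preferences.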