[CT421]: Finish Assignment 2
@ -2,6 +2,7 @@
|
||||
|
||||
import argparse
|
||||
import random
|
||||
import copy
|
||||
|
||||
# Each strategy is defined as follows:
|
||||
# [first move, reaction to defection, reaction to co-operation]
|
||||
@ -10,11 +11,11 @@ import random
|
||||
strategies = [
|
||||
[0, 0, 0], # Always defect.
|
||||
[0, 0, 1], # Grim tit-for-tat.
|
||||
[0, 1, 0], # Grim opposite day: defect at first, then do opposite of what opponent did last.
|
||||
[0, 1, 1], # Self-sabotage: defect at first, then always co-operate.
|
||||
[0, 1, 0], # Defect at first, then do opposite of what opponent did last.
|
||||
[0, 1, 1], # Defect at first, then always co-operate.
|
||||
[1, 0, 0], # Feint co-operation, then always defect.
|
||||
[1, 0, 1], # Tit-for-tat.
|
||||
[1, 1, 0], # Opposite day: co-operate at first, then do opposite of what opponent did last.
|
||||
[1, 1, 0], # Co-operate at first, then do opposite of what opponent did last.
|
||||
[1, 1, 1] # Always co-operate.
|
||||
]
|
||||
|
||||
@ -30,63 +31,20 @@ def initialise_population(size):
|
||||
"""
|
||||
|
||||
# Since there are only 8 possible strategies, to initialise the population just perform a random over-sampling of the space.
|
||||
return random.choices(strategies, k=size)
|
||||
return [copy.deepcopy(strategy) for strategy in random.choices(strategies, k=size)]
|
||||
|
||||
|
||||
def coevolve(agent1, agent2, num_iterations):
|
||||
"""
|
||||
Play the Iterated Prisoner's Dilemma with two agents a specified number of times, and return each agent's score.
|
||||
|
||||
Args:
|
||||
agent1 (list): the strategy of agent1.
|
||||
agent2 (list): the strategy of agent2.
|
||||
iterations (int): the number of iterations to play.
|
||||
|
||||
Returns:
|
||||
fitness1 (int): the score obtained by agent1.
|
||||
fitness2 (int): the score obtained by agent2.
|
||||
"""
|
||||
|
||||
fitness1 = 0
|
||||
fitness2 = 0
|
||||
|
||||
agent1_last_move = None
|
||||
agent2_last_move = None
|
||||
|
||||
for iteration in range(num_iterations):
|
||||
if (iteration == 0):
|
||||
agent1_move = agent1[0]
|
||||
agent2_move = agent2[0]
|
||||
else:
|
||||
# Set an agent's move to its reaction to co-operation if the other agent's last move was co-operation (1), else set it to its reaction to defection.
|
||||
agent1_move = agent1[2] if agent2_last_move else agent1[1]
|
||||
agent2_move = agent2[2] if agent1_last_move else agent2[1]
|
||||
|
||||
match (agent1_move, agent2_move):
|
||||
case (0, 0):
|
||||
fitness1 += 1
|
||||
fitness2 += 1
|
||||
case (0, 1):
|
||||
fitness1 += 5
|
||||
case (1, 0):
|
||||
fitness2 += 5
|
||||
case (1, 1):
|
||||
fitness1 += 3
|
||||
fitness2 += 3
|
||||
|
||||
return fitness1, fitness2
|
||||
|
||||
|
||||
def fitness(agent, num_iterations):
|
||||
def fitness(agent, num_iterations, noise_level):
|
||||
"""
|
||||
Play the Iterated Prisoner's Dilemma against a number of fixed strategies and return its score.
|
||||
|
||||
Args:
|
||||
agent1 (list): the strategy of agent1.
|
||||
iterations (int): the number of iterations to play.
|
||||
noise_level (float): the probability that the opponent's last move will be misrepresented to the agent
|
||||
|
||||
Returns:
|
||||
fitness1 (int): the score obtained by agent1.
|
||||
fitness (int): the score obtained by agent1.
|
||||
"""
|
||||
|
||||
fitness = 0
|
||||
@ -99,7 +57,7 @@ def fitness(agent, num_iterations):
|
||||
# [1, 0, 0], # Feint co-operation, then always defect.
|
||||
[1, 0, 1], # Tit-for-tat.
|
||||
# [1, 1, 0], # Opposite day: co-operate at first, then do opposite of what opponent did last.
|
||||
[1, 1, 1] # Always co-operate.
|
||||
[1, 1, 1], # Always co-operate.
|
||||
]
|
||||
|
||||
for fixed_strategy in fixed_strategies:
|
||||
@ -115,8 +73,8 @@ def fitness(agent, num_iterations):
|
||||
agent_move = agent[2] if fixed_strategy_last_move else agent[1]
|
||||
fixed_strategy_move = fixed_strategy[2] if agent_last_move else fixed_strategy[1]
|
||||
|
||||
agent_last_move = agent_move
|
||||
fixed_strategy_last_move = fixed_strategy_move
|
||||
agent_last_move = agent_move if random.random() > noise_level else 1 - agent_move
|
||||
fixed_strategy_last_move = fixed_strategy_move if random.random() > noise_level else 1 - fixed_strategy_move
|
||||
|
||||
match (agent_move, fixed_strategy_move):
|
||||
case (0, 0):
|
||||
@ -134,13 +92,14 @@ def fitness(agent, num_iterations):
|
||||
return fitness
|
||||
|
||||
|
||||
def list_fitnesses(population, num_iterations):
|
||||
def list_fitnesses(population, num_iterations, noise_level):
|
||||
"""
|
||||
Calculate the fitness of each agent in a population.
|
||||
|
||||
Args:
|
||||
population (list): the population of strategies.
|
||||
iterations (int): the number of iterations to play.
|
||||
noise_level (float): the probability that the opponent's last move will be misrepresented to the agent
|
||||
|
||||
Returns:
|
||||
fitnesses (list): the fitness of each agent.
|
||||
@ -149,7 +108,7 @@ def list_fitnesses(population, num_iterations):
|
||||
fitnesses = []
|
||||
|
||||
for agent in population:
|
||||
fitnesses.append(fitness(agent, num_iterations))
|
||||
fitnesses.append(fitness(agent, num_iterations, noise_level))
|
||||
|
||||
return fitnesses
|
||||
|
||||
@ -186,16 +145,16 @@ def tournament_selection(population, fitnesses, num_survivors, tournament_size=3
|
||||
Returns:
|
||||
survivors (list): the selected agents.
|
||||
"""
|
||||
|
||||
survivors = []
|
||||
|
||||
for _ in range(num_survivors):
|
||||
tournament = random.sample(list(zip(population, fitnesses)), tournament_size)
|
||||
winner = max(tournament, key=lambda agent: agent[1])
|
||||
survivors.append(winner[0])
|
||||
survivors.append(copy.deepcopy(winner[0])) # Deep copy to prevent unintended modifications
|
||||
|
||||
return survivors
|
||||
|
||||
|
||||
def crossover(parents, crossover_rate, num_offspring):
|
||||
"""
|
||||
Perform single-point crossover on selected parents.
|
||||
@ -216,12 +175,12 @@ def crossover(parents, crossover_rate, num_offspring):
|
||||
|
||||
crossover_point = random.randint(1, 2)
|
||||
|
||||
child1 = p1[:crossover_point] + p2[crossover_point:]
|
||||
child2 = p2[:crossover_point] + p1[crossover_point:]
|
||||
child1 = copy.deepcopy(p1[:crossover_point] + p2[crossover_point:])
|
||||
child2 = copy.deepcopy(p2[:crossover_point] + p1[crossover_point:])
|
||||
|
||||
offspring.extend([child1, child2])
|
||||
else:
|
||||
offspring.append(random.choice(parents))
|
||||
offspring.append(copy.deepcopy(random.choice(parents)))
|
||||
|
||||
return offspring[:num_offspring]
|
||||
|
||||
@ -237,14 +196,16 @@ def mutate(offspring, mutation_rate):
|
||||
Returns:
|
||||
mutated_offspring (list): List of mutated strategies.
|
||||
"""
|
||||
for i in range(len(offspring)):
|
||||
mutated_offspring = copy.deepcopy(offspring) # Deep copy to prevent modifying original offspring
|
||||
|
||||
for i in range(len(mutated_offspring)):
|
||||
if random.random() < mutation_rate:
|
||||
mutation_point = random.randint(0, 2)
|
||||
offspring[i][mutation_point] = 1 - offspring[i][mutation_point]
|
||||
mutated_offspring[i][mutation_point] = 1 - mutated_offspring[i][mutation_point]
|
||||
|
||||
return offspring
|
||||
return mutated_offspring
|
||||
|
||||
def evolve(size, num_generations, give_up_after, num_iterations, selection_proportion, crossover_rate, mutation_rate):
|
||||
def evolve(size, num_generations, give_up_after, num_iterations, selection_proportion, crossover_rate, mutation_rate, noise_level):
|
||||
"""
|
||||
Evolves strategies over a number of generations for the Iterated Prisoner's Dilemma.
|
||||
|
||||
@ -255,13 +216,14 @@ def evolve(size, num_generations, give_up_after, num_iterations, selection_propo
|
||||
selection_proportion (float): The proportion of the population to be selected (survive) on each generation
|
||||
crossover_rate (float): Probability of a selected pair of solutions to sexually reproduce
|
||||
mutation_rate (float): Probability of a selected offspring to undergo mutation
|
||||
noise_level (float): The probability that the opponent's last move will be misrepresented to the agent
|
||||
|
||||
Returns:
|
||||
results (str): The results of the evolution in TSV format
|
||||
"""
|
||||
|
||||
population = initialise_population(size)
|
||||
fitnesses = list_fitnesses(population, num_iterations)
|
||||
fitnesses = list_fitnesses(population, num_iterations, noise_level)
|
||||
current_best = get_best(population, fitnesses, 0)
|
||||
|
||||
results = ["Generation\tBestFitness\tBestStrategy\tAvgFitness\t000\t001\t010\t011\t100\t101\t110\t111"]
|
||||
@ -272,7 +234,7 @@ def evolve(size, num_generations, give_up_after, num_iterations, selection_propo
|
||||
offspring = crossover(population, crossover_rate, size - len(population))
|
||||
population += mutate(offspring, mutation_rate)
|
||||
|
||||
fitnesses = list_fitnesses(population, num_iterations)
|
||||
fitnesses = list_fitnesses(population, num_iterations, noise_level)
|
||||
generation_best = get_best(population, fitnesses, generation)
|
||||
|
||||
if (generation_best['fitness'] > current_best['fitness']):
|
||||
@ -300,9 +262,13 @@ if __name__ == "__main__":
|
||||
parser.add_argument("-c", "--crossover-rate", type=float, help="Probability of a selected pair of solutions to sexually reproduce", required=False, default=0.8)
|
||||
parser.add_argument("-m", "--mutation-rate", type=float, help="Probability of a selected offspring to undergo mutation", required=False, default=0.1)
|
||||
parser.add_argument("-o", "--output-file", type=str, help="File to write TSV results to", required=False, default="output.tsv")
|
||||
parser.add_argument("-n", "--noise-level", type=float, help="The probability that the opponent's last move will be misrepresented to the agent", required=False, default=0)
|
||||
args=parser.parse_args()
|
||||
|
||||
results = evolve(args.size, args.num_generations, args.give_up_after, args.num_iterations, args.selection_proportion, args.crossover_rate, args.mutation_rate)
|
||||
results = evolve(args.size, args.num_generations, args.give_up_after, args.num_iterations, args.selection_proportion, args.crossover_rate, args.mutation_rate, args.noise_level)
|
||||
|
||||
for strategy in strategies:
|
||||
print(str(strategy) + ": " + str(fitness(strategy, args.num_iterations, args.noise_level)))
|
||||
|
||||
if (args.output_file):
|
||||
with open(args.output_file, "w") as f:
|
||||
|
@ -78,7 +78,228 @@
|
||||
\medskip
|
||||
|
||||
\section{Part 1: Evolution Against Fixed Strategies}
|
||||
\section{Part 2: Extension}
|
||||
\subsection{Implementation}
|
||||
To implement the genetic algorithm for this assignment, I largely re-used the general framework I developed in the previous assignment, making appropriate changes and removing unnecessary features.
|
||||
The genetic algorithm can be tuned by providing command-line flags \& arguments, the possible options for which can be displayed by running the program with the \texttt{-h} flag, i.e.,
|
||||
\mintinline{bash}{python3 ipd.py -h}, which gives the following output:
|
||||
|
||||
\begin{code}
|
||||
\begin{minted}[linenos, breaklines, frame=single]{text}
|
||||
usage: ipd.py [-h] [-s SIZE] [-g NUM_GENERATIONS] [-a GIVE_UP_AFTER]
|
||||
[-i NUM_ITERATIONS] [-p SELECTION_PROPORTION]
|
||||
[-c CROSSOVER_RATE] [-m MUTATION_RATE] [-o OUTPUT_FILE]
|
||||
|
||||
Program to evolve strategies for the Iterated Prisoner's Dilemma
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
-s, --size SIZE Initial population size
|
||||
-g, --num-generations NUM_GENERATIONS
|
||||
Number of generations
|
||||
-a, --give-up-after GIVE_UP_AFTER
|
||||
Number of generations to give up after if best
|
||||
solution has remained unchanged
|
||||
-i, --num-iterations NUM_ITERATIONS
|
||||
Number of iterations of the dilemma between two agents
|
||||
-p, --selection-proportion SELECTION_PROPORTION
|
||||
The proportion of the population to be selected
|
||||
(survive) on each generation
|
||||
-c, --crossover-rate CROSSOVER_RATE
|
||||
Probability of a selected pair of solutions to
|
||||
sexually reproduce
|
||||
-m, --mutation-rate MUTATION_RATE
|
||||
Probability of a selected offspring to undergo
|
||||
mutation
|
||||
-o, --output-file OUTPUT_FILE
|
||||
File to write TSV results to
|
||||
\end{minted}
|
||||
\caption{Output of \texttt{python3 ipd.py -h}}
|
||||
\end{code}
|
||||
|
||||
I chose to represent each strategy as a 3-bit string, where \verb|0| represents defection and \verb|1| represents co-operation;
|
||||
the first bit of the string represents the strategy's first move, the second bit represents the strategy's reaction to a defection by its opponent, and the third bit represents the strategy's reaction to a co-operation by its opponent.
|
||||
For that reason, there are only eight possible strategies in the search space:
|
||||
\begin{itemize}
|
||||
\item \verb|[0, 0, 0]|: always defect.
|
||||
\item \verb|[0, 0, 1]|: grim tit-for-tat.
|
||||
\item \verb|[0, 1, 0]|: defect at first, then do opposite of what opponent did last.
|
||||
\item \verb|[0, 1, 1]|: defect at first, then always co-operate.
|
||||
\item \verb|[1, 0, 0]|: feint co-operation, then always defect.
|
||||
\item \verb|[1, 0, 1]|: tit-for-tat.
|
||||
\item \verb|[1, 1, 0]|: co-operate at first, then do opposite of what opponent did last.
|
||||
\item \verb|[1, 1, 1]|: always co-operate.
|
||||
\end{itemize}
|
||||
|
||||
Because there are only 8 possibilities in the search space, and the recommended population size in the assignment specification was 50 -- 100, any random initialisation of the population was almost guaranteed to find not only the optimal solution, but every possible solution in the search space (assuming there is one optimal solution out of the eight, and that we randomly initialise 100 individuals, the chances of \textit{not} finding the optimal solution immediately are $\left(\frac{7}{8}\right)^{100} \approx 0.0000015878$).
|
||||
Therefore, I took the assignment not to focus on finding the optimal solution, but exploring how the population converges on the optimal solution for a given fixed fitness landscape.
|
||||
This small search space also meant that the population converges very quickly, and quickly sheds diversity in the population;
|
||||
in an attempt to mitigate this, I set the mutation rate to be relatively high (0.1), and allowed the number of generations to be longer than necessary so as to observe the population dynamics over time.
|
||||
My crossover \& mutation operators were also relatively simple to reflect this small search space:
|
||||
I implemented one crossover operator, that being single-point crossover, and one mutation operator, that being a simple bit-flip mutation.
|
||||
|
||||
\subsection{Exploring Convergence with different Fitness Evaluations}
|
||||
\subsubsection{Equally-Proportioned Always Co-Operate, Always Defect, \& Tit-for-Tat}
|
||||
When the fitness function consisted of an always co-operate strategy, an always defect strategy, \& a tit-for-tat strategy, the best-performing evolved strategy was \verb|[0,1,0]|: defect on the first move, then do the opposite of what the opponent did last time.
|
||||
It achieved a fitness of 75, narrowly outperforming its more-polite sibling of \verb|[1,1,0]| (co-operate at first, then do the opposite of what the opponent did) and \verb|[0,0,0]| (always defect), both with fitnesses of 74.
|
||||
This surprised me at first, but it makes sense:
|
||||
the strategy of doing the opposite of what the opponent last did performs poorly against Always Defect, as it will lose each time, but exploits Always Co-Operate efficiently, allowing it to gain a high fitness score.
|
||||
It also does reasonably well against Tit-for-Tat: every second iteration, it will successfully exploit Tit-for-Tat, but lose every other iteration.
|
||||
The strategy of defecting then doing the opposite of the opponent out-performs its sibling co-operate then do the opposite because it doesn't miss the opportunity to exploit Tit-for-Tat \& Always Co-Operate on its first move.
|
||||
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/onefitness.png}
|
||||
\caption{ Fitness over generations}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/1strats.png}
|
||||
\caption{Diversity of the strategy population over generations}
|
||||
\end{figure}
|
||||
|
||||
\subsubsection{$2 \times $ Always Defect, $1 \times$ Always Co-Operate, \& $1 \times$ Tit-for-Tat}
|
||||
\begin{code}
|
||||
\begin{minted}[linenos, breaklines, frame=single]{text}
|
||||
Best strategy: [0, 0, 0]
|
||||
Fitness: 84
|
||||
Generation: 0
|
||||
[0, 0, 0]: 84
|
||||
[0, 0, 1]: 77
|
||||
[0, 1, 0]: 76
|
||||
[0, 1, 1]: 63
|
||||
[1, 0, 0]: 82
|
||||
[1, 0, 1]: 78
|
||||
[1, 1, 0]: 74
|
||||
[1, 1, 1]: 60
|
||||
\end{minted}
|
||||
\caption{Output of $2 \times $ always defect, $1 \times$ always co-operate, \& $1 \times$ tit-for-tat}
|
||||
\end{code}
|
||||
|
||||
When a second Always Defect is added to the mix, it becomes a better strategy to also always defect:
|
||||
while this means it will do poorly against Always Defect and Tit-for-Tat, it can efficiently \& ruthlessly exploit the Always Co-Operate to gain a high fitness regardless.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/defect_fitness.png}
|
||||
\caption{Fitness over generations}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/defect_strats.png}
|
||||
\caption{Diversity of the strategy population over generations}
|
||||
\end{figure}
|
||||
|
||||
\subsubsection{$1 \times $ Always Defect, $2 \times$ Always Co-Operate, \& $1 \times$ Tit-for-Tat}
|
||||
\begin{code}
|
||||
\begin{minted}[linenos, breaklines, frame=single]{text}
|
||||
Best strategy: [0, 1, 0]
|
||||
Fitness: 125
|
||||
Generation: 0
|
||||
[0, 0, 0]: 124
|
||||
[0, 0, 1]: 99
|
||||
[0, 1, 0]: 125
|
||||
[0, 1, 1]: 94
|
||||
[1, 0, 0]: 121
|
||||
[1, 0, 1]: 99
|
||||
[1, 1, 0]: 122
|
||||
[1, 1, 1]: 90
|
||||
\end{minted}
|
||||
\caption{$1 \times $ always defect, $2 \times$ always co-operate, \& $1 \times$ tit-for-tat}
|
||||
\end{code}
|
||||
|
||||
When a second Always Co-Operate is added to the mix, the best strategy reverts to being defect on the first move, then do the opposite of what the opponent did, for the same reasons as previously explored.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/cop_fitness.png}
|
||||
\caption{Fitness over generations}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/cop_strats.png}
|
||||
\caption{Diversity of the strategy population over generations}
|
||||
\end{figure}
|
||||
|
||||
\subsubsection{$1 \times $ Always Defect, $1 \times$ Always Co-Operate, \& $3 \times$ Tit-for-Tat}
|
||||
\begin{code}
|
||||
\begin{minted}[linenos, breaklines, frame=single]{text}
|
||||
Best strategy: [1, 0, 1]
|
||||
Fitness: 129
|
||||
Generation: 0
|
||||
[0, 0, 0]: 102
|
||||
[0, 0, 1]: 117
|
||||
[0, 1, 0]: 123
|
||||
[0, 1, 1]: 120
|
||||
[1, 0, 0]: 105
|
||||
[1, 0, 1]: 129
|
||||
[1, 1, 0]: 126
|
||||
[1, 1, 1]: 120
|
||||
\end{minted}
|
||||
\caption{$1 \times $ always defect, $1 \times$ always co-operate, \& $3 \times$ tit-for-tat}
|
||||
\end{code}
|
||||
|
||||
When three Tit-for-Tats are added into the mix, we see Tit-for-Tat emerge as the dominant strategy, which is to be expected;
|
||||
it achieves a steady middle-ground approach via co-operation, and it has enough fellow co-operators to make up for the defection it suffers.
|
||||
The defector strategies get punished frequently enough to reduce their winnings from defection, allowing co-operation to become a dominant \& winning strategy.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/tit_fitness.png}
|
||||
\caption{Fitness over generations}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{./images/tit_strats.png}
|
||||
\caption{Diversity of the strategy population over generations}
|
||||
\end{figure}
|
||||
|
||||
\section{Part 2: Extension}
|
||||
To extend the genetic algorithm implementation, I chose to add a variable level of noise to the program, supplied via command-line argument \mintinline{shell}{-n, --noise-level NOISE_LEVEL}.
|
||||
I then re-ran the same experiments as before with varying noise levels.
|
||||
|
||||
\subsection{\mintinline{shell}{NOISE_LEVEL = 0.1}}
|
||||
The results that I got for each of the previously-attempted experiments when I set the noise level to 0.1 were as follows:
|
||||
\begin{itemize}
|
||||
\item \textbf{Equally proportioned:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always defect:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always co-operate:} \verb|[0,0,0]|.
|
||||
\item \textbf{$3 \times$ tit-for-tat:} \verb|[1,0,1]|.
|
||||
\end{itemize}
|
||||
|
||||
\subsection{\mintinline{shell}{NOISE_LEVEL = 0.2}}
|
||||
The results that I got for each of the previously-attempted experiments when I set the noise level to 0.2 were as follows:
|
||||
\begin{itemize}
|
||||
\item \textbf{Equally proportioned:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always defect:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always co-operate:} \verb|[0,0,0]|.
|
||||
\item \textbf{$3 \times$ tit-for-tat:} \verb|[0,0,0]|.
|
||||
\end{itemize}
|
||||
|
||||
\subsection{\mintinline{shell}{NOISE_LEVEL = 0.5}}
|
||||
The results that I got for each of the previously-attempted experiments when I set the noise level to 0.5 were as follows:
|
||||
\begin{itemize}
|
||||
\item \textbf{Equally proportioned:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always defect:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always co-operate:} \verb|[0,0,0]|.
|
||||
\item \textbf{$3 \times$ tit-for-tat:} \verb|[0,0,0]|.
|
||||
\end{itemize}
|
||||
\subsection{\mintinline{shell}{NOISE_LEVEL = 0.8}}
|
||||
The results that I got for each of the previously-attempted experiments when I set the noise level to 0.8 were as follows:
|
||||
\begin{itemize}
|
||||
\item \textbf{Equally proportioned:} \verb|[0, 0, 0]|.
|
||||
\item \textbf{$2 \times$ always defect:} \verb|[0,0,0]|.
|
||||
\item \textbf{$2 \times$ always co-operate:} \verb|[0,0,0]|.
|
||||
\item \textbf{$3 \times$ tit-for-tat:} \verb|[0,0,0]|.
|
||||
\end{itemize}
|
||||
|
||||
As can be seen from the above outputs, the introduction of even just a little noise to each evolution immediately broke any possibility for co-operation, the one exception being that tit-for-tat still performed well against three other tit-for-tats at the lowest noise level, most likely because it got some co-operation in before noise disrupted the chain of co-operation.
|
||||
Noise makes co-operation more difficult, and is highly detrimental to these simple strategies defined by short bitstrings:
|
||||
these genomes don't have the necessary complexity to express a level of forgiveness, so one bit of noise can destroy all chances of co-operation for the rest of the game.
|
||||
For a noisy environment, error-tolerant strategies are required, like generous tit-for-tat, which can avoid falling into the defection loops that overly rigid and/or grudging strategies fall into.
|
||||
|
||||
\end{document}
|
||||
|
After Width: | Height: | Size: 149 KiB |
After Width: | Height: | Size: 63 KiB |
After Width: | Height: | Size: 133 KiB |
After Width: | Height: | Size: 53 KiB |
After Width: | Height: | Size: 137 KiB |
After Width: | Height: | Size: 65 KiB |
After Width: | Height: | Size: 64 KiB |
After Width: | Height: | Size: 131 KiB |