Skip to content

Simulated Agents

Agent-based models for simulating navigation behavior in the labyrinth.

Overview

This module provides computational models that simulate different navigation strategies:

  • Basic simulated agents with configurable parameters
  • Explore-exploit agents that balance exploration and exploitation
  • Multi-agent simulations for comparative analysis

Simulated Agent

compass_labyrinth.behavior.behavior_metrics.simulation_modeling.simulated_agent

SIMULATED AGENT MODELING AND ANALYSIS Author: Shreya Bangera Goal: ├── Simulated Agent Modeling & Visualisation ├── Chi Square Analysis, Visualisation

get_valid_and_optimal_transitions

get_valid_and_optimal_transitions(
    df: DataFrame,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
) -> tuple[dict, dict]

Extract valid and optimal transitions per session.

Parameters:

  • df (DataFrame) –

    DataFrame containing navigation data.

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label for decision points.

  • reward_label (str, default: 'reward_path' ) –

    Label for reward path.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def get_valid_and_optimal_transitions(
    df: pd.DataFrame,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
) -> tuple[dict, dict]:
    """
    Extract valid and optimal transitions per session.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing navigation data. Must contain the columns
        'Session', 'NodeType', 'Grid Number' and 'Region'.
    decision_label : str
        Label for decision points.
    reward_label : str
        Label for reward path.

    Returns
    --------
    tuple[dict, dict]
        Two dicts keyed by session. Each maps a decision-point grid number
        to the set of grid numbers observed next (valid transitions), and to
        the subset of those whose region is the reward path (optimal
        transitions).
    """
    valid_transitions, optimal_transitions = {}, {}

    for session, group in df.groupby("Session"):
        valid, optimal = {}, {}

        # Pull the columns out once: per-row .iloc access inside the loop is
        # O(n) each and dominates runtime on long sessions.
        node_types = group["NodeType"].to_numpy()
        grids = group["Grid Number"].to_numpy()
        regions = group["Region"].to_numpy()

        for i in range(len(group) - 1):
            if node_types[i] == decision_label:
                current = grids[i]
                nxt = grids[i + 1]

                valid.setdefault(current, set()).add(nxt)
                # A transition is "optimal" when the next node lies on the
                # reward path.
                if regions[i + 1] == reward_label:
                    optimal.setdefault(current, set()).add(nxt)

        valid_transitions[session] = valid
        optimal_transitions[session] = optimal

    return valid_transitions, optimal_transitions

simulate_agent_vs_actual

simulate_agent_vs_actual(
    df_slice: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
) -> tuple[list, list]

Simulate random agent transitions and compare with actual.

Parameters:

  • df_slice (DataFrame) –

    DataFrame segment for the epoch.

  • valid_dict (dict) –

    Valid transitions for the session.

  • optimal_dict (dict) –

    Optimal transitions for the session.

  • n_simulations (int) –

    Number of random simulations per decision point.

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label for decision points.

Returns:

  • tuple of lists

    Lists of actual and simulated optimal transitions (1 for optimal, 0 otherwise).

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def simulate_agent_vs_actual(
    df_slice: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
) -> tuple[list, list]:
    """
    Simulate random agent transitions and compare with actual.

    For every decision point, records whether the mouse's next step was on
    the reward path, and the fraction of ``n_simulations`` uniformly random
    valid choices that would have been.

    Parameters
    -----------
    df_slice : pd.DataFrame
        DataFrame segment for the epoch. Must contain the columns
        'NodeType' and 'Grid Number'.
    valid_dict : dict
        Valid transitions for the session (grid -> set of next grids).
    optimal_dict : dict
        Optimal transitions for the session (grid -> set of next grids).
    n_simulations : int
        Number of random simulations per decision point.
    decision_label : str
        Label for decision points.

    Returns
    --------
    tuple of lists
        Lists of actual and simulated optimal transitions (1 for optimal, 0
        otherwise; simulated entries are per-decision-point means). A
        decision point with no recorded valid transitions contributes NaN
        to the simulated list.
    """
    actual, simulated = [], []

    # Extract the columns once: repeated .iloc row access is slow on long
    # slices.
    node_types = df_slice["NodeType"].to_numpy()
    grids = df_slice["Grid Number"].to_numpy()

    for i in range(len(df_slice) - 1):
        if node_types[i] != decision_label:
            continue

        current = grids[i]
        actual_next = grids[i + 1]
        # Hoist the lookup: it is used for the actual check and every
        # simulated draw.
        optimal_set = optimal_dict.get(current, set())

        actual.append(1 if actual_next in optimal_set else 0)

        valid_choices = list(valid_dict.get(current, ()))
        if not valid_choices:
            # Previously np.mean([]) produced NaN plus a RuntimeWarning;
            # keep the NaN result but emit it explicitly and warning-free.
            simulated.append(np.nan)
            continue

        rand_results = [
            1 if random.choice(valid_choices) in optimal_set else 0
            for _ in range(n_simulations)
        ]
        simulated.append(np.mean(rand_results))

    return actual, simulated

bootstrap_distribution

bootstrap_distribution(
    data: list, n_samples: int = 10000
) -> np.ndarray

Generate bootstrap sample means.

Parameters:

  • data (list) –

    Data points.

  • n_samples (int, default: 10000 ) –

    Number of bootstrap samples.

Returns:

  • ndarray

    Array of bootstrap sample means.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def bootstrap_distribution(
    data: list,
    n_samples: int = 10000,
) -> np.ndarray:
    """
    Generate bootstrap sample means.

    Each bootstrap sample resamples `data` with replacement to its original
    length; the mean of every resample is returned.

    Parameters
    -----------
    data : list
        Data points.
    n_samples : int
        Number of bootstrap samples.

    Returns
    --------
    np.ndarray
        Array of bootstrap sample means, one per sample.
    """
    resample_shape = (n_samples, len(data))
    resamples = np.random.choice(data, resample_shape, replace=True)
    return resamples.mean(axis=1)

compute_epoch_metrics

compute_epoch_metrics(
    df_slice: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
) -> pd.Series

Compute performance metrics for a single epoch of navigation.

Parameters:

  • df_slice (DataFrame) –

    DataFrame segment for the epoch.

  • valid_dict (dict) –

    Valid transitions for the session.

  • optimal_dict (dict) –

    Optimal transitions for the session.

  • n_bootstrap (int) –

    Number of bootstrap samples.

  • n_simulations (int) –

    Number of random simulations per decision point.

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label for decision points.

Returns:

  • Series

    Series with computed metrics.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def compute_epoch_metrics(
    df_slice: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
) -> pd.Series:
    """
    Compute performance metrics for a single epoch of navigation.

    Parameters
    -----------
    df_slice : pd.DataFrame
        DataFrame segment for the epoch.
    valid_dict : dict
        Valid transitions for the session.
    optimal_dict : dict
        Optimal transitions for the session.
    n_bootstrap : int
        Number of bootstrap samples.
    n_simulations : int
        Number of random simulations per decision point.
    decision_label : str
        Label for decision points.

    Returns
    --------
    pd.Series
        Series with computed metrics (bootstrap means, 5th/95th percentiles
        and the actual/simulated performance ratio). All values are NaN when
        the epoch contains no usable decision points.
    """
    metric_names = [
        "Actual Reward Path %",
        "Simulated Agent Reward Path %",
        "Actual Reward Path % CI Lower",
        "Actual Reward Path % CI Upper",
        "Simulated Agent Reward Path % CI Lower",
        "Simulated Agent Reward Path % CI Upper",
        "Relative Performance",
    ]

    def _all_nan() -> pd.Series:
        # Single source of truth for the "no data" result (previously
        # duplicated inline at both early-return sites).
        return pd.Series({name: np.nan for name in metric_names})

    if df_slice.empty or decision_label not in df_slice["NodeType"].values:
        return _all_nan()

    actual, simulated = simulate_agent_vs_actual(df_slice, valid_dict, optimal_dict, n_simulations, decision_label)

    if not actual or not simulated:
        return _all_nan()

    actual_dist = bootstrap_distribution(actual, n_bootstrap)
    simulated_dist = bootstrap_distribution(simulated, n_bootstrap)

    # Hoist the means: each feeds two of the reported metrics.
    actual_mean = np.mean(actual_dist)
    simulated_mean = np.mean(simulated_dist)

    return pd.Series(
        {
            "Actual Reward Path %": actual_mean,
            "Simulated Agent Reward Path %": simulated_mean,
            # NOTE(review): the 5th/95th percentiles form a 90% interval,
            # despite the generic "CI" labels.
            "Actual Reward Path % CI Lower": np.percentile(actual_dist, 5),
            "Actual Reward Path % CI Upper": np.percentile(actual_dist, 95),
            "Simulated Agent Reward Path % CI Lower": np.percentile(simulated_dist, 5),
            "Simulated Agent Reward Path % CI Upper": np.percentile(simulated_dist, 95),
            "Relative Performance": actual_mean / simulated_mean if simulated_mean > 0 else np.nan,
        }
    )

segment_data_by_epoch

segment_data_by_epoch(
    df: DataFrame, epoch_size: int
) -> list

Split DataFrame by genotype and session into sequential time-based epochs.

Parameters:

  • df (DataFrame) –

    DataFrame containing navigation data.

  • epoch_size (int) –

    Number of rows per epoch.

Returns:

  • list of tuples

    Each tuple contains (session, epoch_number, epoch_dataframe).

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def segment_data_by_epoch(
    df: pd.DataFrame,
    epoch_size: int,
) -> list:
    """
    Split DataFrame by genotype and session into sequential time-based epochs.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing navigation data. Must contain 'Genotype' and
        'Session' columns.
    epoch_size : int
        Number of rows per epoch.

    Returns
    --------
    list of tuples
        Each tuple contains (session, epoch_number, epoch_dataframe). Epoch
        numbers start at 1 within each (genotype, session) group; the last
        epoch of a group may be shorter than ``epoch_size``.
    """
    epochs = []
    # Genotype is part of the grouping key only to keep sessions from
    # different genotypes separate; it is not included in the output tuples.
    for (_genotype, session), group in df.groupby(["Genotype", "Session"]):
        for start in range(0, len(group), epoch_size):
            # Slices are never empty here: start < len(group) by
            # construction, so the previous emptiness check was dead code.
            segment = group.iloc[start : start + epoch_size]
            epochs.append((session, start // epoch_size + 1, segment))
    return epochs

trim_to_common_epochs

trim_to_common_epochs(
    df_results: DataFrame,
) -> pd.DataFrame

Trims the results dataframe to retain only the maximum number of epochs common across all sessions.

Parameters:

  • df_results (DataFrame) –

    The output of evaluate_agent_performance. - 'Session': Column name indicating sessions. - 'Epoch Number': Column name indicating epoch/bin number.

Returns:

  • DataFrame

    Trimmed dataframe with only common epochs.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def trim_to_common_epochs(df_results: pd.DataFrame) -> pd.DataFrame:
    """
    Trims the results dataframe to retain only the maximum number of epochs common across all sessions.

    Parameters
    -----------
    df_results : pd.DataFrame
        The output of evaluate_agent_performance. Must contain:
            - 'Session': column indicating sessions (castable to int).
            - 'Epoch Number': column indicating epoch/bin number (castable to int).

    Returns
    --------
    pd.DataFrame
        Trimmed dataframe with only common epochs.
    """
    df_trimmed = df_results.copy()

    # Guard: with no rows there are no per-session epoch sets, and the
    # set.intersection(*[]) call below would raise a TypeError.
    if df_trimmed.empty:
        print("Warning: No common epochs across sessions. Returning original dataframe.")
        return df_trimmed

    # Ensure correct dtypes
    df_trimmed["Session"] = df_trimmed["Session"].astype(int)
    df_trimmed["Epoch Number"] = df_trimmed["Epoch Number"].astype(int)

    # Find common epochs across all sessions
    epoch_sets = df_trimmed.groupby("Session")["Epoch Number"].apply(set)
    common_epochs = set.intersection(*epoch_sets)

    if not common_epochs:
        print("Warning: No common epochs across sessions. Returning original dataframe.")
        return df_trimmed

    max_common_epoch = max(common_epochs)
    print(f" Max common epoch across all sessions: {max_common_epoch}")

    # Keep every epoch up to the largest one present in all sessions.
    df_trimmed = df_trimmed[df_trimmed["Epoch Number"] <= max_common_epoch].reset_index(drop=True)
    return df_trimmed

evaluate_agent_performance

evaluate_agent_performance(
    df: DataFrame,
    epoch_size: int,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    genotype: str | None = None,
    trim: bool = True,
) -> dict

Run full evaluation pipeline for simulated agent vs. actual mouse.

Parameters:

  • df (DataFrame) –

    DataFrame containing navigation data.

  • epoch_size (int) –

    Number of rows per epoch.

  • n_bootstrap (int) –

    Number of bootstrap samples.

  • n_simulations (int) –

    Number of random simulations per decision point.

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label for decision points.

  • reward_label (str, default: 'reward_path' ) –

    Label for reward path.

  • genotype (str | None, default: None ) –

    Genotype to filter data.

  • trim (bool, default: True ) –

    Whether to trim to common epochs across sessions.

Returns:

  • dict –

    Dictionary mapping each genotype to a DataFrame with performance metrics per epoch.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def evaluate_agent_performance(
    df: pd.DataFrame,
    epoch_size: int,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    genotype: str | None = None,
    trim: bool = True,
) -> pd.DataFrame:
    """
    Run full evaluation pipeline for simulated agent vs. actual mouse.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing navigation data.
    epoch_size : int
        Number of rows per epoch.
    n_bootstrap : int
        Number of bootstrap samples.
    n_simulations : int
        Number of random simulations per decision point.
    decision_label : str
        Label for decision points.
    reward_label : str
        Label for reward path.
    genotype : str | None
        Genotype to filter data.
    trim : bool
        Whether to trim to common epochs across sessions.

    Returns
    --------
    pd.DataFrame
        DataFrame with performance metrics per epoch.
    """
    df = df.copy()

    # Filter by genotype if specified
    if genotype is not None:
        if genotype not in df["Genotype"].unique():
            raise ValueError(f"Genotype '{genotype}' not found in DataFrame.")
        genotypes = [genotype]
    else:
        genotypes = df["Genotype"].unique()

    results = dict()
    for i, genotype in enumerate(genotypes):
        df_genotype = df.loc[df["Genotype"] == genotype]

        valid_dict, optimal_dict = get_valid_and_optimal_transitions(df_genotype, decision_label, reward_label)
        epochs = segment_data_by_epoch(df_genotype, epoch_size)

        all_results = []
        for session, epoch_num, segment in epochs:
            valid = valid_dict.get(session, {})
            optimal = optimal_dict.get(session, {})
            result = compute_epoch_metrics(segment, valid, optimal, n_bootstrap, n_simulations, decision_label)
            result["Session"] = session
            result["Epoch Number"] = epoch_num
            all_results.append(result)

        if trim:
            df_results = pd.DataFrame(all_results)
            df_results = trim_to_common_epochs(df_results)
        else:
            df_results = pd.DataFrame(all_results)

        results[genotype] = df_results

    return results

plot_agent_transition_performance

plot_agent_transition_performance(
    config: dict,
    evaluation_results: dict,
    genotype: str | None = None,
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plot performance comparison between actual mouse and simulated agent over time.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • evaluation_results (dict) –

    Dictionary with evaluation results for each genotype.

  • genotype (str | None, default: None ) –

    Specific genotype to plot. If None, plots all genotypes.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_agent_transition_performance(
    config: dict,
    evaluation_results: dict,
    genotype: str | None = None,
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plot performance comparison between actual mouse and simulated agent over time.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings. Must provide
        'project_path_full' when save_fig is True.
    evaluation_results : dict
        Dictionary with evaluation results for each genotype.
    genotype : str | None
        Specific genotype to plot. If None, plots all genotypes.
    save_fig : bool
        Whether to save the figure.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.

    Raises
    -------
    ValueError
        If `genotype` is given but not present in `evaluation_results`.
    """
    if genotype is not None:
        if genotype not in evaluation_results:
            raise ValueError(f"Genotype '{genotype}' not found in evaluation results.")
        genotypes = [genotype]
    else:
        genotypes = evaluation_results.keys()
    n_genotypes = len(genotypes)

    # Near-square subplot grid.
    n_cols = math.ceil(np.sqrt(n_genotypes))
    n_rows = math.ceil(n_genotypes / n_cols)

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(6 * n_cols, 5 * n_rows), squeeze=False)
    axes = axes.flatten()

    for i, genotype in enumerate(genotypes):
        ax = axes[i]
        df_result = evaluation_results[genotype]

        sns.lineplot(
            data=df_result,
            x="Epoch Number",
            y="Actual Reward Path %",
            marker="o",
            label="Mouse",
            color="black",
            ax=ax,
        )
        sns.lineplot(
            data=df_result,
            x="Epoch Number",
            y="Simulated Agent Reward Path %",
            linestyle="dashed",
            label="Simulated Agent",
            color="navy",
            ax=ax,
        )

        ax.set_title(f"{genotype}: Mouse vs. Agent")
        ax.set_xlabel("Epochs (in Maze)")
        ax.set_ylabel("Reward Path Transition %")
        ax.grid(True)
        ax.legend()

    # Hide unused axes
    for j in range(len(genotypes), len(axes)):
        fig.delaxes(axes[j])

    fig.suptitle("Mouse vs. Simulated Agent: Reward Path Transition Proportion", fontsize=16)
    plt.tight_layout(rect=[0, 0, 1, 0.97])

    # Save figure (the redundant `fig = plt.gcf()` re-fetch was removed:
    # `fig` already references the figure created above).
    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "all_genotypes_sim_agent_mouse_perf.pdf"
        # savefig does not create missing directories.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    # Show figure
    if show_fig:
        plt.show()

    # Return figure
    if return_fig:
        return fig

plot_relative_agent_performance

plot_relative_agent_performance(
    config: dict,
    evaluation_results: dict,
    genotype: str | None = None,
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plot relative performance of mouse vs simulated agent over time.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • evaluation_results (dict) –

    Dictionary with evaluation results for each genotype.

  • genotype (str | None, default: None ) –

    Specific genotype to plot. If None, plots all genotypes.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_relative_agent_performance(
    config: dict,
    evaluation_results: dict,
    genotype: str | None = None,
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plot relative performance of mouse vs simulated agent over time.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings. Must provide
        'project_path_full' when save_fig is True.
    evaluation_results : dict
        Dictionary with evaluation results for each genotype.
    genotype : str | None
        Specific genotype to plot. If None, plots all genotypes.
    save_fig : bool
        Whether to save the figure.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.

    Raises
    -------
    ValueError
        If `genotype` is given but not present in `evaluation_results`.
    """
    if genotype is not None:
        if genotype not in evaluation_results:
            raise ValueError(f"Genotype '{genotype}' not found in evaluation results.")
        genotypes = [genotype]
    else:
        genotypes = evaluation_results.keys()
    n_genotypes = len(genotypes)

    # Single column: genotypes are stacked vertically.
    n_cols = 1
    n_rows = n_genotypes

    fig, axes = plt.subplots(n_rows, n_cols, figsize=(12, 5 * n_rows), squeeze=False)
    axes = axes.flatten()

    for i, genotype in enumerate(genotypes):
        ax = axes[i]
        df_result = evaluation_results[genotype]
        sns.lineplot(
            data=df_result,
            x="Epoch Number",
            y="Relative Performance",
            marker="o",
            color="black",
            ax=ax,
        )
        # A ratio of 1 means the mouse matches the random agent.
        ax.axhline(
            y=1,
            color="black",
            linestyle="dashed",
            label="Simulated Agent Baseline",
        )

        ax.set_xlabel("Epochs (in Maze)")
        ax.set_ylabel("Relative Performance (Mouse / Simulated)")
        ax.set_title(f"{genotype}: Mouse vs. Simulated Agent - Relative Performance Over Time")
        ax.legend(["Relative Performance", "Simulated Agent Baseline"])
        ax.grid(True)

    # Lay out once after all axes exist (previously re-run per subplot).
    plt.tight_layout()

    # Save figure
    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "all_genotypes_relative_perf.pdf"
        # savefig does not create missing directories.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    # Show figure
    if show_fig:
        plt.show()

    # Return figure
    if return_fig:
        return fig

plot_agent_performance_boxplot

plot_agent_performance_boxplot(
    df_long: DataFrame,
    p_value: float,
    palette: None | list = None,
) -> None

Plot boxplot comparing actual vs simulated agent with p-value annotation.

Parameters:

  • df_long (DataFrame) –

    Long-form DataFrame.

  • p_value (float) –

    P-value from mixed model.

  • palette (list or None, default: None ) –

    Color palette for the boxplot.

Returns:

  • None
Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_agent_performance_boxplot(df_long: pd.DataFrame, p_value: float, palette: None | list = None) -> None:
    """
    Plot boxplot comparing actual vs simulated agent with p-value annotation.

    Draws a flier-free boxplot of 'Performance' by 'AgentType' and labels the
    two boxes "Mouse" and "Simulated Agent".

    Parameters
    -----------
    df_long : pd.DataFrame
        Long-form DataFrame.
    p_value : float
        P-value from mixed model.
    palette : list or None
        Color palette for the boxplot.

    Returns
    --------
    None
    """
    plt.figure(figsize=(6, 6))
    sns.boxplot(x="AgentType", y="Performance", data=df_long, palette=palette, showfliers=False)

    title_text = f"Performance: Mouse vs. Simulated Agent (across sessions)\n LMM p-value = {p_value:.4f}"
    plt.title(title_text, fontsize=13)
    plt.xlabel("Agent Type", fontsize=11)
    plt.ylabel("Proportion of Optimal Transitions", fontsize=11)
    plt.xticks(ticks=[0, 1], labels=["Mouse", "Simulated Agent"], fontsize=10)
    plt.tight_layout()

reshape_for_mixedlm

reshape_for_mixedlm(df_results: DataFrame) -> pd.DataFrame

Reshape the dataframe to long format for mixed-effects modeling.

Parameters:

  • df_results (DataFrame) –

    DataFrame with columns 'Actual Reward Path %', 'Simulated Agent Reward Path %', 'Session', 'Epoch Number' and 'Genotype'.

Returns:

  • DataFrame

    Long-form DataFrame suitable for mixedlm.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def reshape_for_mixedlm(df_results: pd.DataFrame) -> pd.DataFrame:
    """
    Reshape the dataframe to long format for mixed-effects modeling.

    Melts the two wide performance columns into an 'AgentType'/'Performance'
    pair, drops rows with missing performance, and stringifies 'Session' so
    it can serve as a grouping factor.

    Parameters
    -----------
    df_results : pd.DataFrame
        DataFrame with columns 'Actual Reward Path %', 'Simulated Agent Reward Path %',
        'Session', 'Epoch Number' and 'Genotype'.

    Returns
    --------
    pd.DataFrame
        Long-form DataFrame suitable for mixedlm.
    """
    id_columns = ["Session", "Epoch Number", "Genotype"]
    wide_columns = ["Actual Reward Path %", "Simulated Agent Reward Path %"]

    long_df = df_results.melt(
        id_vars=id_columns,
        value_vars=wide_columns,
        var_name="AgentType",
        value_name="Performance",
    ).dropna(subset=["Performance"])
    long_df["Session"] = long_df["Session"].astype(str)
    return long_df.reset_index(drop=True)

fit_mixed_effects_model

fit_mixed_effects_model(df_long: DataFrame) -> tuple

Fit a linear mixed-effects model comparing agent types.

Parameters:

  • df_long (DataFrame) –

    Long-form DataFrame.

Returns:

  • tuple

    Tuple with result (Fitted model object) and p_value (P-value for AgentType effect).

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def fit_mixed_effects_model(df_long: pd.DataFrame) -> tuple:
    """
    Fit a linear mixed-effects model comparing agent types.

    Fits ``Performance ~ AgentType`` with random intercepts grouped by
    'Session', then extracts the p-value of the simulated-agent coefficient.

    Parameters
    -----------
    df_long : pd.DataFrame
        Long-form DataFrame.

    Returns
    --------
    tuple
        Tuple with result (Fitted model object) and p_value (P-value for AgentType effect).
    """
    model = mixedlm("Performance ~ AgentType", df_long, groups=df_long["Session"])
    result = model.fit()
    # The coefficient label is generated by the formula interface, so locate
    # it by substring instead of hard-coding the dummy-encoded name; fall
    # back to NaN when no matching coefficient exists.
    p_value = np.nan
    for coef_name, coef_pval in result.pvalues.items():
        if "Simulated Agent" in coef_name:
            p_value = coef_pval
            break
    return result, p_value

plot_agent_performance_boxplot_ax

plot_agent_performance_boxplot_ax(
    ax: Axes,
    df_long: DataFrame,
    p_value: float,
    palette: list | None = None,
    genotype: str | None = None,
) -> None

Plot a boxplot of agent performance.

Parameters:

  • ax (Axes) –

    Matplotlib Axes object to plot on.

  • df_long (DataFrame) –

    Long-form DataFrame.

  • p_value (float) –

    P-value from mixed model.

  • palette (list or None, default: None ) –

    Color palette for the boxplot.

  • genotype (str or None, default: None ) –

    Genotype name for the title.

Returns:

  • None
Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_agent_performance_boxplot_ax(
    ax: plt.Axes,
    df_long: pd.DataFrame,
    p_value: float,
    palette: list | None = None,
    genotype: str | None = None,
) -> None:
    """
    Plot a boxplot of agent performance.

    Draws a flier-free boxplot of 'Performance' by 'AgentType' onto the
    supplied axes and annotates the title with the genotype and LMM p-value.

    Parameters
    -----------
    ax : plt.Axes
        Matplotlib Axes object to plot on.
    df_long : pd.DataFrame
        Long-form DataFrame.
    p_value : float
        P-value from mixed model.
    palette : list or None
        Color palette for the boxplot.
    genotype : str or None
        Genotype name for the title.

    Returns
    --------
    None
    """
    sns.boxplot(
        x="AgentType",
        y="Performance",
        data=df_long,
        palette=palette,
        showfliers=False,
        ax=ax,
    )
    ax.set_title(f"Mouse vs. Agent Performance\n{genotype} | LMM p = {p_value:.4f}", fontsize=11)
    ax.set_xlabel("Agent Type", fontsize=10)
    ax.set_ylabel("Proportion Optimal", fontsize=10)

run_mixedlm_for_all_genotypes

run_mixedlm_for_all_genotypes(
    config: dict,
    evaluation_results: dict,
    plot_palette=None,
    save_fig: bool = True,
    show_fig: bool = True,
) -> dict

Run mixed-effects modeling and plot results for all genotypes.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • evaluation_results (dict) –

    Dictionary with evaluation results for each genotype.

  • plot_palette (list or None, default: None ) –

    Color palette for the boxplots.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

Returns:

  • dict

    Dictionary with p-values for each genotype.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def run_mixedlm_for_all_genotypes(
    config: dict,
    evaluation_results: dict,
    plot_palette=None,
    save_fig: bool = True,
    show_fig: bool = True,
) -> dict:
    """
    Run mixed-effects modeling and plot results for all genotypes.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings. Must provide
        'project_path_full' when save_fig is True.
    evaluation_results : dict
        Dictionary with evaluation results for each genotype.
    plot_palette : list or None
        Color palette for the boxplots.
    save_fig : bool
        Whether to save the figure.
    show_fig : bool
        Whether to display the figure.

    Returns
    --------
    dict
        Dictionary with p-values for each genotype.
    """
    genotype_pvals = {}

    genotypes = evaluation_results.keys()
    n_genotypes = len(genotypes)

    # Near-square subplot grid.
    n_cols = math.ceil(n_genotypes**0.5)
    n_rows = math.ceil(n_genotypes / n_cols)
    fig, axs = plt.subplots(n_rows, n_cols, figsize=(5 * n_cols, 5 * n_rows))

    # Safe handling: ensure axs is always iterable
    if isinstance(axs, np.ndarray):
        axs = axs.flatten()
    else:
        axs = [axs]

    for i, genotype in enumerate(genotypes):
        # Work on a copy: previously the caller's DataFrames inside
        # evaluation_results were mutated when 'Genotype' was added.
        df_eval = evaluation_results[genotype].copy()
        df_eval["Genotype"] = genotype

        df_long = reshape_for_mixedlm(df_eval)
        _result, p_val = fit_mixed_effects_model(df_long)
        genotype_pvals[genotype] = p_val

        plot_agent_performance_boxplot_ax(axs[i], df_long, p_val, palette=plot_palette, genotype=genotype)

    # Hide unused axes
    for j in range(n_genotypes, len(axs)):
        fig.delaxes(axs[j])

    plt.tight_layout()
    # Save figure
    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "cumulative_sim_agent_mouse_perf.pdf"
        # savefig does not create missing directories.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    # Show figure
    if show_fig:
        plt.show()

    return genotype_pvals

compute_chi_square_statistic

compute_chi_square_statistic(df: DataFrame) -> pd.DataFrame

Compute the chi-square statistic between actual and simulated reward path usage for each row in the DataFrame. Also ensures 'Epoch Number' and 'Session' are integers.

Parameters:

  • df (DataFrame) –

    DataFrame with columns 'Actual Reward Path %' and 'Simulated Agent Reward Path %'.

Returns:

  • DataFrame

    Updated DataFrame with 'Chi Square Statistic' and cleaned column types.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def compute_chi_square_statistic(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add a per-row chi-square statistic comparing actual vs. simulated reward path usage.

    The statistic is (observed - expected)^2 / expected, with the simulated
    agent's percentage taken as the expected value. Also normalizes the
    'Epoch Number' and 'Session' columns to integer dtype when present.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame with columns 'Actual Reward Path %' and 'Simulated Agent Reward Path %'.

    Returns
    --------
    pd.DataFrame
        Copy of the input with a 'Chi Square Statistic' column and integer
        'Epoch Number' / 'Session' columns.
    """
    out = df.copy()
    observed = out["Actual Reward Path %"]
    expected = out["Simulated Agent Reward Path %"]
    out["Chi Square Statistic"] = (observed - expected) ** 2 / expected
    # Normalize identifier columns so downstream grouping/merging is stable.
    for col in ("Epoch Number", "Session"):
        if col in out.columns:
            out[col] = out[col].astype(int)
    return out

compute_rolling_chi_square

compute_rolling_chi_square(
    df: DataFrame, window: int = 3
) -> pd.DataFrame

Compute rolling average of chi-square statistic within each session.

Parameters:

  • df (DataFrame) –

    DataFrame with 'Chi Square Statistic' column.

  • window (int, default: 3 ) –

    Window size for rolling average.

Returns:

  • DataFrame

    Updated DataFrame with 'Rolling Chi Square' column.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def compute_rolling_chi_square(df: pd.DataFrame, window: int = 3) -> pd.DataFrame:
    """
    Add a per-session rolling average of the chi-square statistic.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame with 'Session' and 'Chi Square Statistic' columns.
    window : int
        Window size for the rolling average. min_periods=1, so the first
        rows of each session average over however many values exist so far.

    Returns
    --------
    pd.DataFrame
        Copy of the input with a 'Rolling Chi Square' column.
    """

    def _rolling_mean(values: pd.Series) -> pd.Series:
        # Rolling mean within a single session's values.
        return values.rolling(window=window, min_periods=1).mean()

    out = df.copy()
    out["Rolling Chi Square"] = out.groupby("Session")["Chi Square Statistic"].transform(_rolling_mean)
    return out

compute_cumulative_chi_square

compute_cumulative_chi_square(
    df: DataFrame,
) -> pd.DataFrame

Compute cumulative sum of chi-square statistic within each session.

Parameters:

  • df (DataFrame) –

    DataFrame with 'Chi Square Statistic' column.

Returns:

  • DataFrame

    Updated DataFrame with 'Cumulative Chi Square' column.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def compute_cumulative_chi_square(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add a per-session cumulative sum of the chi-square statistic.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame with 'Session' and 'Chi Square Statistic' columns.

    Returns
    --------
    pd.DataFrame
        Copy of the input with a 'Cumulative Chi Square' column.
    """
    out = df.copy()
    per_session = out.groupby("Session")["Chi Square Statistic"]
    out["Cumulative Chi Square"] = per_session.cumsum()
    return out

run_chi_square_analysis

run_chi_square_analysis(
    config: dict,
    evaluation_results: dict,
    rolling_window: int = 3,
) -> dict

Run chi-square analysis for each genotype in the evaluation results.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def run_chi_square_analysis(
    config: dict,
    evaluation_results: dict,
    rolling_window: int = 3,
) -> dict:
    """
    Run the full chi-square pipeline (per-row statistic, rolling mean,
    cumulative sum) for each genotype in the evaluation results.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
        NOTE(review): currently unused here; kept for API symmetry with the
        other analysis entry points.
    evaluation_results : dict
        Mapping of genotype -> evaluation DataFrame.
    rolling_window : int
        Window size for the rolling chi-square average.

    Returns
    --------
    dict
        Mapping of genotype -> DataFrame with the chi-square columns added.
    """
    results: dict = {}
    for genotype, df_eval in evaluation_results.items():
        df_geno = df_eval.copy()
        df_geno["Genotype"] = genotype
        df_geno = compute_chi_square_statistic(df=df_geno)
        df_geno = compute_rolling_chi_square(df=df_geno, window=rolling_window)
        df_geno = compute_cumulative_chi_square(df=df_geno)
        results[genotype] = df_geno
    return results

plot_chi_square_and_rolling

plot_chi_square_and_rolling(
    config: dict,
    chisquare_results: dict,
    epoch_col: str = "Epoch Number",
    chi_col: str = "Chi Square Statistic",
    rolling_col: str = "Rolling Chi Square",
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plot chi-square and rolling statistics for each genotype.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • chisquare_results (dict) –

    Chi-square results dictionary.

  • epoch_col (str, default: 'Epoch Number' ) –

    Column name for epochs.

  • chi_col (str, default: 'Chi Square Statistic' ) –

    Column name for chi-square statistic.

  • rolling_col (str, default: 'Rolling Chi Square' ) –

    Column name for rolling chi-square.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_chi_square_and_rolling(
    config: dict,
    chisquare_results: dict,
    epoch_col: str = "Epoch Number",
    chi_col: str = "Chi Square Statistic",
    rolling_col: str = "Rolling Chi Square",
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plot the per-epoch chi-square statistic (bars) with its rolling average
    (line), one vertically stacked subplot per genotype.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
    chisquare_results : dict
        Mapping of genotype -> DataFrame with chi-square columns.
    epoch_col : str
        Column name for epochs.
    chi_col : str
        Column name for the chi-square statistic.
    rolling_col : str
        Column name for the rolling chi-square average.
    save_fig : bool
        Whether to save the figure under the project's figures directory.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.
    """
    genotype_names = list(chisquare_results.keys())
    n_rows = len(genotype_names)

    # One column, one row per genotype; squeeze=False keeps a 2-D axes array.
    fig, axes_grid = plt.subplots(n_rows, 1, figsize=(12, 5 * n_rows), squeeze=False)
    axes_flat = axes_grid.flatten()

    for ax, genotype in zip(axes_flat, genotype_names):
        df_geno = chisquare_results[genotype]

        # Bars: raw per-epoch statistic with standard-error bars.
        sns.barplot(
            data=df_geno,
            x=epoch_col,
            y=chi_col,
            errorbar="se",
            palette="viridis",
            ax=ax,
        )
        # Line: smoothed trend overlaid on the same axes.
        sns.lineplot(
            data=df_geno,
            x=epoch_col,
            y=rolling_col,
            color="black",
            lw=2,
            ax=ax,
        )

        ax.set_title(f"{genotype}: Chi-Square & Rolling")
        ax.set_xlabel("Epochs")
        ax.set_ylabel("Chi-Square")

    # Drop any axes beyond the number of genotypes (a no-op in this
    # one-column layout, kept for safety).
    for extra_ax in axes_flat[n_rows:]:
        fig.delaxes(extra_ax)

    fig.suptitle("Chi-Square Statistic + Rolling Average by Genotype", fontsize=16)
    plt.tight_layout(rect=[0, 0, 1, 0.97])

    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "all_genotypes_chi_square_rolling.pdf"
        fig.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    if show_fig:
        plt.show()

    if return_fig:
        return fig

plot_rolling_mean

plot_rolling_mean(
    config: dict,
    chisquare_results: dict,
    epoch_col: str = "Epoch Number",
    rolling_col: str = "Rolling Chi Square",
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plot rolling chi-square statistics for each genotype.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • chisquare_results (dict) –

    Chi-square results dictionary.

  • epoch_col (str, default: 'Epoch Number' ) –

    Column name for epochs.

  • rolling_col (str, default: 'Rolling Chi Square' ) –

    Column name for rolling chi-square.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_rolling_mean(
    config: dict,
    chisquare_results: dict,
    epoch_col: str = "Epoch Number",
    rolling_col: str = "Rolling Chi Square",
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plot the rolling chi-square statistic as a bar chart, one vertically
    stacked subplot per genotype.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
    chisquare_results : dict
        Mapping of genotype -> DataFrame with chi-square columns.
    epoch_col : str
        Column name for epochs.
    rolling_col : str
        Column name for the rolling chi-square average.
    save_fig : bool
        Whether to save the figure under the project's figures directory.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.
    """
    genotype_names = list(chisquare_results.keys())
    n_rows = len(genotype_names)

    # One column, one row per genotype; squeeze=False keeps a 2-D axes array.
    fig, axes_grid = plt.subplots(n_rows, 1, figsize=(12, 5 * n_rows), squeeze=False)
    axes_flat = axes_grid.flatten()

    for ax, genotype in zip(axes_flat, genotype_names):
        df_geno = chisquare_results[genotype]

        sns.barplot(
            data=df_geno,
            x=epoch_col,
            y=rolling_col,
            errorbar="se",
            palette="Blues",
            ax=ax,
        )
        ax.set_title(f"{genotype}: Rolling Chi-Square")
        ax.set_xlabel("Epochs")
        ax.set_ylabel("Rolling Stat")

    # Drop unused axes (a no-op in this one-column layout, kept for safety).
    for extra_ax in axes_flat[n_rows:]:
        fig.delaxes(extra_ax)

    fig.suptitle("Rolling Chi-Square by Genotype", fontsize=16)
    plt.tight_layout(rect=[0, 0, 1, 0.97])

    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "all_genotypes_average_chi_square_rolling.pdf"
        fig.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    if show_fig:
        plt.show()

    if return_fig:
        return fig

plot_cumulative_chi_square

plot_cumulative_chi_square(
    config: dict,
    chisquare_results: dict,
    epoch_col: str = "Epoch Number",
    cum_col: str = "Cumulative Chi Square",
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plot cumulative chi-square statistics for each genotype.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • chisquare_results (dict) –

    Chi-square results dictionary.

  • epoch_col (str, default: 'Epoch Number' ) –

    Column name for epochs.

  • cum_col (str, default: 'Cumulative Chi Square' ) –

    Column name for cumulative chi-square.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def plot_cumulative_chi_square(
    config: dict,
    chisquare_results: dict,
    epoch_col: str = "Epoch Number",
    cum_col: str = "Cumulative Chi Square",
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plot the cumulative chi-square statistic as a bar chart, one vertically
    stacked subplot per genotype.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
    chisquare_results : dict
        Mapping of genotype -> DataFrame with chi-square columns.
    epoch_col : str
        Column name for epochs.
    cum_col : str
        Column name for the cumulative chi-square.
    save_fig : bool
        Whether to save the figure under the project's figures directory.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.
    """
    genotype_names = list(chisquare_results.keys())
    n_rows = len(genotype_names)

    # One column, one row per genotype; squeeze=False keeps a 2-D axes array.
    fig, axes_grid = plt.subplots(n_rows, 1, figsize=(12, 5 * n_rows), squeeze=False)
    axes_flat = axes_grid.flatten()

    for ax, genotype in zip(axes_flat, genotype_names):
        df_geno = chisquare_results[genotype]

        sns.barplot(
            data=df_geno,
            x=epoch_col,
            y=cum_col,
            errorbar="se",
            palette="magma",
            ax=ax,
        )
        ax.set_title(f"{genotype}: Cumulative Chi-Square")
        ax.set_xlabel("Epochs")
        ax.set_ylabel("Cumulative Stat")

    # Drop unused axes (a no-op in this one-column layout, kept for safety).
    for extra_ax in axes_flat[n_rows:]:
        fig.delaxes(extra_ax)

    fig.suptitle("Cumulative Chi-Square by Genotype", fontsize=16)
    plt.tight_layout(rect=[0, 0, 1, 0.97])

    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "all_genotypes_cumulative_chi_square.pdf"
        fig.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    if show_fig:
        plt.show()

    if return_fig:
        return fig

Explore-Exploit Agent

compass_labyrinth.behavior.behavior_metrics.simulation_modeling.explore_exploit_agent

EXPLORATION-EXPLOITATION AGENT MODELING AND ANALYSIS Author: Shreya Bangera

trim_to_common_epochs

trim_to_common_epochs(
    df_results: DataFrame,
) -> pd.DataFrame

Trims the results dataframe to retain only the maximum number of epochs common across all sessions.

Parameters:

  • df_results (DataFrame) –

    The output of evaluate_agent_performance. - 'Session' (str): Column name indicating sessions. - 'Epoch_Number' (str): Column name indicating epoch/bin number.

Returns:

  • DataFrame

    Trimmed dataframe with only common epochs.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def trim_to_common_epochs(df_results: pd.DataFrame) -> pd.DataFrame:
    """
    Trim the results dataframe to the maximum epoch shared by every session.

    Parameters
    -----------
    df_results : pd.DataFrame
        The output of evaluate_agent_performance, with 'Session' and
        'Epoch Number' columns.

    Returns
    --------
    pd.DataFrame
        Dataframe restricted to epochs <= the largest epoch present in all
        sessions; the input (with normalized dtypes) if no epoch is shared.
    """
    trimmed = df_results.copy()

    # Normalize dtypes so set comparisons and the <= filter behave.
    trimmed["Session"] = trimmed["Session"].astype(int)
    trimmed["Epoch Number"] = trimmed["Epoch Number"].astype(int)

    # Intersect the epoch sets of every session.
    per_session_epochs = trimmed.groupby("Session")["Epoch Number"].apply(set)
    shared_epochs = set.intersection(*per_session_epochs)

    if not shared_epochs:
        print("Warning: No common epochs across sessions. Returning original dataframe.")
        return trimmed

    cutoff = max(shared_epochs)
    print(f" Max common epoch across all sessions: {cutoff}")

    # Keep only rows up to (and including) the shared cutoff epoch.
    return trimmed[trimmed["Epoch Number"] <= cutoff].reset_index(drop=True)

track_valid_and_optimal_transitions_EE

track_valid_and_optimal_transitions_EE(
    df: DataFrame, decision_label: str, reward_label: str
) -> tuple[dict, dict]

Tracks valid and optimal transitions for an exploration-exploitation agent.

Parameters:

  • df (DataFrame) –

    DataFrame containing maze navigation data.

  • decision_label (str) –

    Label indicating decision nodes.

  • reward_label (str) –

    Label indicating reward paths.

Returns:

  • valid_transitions ( dict ) –

    Dictionary of valid transitions per session.

  • optimal_transitions ( dict ) –

    Dictionary of optimal transitions per session.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/explore_exploit_agent.py
def track_valid_and_optimal_transitions_EE(
    df: pd.DataFrame,
    decision_label: str,
    reward_label: str,
) -> tuple[dict, dict]:
    """
    Tracks valid and optimal transitions for an exploration-exploitation agent.

    A transition is recorded at every row whose 'NodeType' equals
    ``decision_label``: the pair (current grid, next grid) is always valid,
    and is additionally optimal when the next row's 'Region' equals
    ``reward_label``.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing maze navigation data with 'Session', 'NodeType',
        'Grid Number' and 'Region' columns.
    decision_label : str
        Label indicating decision nodes.
    reward_label : str
        Label indicating reward paths.

    Returns
    --------
    valid_transitions : dict
        Per-session mapping of grid -> set of grids reached next.
    optimal_transitions : dict
        Per-session mapping of grid -> set of grids on the reward path.
    """
    valid_transitions, optimal_transitions = {}, {}

    for session, group in df.groupby("Session"):
        session_valid, session_optimal = {}, {}

        # Hoist column extraction out of the loop: repeated .iloc[i] row
        # access is expensive per call and dominated runtime on long sessions.
        node_types = group["NodeType"].tolist()
        grids = group["Grid Number"].tolist()
        regions = group["Region"].tolist()

        for i in range(len(group) - 1):
            if node_types[i] == decision_label:
                current = grids[i]
                next_grid = grids[i + 1]

                session_valid.setdefault(current, set()).add(next_grid)
                if regions[i + 1] == reward_label:
                    session_optimal.setdefault(current, set()).add(next_grid)

        valid_transitions[session] = session_valid
        optimal_transitions[session] = session_optimal

    return valid_transitions, optimal_transitions

simulate_exploration_agent_EE

simulate_exploration_agent_EE(
    segment: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    exploration_rate: float,
    n_simulations: int,
    decision_label: str,
) -> tuple[list, list]

Simulates the behavior of an exploration-exploitation agent.

Parameters:

  • segment (DataFrame) –

    DataFrame segment containing maze navigation data.

  • valid_dict (dict) –

    Dictionary of valid transitions.

  • optimal_dict (dict) –

    Dictionary of optimal transitions.

  • exploration_rate (float) –

    Probability of exploring a non-optimal path.

  • n_simulations (int) –

    Number of simulations to run per decision point.

  • decision_label (str) –

    Label indicating decision nodes.

Returns:

  • actual ( list ) –

    List of actual outcomes (1 for optimal, 0 for non-optimal).

  • simulated ( list ) –

    List of simulated outcomes (mean proportion of optimal choices).

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/explore_exploit_agent.py
def simulate_exploration_agent_EE(
    segment: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    exploration_rate: float,
    n_simulations: int,
    decision_label: str,
) -> tuple[list, list]:
    """
    Simulates the behavior of an exploration-exploitation agent.

    At each decision node the simulated agent exploits (picks a known optimal
    transition) with probability ``1 - exploration_rate`` and otherwise
    explores among all valid transitions.

    Parameters
    -----------
    segment : pd.DataFrame
        DataFrame segment with 'NodeType' and 'Grid Number' columns.
    valid_dict : dict
        Mapping of grid -> set of valid next grids.
    optimal_dict : dict
        Mapping of grid -> set of optimal (reward-path) next grids.
    exploration_rate : float
        Probability of exploring a non-optimal path.
    n_simulations : int
        Number of simulations to run per decision point.
    decision_label : str
        Label indicating decision nodes.

    Returns
    --------
    actual : list
        Actual outcomes per decision (1 for optimal, 0 for non-optimal).
    simulated : list
        Mean proportion of optimal simulated choices per decision; NaN when
        the current grid has no known valid transitions.
    """
    actual, simulated = [], []

    # Hoist column extraction: repeated .iloc[i] access is slow in a loop.
    node_types = segment["NodeType"].tolist()
    grids = segment["Grid Number"].tolist()

    for i in range(len(segment) - 1):
        if node_types[i] != decision_label:
            continue

        curr = grids[i]
        actual_next = grids[i + 1]
        optimal_set = optimal_dict.get(curr, set())

        actual.append(1 if actual_next in optimal_set else 0)

        trials = []
        if curr in valid_dict:
            can_exploit = curr in optimal_dict
            for _ in range(n_simulations):
                # random.random() is drawn first to keep the RNG stream
                # identical to the original short-circuit expression.
                if random.random() > exploration_rate and can_exploit:
                    choice = random.choice(list(optimal_dict[curr]))
                else:
                    choice = random.choice(list(valid_dict[curr]))
                trials.append(1 if choice in optimal_set else 0)

        # Previously np.mean([]) was evaluated for unknown grids, emitting a
        # RuntimeWarning and silently yielding NaN; make that case explicit.
        simulated.append(np.mean(trials) if trials else np.nan)

    return actual, simulated

calculate_segment_metrics_EE

calculate_segment_metrics_EE(
    segment: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    exploration_rate: float,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str,
) -> pd.Series

Calculates performance metrics for a segment of maze navigation data.

Parameters:

  • segment (DataFrame) –

    DataFrame segment containing maze navigation data.

  • valid_dict (dict) –

    Dictionary of valid transitions.

  • optimal_dict (dict) –

    Dictionary of optimal transitions.

  • exploration_rate (float) –

    Probability of exploring a non-optimal path.

  • n_bootstrap (int) –

    Number of bootstrap samples for confidence intervals.

  • n_simulations (int) –

    Number of simulations to run per decision point.

  • decision_label (str) –

    Label indicating decision nodes.

Returns:

  • Series

    Series containing calculated metrics.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/explore_exploit_agent.py
def calculate_segment_metrics_EE(
    segment: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    exploration_rate: float,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str,
) -> pd.Series:
    """
    Calculates performance metrics for a segment of maze navigation data.

    Bootstraps the per-decision actual and simulated outcomes to produce
    mean reward-path percentages with 90% (5th-95th percentile) confidence
    intervals, plus the actual/simulated performance ratio.

    Parameters
    -----------
    segment : pd.DataFrame
        DataFrame segment containing maze navigation data.
    valid_dict : dict
        Dictionary of valid transitions.
    optimal_dict : dict
        Dictionary of optimal transitions.
    exploration_rate : float
        Probability of exploring a non-optimal path.
    n_bootstrap : int
        Number of bootstrap samples for confidence intervals.
    n_simulations : int
        Number of simulations to run per decision point.
    decision_label : str
        Label indicating decision nodes.

    Returns
    --------
    pd.Series
        Series containing calculated metrics (all NaN when the segment is
        empty or contains no decision points).
    """
    metric_names = [
        "Actual Reward Path %",
        "Agent Reward Path %",
        "Actual Reward Path % CI Lower",
        "Actual Reward Path % CI Upper",
        "Agent Reward Path % CI Lower",
        "Agent Reward Path % CI Upper",
        "Relative Performance",
    ]

    def _empty_metrics() -> pd.Series:
        # Shared "no data" result; previously duplicated at both early exits.
        return pd.Series(dict.fromkeys(metric_names, np.nan))

    if segment.empty or decision_label not in segment["NodeType"].values:
        return _empty_metrics()

    actual, simulated = simulate_exploration_agent_EE(
        segment, valid_dict, optimal_dict, exploration_rate, n_simulations, decision_label
    )
    if not actual or not simulated:
        return _empty_metrics()

    def bootstrap(data):
        # n_bootstrap resampled datasets, each the same length as `data`.
        return np.random.choice(data, (n_bootstrap, len(data)), replace=True)

    actual_means = np.mean(bootstrap(actual), axis=1)
    agent_means = np.mean(bootstrap(simulated), axis=1)

    return pd.Series(
        {
            "Actual Reward Path %": np.mean(actual_means),
            "Agent Reward Path %": np.mean(agent_means),
            "Actual Reward Path % CI Lower": np.percentile(actual_means, 5),
            "Actual Reward Path % CI Upper": np.percentile(actual_means, 95),
            "Agent Reward Path % CI Lower": np.percentile(agent_means, 5),
            "Agent Reward Path % CI Upper": np.percentile(agent_means, 95),
            "Relative Performance": (
                np.mean(actual_means) / np.mean(agent_means) if np.mean(agent_means) > 0 else np.nan
            ),
        }
    )

split_sessions_into_segments_EE

split_sessions_into_segments_EE(
    df: DataFrame, segment_size: int
) -> list[tuple]
Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/explore_exploit_agent.py
def split_sessions_into_segments_EE(df: pd.DataFrame, segment_size: int) -> list[tuple]:
    """
    Split each (Genotype, Session) group into consecutive fixed-size segments.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame with 'Genotype' and 'Session' columns.
    segment_size : int
        Maximum number of rows per segment; the last segment of a group may
        be shorter.

    Returns
    --------
    list of tuple
        (session, segment_number, segment_dataframe) triples, with
        segment_number starting at 1 within each group.
    """
    segments = []
    for _, group in df.groupby(["Genotype", "Session"]):
        session = group["Session"].iloc[0]
        # Ceil division: the previous `len // size + 1` produced a spurious
        # trailing empty slice whenever len(group) was an exact multiple of
        # segment_size (it was then discarded by the empty check below).
        n_parts = math.ceil(len(group) / segment_size)
        for part_idx in range(n_parts):
            chunk = group.iloc[part_idx * segment_size : (part_idx + 1) * segment_size]
            if not chunk.empty:
                segments.append((session, part_idx + 1, chunk))
    return segments

run_exploration_agent_analysis_EE

run_exploration_agent_analysis_EE(
    df: DataFrame,
    exploration_rate: float,
    segment_size: int = 1000,
    n_bootstrap: int = 10000,
    n_simulations: int = 100,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    trim: bool = True,
) -> pd.DataFrame

Run exploration agent analysis on the given DataFrame.

Parameters:

  • df (DataFrame) –

    DataFrame containing maze navigation data.

  • exploration_rate (float) –

    Probability of exploring a non-optimal path.

  • segment_size (int, default: 1000 ) –

    Size of each segment for analysis (default is 1000).

  • n_bootstrap (int, default: 10000 ) –

    Number of bootstrap samples for confidence intervals (default is 10000).

  • n_simulations (int, default: 100 ) –

    Number of simulations to run per decision point (default is 100).

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label indicating decision nodes (default is "Decision (Reward)").

  • reward_label (str, default: 'reward_path' ) –

    Label indicating reward paths (default is "reward_path").

  • trim (bool, default: True ) –

    Whether to trim the DataFrame to common epochs (default is True).

Returns:

  • DataFrame

    DataFrame containing analysis results for each segment.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/explore_exploit_agent.py
def run_exploration_agent_analysis_EE(
    df: pd.DataFrame,
    exploration_rate: float,
    segment_size: int = 1000,
    n_bootstrap: int = 10000,
    n_simulations: int = 100,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    trim: bool = True,
) -> pd.DataFrame:
    """
    Run exploration agent analysis on the given DataFrame.

    Builds per-session transition dictionaries, splits sessions into
    fixed-size segments, and computes bootstrap performance metrics for
    each segment.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing maze navigation data.
    exploration_rate : float
        Probability of exploring a non-optimal path.
    segment_size : int, optional
        Size of each segment for analysis (default is 1000).
    n_bootstrap : int, optional
        Number of bootstrap samples for confidence intervals (default is 10000).
    n_simulations : int, optional
        Number of simulations to run per decision point (default is 100).
    decision_label : str, optional
        Label indicating decision nodes (default is "Decision (Reward)").
    reward_label : str, optional
        Label indicating reward paths (default is "reward_path").
    trim : bool, optional
        Whether to trim the DataFrame to common epochs (default is True).

    Returns
    --------
    pd.DataFrame
        DataFrame containing analysis results for each segment.
    """
    valid_dict, optimal_dict = track_valid_and_optimal_transitions_EE(df, decision_label, reward_label)

    rows = []
    for session, seg_num, segment in split_sessions_into_segments_EE(df, segment_size):
        row = calculate_segment_metrics_EE(
            segment,
            valid_dict.get(session, {}),
            optimal_dict.get(session, {}),
            exploration_rate,
            n_bootstrap,
            n_simulations,
            decision_label,
        )
        row["Session"] = session
        row["Epoch Number"] = seg_num
        rows.append(row)

    df_results = pd.DataFrame(rows)
    return trim_to_common_epochs(df_results) if trim else df_results

plot_exploration_rate_performance_EE

plot_exploration_rate_performance_EE(
    config: dict,
    df_source: DataFrame,
    exploration_rates: list[float],
    segment_size: int = 1000,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    trim: bool = True,
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plots the performance of an exploration-exploitation agent across varying exploration rates.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • df_source (DataFrame) –

    DataFrame containing maze navigation data.

  • exploration_rates (list of float) –

    List of exploration rates to evaluate.

  • segment_size (int, default: 1000 ) –

    Size of each segment for analysis (default is 1000).

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label indicating decision nodes (default is "Decision (Reward)").

  • reward_label (str, default: 'reward_path' ) –

    Label indicating reward paths (default is "reward_path").

  • trim (bool, default: True ) –

    Whether to trim the DataFrame to common epochs (default is True).

  • save_fig (bool, default: True ) –

    Whether to save the figure (default is True).

  • show_fig (bool, default: True ) –

    Whether to display the figure (default is True).

  • return_fig (bool, default: False ) –

    Whether to return the figure object (default is False).

Returns:

  • None or Figure

    Returns the figure if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/explore_exploit_agent.py
def plot_exploration_rate_performance_EE(
    config: dict,
    df_source: pd.DataFrame,
    exploration_rates: list[float],
    segment_size: int = 1000,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    trim: bool = True,
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plots the performance of an exploration-exploitation agent across varying exploration rates.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
    df_source : pd.DataFrame
        DataFrame containing maze navigation data.
    exploration_rates : list of float
        List of exploration rates to evaluate. Must be non-empty.
    segment_size : int, optional
        Size of each segment for analysis (default is 1000).
    decision_label : str, optional
        Label indicating decision nodes (default is "Decision (Reward)").
    reward_label : str, optional
        Label indicating reward paths (default is "reward_path").
    trim : bool, optional
        Whether to trim the DataFrame to common epochs (default is True).
    save_fig : bool, optional
        Whether to save the figure (default is True).
    show_fig : bool, optional
        Whether to display the figure (default is True).
    return_fig : bool, optional
        Whether to return the figure object (default is False).

    Returns
    --------
    None or plt.Figure
        Returns the figure if return_fig is True, otherwise None.

    Raises
    -------
    ValueError
        If `exploration_rates` is empty.
    """
    if not exploration_rates:
        raise ValueError("exploration_rates must contain at least one rate.")

    fig = plt.figure(figsize=(15, 8))

    result_df = None
    for er in exploration_rates:
        er_rounded = round(er, 1)
        print("Exploration rate = ", er_rounded, " being processed....")
        result_df = run_exploration_agent_analysis_EE(
            df_source,
            exploration_rate=er_rounded,
            segment_size=segment_size,
            decision_label=decision_label,
            reward_label=reward_label,
            trim=trim,
        )
        sns.lineplot(
            data=result_df,
            x="Epoch Number",
            y="Agent Reward Path %",
            linestyle="--",
            label=f"Agent (ER={er_rounded})",
        )

    # Plot actual (mouse) performance. The "Actual Reward Path %" column does
    # not depend on the exploration rate, so reuse the last loop result instead
    # of re-running the expensive analysis a second time as before.
    sns.lineplot(
        data=result_df,
        x="Epoch Number",
        y="Actual Reward Path %",
        marker="o",
        label="Mouse",
        color="black",
    )

    plt.xlabel("Epochs (in maze)")
    plt.ylabel("Proportion of Reward Path Transitions")
    plt.title("Mouse vs. Exploration Agent Reward Path Transition Proportion")
    plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left")
    plt.grid(True)
    plt.tight_layout()

    # Save figure
    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / "ee_agent.pdf"
        # Ensure the output directory exists before saving.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    # Show figure
    if show_fig:
        plt.show()

    # Return figure
    if return_fig:
        return fig

Multi-Agent Simulation

compass_labyrinth.behavior.behavior_metrics.simulation_modeling.multi_agent

MULTI-AGENT MODELING Author: Shreya Bangera Goal: ├── Simulated Agent, Binary Agent, 3/4 way Agent Modeling ├── Comparison across Agents

trim_to_common_epochs

trim_to_common_epochs(
    df_results: DataFrame,
) -> pd.DataFrame

Trims the results dataframe to retain only the maximum number of epochs common across all sessions.

Parameters:

  • df_results (DataFrame) –

    The output of evaluate_agent_performance. - 'Session' (str): Column name indicating sessions. - 'Epoch Number' (str): Column name indicating epoch/bin number.

Returns:

  • DataFrame

    Trimmed dataframe with only common epochs.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/simulated_agent.py
def trim_to_common_epochs(df_results: pd.DataFrame) -> pd.DataFrame:
    """
    Keep only the epochs that every session has in common.

    Finds the largest epoch number present in all sessions and drops rows with
    a higher epoch number, so downstream comparisons cover an identical epoch
    range for every session.

    Parameters
    -----------
    df_results : pd.DataFrame
        The output of evaluate_agent_performance.
            - 'Session' (int-like): column indicating sessions.
            - 'Epoch Number' (int-like): column indicating epoch/bin number.

    Returns
    --------
    pd.DataFrame
        Trimmed dataframe with only common epochs.
    """
    out = df_results.copy()

    # Normalize dtypes so the set intersection and comparison are consistent.
    out["Session"] = out["Session"].astype(int)
    out["Epoch Number"] = out["Epoch Number"].astype(int)

    # Intersect the per-session epoch sets to find epochs shared by all.
    per_session_epochs = out.groupby("Session")["Epoch Number"].apply(set)
    shared_epochs = set.intersection(*per_session_epochs)

    if not shared_epochs:
        print("Warning: No common epochs across sessions. Returning original dataframe.")
        return out

    cutoff = max(shared_epochs)
    print(f" Max common epoch across all sessions: {cutoff}")

    # Drop everything beyond the last epoch common to all sessions.
    return out[out["Epoch Number"] <= cutoff].reset_index(drop=True)

split_into_epochs_multi

split_into_epochs_multi(
    df: DataFrame, epoch_size: int
) -> list

Split the DataFrame into epochs of specified size for each session.

Parameters:

  • df (DataFrame) –

    DataFrame containing navigation data.

  • epoch_size (int) –

    Number of steps per epoch.

Returns:

  • list

    A list of tuples containing (session, epoch index, chunk DataFrame).

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def split_into_epochs_multi(df: pd.DataFrame, epoch_size: int) -> list:
    """
    Break each session's rows into consecutive fixed-size epochs.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing navigation data; must include a 'Session' column.
    epoch_size : int
        Number of steps per epoch.

    Returns
    --------
    list
        Tuples of (session, 1-based epoch index, chunk DataFrame). The final
        chunk of a session may be shorter than `epoch_size`.
    """
    pieces = []
    for session, session_rows in df.groupby("Session"):
        for start in range(0, len(session_rows), epoch_size):
            piece = session_rows.iloc[start : start + epoch_size]
            if len(piece):
                pieces.append((session, start // epoch_size + 1, piece))
    return pieces

track_valid_transitions_multi

track_valid_transitions_multi(
    df: DataFrame, decision_label: str, reward_label: str
) -> tuple[dict, dict]

Track valid and optimal transitions for each session.

Parameters:

  • df (DataFrame) –

    DataFrame containing navigation data.

  • decision_label (str) –

    Label for decision nodes.

  • reward_label (str) –

    Label for reward path regions.

Returns:

  • tuple[dict, dict]

    A tuple containing two dictionaries: - session_valid: Maps session to valid transitions. - session_optimal: Maps session to optimal transitions.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def track_valid_transitions_multi(
    df: pd.DataFrame,
    decision_label: str,
    reward_label: str,
) -> tuple[dict, dict]:
    """
    Track valid and optimal transitions for each session.

    For every decision node visited in a session, records which grid cells
    were actually stepped to next (valid transitions), and separately which of
    those steps landed on the reward path (optimal transitions).

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing navigation data.
    decision_label : str
        Label for decision nodes.
    reward_label : str
        Label for reward path regions.

    Returns
    --------
    tuple[dict, dict]
        A tuple containing two dictionaries:
        - session_valid: Maps session -> {grid -> set of next grids}.
        - session_optimal: Maps session -> {grid -> set of reward-path next grids}.
    """
    session_valid: dict = {}
    session_optimal: dict = {}

    for session, group in df.groupby("Session"):
        # Pull columns out once; positional lists are cheaper than repeated iloc.
        node_types = group["NodeType"].tolist()
        grids = group["Grid Number"].tolist()
        regions = group["Region"].tolist()

        valid: dict = {}
        optimal: dict = {}

        for idx in range(len(grids) - 1):
            if node_types[idx] != decision_label:
                continue
            src, dst = grids[idx], grids[idx + 1]
            valid.setdefault(src, set()).add(dst)
            if regions[idx + 1] == reward_label:
                optimal.setdefault(src, set()).add(dst)

        session_valid[session] = valid
        session_optimal[session] = optimal

    return session_valid, session_optimal

simulate_random_agent_multi

simulate_random_agent_multi(
    chunk: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    decision_label: str,
    n_simulations: int,
) -> tuple[list, list]

Simulate a random agent's performance over the given chunk of data.

Parameters:

  • chunk (DataFrame) –

    DataFrame chunk representing an epoch of navigation data.

  • valid_dict (dict) –

    Dictionary mapping current grid numbers to valid next grid numbers.

  • optimal_dict (dict) –

    Dictionary mapping current grid numbers to optimal next grid numbers.

  • decision_label (str) –

    Label for decision nodes.

  • n_simulations (int) –

    Number of simulations to run for estimating performance.

Returns:

  • tuple[list, list]

    A tuple containing two lists: - actual: List of actual performance (1 for optimal transition, 0 otherwise). - random_perf: List of average performance from random agent simulations.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def simulate_random_agent_multi(
    chunk: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    decision_label: str,
    n_simulations: int,
) -> tuple[list, list]:
    """
    Simulate a random agent's performance over the given chunk of data.

    For every decision node in the chunk, records whether the actual next step
    was optimal, and estimates a uniformly random agent's optimal-choice rate
    by sampling from the node's valid transitions.

    Parameters
    -----------
    chunk : pd.DataFrame
        DataFrame chunk representing an epoch of navigation data.
    valid_dict : dict
        Dictionary mapping current grid numbers to valid next grid numbers.
    optimal_dict : dict
        Dictionary mapping current grid numbers to optimal next grid numbers.
    decision_label : str
        Label for decision nodes.
    n_simulations : int
        Number of simulations to run for estimating performance.

    Returns
    --------
    tuple[list, list]
        A tuple containing two lists:
        - actual: List of actual performance (1 for optimal transition, 0 otherwise).
        - random_perf: List of average performance from random agent simulations
          (a decision node with no known valid transitions contributes no entry).
    """
    actual, random_perf = [], []

    for i in range(len(chunk) - 1):
        if chunk.iloc[i]["NodeType"] != decision_label:
            continue

        curr = chunk.iloc[i]["Grid Number"]
        next_actual = chunk.iloc[i + 1]["Grid Number"]
        optimal_set = optimal_dict.get(curr, set())

        actual.append(1 if next_actual in optimal_set else 0)

        # Hoist the membership test and list conversion out of the sampling
        # loop: the original rebuilt list(valid_dict[curr]) and re-checked
        # `curr in valid_dict` on every one of the n_simulations draws.
        if curr in valid_dict:
            options = list(valid_dict[curr])
            sim_choices = [
                1 if random.choice(options) in optimal_set else 0
                for _ in range(n_simulations)
            ]
            if sim_choices:
                random_perf.append(np.mean(sim_choices))

    return actual, random_perf

simulate_binary_agent_multi

simulate_binary_agent_multi(
    chunk: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    decision_label: str,
    n_simulations: int,
) -> list

Simulate a binary agent's performance over the given chunk of data.

Parameters:

  • chunk (DataFrame) –

    DataFrame chunk representing an epoch of navigation data.

  • valid_dict (dict) –

    Dictionary mapping current grid numbers to valid next grid numbers.

  • optimal_dict (dict) –

    Dictionary mapping current grid numbers to optimal next grid numbers.

  • decision_label (str) –

    Label for decision nodes.

  • n_simulations (int) –

    Number of simulations to run for estimating performance.

Returns:

  • list

    A list containing the average performance of the binary agent simulations.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def simulate_binary_agent_multi(
    chunk: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    decision_label: str,
    n_simulations: int,
) -> list:
    """
    Simulate a binary agent's performance over the given chunk of data.

    At every decision node the agent is offered a two-option menu — one
    optimal and one suboptimal transition when both exist, otherwise two
    randomly drawn valid transitions — and picks uniformly between them.

    Parameters
    -----------
    chunk : pd.DataFrame
        DataFrame chunk representing an epoch of navigation data.
    valid_dict : dict
        Dictionary mapping current grid numbers to valid next grid numbers.
    optimal_dict : dict
        Dictionary mapping current grid numbers to optimal next grid numbers.
    decision_label : str
        Label for decision nodes.
    n_simulations : int
        Number of simulations to run for estimating performance.

    Returns
    --------
    list
        Average optimal-choice rate per decision node; nodes with fewer than
        two valid transitions are skipped.
    """
    binary_perf = []

    for i in range(len(chunk) - 1):
        if chunk.iloc[i]["NodeType"] != decision_label:
            continue

        curr = chunk.iloc[i]["Grid Number"]
        choices = list(valid_dict.get(curr, []))
        optimal = [c for c in choices if c in optimal_dict.get(curr, set())]
        suboptimal = [c for c in choices if c not in optimal]

        # Build the two-option menu, or skip nodes that cannot offer one.
        if optimal and suboptimal:
            menu = [optimal[0], suboptimal[0]]
        elif len(choices) >= 2:
            menu = random.sample(choices, 2)
        else:
            continue

        outcomes = [int(random.choice(menu) in optimal) for _ in range(n_simulations)]
        binary_perf.append(np.mean(outcomes))

    return binary_perf

simulate_multiway_agent_multi

simulate_multiway_agent_multi(
    chunk: DataFrame,
    decision_label: str,
    three_nodes: list,
    four_nodes: list,
    n_simulations: int,
) -> list

Simulate a multiway agent's performance over the given chunk of data.

Parameters:

  • chunk (DataFrame) –

    DataFrame chunk representing an epoch of navigation data.

  • decision_label (str) –

    Label for decision nodes.

  • three_nodes (list) –

    List of grid numbers for three-way decision nodes.

  • four_nodes (list) –

    List of grid numbers for four-way decision nodes.

  • n_simulations (int) –

    Number of simulations to run for estimating performance.

Returns:

  • list

    A list containing the average performance of the multiway agent simulations.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def simulate_multiway_agent_multi(
    chunk: pd.DataFrame,
    decision_label: str,
    three_nodes: list,
    four_nodes: list,
    n_simulations: int,
) -> list:
    """
    Simulate a multiway agent's performance over the given chunk of data.

    The agent succeeds with probability 1/3 at three-way decision nodes and
    1/4 at four-way nodes; decision nodes in neither list are skipped.

    Parameters
    -----------
    chunk : pd.DataFrame
        DataFrame chunk representing an epoch of navigation data.
    decision_label : str
        Label for decision nodes.
    three_nodes : list
        List of grid numbers for three-way decision nodes.
    four_nodes : list
        List of grid numbers for four-way decision nodes.
    n_simulations : int
        Number of simulations to run for estimating performance.

    Returns
    --------
    list
        Average success rate of the multiway agent per decision node.
    """
    perf = []

    for i in range(len(chunk) - 1):
        if chunk.iloc[i]["NodeType"] != decision_label:
            continue

        grid = chunk.iloc[i]["Grid Number"]
        if grid in three_nodes:
            success_prob = 1 / 3
        elif grid in four_nodes:
            success_prob = 1 / 4
        else:
            continue

        hits = [int(random.random() < success_prob) for _ in range(n_simulations)]
        perf.append(np.mean(hits))

    return perf

bootstrap_means_multi

bootstrap_means_multi(data, n)
Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def bootstrap_means_multi(data, n: int) -> np.ndarray:
    """
    Draw bootstrap resamples of `data` and return the mean of each resample.

    Parameters
    -----------
    data : array-like
        1-D sequence of numeric observations to resample from.
    n : int
        Number of bootstrap resamples to draw.

    Returns
    --------
    np.ndarray
        Array of shape (n,) with one mean per bootstrap resample.
    """
    # Each row is a with-replacement resample the same length as `data`.
    return np.mean(np.random.choice(data, (n, len(data)), replace=True), axis=1)

evaluate_epoch_multi

evaluate_epoch_multi(
    chunk: DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    decision_label: str,
    three_nodes: list,
    four_nodes: list,
    n_bootstrap: int,
    n_simulations: int,
) -> pd.Series

Evaluate performance metrics for all agent types over a given epoch chunk.

Parameters:

  • chunk (DataFrame) –

    DataFrame chunk representing an epoch of navigation data.

  • valid_dict (dict) –

    Dictionary mapping current grid numbers to valid next grid numbers.

  • optimal_dict (dict) –

    Dictionary mapping current grid numbers to optimal next grid numbers.

  • decision_label (str) –

    Label for decision nodes.

  • three_nodes (list) –

    List of grid numbers for three-way decision nodes.

  • four_nodes (list) –

    List of grid numbers for four-way decision nodes.

  • n_bootstrap (int) –

    Number of bootstrap samples for confidence intervals.

  • n_simulations (int) –

    Number of simulations for agent performance.

Returns:

  • Series

    Series containing performance metrics for the epoch.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def evaluate_epoch_multi(
    chunk: pd.DataFrame,
    valid_dict: dict,
    optimal_dict: dict,
    decision_label: str,
    three_nodes: list,
    four_nodes: list,
    n_bootstrap: int,
    n_simulations: int,
) -> pd.Series:
    """
    Evaluate performance metrics for all agent types over a given epoch chunk.

    Runs the random, binary, and three/four-way agent simulations on the
    chunk, bootstraps each performance series, and reports means, 5th/95th
    percentile confidence bounds, and relative mouse-vs-agent performance.

    Parameters
    -----------
    chunk : pd.DataFrame
        DataFrame chunk representing an epoch of navigation data.
    valid_dict : dict
        Dictionary mapping current grid numbers to valid next grid numbers.
    optimal_dict : dict
        Dictionary mapping current grid numbers to optimal next grid numbers.
    decision_label : str
        Label for decision nodes.
    three_nodes : list
        List of grid numbers for three-way decision nodes.
    four_nodes : list
        List of grid numbers for four-way decision nodes.
    n_bootstrap : int
        Number of bootstrap samples for confidence intervals.
    n_simulations : int
        Number of simulations for agent performance.

    Returns
    --------
    pd.Series
        Series of performance metrics for the epoch; empty when the chunk has
        no decision nodes or any agent produced no measurements.
    """
    # Nothing to evaluate without at least one decision node in the chunk.
    if chunk.empty or decision_label not in chunk["NodeType"].values:
        return pd.Series(dtype="float64")

    actual, random_perf = simulate_random_agent_multi(
        chunk=chunk,
        valid_dict=valid_dict,
        optimal_dict=optimal_dict,
        decision_label=decision_label,
        n_simulations=n_simulations,
    )
    binary_perf = simulate_binary_agent_multi(
        chunk=chunk,
        valid_dict=valid_dict,
        optimal_dict=optimal_dict,
        decision_label=decision_label,
        n_simulations=n_simulations,
    )
    multiway_perf = simulate_multiway_agent_multi(
        chunk=chunk,
        decision_label=decision_label,
        three_nodes=three_nodes,
        four_nodes=four_nodes,
        n_simulations=n_simulations,
    )

    # Bail out unless every agent produced at least one measurement.
    if not (actual and random_perf and binary_perf and multiway_perf):
        return pd.Series(dtype="float64")

    # Bootstrap each performance series; keyed by agent display name.
    boots = {
        "Actual": bootstrap_means_multi(actual, n_bootstrap),
        "Random Agent": bootstrap_means_multi(random_perf, n_bootstrap),
        "Binary Agent": bootstrap_means_multi(binary_perf, n_bootstrap),
        "Three/Four Way Agent": bootstrap_means_multi(multiway_perf, n_bootstrap),
    }

    # Means first, then the 5%/95% CI bounds, matching the historical key order.
    metrics = {f"{label} Reward Path %": boot.mean() for label, boot in boots.items()}
    for label, boot in boots.items():
        metrics[f"{label} Reward Path % CI Lower"] = np.percentile(boot, 5)
        metrics[f"{label} Reward Path % CI Upper"] = np.percentile(boot, 95)

    actual_mean = boots["Actual"].mean()
    random_mean = boots["Random Agent"].mean()
    binary_mean = boots["Binary Agent"].mean()
    metrics["Relative Performance (Actual/Random)"] = (
        actual_mean / random_mean if random_mean > 0 else np.nan
    )
    metrics["Relative Performance (Actual/Binary)"] = (
        actual_mean / binary_mean if binary_mean > 0 else np.nan
    )

    return pd.Series(metrics)

evaluate_agent_performance_multi

evaluate_agent_performance_multi(
    df: DataFrame,
    epoch_size: int,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    trim: bool = True,
    three_nodes: list | None = None,
    four_nodes: list | None = None,
) -> pd.DataFrame

Evaluate the performance of different agent types over multiple epochs.

Parameters:

  • df (DataFrame) –

    DataFrame containing navigation data.

  • epoch_size (int) –

    Number of steps per epoch.

  • n_bootstrap (int) –

    Number of bootstrap samples for confidence intervals.

  • n_simulations (int) –

    Number of simulations for agent performance.

  • decision_label (str, default: 'Decision (Reward)' ) –

    Label for decision nodes.

  • reward_label (str, default: 'reward_path' ) –

    Label for reward path regions.

  • trim (bool, default: True ) –

    Whether to trim results to common epochs across sessions.

  • three_nodes (list, default: None ) –

    List of grid numbers for three-way decision nodes.

  • four_nodes (list, default: None ) –

    List of grid numbers for four-way decision nodes.

Returns:

  • DataFrame

    DataFrame with performance metrics for each epoch.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def evaluate_agent_performance_multi(
    df: pd.DataFrame,
    epoch_size: int,
    n_bootstrap: int,
    n_simulations: int,
    decision_label: str = "Decision (Reward)",
    reward_label: str = "reward_path",
    trim: bool = True,
    three_nodes: list | None = None,
    four_nodes: list | None = None,
) -> pd.DataFrame:
    """
    Evaluate the performance of different agent types over multiple epochs.

    Builds per-session transition dictionaries, splits each session into
    fixed-size epochs, and scores every epoch with evaluate_epoch_multi.

    Parameters
    -----------
    df : pd.DataFrame
        DataFrame containing navigation data.
    epoch_size : int
        Number of steps per epoch.
    n_bootstrap : int
        Number of bootstrap samples for confidence intervals.
    n_simulations : int
        Number of simulations for agent performance.
    decision_label : str
        Label for decision nodes.
    reward_label : str
        Label for reward path regions.
    trim : bool
        Whether to trim results to common epochs across sessions.
    three_nodes : list, optional
        Grid numbers for three-way decision nodes; defaults to the standard
        labyrinth layout when None.
    four_nodes : list, optional
        Grid numbers for four-way decision nodes; defaults to the standard
        labyrinth layout when None.

    Returns
    --------
    pd.DataFrame
        DataFrame with performance metrics for each epoch.
    """
    # Default decision-node layouts for the standard labyrinth grid.
    if three_nodes is None:
        three_nodes = [20, 17, 39, 51, 63, 60, 77, 89, 115, 114, 110, 109, 98]
    if four_nodes is None:
        four_nodes = [32, 14]

    valid_by_session, optimal_by_session = track_valid_transitions_multi(
        df,
        decision_label,
        reward_label,
    )

    rows = []
    for session, epoch_idx, chunk in split_into_epochs_multi(df, epoch_size):
        metrics = evaluate_epoch_multi(
            chunk=chunk,
            valid_dict=valid_by_session.get(session, {}),
            optimal_dict=optimal_by_session.get(session, {}),
            decision_label=decision_label,
            three_nodes=three_nodes,
            four_nodes=four_nodes,
            n_bootstrap=n_bootstrap,
            n_simulations=n_simulations,
        )
        metrics["Session"] = int(session)
        metrics["Epoch Number"] = int(epoch_idx)
        rows.append(metrics)

    results = pd.DataFrame(rows)
    return trim_to_common_epochs(results) if trim else results

plot_agent_vs_mouse_performance_multi

plot_agent_vs_mouse_performance_multi(
    config: dict,
    df_metrics: DataFrame,
    cohort_metadata: DataFrame,
    genotype: str,
    figsize: tuple = (12, 6),
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plot actual vs. simulated agent reward path performance across epochs for a specified genotype.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • df_metrics (DataFrame) –

    Output from evaluate_agent_performance_multi().

  • cohort_metadata (DataFrame) –

    Metadata mapping sessions to genotypes.

  • genotype (str) –

    Genotype to filter (e.g., 'WT-WT').

  • figsize (tuple, default: (12, 6) ) –

    Size of the plot.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def plot_agent_vs_mouse_performance_multi(
    config: dict,
    df_metrics: pd.DataFrame,
    cohort_metadata: pd.DataFrame,
    genotype: str,
    figsize: tuple = (12, 6),
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plot actual vs. simulated agent reward path performance across epochs for a specified genotype.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
    df_metrics : pd.DataFrame
        Output from evaluate_agent_performance_multi().
    cohort_metadata : pd.DataFrame
        Metadata mapping sessions to genotypes.
    genotype : str
        Genotype to filter (e.g., 'WT-WT').
    figsize : tuple
        Size of the plot.
    save_fig : bool
        Whether to save the figure.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.
    """
    # --- Constants ---
    x_col = "Epoch Number"
    title = "Mouse vs. Agent Reward Path Transition Proportion"

    # --- Filter sessions by genotype ---
    sessions_reqd = cohort_metadata.loc[cohort_metadata.Genotype == genotype, "Session #"].unique()
    df_filtered = df_metrics[df_metrics["Session"].isin(sessions_reqd)].copy()

    # --- Plot ---
    fig = plt.figure(figsize=figsize)

    # Mouse trace (solid, markers); one dashed trace per simulated agent.
    sns.lineplot(
        data=df_filtered,
        x=x_col,
        y="Actual Reward Path %",
        marker="o",
        label="Mouse",
        color="black",
    )
    agent_traces = [
        ("Random Agent Reward Path %", "Random Agent", "navy"),
        ("Binary Agent Reward Path %", "Binary Agent", "green"),
        ("Three/Four Way Agent Reward Path %", "Three/Four Way Agent", "maroon"),
    ]
    for y_col, label, color in agent_traces:
        sns.lineplot(
            data=df_filtered,
            x=x_col,
            y=y_col,
            linestyle="dashed",
            label=label,
            color=color,
        )

    plt.xlabel("Epochs (in maze)", fontsize=12)
    plt.ylabel("Proportion of Reward Path Transitions", fontsize=12)
    plt.title(title, fontsize=14, fontweight="bold")
    plt.grid(True)
    plt.legend(
        loc="center left",
        bbox_to_anchor=(1.02, 0.5),
        frameon=False,
        title="Agent",
    )
    plt.tight_layout()

    # Save figure
    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / f"{genotype}_multiple_agent.pdf"
        # Ensure the output directory exists before saving (savefig does not create it).
        save_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    # Show figure
    if show_fig:
        plt.show()

    # Return figure
    if return_fig:
        return fig

plot_cumulative_agent_comparison_boxplot_multi

plot_cumulative_agent_comparison_boxplot_multi(
    config: dict,
    df_metrics: DataFrame,
    cohort_metadata: DataFrame,
    genotype: str,
    figsize: tuple = (10, 6),
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure

Plots a boxplot comparing the cumulative reward path transition percentage across all sessions for the specified genotype for mouse and simulated agents.

Parameters:

  • config (dict) –

    Configuration dictionary containing project settings.

  • df_metrics (DataFrame) –

    Output from evaluate_agent_performance_multi().

  • cohort_metadata (DataFrame) –

    Metadata mapping sessions to genotypes.

  • genotype (str) –

    Genotype to filter (e.g., 'WT-WT').

  • figsize (tuple, default: (10, 6) ) –

    Size of the plot.

  • save_fig (bool, default: True ) –

    Whether to save the figure.

  • show_fig (bool, default: True ) –

    Whether to display the figure.

  • return_fig (bool, default: False ) –

    Whether to return the figure object.

Returns:

  • Figure or None

    The figure object if return_fig is True, otherwise None.

Source code in src/compass_labyrinth/behavior/behavior_metrics/simulation_modeling/multi_agent.py
def plot_cumulative_agent_comparison_boxplot_multi(
    config: dict,
    df_metrics: pd.DataFrame,
    cohort_metadata: pd.DataFrame,
    genotype: str,
    figsize: tuple = (10, 6),
    save_fig: bool = True,
    show_fig: bool = True,
    return_fig: bool = False,
) -> None | plt.Figure:
    """
    Plots a boxplot comparing the cumulative reward path transition percentage
    across all sessions for the specified genotype for mouse and simulated agents.

    Parameters
    -----------
    config : dict
        Configuration dictionary containing project settings.
        Must provide "project_path_full" when save_fig is True.
    df_metrics : pd.DataFrame
        Output from evaluate_agent_performance_multi().
    cohort_metadata : pd.DataFrame
        Metadata mapping sessions to genotypes. Expected columns:
        "Genotype" and "Session #".
    genotype : str
        Genotype to filter (e.g., 'WT-WT').
    figsize : tuple
        Size of the plot.
    save_fig : bool
        Whether to save the figure.
    show_fig : bool
        Whether to display the figure.
    return_fig : bool
        Whether to return the figure object.

    Returns
    --------
    plt.Figure or None
        The figure object if return_fig is True, otherwise None.
    """
    # --- Constants: display label -> metric column produced upstream ---
    metric_cols = {
        "Mouse": "Actual Reward Path %",
        "Random Agent": "Random Agent Reward Path %",
        "Binary Agent": "Binary Agent Reward Path %",
        "3/4-Way Agent": "Three/Four Way Agent Reward Path %",
    }

    # --- Filter sessions for the genotype ---
    sessions_reqd = cohort_metadata.loc[cohort_metadata.Genotype == genotype, "Session #"].unique()
    df_filtered = df_metrics[df_metrics["Session"].isin(sessions_reqd)].copy()

    # --- Aggregate to session level (mean across epochs) ---
    df_agg = df_filtered.groupby("Session")[list(metric_cols.values())].mean().reset_index()

    # --- Melt to long form for plotting; map column names back to agent labels ---
    df_melt = df_agg.melt(id_vars="Session", var_name="Agent", value_name="Reward Path %")
    df_melt["Agent"] = df_melt["Agent"].map({v: k for k, v in metric_cols.items()})

    # --- Plot ---
    fig = plt.figure(figsize=figsize)
    # hue=x with legend=False: seaborn >= 0.13 deprecates passing `palette`
    # without `hue`; this keeps the per-category coloring without a legend.
    sns.boxplot(
        data=df_melt,
        x="Agent",
        y="Reward Path %",
        hue="Agent",
        palette="Set2",
        legend=False,
    )
    # Overlay individual session points on the boxes.
    sns.stripplot(
        data=df_melt,
        x="Agent",
        y="Reward Path %",
        color="black",
        size=4,
        jitter=True,
        alpha=0.6,
    )

    plt.title(
        f"Cumulative Reward Path Transition % across Sessions\nGenotype: {genotype}", fontsize=14, fontweight="bold"
    )
    plt.ylabel("Mean Reward Path Transition %")
    plt.xlabel("")
    # NOTE: values are proportions in [0, 1] despite the "%" label upstream.
    plt.ylim(0, 1)
    plt.grid(axis="y", linestyle="--", alpha=0.7)
    plt.tight_layout()

    # Save figure
    if save_fig:
        save_path = Path(config["project_path_full"]) / "figures" / f"{genotype}_cumulative_multiple_agent.pdf"
        # Ensure the figures directory exists; plt.savefig does not create it.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(save_path, bbox_inches="tight", dpi=300)
        print(f"Figure saved at: {save_path}")

    # Show figure
    if show_fig:
        plt.show()

    # Return figure
    if return_fig:
        return fig