This package is a plugin of MatchCake. The main goal of this package is to implement optimization pipelines and torch-modules that are used in MatchCake's projects.
If you'd like to request a new feature, dataset, or module, or if you have any questions or documentation requests, feel free to open a new issue on the repository's GitHub issue tracker.
With python and pip installed,
pip install git+https://github.com/MatchCake/MatchCake-Opt

With uv installed,

uv add "git+https://github.com/MatchCake/MatchCake-Opt"

To install the package with cu128 (CUDA), add --extra cu128 to the installation commands above.
With python and pip installed, run the following commands to install the dependencies:
python -m venv .venv
source .venv/bin/activate
pip install uv
uv sync --dev

With uv installed, run the following commands to install the dependencies:
uv venv .venv
uv sync --dev

If you'd like to contribute to this repository, please do so by submitting pull requests to the dev branch. Thank you!
from typing import Optional
import numpy as np
import torch
from ax import ChoiceParameterConfig, RangeParameterConfig
from matchcake_opt.datasets import *
from matchcake_opt.modules.classification_model import ClassificationModel
from matchcake_opt.tr_pipeline.automl_pipeline import AutoMLPipeline
class LinearNN(ClassificationModel):
    """A minimal fully-connected classifier: flatten -> hidden linear -> ReLU -> output.

    The two tunable hyperparameters (``learning_rate`` and ``n_neurons``) are
    declared in ``HP_CONFIGS`` so the AutoML pipeline can search over them.
    """

    MODEL_NAME = "LinearNN"
    HP_CONFIGS = [
        RangeParameterConfig(
            name="learning_rate",
            parameter_type="float",
            bounds=(1e-5, 0.1),
        ),
        RangeParameterConfig(
            name="n_neurons",
            parameter_type="int",
            bounds=(4, 2048),
        ),
    ]

    def __init__(
        self,
        input_shape: Optional[tuple[int, ...]],
        output_shape: Optional[tuple[int, ...]],
        learning_rate: float = 2e-4,
        n_neurons: int = 128,
        **kwargs,
    ):
        """Build the two-layer network.

        :param input_shape: Shape of one input sample (may be ``None``; the
            linear layers are lazy and infer their input size on first use).
        :param output_shape: Shape of the model output; its element product
            determines the number of output units.
        :param learning_rate: Learning rate forwarded to the base class.
        :param n_neurons: Width of the hidden layer.
        :param kwargs: Extra keyword arguments forwarded to the base class.
        """
        super().__init__(
            input_shape=input_shape,
            output_shape=output_shape,
            learning_rate=learning_rate,
            **kwargs,
        )
        self.save_hyperparameters("learning_rate", "n_neurons")
        # LazyLinear infers in_features on the first forward pass, so no
        # explicit input size is needed here.
        layers = [
            torch.nn.Flatten(),
            torch.nn.LazyLinear(n_neurons),
            torch.nn.ReLU(),
            torch.nn.LazyLinear(self.output_size),
        ]
        self.nn = torch.nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run a batch through the network and return the raw outputs."""
        return self.nn(x)

    @property
    def output_size(self):
        # Total number of output units: the product of all output dimensions.
        return int(np.prod(self.output_shape))
# Build the datamodule for the "Digits2D" dataset, using cross-validation fold 0.
datamodule = DataModule.from_dataset_name("Digits2D", fold_id=0)
# Run the AutoML pipeline on LinearNN — presumably searching over the
# hyperparameters declared in LinearNN.HP_CONFIGS; confirm against the
# AutoMLPipeline documentation.
automl_pipeline = AutoMLPipeline(model_cls=LinearNN, datamodule=datamodule)
automl_pipeline.run()
# Re-run the best configuration found; returns the fitted lightning pipeline
# and its validation metrics.
lt_pipeline, metrics = automl_pipeline.run_best_pipeline()
print("⚡" * 20, "\nValidation Metrics:\n", metrics, "\n", "⚡" * 20)
# Evaluate the best pipeline on the test split.
test_metrics = lt_pipeline.run_test()
print("⚡" * 20, "\nTest Metrics:\n", test_metrics, "\n", "⚡" * 20)

For more detailed examples see
- notebooks/ligthning_pipeline_tutorial.ipynb for an introduction to the lightning pipeline.
- notebooks/automl_pipeline_tutorial.ipynb for an introduction to the AutoML pipeline.
@INPROCEEDINGS{10821385,
author={Gince, Jérémie and Pagé, Jean-Michel and Armenta, Marco and Sarkar, Ayana and Kourtis, Stefanos},
booktitle={2024 IEEE International Conference on Quantum Computing and Engineering (QCE)},
title={Fermionic Machine Learning},
year={2024},
volume={01},
number={},
pages={1672-1678},
keywords={Runtime;Quantum entanglement;Computational modeling;Benchmark testing;Rendering (computer graphics);Hardware;Kernel;Integrated circuit modeling;Quantum circuit;Standards;Quantum machine learning;quantum kernel methods;matchgate circuits;fermionic quantum computation;data classification},
doi={10.1109/QCE60285.2024.00195}
}