MACESettings#

pydantic model autoplex.settings.MACESettings[source]#

Model describing the hyperparameters for the MACE fits.
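
The model can be instantiated with keyword arguments to override individual hyperparameters. A minimal sketch, assuming a pydantic v2 model (so `model_dump` is available); the specific values are illustrative, not recommendations:

```python
from autoplex.settings import MACESettings

# Override a few hyperparameters; every other field keeps the default listed below.
settings = MACESettings(
    device="cuda",       # train on GPU instead of the default "cpu"
    r_max=6.0,           # larger radial cutoff
    max_num_epochs=500,
    loss="huber",
)

# Inspect the resulting hyperparameter set as a plain dictionary (pydantic v2 API).
print(settings.model_dump())
```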

field model: Literal['BOTNet', 'MACE', 'ScaleShiftMACE', 'ScaleShiftBOTNet', 'AtomicDipolesMACE', 'EnergyDipolesMACE'] = 'MACE'#

Type of the model

field name: str = 'MACE_model'#

Experiment name

field amsgrad: bool = True#

Use the AMSGrad variant of the optimizer

field batch_size: int = 10#

Batch size

field compute_avg_num_neighbors: bool | Literal['yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'] = True#

Compute average number of neighbors

field compute_forces: bool | Literal['yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'] = True#

Compute forces

field config_type_weights: str = "{'Default':1.0}"#

String representation of a dictionary containing the weights for each config type
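
The value is passed as a Python-dict-style string rather than a dict. A sketch; the extra config type name below is hypothetical and would need to match a config_type label present in the training data:

```python
from autoplex.settings import MACESettings

# Default: all configuration types are weighted equally.
settings = MACESettings(config_type_weights="{'Default': 1.0}")

# Hypothetical example: down-weight an additional config type (the name 'phonon' is illustrative).
settings = MACESettings(config_type_weights="{'Default': 1.0, 'phonon': 0.5}")
```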

field compute_stress: bool | Literal['yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'] = False#

Compute stress

field compute_statistics: bool = False#

Compute statistics

field correlation: int = 3#

Correlation order at each layer

field default_dtype: Literal['float32', 'float64'] = 'float32'#

Default data type

field device: Literal['cpu', 'cuda', 'mps', 'xpu'] = 'cpu'#

Device to be used for model fitting

field distributed: bool = False#

Train in multi-GPU data parallel mode

field energy_weight: float = 1.0#

Weight for the energy loss

field ema: bool = True#

Whether to use an exponential moving average (EMA) of the model parameters

field ema_decay: float = 0.99#

Exponential moving average decay

field E0s: str | None = None#

Dictionary of isolated atom energies
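
In the MACE CLI this option is conventionally a dict-style string mapping atomic numbers to isolated-atom energies, or the keyword `'average'`; assuming the same convention applies here, a sketch with placeholder values:

```python
from autoplex.settings import MACESettings

# Placeholder isolated-atom energies in eV, keyed by atomic number (not real data).
settings = MACESettings(E0s="{3: -0.30, 17: -0.25}")

# Alternatively, let MACE estimate the E0s by least squares over the training set.
settings = MACESettings(E0s="average")
```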

field forces_weight: float = 100.0#

Weight for the forces loss

field foundation_filter_elements: bool | Literal['yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'] = True#

Filter elements during fine-tuning

field foundation_model: str | None = None#

Path to the foundation model for fine-tuning

field foundation_model_readout: bool = True#

Use the readout of the foundation model for fine-tuning

field keep_checkpoint: bool = False#

Keep all checkpoints

field keep_isolated_atoms: bool | Literal['yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'] = False#

Keep isolated atoms in the dataset; useful for fine-tuning

field hidden_irreps: str = '128x0e + 128x1o'#

Hidden irreps
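
The irreps string sets the number and symmetry of hidden channels per layer and is the main knob for model size. A sketch with illustrative values:

```python
from autoplex.settings import MACESettings

# Default: 128 invariant (0e) and 128 equivariant (1o) channels per layer.
settings = MACESettings(hidden_irreps="128x0e + 128x1o")

# Smaller, invariants-only model: faster to train and evaluate, usually less accurate.
settings = MACESettings(hidden_irreps="64x0e")
```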

field loss: Literal['ef', 'weighted', 'forces_only', 'virials', 'stress', 'dipole', 'huber', 'universal', 'energy_forces_dipole'] = 'huber'#

Loss function

field lr: float = 0.001#

Learning rate

field multiheads_finetuning: bool | Literal['yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'] = False#

Use multihead fine-tuning

field max_num_epochs: int = 1500#

Maximum number of epochs

field pair_repulsion: bool = False#

Use pair repulsion term with ZBL potential

field patience: int = 2048#

Maximum number of consecutive epochs of increasing loss

field r_max: float = 5.0#

Radial cutoff distance

field restart_latest: bool = False#

Whether to restart training from the latest checkpoint

field seed: int = 123#

Seed for the random number generator

field save_cpu: bool = True#

Save the model so that it can be loaded on CPU

field save_all_checkpoints: bool = False#

Save all checkpoints

field scaling: Literal['std_scaling', 'rms_forces_scaling', 'no_scaling'] = 'rms_forces_scaling'#

Type of scaling applied to the model outputs

field stress_weight: float = 1.0#

Weight for the stress loss

field start_swa: int = 1200 (alias 'start_stage_two')#

Epoch at which Stage Two (SWA) starts

field swa: bool = True (alias 'stage_two')#

Use the Stage Two loss weights; this decreases the learning rate and increases the energy weight towards the end of training
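
Because `start_swa` and `swa` carry aliases, pydantic by default expects the alias names at construction time (unless `populate_by_name` is enabled on the model). A sketch assuming that default behaviour:

```python
from autoplex.settings import MACESettings

# Enable Stage Two and start it at epoch 1000, using the field aliases.
settings = MACESettings(stage_two=True, start_stage_two=1000)

# Attribute access still uses the field names.
print(settings.swa, settings.start_swa)
```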

field valid_batch_size: int = 10#

Validation batch size

field virials_weight: float = 1.0#

Weight for the virials loss

field wandb: bool = False#

Use Weights and Biases for logging
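
To pass the settings on to a fitting routine or command line, pydantic's `model_dump` exports them as a dictionary, and `by_alias=True` keeps the MACE-style names (`stage_two`, `start_stage_two`) shown in parentheses above. A sketch, assuming pydantic v2:

```python
from autoplex.settings import MACESettings

settings = MACESettings()

# Keys use the field names (swa, start_swa, ...).
params = settings.model_dump()

# Keys use the aliases (stage_two, start_stage_two, ...).
params_by_alias = settings.model_dump(by_alias=True)
```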