Evaluate a trained slot-attention-type model.
EvaluationConfig
dataclass
Configuration for evaluation.
Source code in ocl/cli/eval.py
@dataclasses.dataclass
class EvaluationConfig:
    """Configuration for evaluation."""

    # Path to training configuration file or configuration dir. If a dir, train_config_name
    # needs to be set as well.
    train_config_path: str
    train_config_overrides: Optional[List[str]] = None
    train_config_name: Optional[str] = None
    checkpoint_path: Optional[str] = None
    output_dir: Optional[str] = None
    report_filename: str = "metrics.json"
    # Setting this allows adding modules to the model that are executed during evaluation.
    modules: Optional[Dict[str, Any]] = None
    # Setting this allows evaluating on a different dataset than the model was trained on.
    dataset: Optional[Any] = None
    # Setting this allows evaluating with different metrics than the model was trained with.
    evaluation_metrics: Optional[Dict[str, Any]] = None
    save_outputs: bool = False
    skip_metrics: bool = False
    outputs_dirname: str = "outputs"
    outputs_to_store: Optional[List[str]] = None
    n_samples_to_store: Optional[int] = None
    eval_train: bool = False
    eval_val: bool = True
    eval_test: bool = False
    eval_batch_size: Optional[int] = None
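Example

A minimal sketch (not taken from the source) showing how EvaluationConfig might be instantiated programmatically. The field names and the module path ocl.cli.eval come from the dataclass above; the paths, the config name, and the output key in outputs_to_store are placeholders, and in practice these fields are usually populated from the command line rather than constructed by hand.

from ocl.cli.eval import EvaluationConfig

# Hypothetical paths; adjust to your own training run.
config = EvaluationConfig(
    train_config_path="outputs/slot_attention_run",  # a config dir, so a name is needed
    train_config_name="config",                      # resolved inside the config dir
    checkpoint_path="outputs/slot_attention_run/checkpoints/last.ckpt",
    output_dir="outputs/slot_attention_run/eval",
    eval_val=True,            # evaluate on the validation split (default)
    eval_test=False,          # skip the test split
    save_outputs=True,        # additionally store model outputs to outputs_dirname
    outputs_to_store=["masks"],  # hypothetical output key; restricts what is saved
    n_samples_to_store=16,
    eval_batch_size=8,
)

Since the config exposes train_config_overrides and train_config_name, it is presumably consumed through a Hydra-style entry point, in which case the same fields would typically be set as dotted command-line overrides; the exact command name depends on how the package is installed.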