configs/experiment/slot_attention/clevr10.yaml

# @package _global_
defaults:
  - /experiment/slot_attention/_base
  - /dataset: clevr
  - /experiment/slot_attention/_preprocessing_clevr
  - /experiment/slot_attention/_metrics_clevr
  - _self_
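# Listing _self_ last lets the values defined in this file override the composed defaults.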

# The following parameters assume training on 8 GPUs: 8 devices x batch size 8 = effective batch size 64.
trainer:
  devices: 8
  max_steps: 500000
  max_epochs: null  # Stop on max_steps rather than an epoch limit.
dataset:
  num_workers: 4
  batch_size: 8

models:
  conditioning:
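    # CLEVR10 scenes contain up to 10 objects; the extra slot accounts for the background.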
    n_slots: 11
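To train on fewer GPUs while preserving the effective batch size of 64, the per-device batch size can be raised to compensate. A minimal sketch of a derived config, assuming this file is composable as /experiment/slot_attention/clevr10 (the single-GPU values below are illustrative, not part of the repository):

# @package _global_
# Hypothetical single-GPU variant: 1 device x batch size 64 = effective batch size 64.
defaults:
  - /experiment/slot_attention/clevr10
  - _self_

trainer:
  devices: 1
dataset:
  batch_size: 64

If a batch of 64 does not fit in memory on one device, Lightning's accumulate_grad_batches option can be combined with a smaller per-device batch size to reach the same effective size.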