use Trainer, but it still has bugs

This commit is contained in:
mhz
2024-09-19 14:11:19 +02:00
parent d36e1d1077
commit be178bc5ee
6 changed files with 750 additions and 580 deletions


@@ -177,32 +177,92 @@ def test(cfg: DictConfig):
    os.chdir(cfg.general.resume.split("checkpoints")[0])
    # os.environ["CUDA_VISIBLE_DEVICES"] = cfg.general.gpu_number
    model = Graph_DiT(cfg=cfg, **model_kwargs)
    trainer = Trainer(
        gradient_clip_val=cfg.train.clip_grad,
        # accelerator="cpu",
        accelerator="gpu"
        if torch.cuda.is_available() and cfg.general.gpus > 0
        else "cpu",
        devices=[cfg.general.gpu_number]
        if torch.cuda.is_available() and cfg.general.gpus > 0
        else None,
        max_epochs=cfg.train.n_epochs,
        enable_checkpointing=False,
        check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
        val_check_interval=cfg.train.val_check_interval,
        strategy="ddp" if cfg.general.gpus > 1 else "auto",
        enable_progress_bar=cfg.general.enable_progress_bar,
        callbacks=[],
        reload_dataloaders_every_n_epochs=0,
        logger=[],
    )
    if not cfg.general.test_only:
        print("start testing fit method")
        trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
        if cfg.general.save_model:
            trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
    trainer.test(model, datamodule=datamodule)
    if cfg.general.type == "accelerator":
        graph_dit_model = model
        from accelerate import Accelerator
        from accelerate.utils import set_seed, ProjectConfiguration

        accelerator_config = ProjectConfiguration(
            project_dir=os.path.join(cfg.general.log_dir, cfg.general.name),
            automatic_checkpoint_naming=True,
            total_limit=cfg.general.number_checkpoint_limit,
        )
        accelerator = Accelerator(
            mixed_precision='no',
            project_config=accelerator_config,
            # gradient_accumulation_steps=cfg.train.gradient_accumulation_steps * cfg.train.n_epochs,
            gradient_accumulation_steps=cfg.train.gradient_accumulation_steps,
        )
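        # NOTE: gradient_accumulation_steps only takes effect when the
        # forward/backward pass runs inside accelerator.accumulate(...);
        # the loop below steps the optimizer on every batch, so this setting
        # is currently a no-op (see the sketch after the training loop).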
        optimizer = graph_dit_model.configure_optimizers()
        train_dataloader = datamodule.train_dataloader()
        train_dataloader = accelerator.prepare(train_dataloader)
        val_dataloader = datamodule.val_dataloader()
        val_dataloader = accelerator.prepare(val_dataloader)
        test_dataloader = datamodule.test_dataloader()
        test_dataloader = accelerator.prepare(test_dataloader)
        optimizer, graph_dit_model = accelerator.prepare(optimizer, graph_dit_model)
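        # accelerator.prepare() moves the model and optimizer to the target
        # device and wraps each dataloader so that, under a multi-process
        # launch, every rank iterates over its own shard of the data.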
        # train_epoch
        from pytorch_lightning import seed_everything
        seed_everything(cfg.train.seed)
        for epoch in range(cfg.train.n_epochs):
            print(f"Epoch {epoch}")
            graph_dit_model.train()
            graph_dit_model.cur_epoch = epoch
            graph_dit_model.on_train_epoch_start()
            # Lightning step hooks take (batch, batch_idx), not the epoch number
            for batch_idx, batch in enumerate(train_dataloader):
                optimizer.zero_grad()
                loss = graph_dit_model.training_step(batch, batch_idx)['loss']
                accelerator.backward(loss)
                optimizer.step()
            graph_dit_model.on_train_epoch_end()
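            # A minimal sketch (not part of this commit) of an inner loop that
            # would actually honor cfg.train.gradient_accumulation_steps and
            # cfg.train.clip_grad under Accelerate; all names match the code above:
            #
            #     for batch_idx, batch in enumerate(train_dataloader):
            #         with accelerator.accumulate(graph_dit_model):
            #             loss = graph_dit_model.training_step(batch, batch_idx)['loss']
            #             accelerator.backward(loss)
            #             if accelerator.sync_gradients and cfg.train.clip_grad:
            #                 accelerator.clip_grad_norm_(
            #                     graph_dit_model.parameters(), cfg.train.clip_grad
            #                 )
            #             optimizer.step()
            #             optimizer.zero_grad()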
            # validation: gate on the epoch once and run the epoch-level
            # hooks once per epoch, not once per batch
            if epoch % cfg.train.check_val_every_n_epoch == 0:
                graph_dit_model.eval()
                graph_dit_model.on_validation_epoch_start()
                with torch.no_grad():
                    for batch_idx, batch in enumerate(val_dataloader):
                        graph_dit_model.validation_step(batch, batch_idx)
                graph_dit_model.on_validation_epoch_end()
        # test_epoch: run once after training; nn.Module has no .test(), use eval()
        graph_dit_model.eval()
        graph_dit_model.on_test_epoch_start()
        with torch.no_grad():
            for batch_idx, batch in enumerate(test_dataloader):
                graph_dit_model.test_step(batch, batch_idx)
        graph_dit_model.on_test_epoch_end()
    elif cfg.general.type == "Trainer":
        trainer = Trainer(
            gradient_clip_val=cfg.train.clip_grad,
            # accelerator="cpu",
            accelerator="gpu"
            if torch.cuda.is_available() and cfg.general.gpus > 0
            else "cpu",
            devices=[cfg.general.gpu_number]
            if torch.cuda.is_available() and cfg.general.gpus > 0
            else None,
            max_epochs=cfg.train.n_epochs,
            enable_checkpointing=False,
            check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
            val_check_interval=cfg.train.val_check_interval,
            strategy="ddp" if cfg.general.gpus > 1 else "auto",
            enable_progress_bar=cfg.general.enable_progress_bar,
            callbacks=[],
            reload_dataloaders_every_n_epochs=0,
            logger=[],
        )
        if not cfg.general.test_only:
            print("start testing fit method")
            trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
            if cfg.general.save_model:
                trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
        trainer.test(model, datamodule=datamodule)
if __name__ == "__main__":
    test()