14 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| mhz | 91d4e3c7ad | try to get the original perf | 2024-09-16 22:45:12 +02:00 |
| mhz | c867aef5a6 | now we add reward wait to test | 2024-09-15 22:21:09 +02:00 |
| mhz | 1ad520d248 | can run but need to test which pth is | 2024-09-15 22:18:56 +02:00 |
| mhz | 74a629fdcc | update README file | 2024-09-15 00:01:47 +02:00 |
| mhz | 94fe13756f | try to update reward func | 2024-09-14 23:56:36 +02:00 |
| mhz | 2ac17caa3c | need to update the model | 2024-09-12 23:40:42 +02:00 |
| mhz | 0c60171c71 | need to update the model | 2024-09-10 16:57:42 +02:00 |
| mhz | 97fbdf91c7 | try to deploy PPO policy | 2024-09-09 23:50:10 +02:00 |
| mhz | 297261d666 | make data to use 1 rather than 2 size list | 2024-09-08 23:53:56 +02:00 |
| mhz | 5dccf590e7 | add sample phase and try to get log prob | 2024-09-08 23:26:49 +02:00 |
| mhz | 0c4b597dd2 | train phase done | 2024-09-08 21:09:41 +02:00 |
| mhz | 11d9697e06 | write some codes for integrate reward code | 2024-09-08 20:28:14 +02:00 |
| mhz | 244b159c26 | add how to compute min max in nasbench | 2024-09-01 23:11:10 +02:00 |
| mhz | 63ca6c716e | add the aircraft result | 2024-09-01 23:09:56 +02:00 |
7 changed files with 16377 additions and 570 deletions

View File

@@ -1,14 +1,34 @@
 Graph Diffusion Transformer for Multi-Conditional Molecular Generation
 ================================================================
+## Initial Setup
+Please download NASBench201 dataset(NAS-Bench-201-v1_1-096897.pth) from
+https://drive.google.com/file/d/16Y0UwGisiouVRxW-W5hEtbxmcHw_0hF_/view
+and put it in the `/path/to/repo/graph_dit` folder.
+## Running the code
+start command:
+``` bash
+python main.py --config-name=config.yaml \
+    model.ensure_connected=True \
+    dataset.task_name='nasbench201' \
+    dataset.guidance_target='regression'
+```
+This repository contains the code for the paper "Inverse Molecular Design with Multi-Conditional Diffusion Guidance" by Gang Liu, Jiaxin Xu, Tengfei Luo, and Meng Jiang.
 Paper: https://arxiv.org/abs/2401.13858
-This is the code for Graph DiT. The denoising model architecture in `graph_dit/models` looks like:
+<!-- This is the code for Graph DiT. The denoising model architecture in `graph_dit/models` looks like:
 <div style="display: flex;" markdown="1">
 <img src="asset/reverse.png" style="width: 45%;" alt="Description of the first image">
 <img src="asset/arch.png" style="width: 45%;" alt="Description of the second image">
-</div>
+</div> -->
 ## Requirements
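As a side note on the new setup section: before launching `main.py`, it can save a failed run to check that the NAS-Bench-201 file actually loads. A minimal sketch, assuming the `nas_201_api` package used later in this commit's reward code is installed; the path below is the README's placeholder:

```python
# Minimal sanity check for the NAS-Bench-201 download (sketch, not part of the repo).
# Assumes the nas_201_api package is installed; the path is the README's placeholder.
import os
from nas_201_api import NASBench201API as API

pth = "/path/to/repo/graph_dit/NAS-Bench-201-v1_1-096897.pth"
assert os.path.isfile(pth), f"dataset not found: {pth}"

api = API(pth)  # the first load parses the whole benchmark and can take a while
print(len(api))  # 15625 architectures in the NAS-Bench-201 search space
arch = '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_3x3~1|+|skip_connect~0|none~1|avg_pool_3x3~2|'
print(api.query_index_by_arch(arch))  # index of one architecture string
```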

View File

@@ -195,15 +195,18 @@ class Graph_DiT(pl.LightningModule):
# print("Size of the input features Xdim {}, Edim {}, ydim {}".format(self.Xdim, self.Edim, self.ydim)) # print("Size of the input features Xdim {}, Edim {}, ydim {}".format(self.Xdim, self.Edim, self.ydim))
def on_train_epoch_start(self) -> None: def on_train_epoch_start(self) -> None:
if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]: # if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
print("Starting train epoch {}/{}...".format(self.current_epoch, self.trainer.max_epochs)) if self.current_epoch / self.cfg.train.n_epochs in [0.25, 0.5, 0.75, 1.0]:
# print("Starting train epoch {}/{}...".format(self.current_epoch, self.trainer.max_epochs))
print("Starting train epoch {}/{}...".format(self.current_epoch, self.cfg.train.n_epochs))
self.start_epoch_time = time.time() self.start_epoch_time = time.time()
self.train_loss.reset() self.train_loss.reset()
self.train_metrics.reset() self.train_metrics.reset()
def on_train_epoch_end(self) -> None: def on_train_epoch_end(self) -> None:
if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]: # if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
if self.current_epoch / self.cfg.train.n_epochs in [0.25, 0.5, 0.75, 1.0]:
log = True log = True
else: else:
log = False log = False
@@ -239,7 +242,7 @@ class Graph_DiT(pl.LightningModule):
                    self.val_X_logp.compute(), self.val_E_logp.compute()]
-        if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
+        # if self.current_epoch / self.trainer.max_epochs in [0.25, 0.5, 0.75, 1.0]:
         print(f"Epoch {self.current_epoch}: Val NLL {metrics[0] :.2f} -- Val Atom type KL {metrics[1] :.2f} -- ",
               f"Val Edge type KL: {metrics[2] :.2f}", 'Val loss: %.2f \t Best : %.2f\n' % (metrics[0], self.best_val_nll))
         with open("validation-metrics.csv", "a") as f:
@@ -286,7 +289,7 @@ class Graph_DiT(pl.LightningModule):
             samples.extend(self.sample_batch(batch_id=ident, batch_size=to_generate, y=batch_y,
                                              save_final=to_save,
                                              keep_chain=chains_save,
-                                             number_chain_steps=self.number_chain_steps))
+                                             number_chain_steps=self.number_chain_steps)[0])
             ident += to_generate
             start_index += to_generate
@@ -360,7 +363,7 @@ class Graph_DiT(pl.LightningModule):
             batch_y = torch.ones(to_generate, self.ydim_output, device=self.device)
             cur_sample = self.sample_batch(batch_id, to_generate, batch_y, save_final=to_save,
-                                           keep_chain=chains_save, number_chain_steps=self.number_chain_steps)
+                                           keep_chain=chains_save, number_chain_steps=self.number_chain_steps)[0]
             samples = samples + cur_sample
             all_ys.append(batch_y)
@@ -601,6 +604,9 @@ class Graph_DiT(pl.LightningModule):
         assert (E == torch.transpose(E, 1, 2)).all()

+        total_log_probs = torch.zeros([self.cfg.general.final_model_samples_to_generate, 10], device=self.device)
+        # total_log_probs = torch.zeros([self.cfg.general.samples_to_generate, 10], device=self.device)
+
         # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.
         for s_int in reversed(range(0, self.T)):
             s_array = s_int * torch.ones((batch_size, 1)).type_as(y)
@@ -609,21 +615,24 @@ class Graph_DiT(pl.LightningModule):
             t_norm = t_array / self.T

             # Sample z_s
-            sampled_s, discrete_sampled_s = self.sample_p_zs_given_zt(s_norm, t_norm, X, E, y, node_mask)
+            sampled_s, discrete_sampled_s, log_probs = self.sample_p_zs_given_zt(s_norm, t_norm, X, E, y, node_mask)
             X, E, y = sampled_s.X, sampled_s.E, sampled_s.y
+            print(f'sampled_s.X shape: {sampled_s.X.shape}, sampled_s.E shape: {sampled_s.E.shape}')
+            print(f'log_probs shape: {log_probs.shape}')
+            total_log_probs += log_probs

         # Sample
         sampled_s = sampled_s.mask(node_mask, collapse=True)
         X, E, y = sampled_s.X, sampled_s.E, sampled_s.y

-        molecule_list = []
+        graph_list = []
         for i in range(batch_size):
             n = n_nodes[i]
-            atom_types = X[i, :n].cpu()
+            node_types = X[i, :n].cpu()
             edge_types = E[i, :n, :n].cpu()
-            molecule_list.append([atom_types, edge_types])
+            graph_list.append([node_types, edge_types])

-        return molecule_list
+        return graph_list, total_log_probs

     def sample_p_zs_given_zt(self, s, t, X_t, E_t, y_t, node_mask):
         """Samples from zs ~ p(zs | zt). Only used during sampling.
@@ -635,6 +644,7 @@ class Graph_DiT(pl.LightningModule):
         # Neural net predictions
         noisy_data = {'X_t': X_t, 'E_t': E_t, 'y_t': y_t, 't': t, 'node_mask': node_mask}
+        print(f"sample p zs given zt X_t shape: {X_t.shape}, E_t shape: {E_t.shape}, y_t shape: {y_t.shape}, node_mask shape: {node_mask.shape}")

         def get_prob(noisy_data, unconditioned=False):
             pred = self.forward(noisy_data, unconditioned=unconditioned)
@@ -674,7 +684,19 @@ class Graph_DiT(pl.LightningModule):
         # with condition = P_t(G_{t-1} |G_t, C)
         # with condition = P_t(A_{t-1} |A_t, y)
         prob_X, prob_E, pred = get_prob(noisy_data)
+        print(f'prob_X shape: {prob_X.shape}, prob_E shape: {prob_E.shape}')
+        print(f'X_t shape: {X_t.shape}, E_t shape: {E_t.shape}, y_t shape: {y_t.shape}')
+        print(f'X_t: {X_t}')
+        log_prob_X = torch.log(torch.gather(prob_X, -1, X_t.long()).squeeze(-1))  # bs, n
+        log_prob_E = torch.log(torch.gather(prob_E, -1, E_t.long()).squeeze(-1))  # bs, n, n
+        # Sum the log_prob across dimensions for total log_prob
+        log_prob_X = log_prob_X.sum(dim=-1)
+        log_prob_E = log_prob_E.sum(dim=(1, 2))
+        print(f'log_prob_X shape: {log_prob_X.shape}, log_prob_E shape: {log_prob_E.shape}')
+        # log_probs = log_prob_E + log_prob_X
+        log_probs = torch.cat([log_prob_X, log_prob_E], dim=-1)  # (batch_size, 2)
+        print(f'log_probs shape: {log_probs.shape}')

         ### Guidance
         if self.guidance_target is not None and self.guide_scale is not None and self.guide_scale != 1:
             uncon_prob_X, uncon_prob_E, pred = get_prob(noisy_data, unconditioned=True)
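The gather above pulls out, for every node and edge, the probability of the state that was actually sampled; summing the logs gives a joint log-probability per graph. One shape caveat, offered as a reading of the code rather than a tested claim: `torch.cat` on two 1-D tensors of length `bs` yields a single `(2*bs,)` tensor, not the `(batch_size, 2)` the inline comment suggests; `torch.stack([log_prob_X, log_prob_E], dim=-1)` would produce `(bs, 2)`. A standalone toy version of the gather step:

```python
# Toy, self-contained version of the per-node log-prob gather (all sizes made up).
import torch

bs, n, num_states = 2, 3, 5
prob_X = torch.softmax(torch.randn(bs, n, num_states), dim=-1)  # per-node categorical probs
X_t = torch.randint(num_states, (bs, n, 1))                     # index of the sampled state

log_prob_X = torch.log(torch.gather(prob_X, -1, X_t).squeeze(-1))  # (bs, n)
log_prob_X = log_prob_X.sum(dim=-1)                                # (bs,) joint log-prob per graph
print(log_prob_X.shape)  # torch.Size([2])
```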
@@ -810,4 +832,4 @@ class Graph_DiT(pl.LightningModule):
         out_one_hot = utils.PlaceHolder(X=X_s, E=E_s, y=y_t)
         out_discrete = utils.PlaceHolder(X=X_s, E=E_s, y=y_t)

-        return out_one_hot.mask(node_mask).type_as(y_t), out_discrete.mask(node_mask, collapse=True).type_as(y_t)
+        return out_one_hot.mask(node_mask).type_as(y_t), out_discrete.mask(node_mask, collapse=True).type_as(y_t), log_probs

View File

@@ -1,4 +1,5 @@
 # These imports are tricky because they use c++, do not move them
+from tqdm import tqdm
 import os, shutil
 import warnings
@@ -144,10 +145,32 @@ def main(cfg: DictConfig):
     else:
         trainer.test(model, datamodule=datamodule, ckpt_path=cfg.general.test_only)

+from accelerate import Accelerator
+from accelerate.utils import set_seed, ProjectConfiguration
+
 @hydra.main(
     version_base="1.1", config_path="../configs", config_name="config"
 )
 def test(cfg: DictConfig):
+    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.general.gpu_number
+    accelerator_config = ProjectConfiguration(
+        project_dir=os.path.join(cfg.general.log_dir, cfg.general.name),
+        automatic_checkpoint_naming=True,
+        total_limit=cfg.general.number_checkpoint_limit,
+    )
+    accelerator = Accelerator(
+        mixed_precision='no',
+        project_config=accelerator_config,
+        # gradient_accumulation_steps=cfg.train.gradient_accumulation_steps * cfg.train.n_epochs,
+        gradient_accumulation_steps=cfg.train.gradient_accumulation_steps,
+    )
+    # Debug: confirm the available devices
+    print(f"Available GPUs: {torch.cuda.device_count()}")
+    print(f"Using device: {accelerator.device}")
+    set_seed(cfg.train.seed, device_specific=True)
     datamodule = dataset.DataModule(cfg)
     datamodule.prepare_data()
     dataset_infos = dataset.DataInfos(datamodule=datamodule, cfg=cfg, dataset=datamodule.dataset)
@@ -169,40 +192,216 @@ def test(cfg: DictConfig):
"visualization_tools": visulization_tools, "visualization_tools": visulization_tools,
} }
# Debug: 确认可用设备
print(f"Available GPUs: {torch.cuda.device_count()}")
print(f"Using device: {accelerator.device}")
if cfg.general.test_only: if cfg.general.test_only:
cfg, _ = get_resume(cfg, model_kwargs) cfg, _ = get_resume(cfg, model_kwargs)
os.chdir(cfg.general.test_only.split("checkpoints")[0]) os.chdir(cfg.general.test_only.split("checkpoints")[0])
elif cfg.general.resume is not None: elif cfg.general.resume is not None:
cfg, _ = get_resume_adaptive(cfg, model_kwargs) cfg, _ = get_resume_adaptive(cfg, model_kwargs)
os.chdir(cfg.general.resume.split("checkpoints")[0]) os.chdir(cfg.general.resume.split("checkpoints")[0])
# os.environ["CUDA_VISIBLE_DEVICES"] = cfg.general.gpu_number
model = Graph_DiT(cfg=cfg, **model_kwargs) model = Graph_DiT(cfg=cfg, **model_kwargs)
-    trainer = Trainer(
-        gradient_clip_val=cfg.train.clip_grad,
-        # accelerator="cpu",
-        accelerator="gpu"
-        if torch.cuda.is_available() and cfg.general.gpus > 0
-        else "cpu",
-        devices=[cfg.general.gpu_number]
-        if torch.cuda.is_available() and cfg.general.gpus > 0
-        else None,
-        max_epochs=cfg.train.n_epochs,
-        enable_checkpointing=False,
-        check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
-        val_check_interval=cfg.train.val_check_interval,
-        strategy="ddp" if cfg.general.gpus > 1 else "auto",
-        enable_progress_bar=cfg.general.enable_progress_bar,
-        callbacks=[],
-        reload_dataloaders_every_n_epochs=0,
-        logger=[],
-    )
-    if not cfg.general.test_only:
-        print("start testing fit method")
-        trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
-        if cfg.general.save_model:
-            trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
-        trainer.test(model, datamodule=datamodule)
+    graph_dit_model = model
+
+    inference_dtype = torch.float32
+    graph_dit_model.to(accelerator.device, dtype=inference_dtype)
+
+    # optional: freeze the model
+    # graph_dit_model.model.requires_grad_(True)
+    import torch.nn.functional as F
+    optimizer = graph_dit_model.configure_optimizers()
+    train_dataloader = accelerator.prepare(datamodule.train_dataloader())
+    optimizer, graph_dit_model = accelerator.prepare(optimizer, graph_dit_model)
+
+    # start training
+    for epoch in range(cfg.train.n_epochs):
+        graph_dit_model.train()  # put the model in training mode
+        print(f"Epoch {epoch}", end="\n")
+        graph_dit_model.on_train_epoch_start()
+        for data in train_dataloader:  # fetch one batch from the dataloader
+            # data.to(accelerator.device)
+            # data_x = F.one_hot(data.x, num_classes=12).float()[:, graph_dit_model.active_index]
+            # data_edge_attr = F.one_hot(data.edge_attr, num_classes=2).float()
+            # dense_data, node_mask = utils.to_dense(data_x, data.edge_index, data_edge_attr, data.batch, graph_dit_model.max_n_nodes)
+            # dense_data = dense_data.mask(node_mask)
+            # X, E = dense_data.X, dense_data.E
+            # noisy_data = graph_dit_model.apply_noise(X, E, data.y, node_mask)
+            # pred = graph_dit_model.forward(noisy_data)
+            # loss = graph_dit_model.train_loss(masked_pred_X=pred.X, masked_pred_E=pred.E, pred_y=pred.y,
+            #                                   true_X=X, true_E=E, true_y=data.y, node_mask=node_mask,
+            #                                   log=epoch % graph_dit_model.log_every_steps == 0)
+            # # print(f'training loss: {loss}, epoch: {self.current_epoch}, batch: {i}\n, pred type: {type(pred)}, pred.X shape: {type(pred.X)}, {pred.X.shape}, pred.E shape: {type(pred.E)}, {pred.E.shape}')
+            # graph_dit_model.train_metrics(masked_pred_X=pred.X, masked_pred_E=pred.E, true_X=X, true_E=E,
+            #                               log=epoch % graph_dit_model.log_every_steps == 0)
+            # graph_dit_model.log(f'loss', loss, batch_size=X.size(0), sync_dist=True)
+            # print(f"training loss: {loss}")
+            # with open("training-loss.csv", "a") as f:
+            #     f.write(f"{loss}, {epoch}\n")
+            loss = graph_dit_model.training_step(data, epoch)
+            loss = loss['loss']
+            accelerator.backward(loss)
+            optimizer.step()
+            optimizer.zero_grad()
+            # return {'loss': loss}
+        graph_dit_model.on_train_epoch_end()
+
+        if epoch % cfg.train.check_val_every_n_epoch == 0:
+            print(f'print validation loss')
+            graph_dit_model.eval()
+            graph_dit_model.on_validation_epoch_start()
+            graph_dit_model.validation_step(data, epoch)
+            graph_dit_model.on_validation_epoch_end()
+    # start testing
+    print("start testing")
+    graph_dit_model.eval()
+    test_dataloader = accelerator.prepare(datamodule.test_dataloader())
+    graph_dit_model.on_test_epoch_start()
+    for data in test_dataloader:
+        nll = graph_dit_model.test_step(data, epoch)
+        # data_x = F.one_hot(data.x, num_classes=12).float()[:, graph_dit_model.active_index]
+        # data_edge_attr = F.one_hot(data.edge_attr, num_classes=2).float()
+        # dense_data, node_mask = utils.to_dense(data_x, data.edge_index, data_edge_attr, data.batch, graph_dit_model.max_n_nodes)
+        # dense_data = dense_data.mask(node_mask)
+        # noisy_data = graph_dit_model.apply_noise(dense_data.X, dense_data.E, data.y, node_mask)
+        # pred = graph_dit_model.forward(noisy_data)
+        # nll = graph_dit_model.compute_val_loss(pred, noisy_data, dense_data.X, dense_data.E, data.y, node_mask, test=True)
+        # graph_dit_model.test_y_collection.append(data.y)
+        print(f'test loss: {nll}')
+    graph_dit_model.on_test_epoch_end()
+    # start sampling
+    # samples_left_to_generate = cfg.general.final_model_samples_to_generate
+    # samples_left_to_save = cfg.general.final_model_samples_to_save
+    # chains_left_to_save = cfg.general.final_model_chains_to_save
+    # samples, all_ys, batch_id = [], [], 0
+    # samples_with_log_probs = []
+    # test_y_collection = torch.cat(graph_dit_model.test_y_collection, dim=0)
+    # num_examples = test_y_collection.size(0)
+    # if cfg.general.final_model_samples_to_generate > num_examples:
+    #     ratio = cfg.general.final_model_samples_to_generate // num_examples
+    #     test_y_collection = test_y_collection.repeat(ratio+1, 1)
+    #     num_examples = test_y_collection.size(0)
+
+    # Normal reward function
+    # from nas_201_api import NASBench201API as API
+    # api = API('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/NAS-Bench-201-v1_1-096897.pth')
+    # def graph_reward_fn(graphs, true_graphs=None, device=None, reward_model='swap'):
+    #     rewards = []
+    #     if reward_model == 'swap':
+    #         import csv
+    #         with open('/nfs/data3/hanzhang/nasbenchDiT/graph_dit/swap_results.csv', 'r') as f:
+    #             reader = csv.reader(f)
+    #             header = next(reader)
+    #             data = [row for row in reader]
+    #             swap_scores = [float(row[0]) for row in data]
+    #         for graph in graphs:
+    #             node_tensor = graph[0]
+    #             node = node_tensor.cpu().numpy().tolist()
+    #             def nodes_to_arch_str(nodes):
+    #                 num_to_op = ['input', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3', 'skip_connect', 'none', 'output']
+    #                 nodes_str = [num_to_op[node] for node in nodes]
+    #                 arch_str = '|' + nodes_str[1] + '~0|+' + \
+    #                            '|' + nodes_str[2] + '~0|' + nodes_str[3] + '~1|+' + \
+    #                            '|' + nodes_str[4] + '~0|' + nodes_str[5] + '~1|' + nodes_str[6] + '~2|'
+    #                 return arch_str
+    #             arch_str = nodes_to_arch_str(node)
+    #             reward = swap_scores[api.query_index_by_arch(arch_str)]
+    #             rewards.append(reward)
+    #     # for graph in graphs:
+    #     #     reward = 1.0
+    #     #     rewards.append(reward)
+    #     return torch.tensor(rewards, dtype=torch.float32, requires_grad=True).unsqueeze(0).to(device)
+
+    # old_log_probs = None
+    # while samples_left_to_generate > 0:
+    #     print(f'samples left to generate: {samples_left_to_generate}/'
+    #           f'{cfg.general.final_model_samples_to_generate}', end='', flush=True)
+    #     bs = 1 * cfg.train.batch_size
+    #     to_generate = min(samples_left_to_generate, bs)
+    #     to_save = min(samples_left_to_save, bs)
+    #     chains_save = min(chains_left_to_save, bs)
+    #     # batch_y = test_y_collection[batch_id : batch_id + to_generate]
+    #     batch_y = torch.ones(to_generate, graph_dit_model.ydim_output, device=graph_dit_model.device)
+    #     cur_sample, log_probs = graph_dit_model.sample_batch(batch_id, to_generate, batch_y, save_final=to_save,
+    #                                                          keep_chain=chains_save, number_chain_steps=graph_dit_model.number_chain_steps)
+    #     log_probs = torch.sum(log_probs, dim=-1).unsqueeze(1)
+    #     samples = samples + cur_sample
+    #     reward = graph_reward_fn(cur_sample, device=graph_dit_model.device)
+    #     advantages = (reward - torch.mean(reward)) / (torch.std(reward) + 1e-6)
+    #     print(f'reward: {reward.shape}, advantages: {advantages.shape}, log_probs: {log_probs.shape}, cur_sample: {len(cur_sample)}')
+    #     if old_log_probs is None:
+    #         old_log_probs = log_probs.clone()
+    #     ratio = torch.exp(log_probs - old_log_probs)
+    #     unclipped_loss = -advantages * ratio
+    #     clipped_loss = -advantages * torch.clamp(ratio, 1.0 - cfg.ppo.clip_param, 1.0 + cfg.ppo.clip_param)
+    #     loss = torch.mean(torch.max(unclipped_loss, clipped_loss))
+    #     accelerator.backward(loss)
+    #     optimizer.step()
+    #     optimizer.zero_grad()
+    #     samples_with_log_probs.append((cur_sample, log_probs, reward))
+
+    #     all_ys.append(batch_y)
+    #     batch_id += to_generate
+    #     samples_left_to_save -= to_save
+    #     samples_left_to_generate -= to_generate
+    #     chains_left_to_save -= chains_save
+
+    # print(f"final Computing sampling metrics...")
+    # graph_dit_model.sampling_metrics.reset()
+    # graph_dit_model.sampling_metrics(samples, all_ys, graph_dit_model.name, graph_dit_model.current_epoch, graph_dit_model.val_counter, test=True)
+    # graph_dit_model.sampling_metrics.reset()
+    # print(f"Done.")
+
+    # # save samples
+    # print("Samples:")
+    # print(samples)
+    # ========================
+    # trainer = Trainer(
+    #     gradient_clip_val=cfg.train.clip_grad,
+    #     # accelerator="cpu",
+    #     accelerator="gpu"
+    #     if torch.cuda.is_available() and cfg.general.gpus > 0
+    #     else "cpu",
+    #     devices=[cfg.general.gpu_number]
+    #     if torch.cuda.is_available() and cfg.general.gpus > 0
+    #     else None,
+    #     max_epochs=cfg.train.n_epochs,
+    #     enable_checkpointing=False,
+    #     check_val_every_n_epoch=cfg.train.check_val_every_n_epoch,
+    #     val_check_interval=cfg.train.val_check_interval,
+    #     strategy="ddp" if cfg.general.gpus > 1 else "auto",
+    #     enable_progress_bar=cfg.general.enable_progress_bar,
+    #     callbacks=[],
+    #     reload_dataloaders_every_n_epochs=0,
+    #     logger=[],
+    # )
+    # if not cfg.general.test_only:
+    #     print("start testing fit method")
+    #     trainer.fit(model, datamodule=datamodule, ckpt_path=cfg.general.resume)
+    #     if cfg.general.save_model:
+    #         trainer.save_checkpoint(f"checkpoints/{cfg.general.name}/last.ckpt")
+    #     trainer.test(model, datamodule=datamodule)
 if __name__ == "__main__":
     test()
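For reference, the commented-out sampling section above sketches a PPO-style update: SWAP scores as rewards, advantages obtained by normalizing rewards within the batch, and a clipped importance ratio between current and old policy log-probs. A self-contained sketch of that clipped surrogate, with made-up tensors and `clip_param` mirroring `cfg.ppo.clip_param` from the diff:

```python
# Self-contained sketch of the clipped PPO surrogate from the commented-out loop.
# All tensors are made up; clip_param mirrors cfg.ppo.clip_param in the diff.
import torch

clip_param = 0.2
log_probs = torch.randn(8, 1, requires_grad=True)        # current policy log-probs
old_log_probs = log_probs.detach() + 0.05 * torch.randn(8, 1)
reward = torch.rand(8, 1)

advantages = (reward - reward.mean()) / (reward.std() + 1e-6)  # batch-normalized rewards

ratio = torch.exp(log_probs - old_log_probs)             # pi_new / pi_old
unclipped = -advantages * ratio
clipped = -advantages * torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param)
loss = torch.max(unclipped, clipped).mean()              # pessimistic (clipped) objective
loss.backward()
print(loss.item())
```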

View File

@@ -76,6 +76,8 @@ class CategoricalEmbedder(nn.Module):
             embeddings = embeddings + noise
         return embeddings

+# cluster similar conditions together
+# size
 class ClusterContinuousEmbedder(nn.Module):
     def __init__(self, input_size, hidden_size, dropout_prob):
         super().__init__()
def __init__(self, input_size, hidden_size, dropout_prob): def __init__(self, input_size, hidden_size, dropout_prob):
super().__init__() super().__init__()
@@ -108,6 +110,8 @@ class ClusterContinuousEmbedder(nn.Module):
         if drop_ids is not None:
             embeddings = torch.zeros((labels.shape[0], self.hidden_size), device=labels.device)
+            # print(labels[~drop_ids].shape)
+            # torch.Size([1200])
             embeddings[~drop_ids] = self.mlp(labels[~drop_ids])
             embeddings[drop_ids] += self.embedding_drop.weight[0]
         else:
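For context, the `drop_ids` branch shown here is condition dropout: rows whose condition is dropped receive a learned null embedding instead of the MLP output of their label. A minimal standalone rendition of the pattern (the real embedder's MLP and sizes differ; everything below is made up):

```python
# Standalone rendition of the drop_ids pattern (made-up sizes; the real MLP differs).
import torch
import torch.nn as nn

batch, hidden_size = 6, 16
mlp = nn.Sequential(nn.Linear(1, hidden_size), nn.SiLU(), nn.Linear(hidden_size, hidden_size))
embedding_drop = nn.Embedding(1, hidden_size)  # learned "null condition" row

labels = torch.randn(batch, 1)                 # continuous condition values
drop_ids = torch.rand(batch) < 0.1             # rows whose condition gets dropped

embeddings = torch.zeros(batch, hidden_size)
embeddings[~drop_ids] = mlp(labels[~drop_ids])    # kept rows: embed the condition
embeddings[drop_ids] += embedding_drop.weight[0]  # dropped rows: null embedding
print(embeddings.shape)  # (6, 16)
```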

View File

@@ -17,20 +17,22 @@ class Denoiser(nn.Module):
         num_heads=16,
         mlp_ratio=4.0,
         drop_condition=0.1,
-        Xdim=118,
-        Edim=5,
-        ydim=3,
+        Xdim=7,
+        Edim=2,
+        ydim=1,
         task_type='regression',
     ):
         super().__init__()
+        print(f"Denoiser, xdim: {Xdim}, edim: {Edim}, ydim: {ydim}, hidden_size: {hidden_size}, depth: {depth}, num_heads: {num_heads}, mlp_ratio: {mlp_ratio}, drop_condition: {drop_condition}")
         self.num_heads = num_heads
         self.ydim = ydim
         self.x_embedder = nn.Linear(Xdim + max_n_nodes * Edim, hidden_size, bias=False)
         self.t_embedder = TimestepEmbedder(hidden_size)
+        #
         self.y_embedding_list = torch.nn.ModuleList()
-        self.y_embedding_list.append(ClusterContinuousEmbedder(2, hidden_size, drop_condition))
+        self.y_embedding_list.append(ClusterContinuousEmbedder(1, hidden_size, drop_condition))
         for i in range(ydim - 2):
             if task_type == 'regression':
                 self.y_embedding_list.append(ClusterContinuousEmbedder(1, hidden_size, drop_condition))
@@ -88,6 +90,8 @@ class Denoiser(nn.Module):
# print("Denoiser Forward") # print("Denoiser Forward")
# print(x.shape, e.shape, y.shape, t.shape, unconditioned) # print(x.shape, e.shape, y.shape, t.shape, unconditioned)
# torch.Size([1200, 8, 7]) torch.Size([1200, 8, 8, 2]) torch.Size([1200, 2]) torch.Size([1200, 1]) False
# print(y)
force_drop_id = torch.zeros_like(y.sum(-1)) force_drop_id = torch.zeros_like(y.sum(-1))
# drop the nan values # drop the nan values
force_drop_id[torch.isnan(y.sum(-1))] = 1 force_drop_id[torch.isnan(y.sum(-1))] = 1
@@ -109,11 +113,12 @@ class Denoiser(nn.Module):
         c1 = self.t_embedder(t)
         # print("C1 after t_embedder")
         # print(c1.shape)
-        for i in range(1, self.ydim):
-            if i == 1:
-                c2 = self.y_embedding_list[i-1](y[:, :2], self.training, force_drop_id, t)
-            else:
-                c2 = c2 + self.y_embedding_list[i-1](y[:, i:i+1], self.training, force_drop_id, t)
+        c2 = self.y_embedding_list[0](y[:,0].unsqueeze(-1), self.training, force_drop_id, t)
+        # for i in range(1, self.ydim):
+        #     if i == 1:
+        #         c2 = self.y_embedding_list[i-1](y[:, :2], self.training, force_drop_id, t)
+        #     else:
+        #         c2 = c2 + self.y_embedding_list[i-1](y[:, i:i+1], self.training, force_drop_id, t)
         # print("C2 after y_embedding_list")
         # print(c2.shape)
         # print("C1 + C2")

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long