first commit

This commit is contained in:
CownowAn
2024-03-15 14:38:51 +00:00
commit bc2ed1304f
321 changed files with 44802 additions and 0 deletions

View File: __init__.py

@@ -0,0 +1 @@
from .evaluator import get_stats_eval, get_nn_eval

View File: evaluator.py

@@ -0,0 +1,58 @@
import networkx as nx
from .structure_evaluator import mmd_eval
from .gin_evaluator import nn_based_eval
from torch_geometric.utils import to_networkx
def get_stats_eval(config):
    if config.eval.mmd_distance.lower() == 'rbf':
        # (descriptor name, sigma, sigma-selection strategy) triples passed to mmd_eval
        method = [('degree', 1., 'argmax'), ('cluster', 0.1, 'argmax'),
                  ('spectral', 1., 'argmax')]
    else:
        raise ValueError(f'Unsupported mmd_distance: {config.eval.mmd_distance}')
    def eval_stats_fn(test_dataset, pred_graph_list):
        # nx.from_numpy_matrix was removed in networkx 3.0; from_numpy_array is the drop-in replacement
        pred_G = [nx.from_numpy_array(pred_adj) for pred_adj in pred_graph_list]
        if config.eval.max_subgraph:
            # keep only the largest connected component of each predicted graph
            sub_pred_G = []
            for G in pred_G:
                CGs = [G.subgraph(c) for c in nx.connected_components(G)]
                CGs = sorted(CGs, key=lambda x: x.number_of_nodes(), reverse=True)
                sub_pred_G.append(CGs[0])
            pred_G = sub_pred_G
test_G = [to_networkx(test_dataset[i], to_undirected=True, remove_self_loops=True)
for i in range(len(test_dataset))]
results = mmd_eval(test_G, pred_G, method)
return results
return eval_stats_fn
def get_nn_eval(config):
    # number of independently initialized random GINs to average over
    N_gin = getattr(config.eval, "N_gin", 10)
    def nn_eval_fn(test_dataset, pred_graph_list):
        pred_G = [nx.from_numpy_array(pred_adj) for pred_adj in pred_graph_list]
        if config.eval.max_subgraph:
            # keep only the largest connected component of each predicted graph
            sub_pred_G = []
            for G in pred_G:
                CGs = [G.subgraph(c) for c in nx.connected_components(G)]
                CGs = sorted(CGs, key=lambda x: x.number_of_nodes(), reverse=True)
                sub_pred_G.append(CGs[0])
            pred_G = sub_pred_G
test_G = [to_networkx(test_dataset[i], to_undirected=True, remove_self_loops=True)
for i in range(len(test_dataset))]
results = nn_based_eval(test_G, pred_G, N_gin)
return results
return nn_eval_fn
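
Below is a minimal usage sketch of the two factories above. It is illustrative only: `config` and `test_dataset` are stand-ins for the project's config object and a torch_geometric dataset, and the random adjacency matrices are hypothetical generated graphs.

import numpy as np

eval_stats_fn = get_stats_eval(config)  # structure-statistic MMDs (degree/cluster/spectral)
nn_eval_fn = get_nn_eval(config)        # random-GIN-based metrics

# symmetric 0/1 adjacency matrices standing in for generated graphs
adjs = [np.random.randint(0, 2, (10, 10)) for _ in range(8)]
pred_graph_list = [np.triu(a, 1) + np.triu(a, 1).T for a in adjs]

print(eval_stats_fn(test_dataset, pred_graph_list))  # {'degree_rbf': ..., 'cluster_rbf': ..., 'spectral_rbf': ...}
print(nn_eval_fn(test_dataset, pred_graph_list))     # {'MMD_RBF': (mean, std), 'F1_PR': ..., 'F1_DC': ...}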

View File: gin.py

@@ -0,0 +1,311 @@
"""Modified from https://github.com/uoguelph-mlrg/GGM-metrics"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.utils import expand_as_pair
from dgl.nn import SumPooling, AvgPooling, MaxPooling
class GINConv(nn.Module):
def __init__(self,
apply_func,
aggregator_type,
init_eps=0,
learn_eps=False):
        super(GINConv, self).__init__()
        self.apply_func = apply_func
        self._aggregator_type = aggregator_type
        # default edge-feature location; overwritten through GIN.edge_feat_loc
        self.edge_feat_loc = 'attr'
        if aggregator_type == 'sum':
            self._reducer = fn.sum
        elif aggregator_type == 'max':
            self._reducer = fn.max
        elif aggregator_type == 'mean':
            self._reducer = fn.mean
        else:
            raise KeyError('Aggregator type {} not recognized.'.format(aggregator_type))
        # to specify whether eps is trainable or not
        if learn_eps:
            self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
        else:
            self.register_buffer('eps', torch.FloatTensor([init_eps]))
def forward(self, graph, feat, edge_weight=None):
r"""
Description
-----------
Compute Graph Isomorphism Network layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, the input feature of shape :math:`(N, D_{in})` where
:math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
If ``apply_func`` is not None, :math:`D_{in}` should
fit the input dimensionality requirement of ``apply_func``.
edge_weight : torch.Tensor, optional
Optional tensor on the edge. If given, the convolution will weight
with regard to the message.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, D_{out})` where
:math:`D_{out}` is the output dimensionality of ``apply_func``.
If ``apply_func`` is None, :math:`D_{out}` should be the same
as input dimensionality.
"""
with graph.local_scope():
aggregate_fn = self.concat_edge_msg
# aggregate_fn = fn.copy_src('h', 'm')
if edge_weight is not None:
assert edge_weight.shape[0] == graph.number_of_edges()
graph.edata['_edge_weight'] = edge_weight
aggregate_fn = fn.u_mul_e('h', '_edge_weight', 'm')
feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src
graph.update_all(aggregate_fn, self._reducer('m', 'neigh'))
            # if edge features were concatenated to the messages, the aggregated
            # 'neigh' tensor is wider than feat_dst; zero-pad feat_dst to match
            diff = torch.tensor(graph.dstdata['neigh'].shape[1:]) - torch.tensor(feat_dst.shape[1:])
            zeros = torch.zeros(feat_dst.shape[0], *diff).to(feat_dst.device)
            feat_dst = torch.cat([feat_dst, zeros], dim=1)
            rst = (1 + self.eps) * feat_dst + graph.dstdata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
def concat_edge_msg(self, edges):
if self.edge_feat_loc not in edges.data:
return {'m': edges.src['h']}
else:
m = torch.cat([edges.src['h'], edges.data[self.edge_feat_loc]], dim=1)
return {'m': m}
class ApplyNodeFunc(nn.Module):
"""Update the node feature hv with MLP, BN and ReLU."""
def __init__(self, mlp):
super(ApplyNodeFunc, self).__init__()
self.mlp = mlp
self.bn = nn.BatchNorm1d(self.mlp.output_dim)
def forward(self, h):
h = self.mlp(h)
h = self.bn(h)
h = F.relu(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
"""MLP layers construction
        Parameters
        ----------
num_layers: int
The number of linear layers
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
"""
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h)
class GIN(nn.Module):
"""GIN model"""
def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
graph_pooling_type, neighbor_pooling_type, edge_feat_dim=0,
final_dropout=0.0, learn_eps=False, output_dim=1, **kwargs):
"""model parameters setting
        Parameters
        ----------
        num_layers: int
            The number of layers in the network (num_layers - 1 GIN convolutions)
num_mlp_layers: int
The number of linear layers in mlps
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
final_dropout: float
dropout ratio on the final linear layer
learn_eps: boolean
If True, learn epsilon to distinguish center nodes from neighbors
If False, aggregate neighbors and center nodes altogether.
neighbor_pooling_type: str
how to aggregate neighbors (sum, mean, or max)
graph_pooling_type: str
how to aggregate entire nodes in a graph (sum, mean or max)
"""
super().__init__()
def init_weights_orthogonal(m):
if isinstance(m, nn.Linear):
torch.nn.init.orthogonal_(m.weight)
elif isinstance(m, MLP):
if hasattr(m, 'linears'):
m.linears.apply(init_weights_orthogonal)
else:
m.linear.apply(init_weights_orthogonal)
elif isinstance(m, nn.ModuleList):
pass
            else:
                raise Exception('Unexpected module type: {}'.format(type(m)))
self.num_layers = num_layers
self.learn_eps = learn_eps
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
# self.preprocess_nodes = PreprocessNodeAttrs(
# node_attrs=node_preprocess, output_dim=node_preprocess_output_dim)
for layer in range(self.num_layers - 1):
if layer == 0:
mlp = MLP(num_mlp_layers, input_dim + edge_feat_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(num_mlp_layers, hidden_dim + edge_feat_dim, hidden_dim, hidden_dim)
            if kwargs.get('init') == 'orthogonal':
                init_weights_orthogonal(mlp)
self.ginlayers.append(
GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))
self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
# Linear function for graph poolings of output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = torch.nn.ModuleList()
for layer in range(num_layers):
if layer == 0:
self.linears_prediction.append(
nn.Linear(input_dim, output_dim))
else:
self.linears_prediction.append(
nn.Linear(hidden_dim, output_dim))
        if kwargs.get('init') == 'orthogonal':
            self.linears_prediction.apply(init_weights_orthogonal)
self.drop = nn.Dropout(final_dropout)
if graph_pooling_type == 'sum':
self.pool = SumPooling()
elif graph_pooling_type == 'mean':
self.pool = AvgPooling()
elif graph_pooling_type == 'max':
self.pool = MaxPooling()
else:
raise NotImplementedError
def forward(self, g, h):
# list of hidden representation at each layer (including input)
hidden_rep = [h]
# h = self.preprocess_nodes(h)
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
score_over_layer = 0
# perform pooling over all nodes in each graph in every layer
for i, h in enumerate(hidden_rep):
pooled_h = self.pool(g, h)
score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
return score_over_layer
def get_graph_embed(self, g, h):
self.eval()
with torch.no_grad():
# return self.forward(g, h).detach().numpy()
hidden_rep = []
# h = self.preprocess_nodes(h)
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
# perform pooling over all nodes in each graph in every layer
graph_embed = torch.Tensor([]).to(self.device)
for i, h in enumerate(hidden_rep):
pooled_h = self.pool(g, h)
                graph_embed = torch.cat([graph_embed, pooled_h], dim=1)
return graph_embed
def get_graph_embed_no_cat(self, g, h):
self.eval()
with torch.no_grad():
hidden_rep = []
# h = self.preprocess_nodes(h)
for i in range(self.num_layers - 1):
h = self.ginlayers[i](g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
return self.pool(g, hidden_rep[-1]).to(self.device)
@property
def edge_feat_loc(self):
return self.ginlayers[0].edge_feat_loc
@edge_feat_loc.setter
def edge_feat_loc(self, loc):
for layer in self.ginlayers:
layer.edge_feat_loc = loc
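
Below is a minimal smoke-test sketch for the model above. The graph, feature sizes, and the manually attached `device` attribute are illustrative assumptions; `load_feature_extractor` in the next file performs this setup for real use.

import dgl
import torch

g = dgl.rand_graph(12, 40)               # 12 nodes, 40 random edges
feats = torch.ones(12, 1)                # constant node features (input_dim=1)
model = GIN(num_layers=3, num_mlp_layers=2, input_dim=1, hidden_dim=35,
            graph_pooling_type='sum', neighbor_pooling_type='sum',
            init='orthogonal')
model.device = torch.device('cpu')       # get_graph_embed reads self.device
model.edge_feat_loc = 'attr'             # g has no edge features, so messages fall back to src['h']
embed = model.get_graph_embed(g, feats)  # (1, (num_layers - 1) * hidden_dim) = (1, 70)
print(embed.shape)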

View File: gin_evaluator.py

@@ -0,0 +1,292 @@
"""Evaluation on random GIN features. Modified from https://github.com/uoguelph-mlrg/GGM-metrics"""
import torch
import numpy as np
import sklearn
import sklearn.metrics
from sklearn.preprocessing import StandardScaler
import time
import dgl
from .gin import GIN
def load_feature_extractor(
device, num_layers=3, hidden_dim=35, neighbor_pooling_type='sum',
graph_pooling_type='sum', input_dim=1, edge_feat_dim=0,
dont_concat=False, num_mlp_layers=2, output_dim=1,
node_feat_loc='attr', edge_feat_loc='attr', init='orthogonal',
**kwargs):
model = GIN(num_layers=num_layers, hidden_dim=hidden_dim, neighbor_pooling_type=neighbor_pooling_type,
graph_pooling_type=graph_pooling_type, input_dim=input_dim, edge_feat_dim=edge_feat_dim,
num_mlp_layers=num_mlp_layers, output_dim=output_dim, init=init)
model.node_feat_loc = node_feat_loc
model.edge_feat_loc = edge_feat_loc
model.eval()
if dont_concat:
model.forward = model.get_graph_embed_no_cat
else:
model.forward = model.get_graph_embed
model.device = device
return model.to(device)
def time_function(func):
def wrapper(*args, **kwargs):
start = time.time()
results = func(*args, **kwargs)
end = time.time()
return results, end - start
return wrapper
class GINMetric():
def __init__(self, model):
self.feat_extractor = model
self.get_activations = self.get_activations_gin
@time_function
def get_activations_gin(self, generated_dataset, reference_dataset):
return self._get_activations(generated_dataset, reference_dataset)
def _get_activations(self, generated_dataset, reference_dataset):
gen_activations = self.__get_activations_single_dataset(generated_dataset)
ref_activations = self.__get_activations_single_dataset(reference_dataset)
scaler = StandardScaler()
scaler.fit(ref_activations)
ref_activations = scaler.transform(ref_activations)
gen_activations = scaler.transform(gen_activations)
return gen_activations, ref_activations
def __get_activations_single_dataset(self, dataset):
node_feat_loc = self.feat_extractor.node_feat_loc
edge_feat_loc = self.feat_extractor.edge_feat_loc
ndata = [node_feat_loc] if node_feat_loc in dataset[0].ndata else '__ALL__'
edata = [edge_feat_loc] if edge_feat_loc in dataset[0].edata else '__ALL__'
graphs = dgl.batch(dataset, ndata=ndata, edata=edata).to(self.feat_extractor.device)
if node_feat_loc not in graphs.ndata: # Use degree as features
feats = graphs.in_degrees() + graphs.out_degrees()
feats = feats.unsqueeze(1).type(torch.float32)
else:
feats = graphs.ndata[node_feat_loc]
graph_embeds = self.feat_extractor(graphs, feats)
return graph_embeds.cpu().detach().numpy()
    def evaluate(self, *args, **kwargs):
        raise NotImplementedError('Must be implemented by child class')
class MMDEvaluation(GINMetric):
def __init__(self, model, kernel='rbf', sigma='range', multiplier='mean'):
super().__init__(model)
if multiplier == 'mean':
self.__get_sigma_mult_factor = self.__mean_pairwise_distance
elif multiplier == 'median':
self.__get_sigma_mult_factor = self.__median_pairwise_distance
elif multiplier is None:
self.__get_sigma_mult_factor = lambda *args, **kwargs: 1
else:
raise Exception(multiplier)
if 'rbf' in kernel:
if sigma == 'range':
self.base_sigmas = np.array([0.01, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0])
if multiplier == 'mean':
self.name = 'mmd_rbf'
elif multiplier == 'median':
self.name = 'mmd_rbf_adaptive_median'
else:
self.name = 'mmd_rbf_adaptive'
elif sigma == 'one':
self.base_sigmas = np.array([1])
if multiplier == 'mean':
self.name = 'mmd_rbf_single_mean'
elif multiplier == 'median':
self.name = 'mmd_rbf_single_median'
else:
self.name = 'mmd_rbf_single'
else:
raise Exception(sigma)
self.evaluate = self.calculate_MMD_rbf_quadratic
elif 'linear' in kernel:
self.evaluate = self.calculate_MMD_linear_kernel
else:
raise Exception()
def __get_pairwise_distances(self, generated_dataset, reference_dataset):
return sklearn.metrics.pairwise_distances(reference_dataset, generated_dataset, metric='euclidean', n_jobs=8)**2
def __mean_pairwise_distance(self, dists_GR):
return np.sqrt(dists_GR.mean())
def __median_pairwise_distance(self, dists_GR):
return np.sqrt(np.median(dists_GR))
def get_sigmas(self, dists_GR):
mult_factor = self.__get_sigma_mult_factor(dists_GR)
return self.base_sigmas * mult_factor
@time_function
def calculate_MMD_rbf_quadratic(self, generated_dataset=None, reference_dataset=None):
# https://github.com/djsutherland/opt-mmd/blob/master/two_sample/mmd.py
if not isinstance(generated_dataset, torch.Tensor) and not isinstance(generated_dataset, np.ndarray):
(generated_dataset, reference_dataset), _ = self.get_activations(generated_dataset, reference_dataset)
GG = self.__get_pairwise_distances(generated_dataset, generated_dataset)
GR = self.__get_pairwise_distances(generated_dataset, reference_dataset)
RR = self.__get_pairwise_distances(reference_dataset, reference_dataset)
max_mmd = 0
sigmas = self.get_sigmas(GR)
for sigma in sigmas:
gamma = 1 / (2 * sigma**2)
K_GR = np.exp(-gamma * GR)
K_GG = np.exp(-gamma * GG)
K_RR = np.exp(-gamma * RR)
mmd = K_GG.mean() + K_RR.mean() - 2 * K_GR.mean()
            max_mmd = max(max_mmd, mmd)
return {self.name: max_mmd}
@time_function
def calculate_MMD_linear_kernel(self, generated_dataset=None, reference_dataset=None):
# https://github.com/djsutherland/opt-mmd/blob/master/two_sample/mmd.py
if not isinstance(generated_dataset, torch.Tensor) and not isinstance(generated_dataset, np.ndarray):
(generated_dataset, reference_dataset), _ = self.get_activations(generated_dataset, reference_dataset)
G_bar = generated_dataset.mean(axis=0)
R_bar = reference_dataset.mean(axis=0)
Z_bar = G_bar - R_bar
mmd = Z_bar.dot(Z_bar)
        mmd = max(mmd, 0)
return {'mmd_linear': mmd}
class prdcEvaluation(GINMetric):
# From PRDC github: https://github.com/clovaai/generative-evaluation-prdc/blob/master/prdc/prdc.py#L54
def __init__(self, *args, use_pr=False, **kwargs):
super().__init__(*args, **kwargs)
self.use_pr = use_pr
@time_function
def evaluate(self, generated_dataset=None, reference_dataset=None, nearest_k=5):
""" Computes precision, recall, density, and coverage given two manifolds. """
if not isinstance(generated_dataset, torch.Tensor) and not isinstance(generated_dataset, np.ndarray):
(generated_dataset, reference_dataset), _ = self.get_activations(generated_dataset, reference_dataset)
real_nearest_neighbour_distances = self.__compute_nearest_neighbour_distances(reference_dataset, nearest_k)
distance_real_fake = self.__compute_pairwise_distance(reference_dataset, generated_dataset)
if self.use_pr:
fake_nearest_neighbour_distances = self.__compute_nearest_neighbour_distances(generated_dataset, nearest_k)
precision = (
distance_real_fake <= np.expand_dims(real_nearest_neighbour_distances, axis=1)
).any(axis=0).mean()
recall = (
distance_real_fake <= np.expand_dims(fake_nearest_neighbour_distances, axis=0)
).any(axis=1).mean()
f1_pr = 2 / ((1 / (precision + 1e-8)) + (1 / (recall + 1e-8)))
result = dict(precision=precision, recall=recall, f1_pr=f1_pr)
else:
density = (1. / float(nearest_k)) * (
distance_real_fake <= np.expand_dims(real_nearest_neighbour_distances, axis=1)).sum(axis=0).mean()
coverage = (distance_real_fake.min(axis=1) <= real_nearest_neighbour_distances).mean()
f1_dc = 2 / ((1 / (density + 1e-8)) + (1 / (coverage + 1e-8)))
result = dict(density=density, coverage=coverage, f1_dc=f1_dc)
return result
def __compute_pairwise_distance(self, data_x, data_y=None):
"""
Args:
data_x: numpy.ndarray([N, feature_dim], dtype=np.float32)
data_y: numpy.ndarray([N, feature_dim], dtype=np.float32)
Return:
numpy.ndarray([N, N], dtype=np.float32) of pairwise distances.
"""
if data_y is None:
data_y = data_x
dists = sklearn.metrics.pairwise_distances(data_x, data_y, metric='euclidean', n_jobs=8)
return dists
def __get_kth_value(self, unsorted, k, axis=-1):
"""
Args:
unsorted: numpy.ndarray of any dimensionality.
k: int
Return:
kth values along the designated axis.
"""
indices = np.argpartition(unsorted, k, axis=axis)[..., :k]
k_smallest = np.take_along_axis(unsorted, indices, axis=axis)
kth_values = k_smallest.max(axis=axis)
return kth_values
def __compute_nearest_neighbour_distances(self, input_features, nearest_k):
"""
Args:
input_features: numpy.ndarray([N, feature_dim], dtype=np.float32)
nearest_k: int
Return:
Distances to kth nearest neighbours.
"""
distances = self.__compute_pairwise_distance(input_features)
radii = self.__get_kth_value(distances, k=nearest_k + 1, axis=-1)
return radii
def nn_based_eval(graph_ref_list, graph_pred_list, N_gin=10):
    # average each metric over N_gin independently initialized random GINs
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
evaluators = []
for _ in range(N_gin):
gin = load_feature_extractor(device)
evaluators.append(MMDEvaluation(model=gin, kernel='rbf', sigma='range', multiplier='mean'))
evaluators.append(prdcEvaluation(model=gin, use_pr=True))
evaluators.append(prdcEvaluation(model=gin, use_pr=False))
ref_graphs = [dgl.from_networkx(g).to(device) for g in graph_ref_list]
gen_graphs = [dgl.from_networkx(g).to(device) for g in graph_pred_list]
metrics = {
'mmd_rbf': [],
'f1_pr': [],
'f1_dc': []
}
    for evaluator in evaluators:
        # rename the timing value so it does not shadow the imported time module
        res, _elapsed = evaluator.evaluate(generated_dataset=gen_graphs, reference_dataset=ref_graphs)
        for key, value in res.items():
            if key in metrics:
                metrics[key].append(value)
results = {
'MMD_RBF': (np.mean(metrics['mmd_rbf']), np.std(metrics['mmd_rbf'])),
'F1_PR': (np.mean(metrics['f1_pr']), np.std(metrics['f1_pr'])),
'F1_DC': (np.mean(metrics['f1_dc']), np.std(metrics['f1_dc']))
}
return results
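
Below is a minimal sketch of invoking the random-GIN evaluation directly. The Erdős–Rényi graphs and the small N_gin are illustrative choices for a quick run, not values used by the project.

import networkx as nx

ref = [nx.erdos_renyi_graph(16, 0.3, seed=i) for i in range(10)]
pred = [nx.erdos_renyi_graph(16, 0.3, seed=100 + i) for i in range(10)]
results = nn_based_eval(ref, pred, N_gin=2)  # N_gin=2 keeps the smoke test fast
print(results)  # {'MMD_RBF': (mean, std), 'F1_PR': (mean, std), 'F1_DC': (mean, std)}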

View File: structure_evaluator.py

@@ -0,0 +1,209 @@
"""MMD Evaluation on graph structure statistics. Modified from https://github.com/uoguelph-mlrg/GGM-metrics"""
import numpy as np
import networkx as nx
# from scipy.linalg import toeplitz
# import pyemd
import concurrent.futures
from scipy.linalg import eigvalsh
from functools import partial
class Descriptor():
def __init__(self, is_parallel=False, bins=100, kernel='rbf', sigma_type='single', **kwargs):
self.is_parallel = is_parallel
self.bins = bins
self.max_workers = kwargs.get('max_workers')
        if kernel == 'rbf':
            self.distance = self.l2
            self.name += '_rbf'
        else:
            raise ValueError(f'Unsupported kernel: {kernel}')
        if sigma_type == 'argmax':
            log_sigmas = np.linspace(-5., 5., 50)
            # the first 30 sigma values are usually enough
            log_sigmas = log_sigmas[:30]
            self.sigmas = [np.exp(log_sigma) for log_sigma in log_sigmas]
        elif sigma_type == 'single':
            # keep sigmas iterable even when a single value is given
            self.sigmas = [kwargs['sigma']]
        else:
            raise ValueError(f'Unsupported sigma_type: {sigma_type}')
def evaluate(self, graph_ref_list, graph_pred_list):
"""Compute the distance between the distributions of two unordered sets of graphs.
Args:
graph_ref_list, graph_pred_list: two lists of networkx graphs to be evaluated.
"""
        graph_pred_list = [G for G in graph_pred_list if G.number_of_nodes() > 0]
sample_pred = self.extract_features(graph_pred_list)
sample_ref = self.extract_features(graph_ref_list)
GG = self.disc(sample_pred, sample_pred, distance_scaling=self.distance_scaling)
GR = self.disc(sample_pred, sample_ref, distance_scaling=self.distance_scaling)
RR = self.disc(sample_ref, sample_ref, distance_scaling=self.distance_scaling)
sigmas = self.sigmas
        max_mmd = 0
        mmd_list = []
        for sigma in sigmas:
            gamma = 1 / (2 * sigma ** 2)
            K_GR = np.exp(-gamma * GR)
            K_GG = np.exp(-gamma * GG)
            K_RR = np.exp(-gamma * RR)
            # biased quadratic-time MMD^2 estimate; keep the max over sigmas
            mmd = K_GG.mean() + K_RR.mean() - (2 * K_GR.mean())
            mmd_list.append((sigma, mmd))
            max_mmd = max(max_mmd, mmd)
        # print(self.name, mmd_list)
        return max_mmd
    def pad_histogram(self, x, y):
        # convert histogram values x and y to float, and pad them to equal length
        support_size = max(len(x), len(y))
        # np.float was removed in numpy 1.24; use the explicit dtype
        x = x.astype(np.float64)
        y = y.astype(np.float64)
if len(x) < len(y):
x = np.hstack((x, [0.] * (support_size - len(x))))
elif len(y) < len(x):
y = np.hstack((y, [0.] * (support_size - len(y))))
return x, y
# def emd(self, x, y, distance_scaling=1.0):
# support_size = max(len(x), len(y))
# x, y = self.pad_histogram(x, y)
#
# d_mat = toeplitz(range(support_size)).astype(np.float)
# distance_mat = d_mat / distance_scaling
#
# dist = pyemd.emd(x, y, distance_mat)
# return dist ** 2
def l2(self, x, y, **kwargs):
# gaussian rbf
x, y = self.pad_histogram(x, y)
dist = np.linalg.norm(x - y, 2)
return dist ** 2
def kernel_parallel_unpacked(self, x, samples2, kernel):
dist = []
for s2 in samples2:
dist += [kernel(x, s2)]
return dist
def kernel_parallel_worker(self, t):
return self.kernel_parallel_unpacked(*t)
def disc(self, samples1, samples2, **kwargs):
# Discrepancy between 2 samples
tot_dist = []
if not self.is_parallel:
for s1 in samples1:
for s2 in samples2:
tot_dist += [self.distance(s1, s2)]
else:
with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_workers) as executor:
for dist in executor.map(self.kernel_parallel_worker,
[(s1, samples2, partial(self.distance, **kwargs)) for s1 in samples1]):
tot_dist += [dist]
return np.array(tot_dist)
class degree(Descriptor):
def __init__(self, *args, **kwargs):
self.name = 'degree'
self.sigmas = [kwargs.get('sigma', 1.0)]
self.distance_scaling = 1.0
super().__init__(*args, **kwargs)
def extract_features(self, dataset):
res = []
if self.is_parallel:
with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_workers) as executor:
for deg_hist in executor.map(self.degree_worker, dataset):
res.append(deg_hist)
else:
for g in dataset:
degree_hist = self.degree_worker(g)
res.append(degree_hist)
res = [s1 / np.sum(s1) for s1 in res]
return res
def degree_worker(self, G):
return np.array(nx.degree_histogram(G))
class cluster(Descriptor):
def __init__(self, *args, **kwargs):
self.name = 'cluster'
        self.sigmas = [kwargs.get('sigma', 0.1)]
super().__init__(*args, **kwargs)
self.distance_scaling = self.bins
def extract_features(self, dataset):
res = []
if self.is_parallel:
with concurrent.futures.ProcessPoolExecutor(max_workers=self.max_workers) as executor:
for clustering_hist in executor.map(self.clustering_worker, [(G, self.bins) for G in dataset]):
res.append(clustering_hist)
else:
for g in dataset:
clustering_hist = self.clustering_worker((g, self.bins))
res.append(clustering_hist)
res = [s1 / np.sum(s1) for s1 in res]
return res
def clustering_worker(self, param):
G, bins = param
clustering_coeffs_list = list(nx.clustering(G).values())
hist, _ = np.histogram(
clustering_coeffs_list, bins=bins, range=(0.0, 1.0), density=False)
return hist
class spectral(Descriptor):
def __init__(self, *args, **kwargs):
self.name = 'spectral'
self.sigmas = [kwargs.get('sigma', 1.0)]
self.distance_scaling = 1
super().__init__(*args, **kwargs)
def extract_features(self, dataset):
res = []
if self.is_parallel:
with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
for spectral_density in executor.map(self.spectral_worker, dataset):
res.append(spectral_density)
else:
for g in dataset:
spectral_temp = self.spectral_worker(g)
res.append(spectral_temp)
return res
def spectral_worker(self, G):
eigs = eigvalsh(nx.normalized_laplacian_matrix(G).todense())
spectral_pmf, _ = np.histogram(eigs, bins=200, range=(-1e-5, 2), density=False)
spectral_pmf = spectral_pmf / spectral_pmf.sum()
return spectral_pmf
def mmd_eval(graph_ref_list, graph_pred_list, methods):
    evaluators = []
    for (method, sigma, sigma_type) in methods:
        # look up the descriptor class (degree / cluster / spectral) by its name
        evaluators.append(eval(method)(sigma=sigma, sigma_type=sigma_type))
results = {}
for evaluator in evaluators:
results[evaluator.name] = evaluator.evaluate(graph_ref_list, graph_pred_list)
return results
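
Below is a minimal sketch of running the structure-statistic MMDs on their own. The Barabási–Albert graphs are illustrative; the method triples mirror the ones built by get_stats_eval above.

import networkx as nx

ref = [nx.barabasi_albert_graph(20, 2, seed=i) for i in range(10)]
pred = [nx.barabasi_albert_graph(20, 2, seed=50 + i) for i in range(10)]
methods = [('degree', 1., 'argmax'), ('cluster', 0.1, 'argmax'), ('spectral', 1., 'argmax')]
print(mmd_eval(ref, pred, methods))
# e.g. {'degree_rbf': ..., 'cluster_rbf': ..., 'spectral_rbf': ...}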