
Commit

Merge branch 'develop' of https://github.com/ispras/GNN-AID into fix_split
LukyanovKirillML committed Oct 30, 2024
2 parents 8951a57 + 4f09088 commit c3b7399
Showing 10 changed files with 868 additions and 24 deletions.
197 changes: 196 additions & 1 deletion experiments/attack_defense_test.py
@@ -778,11 +778,206 @@ def test_adv_training():
Metric("Accuracy", mask='test')])
print(metric_loc)

def test_pgd():
    # ______________________ Attack on node ______________________
    my_device = device('cpu')

    # Load dataset
    full_name = ("single-graph", "Planetoid", 'Cora')
    dataset, data, results_dataset_path = DatasetManager.get_by_full_name(
        full_name=full_name,
        dataset_ver_ind=0
    )

    gcn_gcn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn')

    manager_config = ConfigPattern(
        _config_class="ModelManagerConfig",
        _config_kwargs={
            "mask_features": [],
            "optimizer": {
                "_class_name": "Adam",
                "_config_kwargs": {},
            }
        }
    )

    gnn_model_manager = FrameworkGNNModelManager(
        gnn=gcn_gcn,
        dataset_path=results_dataset_path,
        manager_config=manager_config,
        modification=ModelModificationConfig(model_ver_ind=0, epochs=0)
    )
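    # Note (assumption): model_ver_ind=0 with epochs=0 appears to register a fresh,
    # untrained model version; the train_model call below then trains it for num_steps steps.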

    gnn_model_manager.gnn.to(my_device)

    num_steps = 200
    gnn_model_manager.train_model(gen_dataset=dataset,
                                  steps=num_steps,
                                  save_model_flag=False)

    acc_test = gnn_model_manager.evaluate_model(gen_dataset=dataset,
                                                metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']
    print(f"Accuracy on test: {acc_test}")

    # Node for attack
    node_idx = 650

    # Model prediction on a node before PGD attack on it
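    # Assumption: the model's forward returns log-probabilities (log_softmax),
    # so torch.exp recovers class probabilities.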
    gnn_model_manager.gnn.eval()
    with torch.no_grad():
        probabilities = torch.exp(gnn_model_manager.gnn(dataset.data.x, dataset.data.edge_index))

    predicted_class = probabilities[node_idx].argmax().item()
    predicted_probability = probabilities[node_idx][predicted_class].item()
    real_class = dataset.data.y[node_idx].item()

    info_before_pgd_attack_on_node = {"node_idx": node_idx,
                                      "predicted_class": predicted_class,
                                      "predicted_probability": predicted_probability,
                                      "real_class": real_class}

    # Attack config
    evasion_attack_config = ConfigPattern(
        _class_name="PGD",
        _import_path=EVASION_ATTACK_PARAMETERS_PATH,
        _config_class="EvasionAttackConfig",
        _config_kwargs={
            "is_feature_attack": True,
            "element_idx": node_idx,
            "epsilon": 0.1,
            "learning_rate": 0.001,
            "num_iterations": 500,
            "num_rand_trials": 100
        }
    )

    gnn_model_manager.set_evasion_attacker(evasion_attack_config=evasion_attack_config)
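    # Note (assumption): evaluate_model below appears to trigger the configured
    # evasion attack; the attacker then exposes the perturbed data via attack_diff,
    # which is used for the post-attack prediction.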

    # Attack
    _ = gnn_model_manager.evaluate_model(gen_dataset=dataset,
                                         metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']

    # Model prediction on a node after PGD attack on it
    with torch.no_grad():
        probabilities = torch.exp(gnn_model_manager.gnn(gnn_model_manager.evasion_attacker.attack_diff.data.x,
                                                        gnn_model_manager.evasion_attacker.attack_diff.data.edge_index))

    predicted_class = probabilities[node_idx].argmax().item()
    predicted_probability = probabilities[node_idx][predicted_class].item()
    real_class = dataset.data.y[node_idx].item()

    info_after_pgd_attack_on_node = {"node_idx": node_idx,
                                     "predicted_class": predicted_class,
                                     "predicted_probability": predicted_probability,
                                     "real_class": real_class}
    # ____________________________________________________________

    # ______________________ Attack on graph _____________________
    # Load dataset
    full_name = ("multiple-graphs", "TUDataset", 'MUTAG')
    dataset, data, results_dataset_path = DatasetManager.get_by_full_name(
        full_name=full_name,
        dataset_ver_ind=0
    )

    model = model_configs_zoo(dataset=dataset, model_name='gin_gin_gin_lin_lin_con')

    manager_config = ConfigPattern(
        _config_class="ModelManagerConfig",
        _config_kwargs={
            "mask_features": [],
            "optimizer": {
                "_class_name": "Adam",
                "_config_kwargs": {},
            }
        }
    )

    gnn_model_manager = FrameworkGNNModelManager(
        gnn=model,
        dataset_path=results_dataset_path,
        manager_config=manager_config,
        modification=ModelModificationConfig(model_ver_ind=0, epochs=0)
    )

    gnn_model_manager.gnn.to(my_device)

    num_steps = 200
    gnn_model_manager.train_model(gen_dataset=dataset,
                                  steps=num_steps,
                                  save_model_flag=False)

    acc_test = gnn_model_manager.evaluate_model(gen_dataset=dataset,
                                                metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']
    print(f"Accuracy on test: {acc_test}")

    # Graph for attack
    graph_idx = 0

    # Model prediction on a graph before PGD attack on it
    gnn_model_manager.gnn.eval()
    with torch.no_grad():
        probabilities = torch.exp(gnn_model_manager.gnn(dataset.dataset[graph_idx].x,
                                                        dataset.dataset[graph_idx].edge_index))

    predicted_class = probabilities.argmax().item()
    predicted_probability = probabilities[0][predicted_class].item()
    real_class = dataset.dataset[graph_idx].y.item()

    info_before_pgd_attack_on_graph = {"graph_idx": graph_idx,
                                       "predicted_class": predicted_class,
                                       "predicted_probability": predicted_probability,
                                       "real_class": real_class}

    # Attack config
    evasion_attack_config = ConfigPattern(
        _class_name="PGD",
        _import_path=EVASION_ATTACK_PARAMETERS_PATH,
        _config_class="EvasionAttackConfig",
        _config_kwargs={
            "is_feature_attack": True,
            "element_idx": graph_idx,
            "epsilon": 0.1,
            "learning_rate": 0.001,
            "num_iterations": 500,
            "num_rand_trials": 100
        }
    )

    gnn_model_manager.set_evasion_attacker(evasion_attack_config=evasion_attack_config)

    # Attack
    _ = gnn_model_manager.evaluate_model(gen_dataset=dataset,
                                         metrics=[Metric("Accuracy", mask='test')])['test']['Accuracy']

    # Model prediction on a graph after PGD attack on it
    with torch.no_grad():
        probabilities = torch.exp(gnn_model_manager.gnn(gnn_model_manager.evasion_attacker.attack_diff.dataset[graph_idx].x,
                                                        gnn_model_manager.evasion_attacker.attack_diff.dataset[graph_idx].edge_index))

    predicted_class = probabilities.argmax().item()
    predicted_probability = probabilities[0][predicted_class].item()
    real_class = dataset.dataset[graph_idx].y.item()

    info_after_pgd_attack_on_graph = {"graph_idx": graph_idx,
                                      "predicted_class": predicted_class,
                                      "predicted_probability": predicted_probability,
                                      "real_class": real_class}

    # ____________________________________________________________
    print(f"Before PGD attack on node (Cora dataset): {info_before_pgd_attack_on_node}")
    print(f"After PGD attack on node (Cora dataset): {info_after_pgd_attack_on_node}")
    print(f"Before PGD attack on graph (MUTAG dataset): {info_before_pgd_attack_on_graph}")
    print(f"After PGD attack on graph (MUTAG dataset): {info_after_pgd_attack_on_graph}")


if __name__ == '__main__':
    import random
    random.seed(10)
    # torch.manual_seed(5000)
    # test_gnnguard()
    # test_jaccard()
    test_attack_defense()
    test_pgd()
119 changes: 119 additions & 0 deletions experiments/interpretation_metrics_test.py
@@ -0,0 +1,119 @@
import random
import warnings

import torch

from aux.custom_decorators import timing_decorator
from aux.utils import EXPLAINERS_LOCAL_RUN_PARAMETERS_PATH, EXPLAINERS_INIT_PARAMETERS_PATH
from explainers.explainers_manager import FrameworkExplainersManager
from models_builder.gnn_models import FrameworkGNNModelManager, Metric
from src.aux.configs import ModelModificationConfig, ConfigPattern
from src.base.datasets_processing import DatasetManager
from src.models_builder.models_zoo import model_configs_zoo


@timing_decorator
def run_interpretation_test():
    full_name = ("single-graph", "Planetoid", 'Cora')
    steps_epochs = 10
    save_model_flag = False
    my_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    dataset, data, results_dataset_path = DatasetManager.get_by_full_name(
        full_name=full_name,
        dataset_ver_ind=0
    )
    gnn = model_configs_zoo(dataset=dataset, model_name='gcn_gcn')
    manager_config = ConfigPattern(
        _config_class="ModelManagerConfig",
        _config_kwargs={
            "mask_features": [],
            "optimizer": {
                # "_config_class": "Config",
                "_class_name": "Adam",
                # "_import_path": OPTIMIZERS_PARAMETERS_PATH,
                # "_class_import_info": ["torch.optim"],
                "_config_kwargs": {},
            }
        }
    )
    gnn_model_manager = FrameworkGNNModelManager(
        gnn=gnn,
        dataset_path=results_dataset_path,
        manager_config=manager_config,
        modification=ModelModificationConfig(model_ver_ind=0, epochs=steps_epochs)
    )
    gnn_model_manager.gnn.to(my_device)
    data.x = data.x.float()
    data = data.to(my_device)

    warnings.warn("Start training")
    try:
        raise FileNotFoundError()
    except FileNotFoundError:
        gnn_model_manager.epochs = gnn_model_manager.modification.epochs = 0
        train_test_split_path = gnn_model_manager.train_model(gen_dataset=dataset, steps=steps_epochs,
                                                              save_model_flag=save_model_flag,
                                                              metrics=[Metric("F1", mask='train', average=None)])

        if train_test_split_path is not None:
            dataset.save_train_test_mask(train_test_split_path)
            train_mask, val_mask, test_mask, train_test_sizes = torch.load(
                train_test_split_path / 'train_test_split')[:]
            dataset.train_mask, dataset.val_mask, dataset.test_mask = train_mask, val_mask, test_mask
            data.percent_train_class, data.percent_test_class = train_test_sizes
            warnings.warn("Training was successful")

    metric_loc = gnn_model_manager.evaluate_model(
        gen_dataset=dataset, metrics=[Metric("F1", mask='test', average='macro')])
    print(metric_loc)

    explainer_init_config = ConfigPattern(
        _class_name="GNNExplainer(torch-geom)",
        _import_path=EXPLAINERS_INIT_PARAMETERS_PATH,
        _config_class="ExplainerInitConfig",
        _config_kwargs={
            "epochs": 10
        }
    )
    explainer_metrics_run_config = ConfigPattern(
        _config_class="ExplainerRunConfig",
        _config_kwargs={
            "mode": "local",
            "kwargs": {
                "_class_name": "GNNExplainer(torch-geom)",
                "_import_path": EXPLAINERS_LOCAL_RUN_PARAMETERS_PATH,
                "_config_class": "Config",
                "_config_kwargs": {
                    "stability_graph_perturbations_nums": 10,
                    "stability_feature_change_percent": 0.05,
                    "stability_node_removal_percent": 0.05,
                    "consistency_num_explanation_runs": 10
                },
            }
        }
    )
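    # Note (assumption): the stability_* kwargs appear to control perturbation-based
    # stability checks (number of graph perturbations, share of features changed,
    # share of nodes removed), while consistency_num_explanation_runs repeats the
    # explanation to measure agreement across runs.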

    explainer_GNNExpl = FrameworkExplainersManager(
        init_config=explainer_init_config,
        dataset=dataset, gnn_manager=gnn_model_manager,
        explainer_name='GNNExplainer(torch-geom)',
    )

    num_explaining_nodes = 10
    node_indices = random.sample(range(dataset.data.x.shape[0]), num_explaining_nodes)

    # explainer_GNNExpl.explainer.pbar = ProgressBar(socket, "er", desc=f'{explainer_GNNExpl.explainer.name} explaining')
    # explanation_metric = NodesExplainerMetric(
    #     model=explainer_GNNExpl.gnn,
    #     graph=explainer_GNNExpl.gen_dataset.data,
    #     explainer=explainer_GNNExpl.explainer
    # )
    # res = explanation_metric.evaluate(node_indices)
    explanation_metrics = explainer_GNNExpl.evaluate_metrics(node_indices, explainer_metrics_run_config)
    print(explanation_metrics)


if __name__ == '__main__':
    random.seed(11)
    run_interpretation_test()
9 changes: 8 additions & 1 deletion metainfo/evasion_attack_parameters.json
@@ -26,5 +26,12 @@
"generations" : ["Generations", "int", 50, {"min": 0, "step": 1}, "Number of generations for genetic algorithm"],
"prob_cross": ["Probability for crossover", "float", 0.5, {"min": 0, "max": 1, "step": 0.01}, "Probability of crossover between two genes"],
"prob_mutate": ["Probability for mutation", "float", 0.02, {"min": 0, "max": 1, "step": 0.01}, "Probability of gene mutation"]
},
"PGD": {
"epsilon": ["Epsilon", "float", 0.1, {"min": 0, "max": 1, "step": 0.01}, "Epsilon"],
"learning_rate": ["Learning rate", "float", 0.01, {}, "Learning rate for adjacency matrix optimization"],
"num_iterations": ["Number of iterations", "int", 100, {"min": 1, "step": 1}, "Number of iterations of gradient descent"],
"num_rand_trials": ["Number of random trials", "int", 100, {"min": 1, "step": 1}, "number of random trials in Random Sampling Algorithm"]
}
}
}
