Source code for greatx.nn.models.supervised.elastic_gnn

from typing import List

import torch.nn as nn

from greatx.nn.layers import ElasticConv, Sequential, activations
from greatx.utils import wrapper


class ElasticGNN(nn.Module):
    r"""Graph Neural Networks with elastic message passing (ElasticGNN)
    from the `"Elastic Graph Neural Networks"
    <https://arxiv.org/abs/2107.06996>`_ paper (ICML'21)

    Parameters
    ----------
    in_channels : int,
        the input dimensions of model
    out_channels : int,
        the output dimensions of model
    hids : List[int], optional
        the number of hidden units for each hidden layer, by default [16]
    acts : List[str], optional
        the activation function for each hidden layer, by default ['relu']
    K : int, optional
        the number of propagation steps during message passing, by default 3
    lambda1 : float, optional
        trade-off hyperparameter, by default 3
    lambda2 : float, optional
        trade-off hyperparameter, by default 3
    L21 : bool, optional
        whether to use row-wise projection
        on the l2 ball of radius λ1, by default True
    cached : bool, optional
        whether to cache the incident matrix, by default True
    dropout : float, optional
        the dropout ratio of model, by default 0.8
    bias : bool, optional
        whether to use bias in the layers, by default True
    bn: bool, optional
        whether to use :class:`BatchNorm1d` after the convolution layer,
        by default False

    Examples
    --------
    >>> # ElasticGNN with one hidden layer
    >>> model = ElasticGNN(100, 10)

    >>> # ElasticGNN with two hidden layers
    >>> model = ElasticGNN(100, 10, hids=[32, 16], acts=['relu', 'elu'])

    >>> # ElasticGNN with two hidden layers, without first activation
    >>> model = ElasticGNN(100, 10, hids=[32, 16], acts=[None, 'relu'])

    >>> # ElasticGNN with deep architectures, each layer has elu activation
    >>> model = ElasticGNN(100, 10, hids=[16]*8, acts=['elu'])

    See also
    --------
    :class:`greatx.nn.layers.ElasticConv`
    """
    @wrapper
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 hids: List[int] = [16],
                 acts: List[str] = ['relu'],
                 K: int = 3,
                 lambda1: float = 3,
                 lambda2: float = 3,
                 cached: bool = True,
                 dropout: float = 0.8,
                 bias: bool = True,
                 bn: bool = False):
        super().__init__()

        lin = []
        for hid, act in zip(hids, acts):
            lin.append(nn.Dropout(dropout))
            lin.append(nn.Linear(in_channels, hid, bias=bias))
            if bn:
                lin.append(nn.BatchNorm1d(hid))
            lin.append(activations.get(act))
            in_channels = hid

        lin.append(nn.Dropout(dropout))
        lin.append(nn.Linear(in_channels, out_channels, bias=bias))

        self.prop = ElasticConv(K=K,
                                lambda1=lambda1,
                                lambda2=lambda2,
                                L21=True,
                                cached=cached)

        self.lin = Sequential(*lin)

    def reset_parameters(self):
        self.prop.reset_parameters()
        self.lin.reset_parameters()

    def cache_clear(self):
        """Clear cached inputs or intermediate results."""
        self.prop._cached_inc = None
        return self

    def forward(self, x, edge_index, edge_weight=None):
        """"""
        x = self.lin(x)
        return self.prop(x, edge_index, edge_weight)
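

if __name__ == "__main__":
    # Usage sketch, not part of the original module: a minimal, hedged example
    # of running ElasticGNN end to end. It assumes a PyG-style COO
    # ``edge_index`` tensor; the node features and edges below are made up
    # purely for illustration.
    import torch

    x = torch.randn(4, 100)                    # 4 nodes with 100 features each
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 2, 3, 0]])  # directed edges in COO format

    model = ElasticGNN(in_channels=100, out_channels=10)
    model.eval()                               # disable dropout for inference
    out = model(x, edge_index)                 # MLP transform, then elastic propagation
    print(out.shape)                           # expected: torch.Size([4, 10])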