Source code for greatx.nn.models.supervised.soft_median_gcn
from typing import List

import torch.nn as nn

from greatx.nn.layers import Sequential, SoftMedianConv, activations
from greatx.utils import wrapper
class SoftMedianGCN(nn.Module):
    r"""Graph Convolution Network (GCN) with
    soft median aggregation (SoftMedianGCN)
    from the `"Robustness of Graph Neural Networks
    at Scale" <https://arxiv.org/abs/2110.14038>`_ paper
    (NeurIPS'21)

    Parameters
    ----------
    in_channels : int
        the input dimensions of the model
    out_channels : int
        the output dimensions of the model
    hids : List[int], optional
        the number of hidden units for each hidden layer,
        by default [16]
    acts : List[str], optional
        the activation function for each hidden layer,
        by default ['relu']
    dropout : float, optional
        the dropout ratio of the model, by default 0.5
    bias : bool, optional
        whether to use bias in the layers, by default True
    normalize : bool, optional
        whether to compute symmetric normalization
        coefficients on the fly, by default False
    row_normalize : bool, optional
        whether to perform row-normalization on the fly,
        by default False
    cached : bool, optional
        whether the layer will cache the computation of
        :math:`(\mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
        \mathbf{\hat{D}}^{-1/2})` and the sorted edges
        on first execution, and will use the cached version
        for further executions, by default True
    bn : bool, optional
        whether to use :class:`BatchNorm1d` after the convolution layer,
        by default False

    Examples
    --------
    >>> # SoftMedianGCN with one hidden layer
    >>> model = SoftMedianGCN(100, 10)

    >>> # SoftMedianGCN with two hidden layers
    >>> model = SoftMedianGCN(100, 10, hids=[32, 16], acts=['relu', 'elu'])

    >>> # SoftMedianGCN with two hidden layers, without first activation
    >>> model = SoftMedianGCN(100, 10, hids=[32, 16], acts=[None, 'relu'])

    >>> # SoftMedianGCN with deep architectures, each layer has elu activation
    >>> model = SoftMedianGCN(100, 10, hids=[16]*8, acts=['elu'])

    See also
    --------
    :class:`greatx.nn.layers.SoftMedianConv`
    """
    @wrapper
    def __init__(self, in_channels: int, out_channels: int,
                 hids: List[int] = [16], acts: List[str] = ['relu'],
                 dropout: float = 0.5, bias: bool = True,
                 normalize: bool = False, row_normalize: bool = False,
                 cached: bool = True, bn: bool = False):
        super().__init__()

        conv = []
        assert len(hids) == len(acts)
        for hid, act in zip(hids, acts):
            conv.append(
                SoftMedianConv(in_channels, hid, bias=bias,
                               normalize=normalize,
                               row_normalize=row_normalize, cached=cached))
            if bn:
                conv.append(nn.BatchNorm1d(hid))
            conv.append(activations.get(act))
            conv.append(nn.Dropout(dropout))
            in_channels = hid
        conv.append(
            SoftMedianConv(in_channels, out_channels, bias=bias,
                           normalize=normalize, row_normalize=row_normalize,
                           cached=cached))
        self.conv = Sequential(*conv)
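        # Schematic note (illustrative, derived from the loop above): with
        # the defaults (hids=[16], acts=['relu'], bn=False), the stack is
        # SoftMedianConv(in, 16) -> ReLU -> Dropout(0.5) -> SoftMedianConv(16, out)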

    def cache_clear(self):
        """Clear cached inputs or intermediate results."""
        for conv in self.conv:
            if hasattr(conv, '_cached_edges'):
                conv._cached_edges = None
        return self
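
    # Usage note (a hedged sketch, not part of the class): with the default
    # ``cached=True`` the convolutions reuse the edges sorted on the first
    # forward pass, so the cache should be invalidated whenever the graph
    # structure changes::
    #
    #     model(x, edge_index)            # first call sorts and caches edges
    #     model.cache_clear()             # drop the stale cache
    #     model(x, perturbed_edge_index)  # recomputed for the new graph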

    def forward(self, x, edge_index, edge_weight=None):
        """"""
        return self.conv(x, edge_index, edge_weight)
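

if __name__ == '__main__':
    # A minimal end-to-end sketch (illustrative only, not shipped with
    # greatx): builds a toy graph and runs one forward pass. It assumes the
    # optional custom kernels required by ``SoftMedianConv`` are installed.
    import torch

    model = SoftMedianGCN(100, 10)
    x = torch.randn(5, 100)  # 5 nodes with 100-dimensional features
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 2, 3, 4]])  # 4 directed edges
    out = model(x, edge_index)
    print(out.shape)  # torch.Size([5, 10])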