Network Modules

Decoder

Bases: Module

Decoder class for a Variational Autoencoder (VAE).

The Decoder class is responsible for taking the latent space representation and generating the reconstructed output data.

Source code in flexynesis/modules.py (lines 60-102)
class Decoder(nn.Module):
    """
    Decoder class for a Variational Autoencoder (VAE).

    The Decoder class is responsible for taking the latent space representation and
    generating the reconstructed output data.
    """
    def __init__(self, latent_dim, hidden_dims, output_dim):
        super(Decoder, self).__init__()

        self.act = nn.LeakyReLU(0.2)

        hidden_layers = []

        hidden_layers.append(nn.Linear(latent_dim, hidden_dims[0]))
        nn.init.xavier_uniform_(hidden_layers[-1].weight)
        hidden_layers.append(self.act)
        hidden_layers.append(nn.BatchNorm1d(hidden_dims[0]))

        for i in range(len(hidden_dims) - 1):
            hidden_layers.append(nn.Linear(hidden_dims[i], hidden_dims[i + 1]))
            nn.init.xavier_uniform_(hidden_layers[-1].weight)
            hidden_layers.append(self.act)
            hidden_layers.append(nn.BatchNorm1d(hidden_dims[i+1]))

        self.hidden_layers = nn.Sequential(*hidden_layers)

        self.FC_output = nn.Linear(hidden_dims[-1], output_dim)
        nn.init.xavier_uniform_(self.FC_output.weight)

    def forward(self, x):
        """
        Performs a forward pass through the Decoder network.

        Args:
            x (torch.Tensor): The input tensor representing the latent space.

        Returns:
            x_hat (torch.Tensor): The reconstructed output tensor.
        """
        h = self.hidden_layers(x)
        x_hat = torch.sigmoid(self.FC_output(h))
        return x_hat

forward(x)

Performs a forward pass through the Decoder network.

Parameters:
  • x (Tensor) –

    The input tensor representing the latent space.

Returns:
  • x_hat (Tensor) –

    The reconstructed output tensor.

Source code in flexynesis/modules.py (lines 90-102)
def forward(self, x):
    """
    Performs a forward pass through the Decoder network.

    Args:
        x (torch.Tensor): The input tensor representing the latent space.

    Returns:
        x_hat (torch.Tensor): The reconstructed output tensor.
    """
    h = self.hidden_layers(x)
    x_hat = torch.sigmoid(self.FC_output(h))
    return x_hat
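
A minimal usage sketch (illustrative only, not part of the flexynesis source): the dimensions below are assumptions, and eval mode is used so the BatchNorm layers rely on running statistics rather than batch statistics.

import torch

# Illustrative dimensions only; flexynesis configures these elsewhere.
latent_dim, hidden_dims, output_dim = 16, [64, 32], 100

decoder = Decoder(latent_dim, hidden_dims, output_dim)
decoder.eval()                      # eval mode so BatchNorm1d uses running statistics

z = torch.randn(8, latent_dim)      # a batch of 8 latent vectors
x_hat = decoder(z)                  # reconstructions in (0, 1) thanks to the final sigmoid
print(x_hat.shape)                  # torch.Size([8, 100])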

Encoder

Bases: Module

Encoder class for a Variational Autoencoder (VAE).

The Encoder class is responsible for taking input data and generating the mean and log variance for the latent space representation.

Source code in flexynesis/modules.py (lines 11-57)
class Encoder(nn.Module):
    """
    Encoder class for a Variational Autoencoder (VAE).

    The Encoder class is responsible for taking input data and generating the mean and
    log variance for the latent space representation.
    """
    def __init__(self, input_dim, hidden_dims, latent_dim):
        super(Encoder, self).__init__()

        self.act = nn.LeakyReLU(0.2)

        hidden_layers = []

        hidden_layers.append(nn.Linear(input_dim, hidden_dims[0]))
        nn.init.xavier_uniform_(hidden_layers[-1].weight)
        hidden_layers.append(self.act)
        hidden_layers.append(nn.BatchNorm1d(hidden_dims[0]))

        for i in range(len(hidden_dims)-1):
            hidden_layers.append(nn.Linear(hidden_dims[i], hidden_dims[i+1]))
            nn.init.xavier_uniform_(hidden_layers[-1].weight)
            hidden_layers.append(self.act)
            hidden_layers.append(nn.BatchNorm1d(hidden_dims[i+1]))

        self.hidden_layers = nn.Sequential(*hidden_layers)

        self.FC_mean  = nn.Linear(hidden_dims[-1], latent_dim)
        nn.init.xavier_uniform_(self.FC_mean.weight)
        self.FC_var   = nn.Linear(hidden_dims[-1], latent_dim)
        nn.init.xavier_uniform_(self.FC_var.weight)

    def forward(self, x):
        """
        Performs a forward pass through the Encoder network.

        Args:
            x (torch.Tensor): The input data tensor.

        Returns:
            mean (torch.Tensor): The mean of the latent space representation.
            log_var (torch.Tensor): The log variance of the latent space representation.
        """
        h_       = self.hidden_layers(x)
        mean     = self.FC_mean(h_)
        log_var  = self.FC_var(h_)
        return mean, log_var

forward(x)

Performs a forward pass through the Encoder network.

Parameters:
  • x (Tensor) –

    The input data tensor.

Returns:
  • mean (Tensor) –

    The mean of the latent space representation.

  • log_var (Tensor) –

    The log variance of the latent space representation.

Source code in flexynesis/modules.py (lines 43-57)
def forward(self, x):
    """
    Performs a forward pass through the Encoder network.

    Args:
        x (torch.Tensor): The input data tensor.

    Returns:
        mean (torch.Tensor): The mean of the latent space representation.
        log_var (torch.Tensor): The log variance of the latent space representation.
    """
    h_       = self.hidden_layers(x)
    mean     = self.FC_mean(h_)
    log_var  = self.FC_var(h_)
    return mean, log_var
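
A hedged sketch of how the Encoder could be paired with the Decoder above via the reparameterization trick; the dimensions and the mirrored hidden_dims are assumptions for illustration, not the wiring flexynesis itself performs.

import torch

# Assumed, illustrative dimensions.
input_dim, hidden_dims, latent_dim = 100, [64, 32], 16

encoder = Encoder(input_dim, hidden_dims, latent_dim)
decoder = Decoder(latent_dim, hidden_dims[::-1], input_dim)
encoder.eval(); decoder.eval()

x = torch.rand(8, input_dim)                 # a batch of 8 samples
mean, log_var = encoder(x)                   # each of shape (8, latent_dim)

# Reparameterization trick: z = mean + std * eps, with eps ~ N(0, 1)
std = torch.exp(0.5 * log_var)
z = mean + std * torch.randn_like(std)

x_hat = decoder(z)                           # reconstruction, shape (8, input_dim)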

MLP

Bases: Module

A Multi-Layer Perceptron (MLP) model for regression or classification tasks.

The MLP class is a simple feed-forward neural network that can be used for regression when output_dim is set to 1 or for classification when output_dim is greater than 1.

Source code in flexynesis/modules.py (lines 105-143)
class MLP(nn.Module):
    """
    A Multi-Layer Perceptron (MLP) model for regression or classification tasks.

    The MLP class is a simple feed-forward neural network that can be used for regression
    when `output_dim` is set to 1 or for classification when `output_dim` is greater than 1.
    """
    def __init__(self, input_dim, hidden_dim, output_dim):
        """
        Initializes the MLP class with the given input dimension, output dimension, and hidden layer size.

        Args:
            input_dim (int): The input dimension.
            hidden_dim (int, optional): The size of the hidden layer. Default is 32.
            output_dim (int): The output dimension. Set to 1 for regression tasks, and > 1 for classification tasks.
        """
        super(MLP, self).__init__()
        self.layer_1 = nn.Linear(input_dim, hidden_dim)
        self.layer_out = nn.Linear(hidden_dim, output_dim) if output_dim > 1 else nn.Linear(hidden_dim, 1, bias=False)
        self.relu = nn.ReLU() 
        self.dropout = nn.Dropout(p=0.1)
        self.batchnorm = nn.BatchNorm1d(hidden_dim)

    def forward(self, x):
        """
        Performs a forward pass through the MLP network.

        Args:
            x (torch.Tensor): The input data tensor.

        Returns:
            x (torch.Tensor): The output tensor after passing through the MLP network.
        """
        x = self.layer_1(x)
        x = self.batchnorm(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.layer_out(x)
        return x

__init__(input_dim, hidden_dim, output_dim)

Initializes the MLP class with the given input dimension, output dimension, and hidden layer size.

Parameters:
  • input_dim (int) –

    The input dimension.

  • hidden_dim (int) –

    The size of the hidden layer.

  • output_dim (int) –

    The output dimension. Set to 1 for regression tasks, and > 1 for classification tasks.

Source code in flexynesis/modules.py (lines 112-126)
def __init__(self, input_dim, hidden_dim, output_dim):
    """
    Initializes the MLP class with the given input dimension, output dimension, and hidden layer size.

    Args:
        input_dim (int): The input dimension.
        hidden_dim (int, optional): The size of the hidden layer. Default is 32.
        output_dim (int): The output dimension. Set to 1 for regression tasks, and > 1 for classification tasks.
    """
    super(MLP, self).__init__()
    self.layer_1 = nn.Linear(input_dim, hidden_dim)
    self.layer_out = nn.Linear(hidden_dim, output_dim) if output_dim > 1 else nn.Linear(hidden_dim, 1, bias=False)
    self.relu = nn.ReLU() 
    self.dropout = nn.Dropout(p=0.1)
    self.batchnorm = nn.BatchNorm1d(hidden_dim)

forward(x)

Performs a forward pass through the MLP network.

Parameters:
  • x (Tensor) –

    The input data tensor.

Returns:
  • x (Tensor) –

    The output tensor after passing through the MLP network.

Source code in flexynesis/modules.py (lines 128-143)
def forward(self, x):
    """
    Performs a forward pass through the MLP network.

    Args:
        x (torch.Tensor): The input data tensor.

    Returns:
        x (torch.Tensor): The output tensor after passing through the MLP network.
    """
    x = self.layer_1(x)
    x = self.batchnorm(x)
    x = self.relu(x)
    x = self.dropout(x)
    x = self.layer_out(x)
    return x
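
A short, hypothetical example of using MLP as a classification head (output_dim > 1) and as a regression head (output_dim = 1); the dimensions and dummy data are placeholders.

import torch
import torch.nn as nn

# Classification: output_dim > 1 gives one logit per class.
clf = MLP(input_dim=20, hidden_dim=32, output_dim=3)
x = torch.randn(8, 20)
logits = clf(x)                                        # shape (8, 3); raw scores, no softmax applied
loss = nn.CrossEntropyLoss()(logits, torch.randint(0, 3, (8,)))

# Regression: output_dim = 1 gives a single bias-free output per sample.
reg = MLP(input_dim=20, hidden_dim=32, output_dim=1)
y_hat = reg(x).squeeze(-1)                             # shape (8,)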

flexGCN

Bases: Module

A Graph Neural Network (GNN) model using configurable convolution and activation layers.

This class defines a GNN that can utilize various graph convolution types and activation functions. It supports a configurable number of convolutional layers with batch normalization and dropout for regularization. The model aggregates node features into a single vector per graph using a fully connected layer.

Attributes:
  • act (Module) –

    Activation function applied after each convolution.

  • convs (ModuleList) –

    List of convolutional layers.

  • bns (ModuleList) –

    List of batch normalization layers applied after each convolution.

  • dropout (Dropout) –

    Dropout layer applied after activation to prevent overfitting.

  • fc (Linear) –

    Fully connected layer that aggregates node features into a single vector.

Parameters:
  • node_count (int) –

    The number of nodes in each graph.

  • node_feature_count (int) –

    The number of features each node initially has.

  • node_embedding_dim (int) –

    The size of the node embeddings (output dimension of the convolutions).

  • output_dim (int) –

    The size of the output vector, which is the final feature vector for the whole graph.

  • num_convs (int, default: 2 ) –

    Number of convolutional layers in the network. Defaults to 2.

  • dropout_rate (float, default: 0.2 ) –

    The dropout probability used for regularization. Defaults to 0.2.

  • conv (str, default: 'GC' ) –

    Type of convolution layer to use. Supported types include 'GCN' for Graph Convolution Network, 'GAT' for Graph Attention Network, 'SAGE' for GraphSAGE, and 'GC' for generic Graph Convolution. Defaults to 'GC'.

  • act (str, default: 'relu' ) –

    Type of activation function to use. Supported types include 'relu', 'sigmoid', 'leakyrelu', 'tanh', and 'gelu'. Defaults to 'relu'.

Raises:
  • ValueError

    If an unsupported activation function or convolution type is specified.

Example

model = flexGCN(node_count=100, node_feature_count=5, node_embedding_dim=64, output_dim=10,
                num_convs=3, dropout_rate=0.3, conv='GAT', act='relu')
output = model(input_features, edge_index)

Where input_features is a tensor of shape (batch_size, num_nodes, node_feature_count)
and edge_index is an edge index tensor in COO format with shape (2, num_edges).

Source code in flexynesis/modules.py (lines 145-234)
class flexGCN(nn.Module):
    """
    A Graph Neural Network (GNN) model using configurable convolution and activation layers.

    This class defines a GNN that can utilize various graph convolution types and activation functions.
    It supports a configurable number of convolutional layers with batch normalization and dropout
    for regularization. The model aggregates node features into a single vector per graph using
    a fully connected layer.

    Attributes:
        act (torch.nn.Module): Activation function applied after each convolution.
        convs (nn.ModuleList): List of convolutional layers.
        bns (nn.ModuleList): List of batch normalization layers applied after each convolution.
        dropout (nn.Dropout): Dropout layer applied after activation to prevent overfitting.
        fc (torch.nn.Linear): Fully connected layer that aggregates node features into a single vector.

    Args:
        node_count (int): The number of nodes in each graph.
        node_feature_count (int): The number of features each node initially has.
        node_embedding_dim (int): The size of the node embeddings (output dimension of the convolutions).
        output_dim (int): The size of the output vector, which is the final feature vector for the whole graph.
        num_convs (int, optional): Number of convolutional layers in the network. Defaults to 2.
        dropout_rate (float, optional): The dropout probability used for regularization. Defaults to 0.2.
        conv (str, optional): Type of convolution layer to use. Supported types include 'GCN' for Graph Convolution Network, 
                              'GAT' for Graph Attention Network, 'SAGE' for GraphSAGE, and 'GC' for generic Graph Convolution. 
                              Defaults to 'GC'.
        act (str, optional): Type of activation function to use. Supported types include 'relu', 'sigmoid', 
                             'leakyrelu', 'tanh', and 'gelu'. Defaults to 'relu'.

    Raises:
        ValueError: If an unsupported activation function or convolution type is specified.

    Example:
        >>> model = flexGCN(node_count=100, node_feature_count=5, node_embedding_dim=64, output_dim=10, 
                         num_convs=3, dropout_rate=0.3, conv='GAT', act='relu')
        >>> output = model(input_features, edge_index)
        # Where `input_features` is a tensor of shape (batch_size, num_nodes, node_feature_count)
        # and `edge_index` is a list of edges in the COO format (2, num_edges).
    """
    def __init__(self, node_count, node_feature_count, node_embedding_dim, output_dim, 
                 num_convs = 2, dropout_rate = 0.2, conv='GC', act='relu'):
        super().__init__()

        act_options = {
            'relu': nn.ReLU(),
            'sigmoid': nn.Sigmoid(),
            'leakyrelu': nn.LeakyReLU(), 
            'tanh': nn.Tanh(),
            'gelu': nn.GELU()
        }
        if act not in act_options:
            raise ValueError("Invalid activation function string. Choose from ", list(act_options.keys()))

        conv_options = {
            'GCN': GCNConv,
            'GAT': GATConv,
            'SAGE': SAGEConv,
            'GC': GraphConv
        }
        if conv not in conv_options:
            raise ValueError('Unknown convolution type. Choose one of: ', list(conv_options.keys()))

        self.act = act_options[act]
        self.convs = nn.ModuleList()
        self.bns = nn.ModuleList()
        self.dropout = nn.Dropout(dropout_rate)

        # Initialize the first convolution layer separately if different input size
        self.convs.append(conv_options[conv](node_feature_count, node_embedding_dim))
        self.bns.append(nn.BatchNorm1d(node_embedding_dim))

        # Loop to create the remaining convolution and BN layers
        for _ in range(1, num_convs):
            self.convs.append(conv_options[conv](node_embedding_dim, node_embedding_dim))
            self.bns.append(nn.BatchNorm1d(node_embedding_dim))

        # Final fully connected layer
        self.fc = nn.Linear(node_embedding_dim * node_count, output_dim)

    def forward(self, x, edge_index):
        for conv, bn in zip(self.convs, self.bns):
            x = conv(x, edge_index)
            x = bn(x.view(-1, x.size(2))).view_as(x)
            x = self.act(x)
            x = self.dropout(x) 

        # Flatten the output of all nodes into a single vector per graph/sample
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
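
Beyond the docstring example above, a minimal sketch with random placeholder data; the shapes follow the class docstring, the graph below is an assumption made purely for illustration, and torch_geometric must be installed for the convolution layers.

import torch

node_count, node_feature_count = 100, 5

# Defaults apply: num_convs=2, dropout_rate=0.2, conv='GC', act='relu'
model = flexGCN(node_count=node_count,
                node_feature_count=node_feature_count,
                node_embedding_dim=64,
                output_dim=10)

input_features = torch.randn(8, node_count, node_feature_count)  # (batch_size, num_nodes, node_feature_count)
edge_index = torch.randint(0, node_count, (2, 400))              # (2, num_edges), random edges for illustration
output = model(input_features, edge_index)                       # shape (8, 10)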

cox_ph_loss(outputs, durations, events)

Calculate the Cox proportional hazards loss.

Parameters:
  • outputs (Tensor) –

    The output log-risk scores from the MLP.

  • durations (Tensor) –

    The observed times (durations) for each sample.

  • events (Tensor) –

    The event indicators (1 if event occurred, 0 if censored) for each sample.

Returns:
  • Tensor –

    The calculated CoxPH loss.

Source code in flexynesis/modules.py (lines 236-272)
def cox_ph_loss(outputs, durations, events):
    """
    Calculate the Cox proportional hazards loss.

    Args:
        outputs (torch.Tensor): The output log-risk scores from the MLP.
        durations (torch.Tensor): The observed times (durations) for each sample.
        events (torch.Tensor): The event indicators (1 if event occurred, 0 if censored) for each sample.

    Returns:
        torch.Tensor: The calculated CoxPH loss.
    """
    valid_indices = ~torch.isnan(durations) & ~torch.isnan(events)
    if valid_indices.sum() > 0:
        outputs = outputs[valid_indices]
        events = events[valid_indices]
        durations = durations[valid_indices]

        # Exponentiate the outputs to get the hazard ratios
        hazards = torch.exp(outputs)
        # Ensure hazards is at least 1D
        if hazards.dim() == 0:
            hazards = hazards.unsqueeze(0)  # Make hazards 1D if it's a scalar
        # Calculate the risk set sum
        log_risk_set_sum = torch.log(torch.cumsum(hazards[torch.argsort(durations, descending=True)], dim=0))
        # Get the indices that sort the durations in descending order
        sorted_indices = torch.argsort(durations, descending=True)
        events_sorted = events[sorted_indices]

        # Calculate the loss
        uncensored_loss = torch.sum(outputs[sorted_indices][events_sorted == 1]) - torch.sum(log_risk_set_sum[events_sorted == 1])
        total_loss = -uncensored_loss / torch.sum(events)
    else: 
        total_loss = torch.tensor(0.0, device=outputs.device, requires_grad=True)
    if not torch.isfinite(total_loss):
        return torch.tensor(0.0, device=outputs.device, requires_grad=True)
    return total_loss
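
A small illustrative call with made-up values; samples whose duration or event indicator is NaN are skipped, matching the implementation above.

import torch

outputs   = torch.randn(6, requires_grad=True)                       # log-risk scores from the survival head
durations = torch.tensor([5.0, 8.0, 3.0, 12.0, 7.0, float('nan')])   # observed times; NaN entries are ignored
events    = torch.tensor([1.0, 0.0, 1.0, 1.0, 0.0, float('nan')])    # 1 = event occurred, 0 = censored

loss = cox_ph_loss(outputs, durations, events)
loss.backward()   # gradients flow only through the valid (non-NaN) samples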