Example #1
def _load_KLDIV_loss_function(device):
    """
    Load loss function and its utilities.
    """
    loss_fct = torch.nn.KLDivLoss(reduction='batchmean')
    softmax = Softmax(dim=-1)
    logSoftmax = LogSoftmax(dim=-1)
    loss_fct.to(device)
    softmax.to(device)
    logSoftmax.to(device)
    return loss_fct, softmax, logSoftmax
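A minimal usage sketch for the helper above (the distillation-style variable names and shapes are assumptions, not part of the source): KLDivLoss with reduction='batchmean' expects log-probabilities as its input and probabilities as its target, which is why the helper returns both a Softmax and a LogSoftmax.

import torch

device = torch.device('cpu')
loss_fct, softmax, logSoftmax = _load_KLDIV_loss_function(device)

student_logits = torch.randn(4, 10, device=device)   # hypothetical model outputs
teacher_logits = torch.randn(4, 10, device=device)   # hypothetical soft targets

# input = log-probabilities, target = probabilities
loss = loss_fct(logSoftmax(student_logits), softmax(teacher_logits))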
Example #2
 def create_module(self):
     fc = Linear(in_features=prod(self.pre_dim), out_features=self.classes)
     sm = LogSoftmax(dim=-1)
     module = Sequential(fc,
                         #sm,
                         )
     return module
Example #3
def build_multinomial_policy(state_dim, hidden_dims, action_dim):
    '''
    Build a multilayer perceptron with a MultinomialLayer at the output layer

    Parameters
    ----------
    state_dim : int
        the input size of the network

    hidden_dims : list
        a list of type int specifying the sizes of the hidden layers

    action_dim : int
        the number of discrete actions in the multinomial (categorical)
        distribution output by the policy

    Returns
    -------
    policy : torch.nn.Sequential
        a pytorch sequential model that outputs a multinomial distribution
    '''

    layers = build_layers(state_dim, hidden_dims, action_dim)
    layers.append(LogSoftmax(dim=-1))
    layers.append(MultinomialLayer())
    policy = Sequential(*layers)

    return policy
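A hedged calling sketch (the sizes below are arbitrary, and build_layers / MultinomialLayer are assumed to be helpers from the same repository): the policy maps a state vector to a categorical distribution over action_dim discrete actions.

import torch

policy = build_multinomial_policy(state_dim=8, hidden_dims=[64, 64], action_dim=4)
state = torch.randn(1, 8)
dist = policy(state)       # whatever distribution object MultinomialLayer produces
action = dist.sample()     # assuming the layer exposes a sample() method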
Example #4
 def __init__(
     self,
     graph_embedder: GraphEmbedding,
     graph_encoder: GraphEncoder,
     class_projection: Module,
     graph_field_name: str,
     feature_field_names: List[str],
     indexes_field_name: str,
     label_field_name: str,
     instance: Instance,
     train_dataset: Dataset,
     eval_dataset: Optional[Dataset],
     test_dataset: Optional[Dataset],
     batch_size: int,
     lr: float,
 ) -> None:
     """Construct a complete model."""
     super().__init__()
     self.graph_embedder = graph_embedder
     self.graph_encoder = graph_encoder
     self.selector = Selector()
     self.class_projection = class_projection
     self.graph_field_name = graph_field_name
     self.feature_field_names = feature_field_names
     self.indexes_field_name = indexes_field_name
     self.label_field_name = label_field_name
     self.softmax = LogSoftmax(dim=1)
     self.instance = instance
     self.train_dataset = train_dataset
     self.eval_dataset = eval_dataset
     self.test_dataset = test_dataset
     self.batch_size = batch_size
     self.lr = lr
Example #5
    def __init__(self, in_dim, channels, kernel_size, layers, filters,
                 dist_size, masked_conv_class):
        super().__init__()
        self.in_dim = in_dim
        self.channels = channels
        self.kernel_size = kernel_size
        self.filters = filters
        self.layers = layers
        self.dist_size = dist_size
        self.mconv = masked_conv_class
        p = int((self.kernel_size - 1) / 2)

        self.net = ModuleList()
        self.net.append(
            self.mconv('A', self.channels, self.filters, self.kernel_size, p))
        self.net.append(ReLU())
        for _ in range(self.layers - 1):
            self.net.append(
                ResBlock('B', self.filters, self.filters, self.kernel_size,
                         self.mconv))
        self.net.append(self.mconv('B', self.filters, self.filters, 1, 0))
        self.net.append(ReLU())
        self.net.append(
            self.mconv('B', self.filters, self.dist_size * self.channels, 1,
                       0))

        self.log_softmax = LogSoftmax(dim=2)
        self.loss = NLLLoss(reduction='sum')
        print(self)
Example #6
    def __init__(self, args):
        super(MultiHeadedAttentionMIL_multiclass_plus, self).__init__()
        self.args = args
        self.dropout = args.dropout
        width_fe = is_in_args(args, 'width_fe', 64)
        atn_dim = is_in_args(args, 'atn_dim', 256)
        self.feature_depth = is_in_args(args, 'feature_depth', 512)
        self.num_heads = is_in_args(args, 'num_heads', 1)
        self.num_class = is_in_args(args, 'num_class', 2)
        self.n_layers_classif = is_in_args(args, 'n_layers_classif', 1)
        self.dim_heads = atn_dim // self.num_heads
        assert self.dim_heads * self.num_heads == atn_dim, "atn_dim must be divisible by num_heads"

        self.attention = Sequential(MultiHeadAttention(args), Softmax(dim=-2))

        classifier = []
        classifier.append(
            LinearBatchNorm(int(args.feature_depth * self.num_heads), width_fe,
                            args.dropout, args.constant_size))
        for i in range(self.n_layers_classif):
            classifier.append(
                LinearBatchNorm(width_fe, width_fe, args.dropout,
                                args.constant_size))
        classifier.append(Linear(width_fe, self.num_class))
        classifier.append(LogSoftmax(-1))
        self.classifier = Sequential(*classifier)
Example #7
 def __init__(self):
     super().__init__()
     self.flatten = Flatten()
     self.layer = Linear(28 * 28, 50)
     self.layer1 = Linear(50, 20)
     self.layer2 = Linear(20, 10)
     self.softmax = LogSoftmax()
Example #8
def main():
    """Create and execute an experiment."""
    model = AnalogSequential(
        Flatten(),
        AnalogLinear(INPUT_SIZE,
                     HIDDEN_SIZES[0],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[0],
                     HIDDEN_SIZES[1],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[1],
                     OUTPUT_SIZE,
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        LogSoftmax(dim=1))

    # Create the training Experiment.
    experiment = BasicTrainingWithScheduler(dataset=FashionMNIST,
                                            model=model,
                                            epochs=EPOCHS,
                                            batch_size=BATCH_SIZE)

    # Create the runner and execute the experiment.
    runner = LocalRunner(device=DEVICE)
    results = runner.run(experiment, dataset_root=PATH_DATASET)
    print(results)
Example #9
def createClassifier(hidden_units, num_input, num_output):
    """this function create a classifier network

    :hidden_units: number of hidden units in classifier
    :returns: classifier

    """
    layers = []
    for i in range(len(hidden_units)):
        if not layers:
            layers.append(Linear(num_input, hidden_units[i]))
        else:
            layers.append(Linear(hidden_units[i - 1], hidden_units[i]))
        layers.append(ReLU())
        layers.append(Dropout(0.2))
    if layers:
        layers.append(Linear(hidden_units[-1], num_output))
    else:
        layers.append(Linear(num_input, num_output))
    layers.append(LogSoftmax(dim=1))

    classifier = torch.nn.Sequential(*layers)
    return classifier
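For illustration (the sizes below are arbitrary and not from the source), the classifier pairs naturally with NLLLoss because its last layer is LogSoftmax(dim=1); with an empty hidden_units list it reduces to a single Linear followed by LogSoftmax.

import torch

classifier = createClassifier(hidden_units=[512, 256], num_input=25088, num_output=102)
log_probs = classifier(torch.randn(8, 25088))            # shape (8, 102), log-probabilities
loss = torch.nn.NLLLoss()(log_probs, torch.randint(0, 102, (8,)))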
Example #10
    def __init__(self,
                 aggregation_op,
                 readout_op,
                 num_aggregation_layers,
                 mlp_num_layers,
                 num_features,
                 num_classes,
                 dim=32,
                 eps=0,
                 train_eps=False,
                 dropout_rate=0.5):
        super().__init__()

        self.dropout_rate = dropout_rate
        self.num_aggregation_layers = num_aggregation_layers
        self.aggregators = ModuleList()

        if aggregation_op == 'sum':
            aggregate_func = scatter_add
        elif aggregation_op == 'mean':
            aggregate_func = scatter_mean
        elif aggregation_op == 'max':
            aggregate_func = scatter_max
        else:
            raise Exception('Invalid aggregation op %s' % aggregation_op)

        for k in range(num_aggregation_layers):
            mlp_layer = []
            for i in range(mlp_num_layers):
                input_dim = num_features if k == 0 and i == 0 else dim
                output_dim = dim
                mlp_layer.extend([
                    Linear(input_dim, output_dim),
                    Dropout(self.dropout_rate),
                    ReLU(),
                    BatchNorm1d(output_dim)
                ])
            mlp = Sequential(*mlp_layer)
            self.aggregators.append(
                GNNConv_Variant(mlp,
                                aggregate_func,
                                eps=eps,
                                train_eps=train_eps))

        if readout_op == 'sum':
            self.readout = global_add_pool
        elif readout_op == 'mean':
            self.readout = global_mean_pool
        elif readout_op == 'max':
            self.readout = global_max_pool
        else:
            raise Exception('Invalid readout op %s' % readout_op)

        self.classifier = Sequential(
            Linear(num_features + num_aggregation_layers * dim,
                   num_features + num_aggregation_layers * dim), ReLU(),
            Dropout(self.dropout_rate),
            Linear(num_features + num_aggregation_layers * dim, num_classes),
            LogSoftmax(dim=-1))
Example #11
  def forward_ocr(self, x):

    x = self.conv5(x)
    x = self.batch5(x)
    x = self.leaky(x)

    x = self.conv6(x)
    x = self.leaky(x)
    x = self.conv6(x)
    x = self.leaky(x)

    x = self.max2(x)
    x = self.conv7(x)
    x = self.batch7(x)
    x = self.leaky(x)

    x = self.conv8(x)
    x = self.leaky(x)
    x = self.conv8(x)
    x = self.leaky(x)

    x = self.conv9_1(x)
    x = self.leaky(x)
    x = self.conv9_2(x)
    x = self.leaky2(x)

    x = self.max2_1(x)

    x = self.conv10_s(x)
    x = self.batch10_s(x)
    x = self.leaky2(x)

    x = self.drop1(x)
    x = self.conv11(x)
    x = x.squeeze(2)

    x = x.permute(0, 2, 1)
    y = x
    x = x.contiguous().view(-1, x.data.shape[2])
    x = LogSoftmax(dim=len(x.size()) - 1)(x)
    x = x.view_as(y)
    x = x.permute(0, 2, 1)

    return x
Example #12
 def __init__(self, config, trial=None):
     super(LitPSD, self).__init__()
     if trial:
         self.trial = trial
     else:
         self.trial = None
     self.pylog = logging.getLogger(__name__)
     logging.getLogger("lightning").setLevel(self.pylog.level)
     self.config = config
     if hasattr(config.system_config, "half_precision"):
         self.needs_float = not config.system_config.half_precision
     else:
         self.needs_float = True
     self.hparams = DictionaryUtility.to_dict(config)
     self.n_type = config.system_config.n_type
     self.lr = config.optimize_config.lr
     self.modules = ModuleUtility(config.net_config.imports +
                                  config.dataset_config.imports +
                                  config.optimize_config.imports)
     self.model_class = self.modules.retrieve_class(
         config.net_config.net_class)
     # self.data_module = PSDDataModule(config,self.device)
     self.model = self.model_class(config)
     self.criterion_class = self.modules.retrieve_class(
         config.net_config.criterion_class)
     self.criterion = self.criterion_class(
         *config.net_config.criterion_params)
     self.softmax = LogSoftmax(dim=1)
     self.accuracy = Accuracy()
     self.confusion = ConfusionMatrix(num_classes=self.n_type)
     if hasattr(self.config.dataset_config, "calgroup"):
         calgroup = self.config.dataset_config.calgroup
     else:
         calgroup = None
     if self.config.dataset_config.dataset_class == "PulseDatasetDet":
         self.evaluator = PhysEvaluator(
             self.config.system_config.type_names,
             self.logger,
             device=self.device)
     elif self.config.dataset_config.dataset_class == "PulseDatasetWaveformNorm":
         self.evaluator = TensorEvaluator(self.logger,
                                          calgroup=calgroup,
                                          target_has_phys=False,
                                          target_index=None,
                                          metric_name="accuracy",
                                          metric_unit="")
     else:
         if hasattr(self.config.dataset_config, "calgroup"):
             self.evaluator = PSDEvaluator(
                 self.config.system_config.type_names,
                 self.logger,
                 device=self.device,
                 calgroup=self.config.dataset_config.calgroup)
         else:
             self.evaluator = PSDEvaluator(
                 self.config.system_config.type_names,
                 self.logger,
                 device=self.device)
Example #13
 def __init__(self, model, criterion, X=0, Y=0, SMLoss_mode=0):
     super(BaseTrainer, self).__init__()
     self.model = model
     self.criterion = criterion
     self.indx = X
     self.indy = Y
     self.SML_mode = SMLoss_mode
     self.KLDloss = KLLoss()
     self.logsoft = LogSoftmax()
Example #14
def kl_loss_func(scores, target, klloss):
    # scores.shape: [batch_size, seq_length, seq_length]
    # target.shape: [batch_size, seq_length]
    m = LogSoftmax(dim=1)
    output = klloss(
        torch.transpose(m(scores[0, :, 1:]) + 0.025, 0, 1),
        functional.one_hot(target[0, 1:], num_classes=scores.shape[-1]) +
        0.025)
    return output
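A hedged calling sketch with shapes that match the comments above (seq_length = 12 and a 12-class score dimension are assumptions; functional here is torch.nn.functional): the scores are log-softmaxed and compared against one-hot targets, with a small constant of 0.025 added to both tensors before the KL divergence is computed.

import torch

klloss = torch.nn.KLDivLoss(reduction='batchmean')
scores = torch.randn(1, 12, 12)            # [batch_size, seq_length, seq_length]
target = torch.randint(0, 12, (1, 12))     # [batch_size, seq_length]
loss = kl_loss_func(scores, target, klloss)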
Example #15
 def __init__(self, vocab_size, embedding_dim, hidden_dim, batch_size):
     super(Encoder, self).__init__()
     self.hidden_dim = hidden_dim
     self.embedding = Embedding(num_embeddings=vocab_size,
                                embedding_dim=embedding_dim)
     self.rnn = LSTM(input_size=embedding_dim, hidden_size=hidden_dim)
     self.batch_size = batch_size
     self.softmax = LogSoftmax()
     self.hidden = self.init_hidden()
Example #16
 def build_classifier(layers: List[int],
                      dropout: float = 0.3) -> Sequential:
     args: List[NNModule] = []
     for i in range(len(layers) - 2):
         from_size = layers[i]
         to_size = layers[i + 1]
         args.extend([Linear(from_size, to_size), ReLU(), Dropout(dropout)])
     args.extend([Linear(layers[-2], layers[-1]), LogSoftmax(dim=1)])
     classifier: Sequential = Sequential(*args)
     classifier.dropout = dropout
     return classifier
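A short calling sketch (the sizes are placeholders, not from the source): the layers list gives every width from the input dimension down to the class count, with ReLU and Dropout between all but the final Linear, which is followed by LogSoftmax(dim=1).

import torch

clf = build_classifier([1024, 512, 256, 10], dropout=0.5)
# Linear(1024->512)+ReLU+Dropout, Linear(512->256)+ReLU+Dropout, Linear(256->10), LogSoftmax
out = clf(torch.randn(2, 1024))   # (2, 10) log-probabilities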
Example #17
 def get_model(self, rpu_config: Any = TikiTakaReRamSBPreset) -> Module:
     return AnalogSequential(
         Flatten(),
         AnalogLinear(784, 256, bias=True, rpu_config=rpu_config()),
         Sigmoid(),
         AnalogLinear(256, 128, bias=True, rpu_config=rpu_config()),
         Sigmoid(), AnalogLinear(128,
                                 10,
                                 bias=True,
                                 rpu_config=rpu_config()),
         LogSoftmax(dim=1))
Example #18
 def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim,
              batch_size):
     super(POSTagger, self).__init__()
     self.hidden_dim = hidden_dim
     self.embedding = Embedding(num_embeddings=vocab_size,
                                embedding_dim=embedding_dim)
     self.rnn = LSTM(input_size=embedding_dim, hidden_size=hidden_dim)
     self.linear = Linear(hidden_dim, output_dim)
     self.batch_size = batch_size
     self.softmax = LogSoftmax(dim=2)
     self.hidden = self.init_hidden()
Example #19
    def __init__(self,
                 num_words: int,
                 embedding_dim: int,
                 padding_index: int = 0) -> None:
        super().__init__()

        self.softmax_w = torch.nn.Parameter(
            torch.Tensor(num_words, embedding_dim))
        self.softmax_b = torch.nn.Parameter(torch.Tensor(num_words))
        self._softmax_func = LogSoftmax(dim=-1)
        self._padding_index = padding_index
        self._reset_parameters()
Example #20
def initmodel(character_encoder, tag_encoder, embedded_dimension):
    """

    :param character_encoder:
    :param tag_encoder:
    :param embedded_dimension:
    :return:
    """
    character_encoder = copy(character_encoder)
    tag_encoder = copy(tag_encoder)
    tag_encoder[BD] = len(tag_encoder)
    character_encoder[BD] = len(character_encoder)
    character_embedding = Embedding(len(character_encoder), embedded_dimension)
    tag_embedding = Embedding(len(tag_encoder), embedded_dimension)
    encoder_part = LSTM(input_size=embedded_dimension,
                        hidden_size=LSTMDIM,
                        num_layers=1,
                        bidirectional=True).type(DTYPE)
    ench0 = randn(2, 1, LSTMDIM).type(DTYPE)
    encc0 = randn(2, 1, LSTMDIM).type(DTYPE)

    decoder_part = LSTM(input_size=2 * LSTMDIM + embedded_dimension,
                        hidden_size=LSTMDIM,
                        num_layers=1).type(DTYPE)
    dech0 = randn(2, 1, 2 * LSTMDIM + embedded_dimension).type(DTYPE)
    decc0 = randn(2, 1, 2 * LSTMDIM + embedded_dimension).type(DTYPE)

    pred = Linear(LSTMDIM, len(character_encoder)).type(DTYPE)
    softmax = LogSoftmax().type(DTYPE)

    model = ModuleList([
        character_embedding, tag_embedding, encoder_part, decoder_part, pred,
        softmax
    ])
    optimizer = Adam(model.parameters(), lr=LEARNINGRATE, betas=BETAS)

    return {
        'model': model,
        'optimizer': optimizer,
        'cencoder': character_encoder,
        'tencoder': tag_encoder,
        'cembedding': character_embedding,
        'tembedding': tag_embedding,
        'enc': encoder_part,
        'ench0': ench0,
        'encc0': encc0,
        'dec': decoder_part,
        'dech0': dech0,
        'decc0': decc0,
        'pred': pred,
        'sm': softmax,
        'embdim': embedded_dimension
    }
Example #21
def loss_function(origin, target, random_1, random_2, random_3, random_4):
    cos = CosineSimilarity(dim=1, eps=1e-6)
    sim_1 = cos(origin, target).unsqueeze(1)  #batch_size * 1
    sim_2 = cos(origin, random_1).unsqueeze(1)
    sim_3 = cos(origin, random_2).unsqueeze(1)
    sim_4 = cos(origin, random_3).unsqueeze(1)
    sim_5 = cos(origin, random_4).unsqueeze(1)
    sim = torch.cat((sim_1, sim_2, sim_3, sim_4, sim_5),
                    dim=1)  #batch_size * compare_size
    logSoft = LogSoftmax(dim=1)
    output = torch.mean(logSoft(sim)[:, 0])
    return -output
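The function above is effectively a 5-way contrastive objective: the cosine similarities form a score vector per sample, LogSoftmax converts them to log-probabilities, and the returned value is the negative mean log-probability of the true (origin, target) pair. A minimal calling sketch with assumed shapes:

import torch

origin = torch.randn(32, 128)                          # anchor embeddings
target = torch.randn(32, 128)                          # positive embeddings
negatives = [torch.randn(32, 128) for _ in range(4)]   # four random negatives
loss = loss_function(origin, target, *negatives)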
Example #22
    def __init__(self, input_len, out_length, loss=None):
        super().__init__()
        self.linear = Linear(input_len, input_len)
        self.activation = ReLU()

        self.linear_2 = Linear(input_len, out_length)

        if not loss:
            self.loss = NLLLoss(reduction='none')
        else:
            self.loss = loss

        self.softmax = LogSoftmax()
Example #23
 def get_model(self, rpu_config: Any = TikiTakaEcRamPreset) -> Module:
     return AnalogSequential(
         Conv2d(in_channels=3,
                out_channels=48,
                kernel_size=3,
                stride=1,
                padding=1), ReLU(),
         AnalogConv2d(in_channels=48,
                      out_channels=48,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      rpu_config=rpu_config(),
                      weight_scaling_omega=0.8), BatchNorm2d(48), ReLU(),
         MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
         AnalogConv2d(in_channels=48,
                      out_channels=96,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      rpu_config=rpu_config(),
                      weight_scaling_omega=0.8), ReLU(),
         AnalogConv2d(in_channels=96,
                      out_channels=96,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      rpu_config=rpu_config(),
                      weight_scaling_omega=0.8), BatchNorm2d(96), ReLU(),
         MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
         AnalogConv2d(in_channels=96,
                      out_channels=144,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      rpu_config=rpu_config(),
                      weight_scaling_omega=0.8), ReLU(),
         AnalogConv2d(in_channels=144,
                      out_channels=144,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      rpu_config=rpu_config(),
                      weight_scaling_omega=0.8), BatchNorm2d(144), ReLU(),
         MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1),
         Flatten(),
         AnalogLinear(in_features=16 * 144,
                      out_features=384,
                      rpu_config=rpu_config(),
                      weight_scaling_omega=0.8), ReLU(),
         Linear(in_features=384, out_features=10), LogSoftmax(dim=1))
Example #24
def beamsearch(model, text_field, beams=5, prompt="", max_len=50):
    """Decode text from a language model with beam search, scoring candidates via LogSoftmax."""
    hidden_size = model._modules['rnn'].hidden_size
    n_layers = model._modules['rnn'].num_layers
    h_t_prev = torch.zeros(n_layers, 1, hidden_size)
    c_t_prev = torch.zeros(n_layers, 1, hidden_size)
    w_t = text_field.process([text_field.tokenize(prompt.lower())])
    sampling_criterion = LogSoftmax(dim=1)

    s_t, h_t, c_t = model(w_t, h_t_prev, c_t_prev)
    h_t_b_prev = torch.cat([h_t] * beams, 1)
    c_t_b_prev = torch.cat([c_t] * beams, 1)

    w_t_next = sampling_criterion(s_t[-1])

    topk_vals, topk_indices = torch.topk(w_t_next, beams)
    decoded_strings = topk_indices.view(beams, 1)

    last_prob = []
    for i in range(1, max_len, 1):
        s_t, h_t_b, c_t_b = model(topk_indices, h_t_b_prev, c_t_b_prev)
        w_t_next = sampling_criterion(s_t[-1])

        cumulative_log_probs = w_t_next + topk_vals.view((beams, 1))
        topk_vals, topk_indices = torch.topk(cumulative_log_probs.view(-1), beams)
        b_indices = np.array(np.unravel_index(topk_indices.numpy(), cumulative_log_probs.shape)).T

        h_t_b_new, c_t_b_new = [], []
        for layer in range(n_layers):
            ht_layer, ct_layer = [], []
            for i, j in b_indices:
                ht_layer.append(h_t_b[layer][i])
                ct_layer.append(c_t_b[layer][i])
            h_t_b_new.append(torch.stack(ht_layer))
            c_t_b_new.append(torch.stack(ct_layer))
        h_t_b_prev = torch.stack(h_t_b_new)
        c_t_b_prev = torch.stack(c_t_b_new)

        strings = []
        for i, (r, c) in enumerate(b_indices):
            topk_indices[i] = c
            strings.append(torch.cat([decoded_strings[r], torch.tensor([c])]))

        decoded_strings = strings
        topk_indices = topk_indices.unsqueeze(0)
        last_prob = topk_vals

    decoded_strings = decoded_strings[last_prob.argmax()]
    decoded_strings = prompt + " " + reverseNumeralize(decoded_strings, text_field)
    return decoded_strings
Example #25
    def __init__(self,
                 i_dim,
                 h_dim,
                 drop_prob,
                 support,
                 num_bases,
                 featureless=True):
        super(RGCN, self).__init__()
        self.drop_prob = drop_prob
        self.gc1 = RGCLayer(i_dim, h_dim, support, num_bases, featureless,
                            drop_prob)
        self.gc2 = RGCLayer(h_dim, h_dim, support, num_bases, False, drop_prob)

        self.fc1 = Sequential(Linear(h_dim, h_dim), ReLU(), Dropout(drop_prob))
        self.fc2 = Sequential(Linear(h_dim, 4), LogSoftmax())
Example #26
	def __init__(self, reduction: str = 'mean', log_activation: Module = LogSoftmax(dim=-1)):
		"""
			Jensen-Shannon Divergence loss with logits.

			Uses the following formula:

			>>> 'JS(p,q) = 0.5 * (KL(LS(p),m) + KL(LS(q),m)), with m = LS(0.5 * (p+q))'
			>>> 'where LS = LogSoftmax and KL = KL-Divergence.'

			:param reduction: The reduction function to apply. (default: 'mean')
			:param log_activation: The log-activation function used to compute predictions from logits. (default: LogSoftmax(dim=-1))
		"""
		super().__init__()
		self.kl_div = KLDivLoss(reduction=reduction, log_target=True)
		self.log_activation = log_activation
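The constructor above only stores the divergence and the log-activation; a forward pass consistent with the docstring's formula might look like the sketch below (this is an assumption, not the repository's actual implementation). Note that KLDivLoss with log_target=True takes log-probabilities for both arguments, with the first argument being the log of the mixture m.

	def forward(self, logits_p, logits_q):
		# Log-probabilities of both inputs and of their logit-space average,
		# following m = LS(0.5 * (p + q)) from the docstring.
		log_p = self.log_activation(logits_p)
		log_q = self.log_activation(logits_q)
		log_m = self.log_activation(0.5 * (logits_p + logits_q))
		# kl_div(input=log_m, target=log_p) computes KL(p || m) when log_target=True.
		return 0.5 * (self.kl_div(log_m, log_p) + self.kl_div(log_m, log_q))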
Example #27
    def __init__(self):
        super(Net, self).__init__()

        aggregators = ['mean', 'min', 'max', 'std']
        scalers = ['identity', 'amplification', 'attenuation']
        self.dropout = args.dropout
        self.patience = args.patience
        self.convs = ModuleList()
        self.convs.append(
            PNAConv(in_channels=dataset.num_features,
                    out_channels=args.hidden,
                    aggregators=aggregators,
                    scalers=scalers,
                    deg=deg,
                    edge_dim=dataset.num_edge_features,
                    towers=1,
                    pre_layers=args.pretrans_layers,
                    post_layers=args.posttrans_layers,
                    divide_input=False))
        for _ in range(args.n_conv_layers):
            conv = PNAConv(in_channels=args.hidden,
                           out_channels=args.hidden,
                           aggregators=aggregators,
                           scalers=scalers,
                           deg=deg,
                           edge_dim=dataset.num_edge_features,
                           towers=args.towers,
                           pre_layers=args.pretrans_layers,
                           post_layers=args.posttrans_layers,
                           divide_input=False)
            self.convs.append(conv)

        gr = []
        g = list(
            map(
                int,
                np.ceil(
                    np.geomspace(args.hidden, dataset.num_classes,
                                 args.mlp_layers + 1))))
        g[0] = args.hidden
        g[-1] = dataset.num_classes
        for i in range(args.mlp_layers):
            gr.append(Linear(g[i], g[i + 1]))
            if i < args.mlp_layers - 1:
                gr.append(Dropout(p=self.dropout))
            gr.append(LogSoftmax() if i == args.mlp_layers - 1 else ReLU())

        self.mlp = Sequential(*gr)
Example #28
    def __init__(self, options):
        super(LSTMBackend, self).__init__()
        self.module = LSTM(input_size=options['model']['inputdim'],
                           hidden_size=options['model']['hiddendim'],
                           num_layers=options['model']['numlstms'],
                           batch_first=True,
                           bidirectional=True)

        self.fc = Linear(options['model']['hiddendim'] * 2,
                         options['model']['numclasses'])

        self.softmax = LogSoftmax(dim=2)

        self.loss = NLLSequenceLoss()

        self.validator = _validate
Example #29
    def __init__(self,
                 nfeat,
                 nhid,
                 dropout,
                 support,
                 num_basis,
                 featureless=True):
        super(RGCN, self).__init__()
        self.dropout = dropout
        self.gc1 = RelationalGraphConvolution(nfeat, nhid, support, num_basis,
                                              featureless, dropout)
        self.gc2 = RelationalGraphConvolution(nhid, nhid, support, num_basis,
                                              False, dropout)

        self.fc1 = Sequential(Linear(nhid, nhid), ReLU(), Dropout(dropout))
        self.fc2 = Sequential(Linear(nhid, 4), LogSoftmax())
Example #30
def splitModels(args, hidden_sizes=[128, 640]):
    models = [
        Sequential(
                    Linear(args.input_size, hidden_sizes[0]),
                    ReLU(),
        ),
        Sequential(
                    Linear(hidden_sizes[0], hidden_sizes[1]),
                    ReLU(),
        ),
        Sequential(
                    Linear(hidden_sizes[1], args.output_size),
                    LogSoftmax(dim=1)
        )
    ]

    return models