Example 1
def optimize_model():
    # pull all stored transitions from replay memory
    transitions = memory.memory
    batch = Transition(*zip(*transitions))
    state_batch = torch.cat(batch.state)
#     print(state_batch.shape)
    action_batch = torch.cat(batch.action)
    # print(len(batch.reward))
    reward_batch = torch.cat(tuple(r.type(torch.FloatTensor) for r in batch.reward))
    
    g_list = reward_batch.data.tolist()
    for i in reversed(range(1, len(g_list))):
        g_list[i-1] = g_list[i-1] + 0.999 * g_list[i]
    g_tensor = torch.FloatTensor(g_list).view(-1,1)
#     print(g_tensor.shape)
    
    output = nn(state_batch)
    state_action_values = output[0].gather(1, action_batch)
    b_tensor = output[1].view(-1, 1)
    # policy loss: minimize -(G - b) * log pi(a|s); detach the baseline so the
    # advantage does not backprop into the value head
    l1 = -torch.sum((g_tensor - b_tensor.detach()) * torch.log(state_action_values))
    # baseline loss: regress the value head toward the discounted returns
    l2 = F.smooth_l1_loss(b_tensor, g_tensor)
#     print(l2)
    loss = l1 + l2
#     print(loss)
    optimizer.zero_grad()
    loss.backward()
    # for param in nn.parameters():
    #     param.grad.data.clamp_(-1, 1)
    optimizer.step()
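The reversed loop over g_list above computes discounted returns via the recurrence G_t = r_t + 0.999 * G_{t+1}; a minimal standalone sketch of the same computation (the function name is illustrative):

    def discounted_returns(rewards, gamma=0.999):
        # G_t = r_t + gamma * G_{t+1}, accumulated right to left
        g = 0.0
        out = []
        for r in reversed(rewards):
            g = r + gamma * g
            out.append(g)
        return list(reversed(out))

    # discounted_returns([1, 1, 1], gamma=0.5) == [1.75, 1.5, 1.0]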
Example 2
def gpu_thread_worker(nn, edge_queue, eval_batch_size, is_cuda):
    while True:
        with torch.no_grad():
            nn.eval()
            edges = []
            last_batch = False
            for i in range(eval_batch_size):
                if edge_queue.empty():
                    break
                try:
                    edge = edge_queue.get_nowait()
                    if edge is None:
                        last_batch = True
                        print(
                            "Sentinel received. GPU will process this batch and terminate afterwards"
                        )
                    else:
                        edges.append(edge)
                except queue.Empty:
                    pass

            if len(edges) != 0:
                # print("batch size:", len(edges))

                # batch process
                states = [edge.to_node.checker_state for edge in edges]
                input_tensor = states_to_batch_tensor(states, is_cuda)
                # this line is the bottleneck
                if isinstance(nn, (YesPolicy, SharedPolicy)):
                    # two-headed net: separate value and logits outputs
                    value_tensor, logits_tensor = nn(input_tensor)
                else:
                    # single-headed net: reuse the value output as the logit
                    value_tensor = nn(input_tensor)
                    logits_tensor = value_tensor

                for edx, edge in enumerate(edges):
                    edge.value = value_tensor[edx, 0]
                    edge.logit = logits_tensor[edx, 0]
                    edge_queue.task_done()
                    edge.from_node.unassigned -= 1
                    if edge.from_node.unassigned == 0:
                        edge.from_node.lock.release()
            else:
                time.sleep(0.1)

            if last_batch:
                edge_queue.task_done()
                print(
                    "Queue task done signal sent. Queue will join. Thread may still be running."
                )
                return
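The None sentinel plus task_done handshake used above is the standard shutdown pattern for queue-fed worker threads; a minimal self-contained sketch (names are illustrative):

    import queue
    import threading

    def worker(q):
        while True:
            item = q.get()
            if item is None:   # sentinel: acknowledge it, then terminate
                q.task_done()
                return
            # ... process item ...
            q.task_done()

    q = queue.Queue()
    threading.Thread(target=worker, args=(q,)).start()
    q.put('job')
    q.put(None)  # request shutdown
    q.join()     # returns once every item, including the sentinel, is done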
Example 3
 def have_usable_cuda(self):
     haveit = torch.cuda.is_available()
     if haveit:
         # pytorch reports CUDA as available on some legacy devices that are no
         # longer supported, so verify with a real operation
         try:
             t1 = torch.randn(3).cuda()
             nn = torch.nn.Linear(3, 3).cuda()
             nn(t1)
         except Exception:
             logger.info(
                 "Switching off CUDA: pytorch reports it is available but it does not seem to be supported"
             )
             haveit = False
     return haveit
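Probing with a small tensor and layer matters because torch.cuda.is_available() can return True on legacy GPUs whose compute capability the installed PyTorch build no longer supports; only an actual kernel launch reveals the failure.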
Example 4
 def post_milp(self, x, nn, output_flag, t, template):
     """milp method"""
     post = []
     gurobi_model = self.internal_model  # grb.Model()
     input = self.last_input  # Experiment.generate_input_region(gurobi_model, template, x, self.env_input_size)
     observation = gurobi_model.addMVar(shape=(2,), lb=float("-inf"), ub=float("inf"), name="observation")
     gurobi_model.addConstr(observation[1] <= input[0] - input[1] + self.input_epsilon / 2, name=f"obs_constr21")
     gurobi_model.addConstr(observation[1] >= input[0] - input[1] - self.input_epsilon / 2, name=f"obs_constr22")
     gurobi_model.addConstr(observation[0] <= v_lead - input[2] + self.input_epsilon / 2, name=f"obs_constr11")
     gurobi_model.addConstr(observation[0] >= v_lead - input[2] - self.input_epsilon / 2, name=f"obs_constr12")
     nn_output, max_val, min_val = Experiment.generate_nn_guard_continuous(gurobi_model, observation, nn)
     is_equal = torch.isclose(nn(torch.from_numpy(observation.X).float()), torch.from_numpy(nn_output.X).float(), rtol=1e-3).all().item()
     assert is_equal
     # clipped_nn_output = gurobi_model.addMVar(lb=float("-inf"), shape=(len(nn_output)), name=f"clipped_nn_output")
     # gurobi_model.addConstr(nn_output[0] >= -12, name=f"clipped_out_constr1")
     # gurobi_model.addConstr(nn_output[0] <= 12, name=f"clipped_out_constr2")
     # feasible_action = Experiment.generate_nn_guard(gurobi_model, input, nn, action_ego=chosen_action)
     # apply dynamic
     x_prime = StoppingCarContinuousExperiment.apply_dynamic(input, gurobi_model, action=nn_output, env_input_size=self.env_input_size)
     gurobi_model.update()
     gurobi_model.optimize()
     found_successor, x_prime_results = self.h_repr_to_plot(gurobi_model, template, x_prime)
     self.last_input = x_prime
     # x_prime_results = x_prime_results.round(4)  # correct for rounding errors introduced by the conversion to h-repr
     if found_successor:
         post.append(tuple(x_prime_results))
     return post
Example 5
def nll():
    # input is of size N x C = 3 x 5
    input = torch.randn(3, 5, requires_grad=True)
    # each element in target has to have 0 <= value < C
    target = torch.tensor([1, 0, 4])
    log_softmax = torch.nn.LogSoftmax(dim=1)
    nll_loss = torch.nn.NLLLoss()
    output = nll_loss(log_softmax(input), target)
    output.backward()
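For reference, torch.nn.CrossEntropyLoss fuses LogSoftmax and NLLLoss, so the steps above can be computed in one call:

    loss_fn = torch.nn.CrossEntropyLoss()
    output = loss_fn(input, target)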
Example 6
    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams
        self.loss = torch.nn.BCEWithLogitsLoss()
        self.experiment_name = self.hparams.experiment_name

        # check if using single or multiple modalities
        if self.hparams.data_type == 'JointAll':
            self.hparams.data_type = list(PARSED_EMR_DICT.keys()) + ['Vision']
        if self.hparams.data_type == 'JointSeparate':
            self.hparams.data_type = ['All', 'Vision']

        # get feature size
        tmp_dataloader = self.train_dataloader()
        feature_size = tmp_dataloader.dataset.feature_size

        # use fcnn if only one modality provided, else use joint model
        nn = FCNN if isinstance(self.hparams.data_type, str) else JointModel
        self.model = nn(
            feature_size=feature_size,
            num_neurons=self.hparams.num_neurons,
            num_hidden=self.hparams.num_hidden,
            init_method=self.hparams.init_method,
            activation=self.hparams.activation,
            dropout_prob=self.hparams.dropout_prob,
        )

        # for computing metrics
        self.train_probs = []
        self.val_probs = []
        self.test_probs = []
        self.train_true = []
        self.val_true = []
        self.test_true = []
        self.test_acc = []
Example 7
    def evaluate(self):
        '''
        Evaluate the model
        '''
        for model in self.mrf.nnformulas:
            model.eval()

        pred = []
        with torch.no_grad():
            for fidx, nn in enumerate(self.mrf.nnformulas):
                pred.append(nn(self.val_inputs[fidx]))
            y_link = pred[1]
            y_link_pred = y_link.argmax(dim=1)
            f1_link = f1_score(self.link_val_true,
                               y_link_pred,
                               average='macro',
                               labels=[1])

            y_type = pred[0]
            y_type_pred = y_type.argmax(dim=1)
            f1_type = f1_score(self.type_val_true,
                               y_type_pred,
                               average='macro')

            return f1_link, f1_type
Example 8
    def nn_opt(self, nn):
        with torch.no_grad():

            def obj_cons(x):
                tx = torch.tensor(x)
                out = nn(tx)
                return (out[:self.nobj].numpy().tolist(),
                        out[self.nobj:].numpy().tolist())

            def obj_ucons(x):
                tx = torch.tensor(x)
                return nn(tx).numpy().tolist()

            arch = Archive()
            if self.ncons == 0:
                prob = Problem(self.dim, self.nobj)
                prob.function = obj_ucons
            else:
                prob = Problem(self.dim, self.nobj, self.ncons)
                prob.function = obj_cons
                prob.constraints[:] = "<=0"
            prob.types[:] = [Real(self.lb, self.ub) for _ in range(self.dim)]
            self.algo = NSGAII(prob, population_size=50, archive=arch)
            self.algo.run(5000)

            optimized = self.algo.result
            rand_idx = np.random.randint(len(optimized))
            suggested_x = torch.tensor(optimized[rand_idx].variables)
            suggested_y = nn(suggested_x)
            return suggested_x.view(-1, self.dim), suggested_y.view(
                -1, self.nobj + self.ncons)
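Note that the whole search runs under torch.no_grad() and converts tensors to plain lists at the boundary: NSGA-II is gradient-free, so the network is treated purely as a black-box objective (and constraint) function.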
Example 9
def train_post_prototype(assignment):
    from config import get_prototype_config
    from model_torch import Post_Prototype_RCNN_L2
    from stream import Post_Data
    from time import time
    config = get_prototype_config()
    ## 1. data preparation
    trainData = Post_Data(is_train=True, **config)
    testData = Post_Data(is_train=False, **config)
    ### 2. model & train
    nn = Post_Prototype_RCNN_L2(assignment, **config)
    LR = config['LR']
    opt_ = torch.optim.SGD(nn.parameters(), lr=LR)
    best_auc = 0
    trainFeature_all, _ = trainData.get_all()
    testFeature, testLabel = testData.get_all()
    for i in range(config['train_iter']):
        nn.generate_prototype(trainFeature_all)
        feat, label = trainData.next()
        loss, _ = nn(feat, label)
        opt_.zero_grad()
        loss.backward()
        opt_.step()
        loss_value = loss.item()
        if i % 1 == 0:
            score = post_evaluate(testFeature, testLabel, nn, **config)
            best_auc = score if score > best_auc else best_auc
            print('iter {}, auc:{} (best:{})'.format(i,
                                                     str(score)[:5],
                                                     str(best_auc)[:5]),
                  end=' ')
            if i > 0:
                print('{} sec'.format(str(time() - t1)[:4]))
            t1 = time()
Example 10
def select_action(state):
    m = torch.distributions.Categorical(torch.tensor([0.25, 0.75]))
    m_type = m.sample().item()
    if m_type == 0:
        return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
    else:
        with torch.no_grad():
            return nn(state)[0].max(1)[1].view(1, 1)
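Sampling from Categorical([0.25, 0.75]) is equivalent to epsilon-greedy exploration with epsilon = 0.25; a sketch of the same policy in that form, reusing the globals (nn, n_actions, device) assumed by the snippet above:

    def select_action_eps(state, eps=0.25):
        # explore uniformly with probability eps, otherwise act greedily
        if random.random() < eps:
            return torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
        with torch.no_grad():
            return nn(state)[0].max(1)[1].view(1, 1)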
Example 11
 def forward(self, x):
     # x is batch by time by dim
     x = x.permute(1, 0, 2)
     # x is time by batch by dim
     for nn in self.nns:
         x = nn(x)[0]
     # swap channel + time back
     z_out = x.permute(1, 0, 2)
     return z_out
Example 12
    def forward(self):
        '''
        Computes the forward step of the neural networks
        '''
        self.wt = []
        for fidx, nn in enumerate(self.mrf.nnformulas):
            self.wt.append(nn(self.nn_inputs[fidx]))

        return self.wt
Example 13
def sample(self, nn, inp, n_samples=3):
    previous = np.zeros_like(inp)  # assumed accumulator shape: matches the input

    for i in range(1, n_samples + 1):  # start at 1 so previous / i is well defined
        previous += n_i
        inhibit = norm(previous / i)
        # inhibit previous samples
        z0 = random.random()
        nn[j] += inp + z0
        nn[j] -= inhibit[j]
    out, p = nn(inp, z0)
Example 14
    def fit(self, epochs=100):
        for k in tqdm(range(epochs)):
            # query by query
            scores = []
            true_scores = []
            for datum in data_train:
                scores.append(nn(datum.feature))
                true_scores.append(datum.relevance)

            op = self.compute_ordered_pairs(scores)
            lambdas = self.compute_lambda(true_scores, scores, op)
Example 15
 def __init__(self,
              input_dim,
              adapter_dim,
              init_scale=1e-3,
              shared_weights=True):
     super().__init__()
     self.adapter_dim = adapter_dim
     if shared_weights:
         self.pooler_layer = TimeDistributed(
             torch.nn.Linear(input_dim, adapter_dim))
     else:
         raise NotImplementedError
Example 16
def inference_imputation_networks(nn, nf, data, args):
    lst = []

    batch_sz = 256
    iterations = int(data.shape[0] / batch_sz)
    left_over = data.shape[0] - batch_sz * iterations

    with torch.no_grad():
        for idx in range(iterations):
            rows = data[int(idx * batch_sz):int((idx + 1) * batch_sz)]
            if args.use_cuda:
                rows = torch.from_numpy(rows).float().cuda()
            else:
                rows = torch.from_numpy(rows).float()

            z = nf(rows)[0]
            z_hat = nn(z)
            x_hat = nf.inverse(z_hat)

            lst.append(np.clip(x_hat.cpu().numpy(), 0, 1))

        # leftover rows that did not fill a complete batch
        if left_over > 0:
            rows = data[batch_sz * iterations:]
            if args.use_cuda:
                rows = torch.from_numpy(rows).float().cuda()
            else:
                rows = torch.from_numpy(rows).float()

            z = nf(rows)[0]
            z_hat = nn(z)
            x_hat = nf.inverse(z_hat)

            lst.append(np.clip(x_hat.cpu().numpy(), 0, 1))

    final_lst = []
    for idx in range(len(lst)):
        for element in lst[idx]:
            final_lst.append(element)

    return final_lst
Example 17
def eval_on_test(nn, crossEntropy, x_test, y_test, test_accuracies,
                 test_losses):
    """
  Find the accuracy and loss on the test set, given the current weights
  """
    test_pred = nn(x_test)
    true_labels = torch.max(y_test, 1)[1]
    test_acc = accuracy(test_pred, y_test)
    test_loss = crossEntropy(test_pred, true_labels)
    print("Test accuracy is:", test_acc, "\n")
    test_accuracies.append(test_acc)
    test_losses.append(test_loss)

    return test_accuracies, test_losses
Example 18
def inference_img_imputation_networks(nn, nf, data, mask, original_dat, args):

    batch_sz = 256
    iterations = int(len(data) / batch_sz)
    left_over = len(data) - batch_sz * iterations
    ones = np.ones((batch_sz, data[0].shape[0]))

    with torch.no_grad():
        for idx in range(iterations):
            begin = int(idx * batch_sz)
            end = int((idx + 1) * batch_sz)
            rows = np.asarray(data[begin:end])
            if args.use_cuda:
                rows = torch.from_numpy(rows).float().cuda()
            else:
                rows = torch.from_numpy(rows).float()

            z = nf(rows)[0]
            z_hat = nn(z)
            x_hat = nf.inverse(z_hat)
            x_hat = np.clip(x_hat.cpu().numpy(), 0, 1)
            data[begin:end] = (ones - mask[begin:end]) * original_dat[
                begin:end] + mask[begin:end] * x_hat

        # leftover rows; skip when left_over == 0 since data[-0:] would select
        # the whole array rather than an empty slice
        if left_over > 0:
            rows = np.asarray(data[-left_over:])
            if args.use_cuda:
                rows = torch.from_numpy(rows).float().cuda()
            else:
                rows = torch.from_numpy(rows).float()
            ones = np.ones((left_over, data[0].shape[0]))

            z = nf(rows)[0]
            z_hat = nn(z)
            x_hat = nf.inverse(z_hat)
            x_hat = np.clip(x_hat.cpu().numpy(), 0, 1)
            data[-left_over:] = ((ones - mask[-left_over:]) * original_dat[-left_over:]
                                 + mask[-left_over:] * x_hat)
Example 19
def post_evaluate(testFeature, testLabel, nn, **config):
    batch_size = config['batch_size']
    batch_number = int(np.ceil(testFeature.shape[0] / batch_size))
    Label = []
    Output = []
    for i in range(batch_number):
        feat = testFeature[i * batch_size:(i + 1) * batch_size]
        label = testLabel[i * batch_size:(i + 1) * batch_size]
        _, output = nn(feat, label)
        label = list(label.data)
        Label.extend(label)
        Output.extend([float(output[j][1]) for j in range(output.shape[0])])
    return roc_auc_score(Label, Output)
Example 20
def main(loadfrom, saveto, dev_data):

    loadfrom = loadfrom.strip().split(',')

    net = []
    for ll in loadfrom:
        with open(ll, 'rb') as f:
            net.append(pkl.load(f))

    test = data_iterator(dev_data, net[0].options)

    print('Testing...', end=' ')
    preds = []
    n_samples = 0
    softmax = torch.nn.Softmax()
    for s1, s1m, labels in test:
        for nn in net:
            nn.eval()
        s1_ = torch.from_numpy(numpy.array(s1))
        s1m_ = torch.from_numpy(numpy.array(s1m).astype('float32'))

        for ii, nn in enumerate(net):
            out = nn(Variable(s1_, requires_grad=False),
                     Variable(s1m_, requires_grad=False))
            out = softmax(out)
            out = out.data.numpy()
            if ii == 0:
                pp = out
            else:
                pp += out
        pp = pp / len(net)

        preds.append(pp.argmax(-1))
        n_samples += len(labels)

    preds = numpy.concatenate(preds, axis=0)
    preds = (2. * preds) - 1.

    pos = numpy.sum(preds == 1.)
    neg = numpy.sum(preds == -1.)
    print('pos {} neg {}'.format(pos, neg))

    with open(saveto, 'w') as f_out:
        with open(dev_data, 'r') as f_in:
            print(f_in.readline().strip(), file=f_out)
            for ii, l in enumerate(f_in):
                print('{}\t{}'.format(l.strip(), int(preds[ii])), file=f_out)
Example 21
def train_weighted_post_prototype(assignment):
    from config import get_prototype_config
    from model_torch import Weighted_Post_Prototype_RCNN_L2
    from stream import Weighted_Post_Data
    config = get_prototype_config()
    every_iter = config['every_iter']
    ## 1. data prepare
    trainData = Weighted_Post_Data(is_train=True, **config)
    testData = Weighted_Post_Data(is_train=False, **config)
    ### 2. model & train
    nn = Weighted_Post_Prototype_RCNN_L2(assignment, **config)
    LR = config['LR']
    opt_ = torch.optim.SGD(nn.parameters(), lr=LR)
    best_auc = 0
    trainFeature_all, _, weight_all = trainData.get_all()
    testFeature, testLabel, _ = testData.get_all()
    for i in range(config['train_iter']):
        nn.generate_prototype(trainFeature_all, weight_all)
        feat, label, weight = trainData.next()
        loss, _ = nn(feat, label, weight)
        opt_.zero_grad()
        loss.backward()
        opt_.step()
        #loss_value = loss.data[0]
        if i % 1 == 0:
            score = post_evaluate(testFeature, testLabel, nn, **config)
            best_auc = score if score > best_auc else best_auc
            if i % every_iter == every_iter - 1 and i > 0:
                print('iter {}, auc:{} (best:{})'.format(
                    i, str(score)[:5], str(best_auc)[:5]))
            #	print('{} sec'.format(str(time() - t1)[:4]))
            #t1 = time()

    trainData.restart()
    reweight = []
    for i in range(trainData.batch_number):
        feat, label, weight = trainData.next()
        reweight.extend(nn.measure_similarity(feat))
    #print(reweight)
    reweight = normalize_weight(reweight, config['upper'], config['lower'])
    #print(reweight)
    return reweight
Example 22
    def evaluate(self):
        '''
        Evaluate the model
        '''
        for model in self.mrf.nnformulas:
            model.eval()

        pred = []
        with torch.no_grad():
            for fidx, nn in enumerate(self.mrf.nnformulas):
                pred.append(nn(self.val_inputs[fidx]))

            y = pred[0]
            y_pred = y.argmax(dim=1)
            y_pred = y_pred.to('cpu')

            acc = accuracy_score(self.val_true, y_pred)

            return acc
Example 23
def greedy_decode(h0, nn, word2idx, idx2word, max_len, batch, out):
    sos = word2idx['<sos>']
    in_sos = torch.ones(1, 1).fill_(sos).long().cuda()
    in_state = in_sos
    for i in range(max_len - 1):
        in_st = torch.cat(
            [in_state, torch.ones(1, 1).fill_(sos).long().cuda()], dim=-1)
        prob = nn(h0, in_st, out['embedded_in_txt'], batch.in_txt,
                  batch.in_txt_mask)
        _, next_word = torch.max(prob, dim=-1)
        in_state = torch.cat([in_sos, next_word], dim=-1)
        if next_word[0][-1] == word2idx['<eos>']: break
    out = []
    for state in in_state[0]:
        if state.item() in (word2idx['<unk>'], word2idx['<pad>']):
            continue
        if state.item() not in idx2word:
            pdb.set_trace()
        out.append(idx2word[state.item()])
    return out
Example 24
def value_train_loop(hypers, nn, criterion=torch.nn.MSELoss(reduction='sum')):
    for epoch in range(hypers['epochs']):
        nn.train()
        train_losses, test_losses = [], []
        for batch in range(hypers['batch_train']):
            graphs, labels = value_generate_batch(hypers['H'], hypers['n'])
            loss = value_eval_batch(nn, graphs, labels, criterion)
            loss.backward()
            train_losses.append(loss.item())
            nn.optim.step()
            nn.zero_grad()

        nn.eval()
        for batch in range(hypers['batch_test']):
            graphs, labels = value_generate_batch(hypers['H'], hypers['n'])
            loss = value_eval_batch(nn, graphs, labels, criterion)
            test_losses.append(loss.item())
        print(
            f"Train loss is {sum(train_losses) / len(train_losses):.4E}.\nTest loss is {sum(test_losses)/len(test_losses):.4E}.\n"
        )
        graph, energy = value_generate_batch(hypers['H'], hypers['n'], 1)
        print(graph, energy, nn(graph))
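Note that nn.train() and nn.eval() only toggle mode-dependent layers such as dropout and batch norm; the actual optimization is driven by loss.backward() and nn.optim.step() above.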
Example 25
def evaluate_neuralnet(nn, env):
    '''
    Evaluate an agent running it in the environment and computing the total reward
    '''
    obs = env.reset()
    game_reward = 0

    while True:
        # Output of the neural net
        net_output = nn(torch.tensor(obs))
        # the action is the nn output clipped to [-1, 1]
        action = np.clip(net_output.data.cpu().numpy().squeeze(), -1, 1)
        new_obs, reward, done, _ = env.step(action)
        obs = new_obs

        game_reward += reward

        if done:
            break

    return game_reward
Example 26
def errors(nn): 
    for X, y in test_loader:
        predictions = nn(X)
        predictions = predictions.detach().numpy()
        predictions = np.exp(predictions)
        predictions = predictions / np.sum(predictions, axis=-1).reshape(-1, 1)

        prob = predictions[np.arange(y.shape[0]), y]

        ind = np.argsort(prob)[:25]

        plt.figure(figsize=(6, 7))
        for i in range(25):
            plt.subplot(5, 5, i+1)
            Xn = X[ind[i]]
            Xn = Xn.transpose(0, 1).transpose(1, 2).numpy()
            Xn = Xn.astype('float64')
            Xn = (Xn - np.min(Xn, axis=0)) / (np.max(Xn, axis=0) - np.min(Xn, axis=0))
            Xn = np.rint(Xn * 255).astype('uint8')
            plt.imshow(Xn.reshape(332, 332, 3))
            plt.title("%d(%d) - %.2f" % (np.argmax(predictions[ind[i], :]), y[ind[i]], prob[ind[i]]))
            plt.axis('off')   
        plt.savefig("Errors_pion_8_new.png", dpi=250)
        break
Example 27
File: ES.py Project: karush17/esac
def evaluate_neuralnet(nn, env, wrap):
    # Evaluate an agent running it in the environment and computing the total reward
    obs = dm_wrap(env.reset(), wrap=wrap)
    game_reward = 0
    reward = 0
    done = False

    while True:
        # Output of the neural net
        obs = torch.FloatTensor(obs)
        action = nn(obs)
        action = np.clip(action.data.cpu().numpy().squeeze(), -1, 1)
        # action = action.data.numpy().argmax()
        # action = np.asarray([action])
        new_obs, reward, done, _ = env.step(action)

        obs = dm_wrap(new_obs, wrap=wrap)

        game_reward += reward

        if done:
            break

    return game_reward
Example 28
        self.pool = nn.MaxPool1d(3, stride=2)

        self.conv3 = nn.Conv1d(96,128, kernel_size=3, stride=1, padding=0)
        self.conv4 = nn.Conv1d(128, 256, kernel_size=3, stride=1, padding=0)
        
        self.fc = nn.Linear(15104, 48)
        self.do = nn.Dropout()
        self.out = nn.Linear(48, classes)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.pool(F.relu(self.conv2(x)))

        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc(self.do(x)))
    
        x = self.out(x)
        return x

if __name__ == '__main__':
    # Test code
    nn = net(20)
    x = torch.randn(32, 2, 64)
    y = nn(x)
    print(y)
    
Example 29
        assert size > 0
        self.weight_batch = size
        if size > 1:
            self.weight = Parameter(torch.Tensor(size, self.out_features, self.in_features))
            if self.bias is not None:
                self.bias = Parameter(torch.Tensor(size, self.out_features))
        else:
            self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))
            if self.bias is not None:
                self.bias = Parameter(torch.Tensor(self.out_features))
        self.double()


if __name__ == "__main__":

    nn = Net(10, 2)
    w = torch.autograd.Variable(torch.randn((10, nn.n_weights)), requires_grad=True)
    samples = np.random.rand(5, 10)
    t = torch.rand(5, 2).double()

    nn.set_weights(w.data.numpy())
    loss = ((nn(samples) - t) ** 2).mean()
    w1 = w[0]
    nn.set_weights(w1.data.numpy())
    loss = ((nn(samples) - t) ** 2).mean()


    loss.backward()
    g = nn.grad()
    g = 1
Example 30
    out = z.mean()
    print(z, out)
    a = torch.randn(2, 2)
    a = ((a*3) / (a-1))
    print(a.requires_grad)
    a.requires_grad_(True)
    print(a.requires_grad)
    b = (a*a).sum()
    print(b.grad_fn)
    out.backward()
    print(x.grad)
    x = torch.randn(3, requires_grad=True)
    y = x*2
    while y.data.norm() < 1000:
        y = y*2
    print(y)
    v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
    y.backward(v)
    print(x.grad)
    print(x.requires_grad)
    print((x**2).requires_grad)
    
    with torch.no_grad():
        print((x**2).requires_grad)
    
    
if __name__ == "__main__":
    nn()
    mnn()
    testTorch()
Example 31
        prediction_vector = self.fc2(intermediate_vector)

        if apply_softmax:
            prediction_vector = F.softmax(prediction_vector, dim=1)

        return prediction_vector


if __name__ == '__main__':
    # args = Namespace(
    #     cbow_csv='frankenstein_with_splits.csv',
    #     vectorizer_file="vectorizer.json",
    # )
    #
    # dataset = CBOWDataset.load_dataset_and_make_vectorizer(args.cbow_csv)
    # dataset.save_vectorizer(args.vectorizer_file)
    #
    # for x in generate_batches(dataset, batch_size=32):
    #     print(x)
    #     break

    nn = NewsClassifier(5, 10, 10, 50, 10, 0.1, pretrained_embeddings=None)

    x = 1 + torch.randn(2, 5).long()

    print(x.size())

    y = nn(x)

    print(y.size())