Example #1
def get_input_from_batch(batch, use_cuda):
  batch_size = len(batch.enc_lens)

  enc_batch = Variable(torch.from_numpy(batch.enc_batch).long())
  enc_padding_mask = Variable(torch.from_numpy(batch.enc_padding_mask)).float()
  enc_lens = batch.enc_lens
  extra_zeros = None
  enc_batch_extend_vocab = None

  if config.pointer_gen:
    enc_batch_extend_vocab = Variable(torch.from_numpy(batch.enc_batch_extend_vocab).long())
    # max_art_oovs is the max over all the article oov list in the batch
    if batch.max_art_oovs > 0:
      extra_zeros = Variable(torch.zeros((batch_size, batch.max_art_oovs)))

  c_t_1 = Variable(torch.zeros((batch_size, 2 * config.hidden_dim)))

  coverage = None
  if config.is_coverage:
    coverage = Variable(torch.zeros(enc_batch.size()))

  if use_cuda:
    enc_batch = enc_batch.cuda()
    enc_padding_mask = enc_padding_mask.cuda()

    if enc_batch_extend_vocab is not None:
      enc_batch_extend_vocab = enc_batch_extend_vocab.cuda()
    if extra_zeros is not None:
      extra_zeros = extra_zeros.cuda()
    c_t_1 = c_t_1.cuda()

    if coverage is not None:
      coverage = coverage.cuda()

  return enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage
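A minimal standalone sketch of the numpy-to-Variable pattern above, with toy shapes (config and the batch object are assumed, not reproduced):

import numpy as np
import torch
from torch.autograd import Variable

enc = np.zeros((4, 7), dtype=np.int64)             # stand-in for batch.enc_batch
enc_batch = Variable(torch.from_numpy(enc).long())
c_t_1 = Variable(torch.zeros((4, 2 * 256)))        # 256 is an assumed hidden_dim
if torch.cuda.is_available():                      # mirrors the use_cuda branch
    enc_batch, c_t_1 = enc_batch.cuda(), c_t_1.cuda()
print(enc_batch.size(), c_t_1.size())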
Example #2
    def _run_mst_decoding(batch_energy: torch.Tensor, lengths: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        heads = []
        head_tags = []
        for energy, length in zip(batch_energy.detach().cpu(), lengths):
            scores, tag_ids = energy.max(dim=0)
            # Although we need to include the root node so that the MST includes it,
            # we do not want any word to be the parent of the root node.
            # Here, we enforce this by setting the scores for all word -> ROOT edges
            # to be 0.
            scores[0, :] = 0
            # Decode the heads. Because we modify the scores to prevent
            # adding in word -> ROOT edges, we need to find the labels ourselves.
            instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)

            # Find the labels which correspond to the edges in the max spanning tree.
            instance_head_tags = []
            for child, parent in enumerate(instance_heads):
                instance_head_tags.append(tag_ids[parent, child].item())
            # We don't care what the head or tag is for the root token, but by default it's
            # not necessarily the same in the batched vs unbatched case, which is annoying.
            # Here we'll just set them to zero.
            instance_heads[0] = 0
            instance_head_tags[0] = 0
            heads.append(instance_heads)
            head_tags.append(instance_head_tags)
        return torch.from_numpy(numpy.stack(heads)), torch.from_numpy(numpy.stack(head_tags))
Example #3
    def __init__(self, input_dim, hidden_dim, word_drop, word_alphabet_size, word_emb_dim, pretrain_word_emb=None, left2right=True, fix_word_emb=True, gpu=True, use_bias=True):
        super(LatticeLSTM, self).__init__()
        skip_direction = "forward" if left2right else "backward"
        print ("build LatticeLSTM... ", skip_direction, ", Fix emb:", fix_word_emb, " gaz drop:", word_drop)
        self.gpu = gpu
        self.hidden_dim = hidden_dim
        self.word_emb = nn.Embedding(word_alphabet_size, word_emb_dim)
        if pretrain_word_emb is not None:
            print ("load pretrain word emb...", pretrain_word_emb.shape)
            self.word_emb.weight.data.copy_(torch.from_numpy(pretrain_word_emb))

        else:
            self.word_emb.weight.data.copy_(torch.from_numpy(self.random_embedding(word_alphabet_size, word_emb_dim)))
        if fix_word_emb:
            self.word_emb.weight.requires_grad = False
        
        self.word_dropout = nn.Dropout(word_drop)

        self.rnn = MultiInputLSTMCell(input_dim, hidden_dim)
        self.word_rnn = WordLSTMCell(word_emb_dim, hidden_dim)
        self.left2right = left2right
        if self.gpu:
            self.rnn = self.rnn.cuda()
            self.word_emb = self.word_emb.cuda()
            self.word_dropout = self.word_dropout.cuda()
            self.word_rnn = self.word_rnn.cuda()
Example #4
    def get_hierarchical_features_till_spawn(self, actions, backtrack_steps=0):

        action_length = len(actions)-1
        pa, ca, pq_idx, cq_idx, ph_idx = flat_to_hierarchical_actions(actions)

        target_pos_idx = action_length - backtrack_steps

        controller_step = True
        if target_pos_idx in pq_idx:
            controller_step = False

        pq_idx_pruned = [v for v in pq_idx if v <= target_pos_idx]
        pa_pruned = pa[:len(pq_idx_pruned)+1]

        images = self.get_frames(
            self.episode_house,
            self.episode_pos_queue,
            preprocess=True)
        raw_img_feats = self.cnn(
            Variable(torch.FloatTensor(images)
                     .cuda())).data.cpu().numpy().copy()

        controller_img_feat, controller_action_in = False, False
        if controller_step:
            controller_img_feat = torch.from_numpy(raw_img_feats[target_pos_idx].copy())
            controller_action_in = pa_pruned[-1] - 2

        planner_img_feats = torch.from_numpy(raw_img_feats[pq_idx_pruned].copy())
        planner_actions_in = torch.from_numpy(np.array(pa_pruned[:-1]) - 1)

        return planner_actions_in, planner_img_feats, controller_step, controller_action_in, controller_img_feat, self.episode_pos_queue[target_pos_idx]
Example #5
def evaluate(attention_model, x_test, y_test):
    """
        cv results
 
        Args:
            attention_model : {object} model
            x_test          : {nplist} x_test
            y_test          : {nplist} y_test
       
        Returns:
            cv-accuracy
 
      
    """
   
    attention_model.batch_size = x_test.shape[0]
    attention_model.hidden_state = attention_model.init_hidden()
    x_test_var = Variable(torch.from_numpy(x_test).type(torch.LongTensor))
    y_test_pred,_ = attention_model(x_test_var)
    if bool(attention_model.type):
        y_preds = torch.max(y_test_pred,1)[1]
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.LongTensor))
       
    else:
        y_preds = torch.round(y_test_pred.type(torch.DoubleTensor).squeeze(1))
        y_test_var = Variable(torch.from_numpy(y_test).type(torch.DoubleTensor))
       
    return torch.eq(y_preds,y_test_var).data.sum()/x_test_var.size(0)
Example #6
    def predict(self, dataset):
        """Predict target for dataset.

        Parameters:
        ----------
        dataset (dict): dictionary with the testing dataset -
        X_wide_test, X_deep_test, target

        Returns:
        --------
        array-like with the target for dataset
        """

        X_w = Variable(torch.from_numpy(dataset.wide)).float()
        X_d = Variable(torch.from_numpy(dataset.deep))

        if use_cuda:
            X_w, X_d = X_w.cuda(), X_d.cuda()

        # set the model in evaluation mode so dropout is not applied
        net = self.eval()
        pred = net(X_w,X_d).cpu()
        if self.method == "regression":
            return pred.squeeze(1).data.numpy()
        if self.method == "logistic":
            return (pred > 0.5).squeeze(1).data.numpy()
        if self.method == "multiclass":
            _, pred_cat = torch.max(pred, 1)
            return pred_cat.data.numpy()
Example #7
    def predict_proba(self, dataset):
        """Predict predict probability for dataset.
        This method will only work with method logistic/multiclass

        Parameters:
        ----------
        dataset (dict): dictionary with the testing dataset -
        X_wide_test, X_deep_test, target

        Returns:
        --------
        array-like with the probability for dataset.
        """

        X_w = Variable(torch.from_numpy(dataset.wide)).float()
        X_d = Variable(torch.from_numpy(dataset.deep))

        if use_cuda:
            X_w, X_d = X_w.cuda(), X_d.cuda()

        # set the model in evaluation mode so dropout is not applied
        net = self.eval()
        pred = net(X_w,X_d).cpu()
        if self.method == "logistic":
            pred = pred.squeeze(1).data.numpy()
            probs = np.zeros([pred.shape[0],2])
            probs[:,0] = 1-pred
            probs[:,1] = pred
            return probs
        if self.method == "multiclass":
            return pred.data.numpy()
Example #8
    def fit(self, X, Y, n_epoch=10, batch_size=128, en_shuffle=True):
        global_step = 0
        n_batch = len(X) // batch_size
        total_steps = int(n_epoch * n_batch)

        for epoch in range(n_epoch):
            if en_shuffle:
                shuffled = np.random.permutation(len(X))
                X = X[shuffled]
                Y = Y[shuffled]
            state = None
            for local_step, (X_batch, Y_batch) in enumerate(zip(self.gen_batch(X, batch_size),
                                                                self.gen_batch(Y, batch_size))):
                y_batch = Y_batch.ravel()
                inputs = torch.autograd.Variable(torch.from_numpy(X_batch.astype(np.int64)))
                labels = torch.autograd.Variable(torch.from_numpy(y_batch.astype(np.int64)))
                
                if (self.stateful) and (len(X_batch) == batch_size):
                    preds, state = self.forward(inputs, state)
                    state = (torch.autograd.Variable(state[0].data), torch.autograd.Variable(state[1].data))
                else:
                    preds, _ = self.forward(inputs)

                loss = self.criterion(preds, labels)                   # cross entropy loss
                self.optimizer, lr = self.adjust_lr(self.optimizer, global_step, total_steps)
                self.optimizer.zero_grad()                             # clear gradients for this training step
                loss.backward()                                        # backpropagation, compute gradients
                self.optimizer.step()                                  # apply gradients
                global_step += 1

                preds = torch.max(preds, 1)[1].data.numpy().squeeze()
                acc = (preds == y_batch).mean()
                if local_step % 100 == 0:
                    print ('Epoch [%d/%d] | Step [%d/%d] | Loss: %.4f | Acc: %.4f | LR: %.4f'
                           %(epoch+1, n_epoch, local_step, n_batch, loss.data[0], acc, lr))
Example #9
def get_test_data(num_train=1000, num_test=500, 
                  input_shape=(10,), output_shape=(2,),
                  classification=True, num_classes=2):
    """Generates test data to train a model on.

    classification=True overrides output_shape
    (i.e. output_shape is set to (1,)) and the output
    consists in integers in [0, num_class-1].

    Otherwise: float output with shape output_shape.
    """
    samples = num_train + num_test
    if classification:
        y = np.random.randint(0, num_classes, size=(samples,))
        X = np.zeros((samples,) + input_shape)
        for i in range(samples):
            X[i] = np.random.normal(loc=y[i], scale=0.7, size=input_shape)
    else:
        y_loc = np.random.random((samples,))
        X = np.zeros((samples,) + input_shape)
        y = np.zeros((samples,) + output_shape)
        for i in range(samples):
            X[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=input_shape)
            y[i] = np.random.normal(loc=y_loc[i], scale=0.7, size=output_shape)

    return (th.from_numpy(X[:num_train]), th.from_numpy(y[:num_train])), \
           (th.from_numpy(X[num_train:]), th.from_numpy(y[num_train:]))
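A quick call sketch for get_test_data (this assumes th above is torch, as the return statement implies):

(X_train, y_train), (X_test, y_test) = get_test_data(num_train=100, num_test=50)
print(X_train.shape, y_train.shape)   # torch.Size([100, 10]) torch.Size([100])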
Example #10
    def __iter__(self):
        for batch in self.data:
            batch_size = len(batch)
            batch = list(zip(*batch))
            if self.eval:
                assert len(batch) == 7
            else:
                assert len(batch) == 9

            context_len = max(len(x) for x in batch[0])
            context_id = torch.LongTensor(batch_size, context_len).fill_(0)
            context_order = torch.LongTensor(batch_size,context_len).fill_(0)

            for i, doc in enumerate(batch[0]):
                context_id[i, :len(doc)] = torch.LongTensor(doc)
                context_order[i,:len(doc)] = torch.from_numpy(np.arange(1,len(doc)+1))
            feature_len = len(batch[1][0][0])
            context_feature = torch.Tensor(batch_size, context_len, feature_len).fill_(0)
            for i, doc in enumerate(batch[1]):
                for j, feature in enumerate(doc):
                    context_feature[i, j, :] = torch.Tensor(feature)

            context_tag = torch.LongTensor(batch_size, context_len).fill_(0)
            for i, doc in enumerate(batch[2]):
                context_tag[i, :len(doc)] = torch.LongTensor(doc)

            context_ent = torch.LongTensor(batch_size, context_len).fill_(0)
            for i, doc in enumerate(batch[3]):
                context_ent[i, :len(doc)] = torch.LongTensor(doc)
            question_len = max(len(x) for x in batch[4])
            question_id = torch.LongTensor(batch_size, question_len).fill_(0)
            question_order = torch.LongTensor(batch_size,question_len).fill_(0)
            for i, doc in enumerate(batch[4]):
                question_id[i, :len(doc)] = torch.LongTensor(doc)
                question_order[i,:len(doc)] = torch.from_numpy(np.arange(1,len(doc)+1))

            context_mask = torch.eq(context_id, 0)
            question_mask = torch.eq(question_id, 0)
            if not self.eval:
                y_s = torch.LongTensor(batch[5])
                y_e = torch.LongTensor(batch[6])
            text = list(batch[-2])
            span = list(batch[-1])
            if self.gpu:
                context_id = context_id.pin_memory()
                context_feature = context_feature.pin_memory()
                context_tag = context_tag.pin_memory()
                context_ent = context_ent.pin_memory()
                context_mask = context_mask.pin_memory()
                question_id = question_id.pin_memory()
                question_mask = question_mask.pin_memory()
                context_order = context_order.pin_memory()
                question_order = question_order.pin_memory()

            if self.eval:
                yield (context_id, context_feature, context_tag, context_ent, context_mask,
                       question_id, question_mask, context_order, question_order, text, span)
            else:
                yield (context_id, context_feature, context_tag, context_ent, context_mask,
                       question_id, question_mask, context_order, question_order, y_s, y_e, text, span)
Example #11
def im_detect(net, im):
    blobs, im_scales = _get_blobs(im)
    assert len(im_scales) == 1, "Only single-image batch implemented"

    im_blob = blobs['data']
    blobs['im_info'] = np.array(
        [im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)

    _, scores, bbox_pred, rois = net.test_image(blobs['data'],
                                                blobs['im_info'])

    boxes = rois[:, 1:5] / im_scales[0]
    scores = np.reshape(scores, [scores.shape[0], -1])
    bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = bbox_pred
        pred_boxes = bbox_transform_inv(
            torch.from_numpy(boxes), torch.from_numpy(box_deltas)).numpy()
        pred_boxes = _clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))

    return scores, pred_boxes
Example #12
def load_pretrained_npy(faster_rcnn_model, fname):
    params = np.load(fname).item()
    # vgg16
    vgg16_dict = faster_rcnn_model.rpn.features.state_dict()
    for name, val in list(vgg16_dict.items()):
        # # print name
        # # print val.size()
        # # print param.size()
        if name.find('bn.') >= 0:
            continue
        i, j = int(name[4]), int(name[6]) + 1
        ptype = 'weights' if name[-1] == 't' else 'biases'
        key = 'conv{}_{}'.format(i, j)
        param = torch.from_numpy(params[key][ptype])

        if ptype == 'weights':
            param = param.permute(3, 2, 0, 1)

        val.copy_(param)

    # fc6 fc7
    frcnn_dict = faster_rcnn_model.state_dict()
    pairs = {'fc6.fc': 'fc6', 'fc7.fc': 'fc7'}
    for k, v in list(pairs.items()):
        key = '{}.weight'.format(k)
        param = torch.from_numpy(params[v]['weights']).permute(1, 0)
        frcnn_dict[key].copy_(param)

        key = '{}.bias'.format(k)
        param = torch.from_numpy(params[v]['biases'])
        frcnn_dict[key].copy_(param)
Example #13
def interpolate(ae, gg, z1, z2, vocab,
                steps=5, sample=None, maxlen=None):
    """
    Interpolating in z space
    Assumes that type(z1) == type(z2)
    """
    if type(z1) == Variable:
        noise1 = z1
        noise2 = z2
    elif type(z1) == torch.FloatTensor or type(z1) == torch.cuda.FloatTensor:
        noise1 = Variable(z1, volatile=True)
        noise2 = Variable(z2, volatile=True)
    elif type(z1) == np.ndarray:
        noise1 = Variable(torch.from_numpy(z1).float(), volatile=True)
        noise2 = Variable(torch.from_numpy(z2).float(), volatile=True)
    else:
        raise ValueError("Unsupported input type (noise): {}".format(type(z1)))

    # interpolation weights
    lambdas = [x*1.0/(steps-1) for x in range(steps)]

    gens = []
    for L in lambdas:
        gens.append(generate(ae, gg, (1-L)*noise1 + L*noise2,
                             vocab, sample, maxlen))

    interpolations = []
    for i in range(len(gens[0])):
        interpolations.append([s[i] for s in gens])
    return interpolations
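The interpolation weights above form a uniform grid on [0, 1]; a quick check for steps=5:

steps = 5
print([x * 1.0 / (steps - 1) for x in range(steps)])   # [0.0, 0.25, 0.5, 0.75, 1.0]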
Example #14
def l2l_train(model, cluster_center, n_epoch=10000, trunc_step=10):
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    M_all = Variable(torch.zeros(model.n_class, model.n_dim))
    B_all = Variable(torch.zeros(model.n_class))
    for epoch in range(n_epoch):
        loss = 0
        M_step, B_step = [], []
        for step in range(trunc_step):
            data = generate_data(cluster_center)
            optimizer.zero_grad()
            x, y = Variable(torch.from_numpy(data[0])).float(), Variable(torch.from_numpy(data[1]))
            w, b = model(x)
            M = Variable(torch.zeros(model.n_class_n, model.n_dim))
            B = Variable(torch.zeros(model.n_class_n))
            for k in range(model.n_class_n):
                M[k] = torch.cat((w[:, 0][y == model.n_class_l + k].view(-1, 1),
                                  w[:, 1][y == model.n_class_l + k].view(-1, 1)), 1).mean(0)
                B[k] = b[y == model.n_class_l + k].mean()
            if step == 0:
                M_ = M
                B_ = B
            else:
                M_ = step / (step + 1) * M_step[-1] + 1 / (step + 1) * M
                B_ = step / (step + 1) * B_step[-1] + 1 / (step + 1) * B
            M_step.append(M_)
            B_step.append(B_)
            pred = torch.mm(x, M_.t()) + B_.view(1, -1).expand_as(torch.mm(x, M_.t()))
            loss += F.cross_entropy(pred, y)
        loss.backward()
        optimizer.step()
        print('Train Epoch: {}\tLoss: {:.6f}'.format(epoch, loss.data[0]))
    return M_all, B_all, cluster_center
Example #15
def resize(original, um_sizes, desired_res):
    """ Resize array originally of um_sizes size to have desired_res resolution.

    We preserve the center of original and resized arrays exactly in the middle. We also
    make sure resolution is exactly the desired resolution. Given these two constraints,
    we cannot hold FOV of original and resized arrays to be exactly the same.

    :param np.array original: Array to resize.
    :param tuple um_sizes: Size in microns of the array (one per axis).
    :param int or tuple desired_res: Desired resolution (um/px) for the output array.

    :return: Output array (np.float32) resampled to the desired resolution. Size in pixels
        is round(um_sizes / desired_res).
    """
    import torch.nn.functional as F

    # Create grid to sample in microns
    grid = create_grid(um_sizes, desired_res) # d x h x w x 3

    # Re-express as a torch grid [-1, 1]
    um_per_px = np.array([um / px for um, px in zip(um_sizes, original.shape)])
    torch_ones = np.array(um_sizes) / 2 - um_per_px / 2  # sample position of last pixel in original
    grid = grid / torch_ones[::-1].astype(np.float32)

    # Resample
    input_tensor = torch.from_numpy(original.reshape(1, 1, *original.shape).astype(
        np.float32))
    grid_tensor = torch.from_numpy(grid.reshape(1, *grid.shape))
    resized_tensor = F.grid_sample(input_tensor, grid_tensor, padding_mode='border')
    resized = resized_tensor.numpy().squeeze()

    return resized
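A hedged call sketch for resize, assuming create_grid (and torch) are in scope and behave as the docstring describes:

import numpy as np

volume = np.random.rand(10, 20, 20).astype(np.float32)     # d x h x w stack
resized = resize(volume, um_sizes=(50, 100, 100), desired_res=5)
print(resized.shape)   # (10, 20, 20), i.e. round(um_sizes / desired_res)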
Example #16
    def imgEncodeTorch(self, abimg):
        abimg = abimg.cuda()
        w, h = abimg.shape[1], abimg.shape[2]
        label = torch.zeros((w*h, 313))
        label = label.cuda()

        (dists, indexes) = self.nbrs.kneighbors(
            abimg.view(abimg.shape[0], -1).t(), self.NN)
        dists = torch.from_numpy(dists).float().cuda()
        indexes = torch.from_numpy(indexes).cuda()

        weights = torch.exp(-dists**2/(2*self.sigma**2)).cuda()
        weights = weights/torch.sum(weights, dim=1).view(-1, 1)

        pixel_indexes = torch.Tensor.long(torch.arange(
            start=0, end=abimg.shape[1]*abimg.shape[2])[:, np.newaxis])
        pixel_indexes = pixel_indexes.cuda()
        label[pixel_indexes, indexes] = weights
        label = label.t().contiguous().view(313, w, h)

        rebal_indexes = indexes[:, 0]
        rebal_weights = self.weights[rebal_indexes]
        rebal_weights = rebal_weights.view(w, h)
        rebal_label = rebal_weights * label

        return rebal_label
Example #17
def l2l_validate(model, cluster_center, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        data_l = generate_data_l(cluster_center)
        data_n = generate_data_n(cluster_center, model.n_class_n)
        x_l, y_l = Variable(torch.from_numpy(data_l[0])).float(), Variable(
            torch.from_numpy(data_l[1]))
        x_n, y_n = Variable(torch.from_numpy(data_n[0])).float(), Variable(
            torch.from_numpy(data_n[1]))
        pred_ll, pred_nl, w, b = model(x_l, x_n)
        M = Variable(torch.zeros(model.n_class_n, model.n_dim))
        B = Variable(torch.zeros(model.n_class_n))
        for k in range(model.n_class_n):
            M[k] = torch.cat((w[:, 0][y_n == model.n_class_l + k].view(-1, 1),
                              w[:, 1][y_n == model.n_class_l + k].view(-1, 1)), 1).mean(0)
            B[k] = b[y_n == model.n_class_l + k].mean()
        pred_ln = torch.mm(x_l, M.t()) + B.view(1, -1).expand_as(torch.mm(x_l, M.t()))
        pred_nn = torch.mm(x_n, M.t()) + B.view(1, -1).expand_as(torch.mm(x_n, M.t()))
        pred = torch.cat((torch.cat((pred_ll, pred_nl)), torch.cat((pred_ln, pred_nn))), 1)
        pred = pred.data.max(1)[1]
        y = torch.cat((y_l, y_n))
        accuracy = pred.eq(y.data).cpu().sum() * 1.0 / y.size()[0]
        # print('accuracy: %.2f' % accuracy)
        val_accuracy.append(accuracy)
        acc_l = pred.eq(y.data).cpu()[0:100].sum() * 1.0 / 100
        acc_n = pred.eq(y.data).cpu()[100:150].sum() * 1.0 / 50
        print('accuracy: %.2f, lifelong accuracy: %.2f, new accuracy: %.2f' % (accuracy, acc_l, acc_n))

    return numpy.mean(numpy.asarray(val_accuracy))
Example #18
 def __call__(self, pic):
     if isinstance(pic, np.ndarray):
         # handle numpy array
         img = torch.from_numpy(pic.transpose((2, 0, 1)))
         # backward compatibility
         return img.float().div(255)
     # handle PIL Image
     if pic.mode == 'I':
         img = torch.from_numpy(np.array(pic, np.int32, copy=False))
     elif pic.mode == 'I;16':
         img = torch.from_numpy(np.array(pic, np.int16, copy=False))
     else:
         img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
     # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
     if pic.mode == 'YCbCr':
         nchannel = 3
     elif pic.mode == 'I;16':
         nchannel = 1
     else:
         nchannel = len(pic.mode)
     img = img.view(pic.size[1], pic.size[0], nchannel)
     # put it from HWC to CHW format
     # yikes, this transpose takes 80% of the loading time/CPU
     img = img.transpose(0, 1).transpose(0, 2).contiguous()
     if isinstance(img, torch.ByteTensor):
         return img.float().div(255)
     else:
         return img
Example #19
    def collate_data(self, data):
        video_tensor = np.zeros((len(data), 2048))
        flow_tensor = np.zeros((len(data), 1024))
        face_tensor = np.zeros((len(data), 128))
        audio_tensor = np.zeros((len(data), self.max_words,128))
        text_tensor = np.zeros((len(data), self.max_words, 300))
        coco_ind = np.zeros((len(data)))
        face_ind = np.zeros((len(data)))

        for i in range(len(data)):

            coco_ind[i] = data[i]['coco_ind']
            face_ind[i] = data[i]['face_ind']
            video_tensor[i] = data[i]['video']
            flow_tensor[i] = data[i]['flow']

            if len(data[i]['face']) > 0:
                face_tensor[i] = data[i]['face']
            
            la = len(data[i]['audio'])
            audio_tensor[i,:min(la,self.max_words), :] = data[i]['audio'][:min(self.max_words,la)]

            lt = len(data[i]['text'])
            text_tensor[i,:min(lt,self.max_words), :] = data[i]['text'][:min(self.max_words,lt)]


        return {'video': th.from_numpy(video_tensor).float(),
                'flow': th.from_numpy(flow_tensor).float(),
                'face': th.from_numpy(face_tensor).float(),
                'coco_ind': coco_ind,
                'face_ind': face_ind,
                'text': th.from_numpy(text_tensor).float(),
                'audio': th.from_numpy(audio_tensor).float()}
Example #20
 def corrupt(self, src, rel, dst):
     prob = self.bern_prob[rel]
     selection = torch.bernoulli(prob).numpy().astype('int64')
     ent_random = choice(self.n_ent, len(src))
     src_out = (1 - selection) * src.numpy() + selection * ent_random
     dst_out = selection * dst.numpy() + (1 - selection) * ent_random
     return torch.from_numpy(src_out), torch.from_numpy(dst_out)
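A standalone sketch of the Bernoulli corruption trick above, with assumed stand-ins for self.bern_prob, self.n_ent and choice:

import numpy as np
import torch

src = torch.tensor([0, 1, 2, 3])
dst = torch.tensor([4, 5, 6, 7])
prob = torch.full((4,), 0.5)                        # assumed per-triple probabilities
selection = torch.bernoulli(prob).numpy().astype('int64')
ent_random = np.random.choice(10, 4)                # stand-in for choice(self.n_ent, len(src))
src_out = (1 - selection) * src.numpy() + selection * ent_random
dst_out = selection * dst.numpy() + (1 - selection) * ent_random
print(torch.from_numpy(src_out), torch.from_numpy(dst_out))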
Example #21
def evalAccuracy(model, device, args, outPutType, test_X, test_Y, validEval = False) :
    model_training_orig = model.training
    setModelMode(model, False)
    N_test = len(test_Y)*len(test_Y[0])
    totals = 0
    for b_data, b_labels in zip(test_X, test_Y):
        b_data = torch.from_numpy(b_data).to(device) 
        b_labels = torch.from_numpy(b_labels).to(device) 
        b_labels = b_labels.view(b_labels.shape[0],-1 )  # make it the same shape as output
       
        yhat = model(b_data) # need to compute the Yhat again, as this is the yhat AFTER updating the weights, not before as in 'learn()' function
        b_data = None
        
        # depending on if we are in a classification or regression problem, we evaluate performance differently
        if outPutType == OUT_REGRESSION :  
            currentRate = torch_pearsonr( yhat.view(-1)  , b_labels.view(-1))**2   
            N_test = len(test_Y) # as we are testing correlation, the N should refer to the number of batches, and NOT the total number of observations  
        elif outPutType == OUT_MULTICLASS : 
            currentRate = calc_Accuracy(yhat,b_labels )    # calculate accuracy, this is ROUNDED 
        else : # mean absolute error
            currentRate = -torch.mean(torch.abs(yhat - b_labels)) # negative, as the other metrics are accuracies: the greater the error, the lower the accuracy
            N_test = len(test_Y) # here too, N refers to the number of batches, not the total number of observations
 
        currentRate = float(currentRate.detach().cpu().numpy() )  # need to move it back to CPU
        totals = totals + currentRate # accumulate over all minibatches

    accuracy = round( float(totals)/N_test,5)
    setModelMode(model, model_training_orig)
    return accuracy
Example #22
    def __call__(self, inputs):
        if isinstance(inputs, Image.Image):
            inputs = torch.from_numpy(np.array(inputs).transpose(2, 0, 1))
        else:
            inputs = torch.from_numpy(inputs.transpose(2, 0, 1))

        return inputs.float()
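A hypothetical use of the converter above (its enclosing class is not shown, so the name ToTensor is assumed):

import numpy as np

to_tensor = ToTensor()                              # assumed name of the class above
hwc = np.random.rand(4, 4, 3).astype(np.float32)    # HWC numpy image
print(to_tensor(hwc).shape)                         # torch.Size([3, 4, 4])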
Example #23
def average_without_padding(x, ids, padding_id, cuda=False, eps=1e-8):
    if cuda:
        mask = Variable(torch.from_numpy(np.not_equal(ids, padding_id).astype(int)[:,:,np.newaxis])).float().cuda().permute(1, 2, 0).expand_as(x)
    else:
        mask = Variable(torch.from_numpy(np.not_equal(ids, padding_id).astype(int)[:,:,np.newaxis])).float().permute(1, 2, 0).expand_as(x)
    s = torch.sum(x*mask, dim=2) / (torch.sum(mask, dim=2)+eps)
    return s
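A toy shape check for average_without_padding; x is laid out (len, dim, batch) to match the permute above:

import numpy as np
import torch
from torch.autograd import Variable

ids = np.array([[1, 2, 0], [3, 0, 0]])    # (batch=2, len=3), 0 is the padding id
x = Variable(torch.ones(3, 4, 2))         # (len=3, dim=4, batch=2)
print(average_without_padding(x, ids, padding_id=0).size())   # torch.Size([3, 4])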
Example #24
    def run_test(self, args):
        print("testing...")
        self.eval()

        game = DoomInstance(
            args.vizdoom_config, args.wad_path, args.skiprate, visible=True, actions=args.action_set, bot_cmd=args.bot_cmd)
        step_state = game.get_state_normalized()

        state = NormalizedState(screen=None, depth=None, labels=None, variables=None)
        state.screen = torch.Tensor(1, *args.screen_size)
        state.variables = torch.Tensor(1, args.variable_num)

        while True:
            # convert state to torch tensors
            state.screen[0, :] = torch.from_numpy(step_state.screen)
            state.variables[0, :] = torch.from_numpy(step_state.variables)
            # compute an action
            action = self.get_action(state)
            # render
            step_state, _, finished = game.step_normalized(action[0][0])
            #img = step_state.screen[0:3, :]
            #img = img.transpose(1, 2, 0)
            #plt.imsave('depth-plan.png', img)
            if finished:
                print("episode return: {}".format(game.get_episode_return()))
                self.set_terminal(torch.zeros(1))
Example #25
def flow_resnet50_aux(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
        pretrained_dict = model_zoo.load_url(model_urls['resnet50'])

        model_dict = model.state_dict()
        fc_origin_weight = pretrained_dict["fc.weight"].data.numpy()
        fc_origin_bias = pretrained_dict["fc.bias"].data.numpy()

        # 1. filter out unnecessary keys
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict) 
        # print(model_dict)
        fc_new_weight = model_dict["fc_aux.weight"].numpy() 
        fc_new_bias = model_dict["fc_aux.bias"].numpy() 

        fc_new_weight[:1000, :] = fc_origin_weight
        fc_new_bias[:1000] = fc_origin_bias

        model_dict["fc_aux.weight"] = torch.from_numpy(fc_new_weight)
        model_dict["fc_aux.bias"] = torch.from_numpy(fc_new_bias)

        # 3. load the new state dict
        model.load_state_dict(model_dict)

    return model
Example #26
def train():
    num_classes = 21
    net = ssd.SSD300(num_classes=num_classes)
    ssd_box_coder = ssd.SSDBoxCoder(net)

    C, H, W = (3, 300, 300)
    x = Variable(torch.randn(1, C, H, W))
    boxes = torch.from_numpy(np.array([(0, 0, 100, 100), (25, 25, 125, 125), (200, 200, 250, 250), (0, 0, 300, 300)], dtype=np.float32))
    labels = torch.from_numpy(np.array([1, 2, 3, 4], dtype=np.long))
    loc_targets, cls_targets = ssd_box_coder.encode(boxes, labels)
    loc_targets = loc_targets[None, :]
    cls_targets = cls_targets[None, :]
    # print('loc_targets.size():{}'.format(loc_targets.size()))
    # print('cls_targets.size():{}'.format(cls_targets.size()))

    optimizer = optim.SGD(net.parameters(), lr=1e-5, momentum=0.9, weight_decay=5e-4)
    criterion = ssd.SSDLoss(num_classes=num_classes)

    for epoch in range(100):
        loc_preds, cls_preds = net(x)
        # print('loc_preds.size():{}'.format(loc_preds.size()))
        # print('cls_preds.size():{}'.format(cls_preds.size()))
        optimizer.zero_grad()

        loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets)
        loss.backward()
        optimizer.step()
Example #27
    def __init__(self, height, width, lr = 1, aux_loss = False, ray_tracing = False):
        super(Depth3DGridGen_with_mask, self).__init__()
        self.height, self.width = height, width
        self.aux_loss = aux_loss
        self.lr = lr
        self.ray_tracing = ray_tracing

        self.grid = np.zeros( [self.height, self.width, 3], dtype=np.float32)
        self.grid[:,:,0] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/self.height), 0), repeats = self.width, axis = 0).T, 0)
        self.grid[:,:,1] = np.expand_dims(np.repeat(np.expand_dims(np.arange(-1, 1, 2.0/self.width), 0), repeats = self.height, axis = 0), 0)
        self.grid[:,:,2] = np.ones([self.height, self.width])
        self.grid = torch.from_numpy(self.grid.astype(np.float32))

        self.theta = self.grid[:,:,0] * np.pi/2 + np.pi/2
        self.phi = self.grid[:,:,1] * np.pi

        self.x = torch.sin(self.theta) * torch.cos(self.phi)
        self.y = torch.sin(self.theta) * torch.sin(self.phi)
        self.z = torch.cos(self.theta)

        self.grid3d = torch.from_numpy(np.zeros( [self.height, self.width, 4], dtype=np.float32))

        self.grid3d[:,:,0] = self.x
        self.grid3d[:,:,1] = self.y
        self.grid3d[:,:,2] = self.z
        self.grid3d[:,:,3] = self.grid[:,:,2]
Example #28
def artist_works_with_labels():     # painting from the famous artist (real target)
    a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
    paintings = a * np.power(PAINT_POINTS, 2) + (a-1)
    labels = (a-1) > 0.5            # upper paintings (1), lower paintings (0), two classes
    paintings = torch.from_numpy(paintings).float()
    labels = torch.from_numpy(labels.astype(np.float32))
    return paintings, labels
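artist_works_with_labels relies on module-level globals; a hedged sketch with assumed values:

import numpy as np
import torch

BATCH_SIZE = 8                                      # assumed value
PAINT_POINTS = np.vstack([np.linspace(-1, 1, 15) for _ in range(BATCH_SIZE)])
paintings, labels = artist_works_with_labels()
print(paintings.size(), labels.size())    # torch.Size([8, 15]) torch.Size([8, 1])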
Example #29
    def transform(self, img, lbl):
        """transform

        :param img:
        :param lbl:
        """
        img = img[:, :, ::-1]
        img = img.astype(np.float64)
        img -= self.mean
        img = m.imresize(img, (self.img_size[0], self.img_size[1]))
        # Resize scales images from 0 to 255, thus we need
        # to divide by 255.0
        img = img.astype(float) / 255.0
        # HWC -> CHW
        img = img.transpose(2, 0, 1)
        
        classes = np.unique(lbl)
        lbl = lbl.astype(float)
        lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
        lbl = lbl.astype(int)
        

        if not np.all(classes == np.unique(lbl)):
            print("WARN: resizing labels yielded fewer classes")

        if not np.all(np.unique(lbl[lbl!=self.ignore_index]) < self.n_classes):
            print('after det', classes,  np.unique(lbl))
            raise ValueError("Segmentation map contained invalid class values")

        img = torch.from_numpy(img).float()
        lbl = torch.from_numpy(lbl).long()

        return img, lbl
Example #30
def data_generator(args, screens, variables, labels, episodes):
    # remove short episodes
    episode_min_size = args.episode_size*args.skiprate
    episodes = episodes[episodes[:, 1]-episodes[:, 0] > episode_min_size]
    episodes_num = len(episodes)
    #
    batch_size = args.batch_size
    step_idx = episodes[:, 0].copy() + np.random.randint(args.skiprate, size=episodes_num)
    step_screens = np.ndarray(shape=(batch_size, *screens.shape[1:]), dtype=np.float32)
    step_variables = np.ndarray(shape=(batch_size, *variables.shape[1:]), dtype=np.float32)
    step_labels = np.ndarray(shape=(batch_size,), dtype=np.int)
    step_terminals = np.ones(shape=(batch_size, 1), dtype=np.float32)
    # select episodes for the initial batch
    batch_episodes = np.random.randint(episodes_num, size=batch_size)
    while True:
        for i in range(batch_size):
            idx = batch_episodes[i]
            step_screens[i, :] = screens[step_idx[idx]] / 127.5 - 1.0
            step_variables[i, :] = variables[step_idx[idx]] / 100
            step_labels[i] = labels[step_idx[idx]]
            step_idx[idx] += args.skiprate
            if step_idx[idx] > episodes[idx][1]:
                step_idx[idx] = episodes[idx][0] + np.random.randint(args.skiprate)
                step_terminals[i] = 0
                # reached terminal state, select a new episode
                batch_episodes[i] = np.random.randint(episodes_num)
            else:
                step_terminals[i] = 1

        yield torch.from_numpy(step_screens), \
              torch.from_numpy(step_variables), \
              torch.from_numpy(step_labels), \
              torch.from_numpy(step_terminals)
Example #31
    pred, uncert = sgp.predict(X_train, 0 * X_train)
    error = np.sqrt(np.mean((pred - y_train)**2))
    trainll = np.mean(sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert)))
    print('Train RMSE: ', error)
    print('Train ll: ', trainll)

    # We pick the next 60 inputs
    next_inputs = sgp.batched_greedy_ei(60, np.min(X_train, 0),
                                        np.max(X_train, 0))
    valid_smiles = []
    new_features = []
    for i in range(60):
        all_vec = next_inputs[i].reshape((1, -1))
        tree_vec, mol_vec = np.hsplit(all_vec, 2)
        tree_vec = create_var(torch.from_numpy(tree_vec).float())
        mol_vec = create_var(torch.from_numpy(mol_vec).float())
        s = model.decode(tree_vec, mol_vec, prob_decode=False)
        if s is not None:
            valid_smiles.append(s)
            new_features.append(all_vec)

    print(len(valid_smiles), "molecules are found")
    valid_smiles = valid_smiles[:50]
    new_features = next_inputs[:50]
    new_features = np.vstack(new_features)
    save_object(valid_smiles,
                opts.save_dir + "/valid_smiles{}.dat".format(iteration))

    import sascorer
    import networkx as nx
Example #32
 def run(self, img):
     """ Process a numpy image to extract points and descriptors.
     Input
       img - HxW numpy float32 input image in range [0,1].
     Output
       corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
       desc - 256xN numpy array of corresponding unit normalized descriptors.
       heatmap - HxW numpy heatmap in range [0,1] of point confidences.
       """
     assert img.ndim == 2, 'Image must be grayscale.'
     assert img.dtype == np.float32, 'Image must be float32.'
     H, W = img.shape[0], img.shape[1]
     inp = img.copy()
     inp = (inp.reshape(1, H, W))
     inp = torch.from_numpy(inp)
     inp = torch.autograd.Variable(inp).view(1, 1, H, W)
     if self.cuda:
         inp = inp.cuda()
     # Forward pass of network.
     outs = self.net.forward(inp)
     semi = outs
     # Convert pytorch -> numpy.
     semi = semi.data.cpu().numpy().squeeze()
     # --- Process points.
     dense = np.exp(semi)  # Softmax.
     dense = dense / (np.sum(dense, axis=0) + .00001)  # Should sum to 1.
     # Remove dustbin.
     nodust = dense[:-1, :, :]
     # Reshape to get full resolution heatmap.
     Hc = int(H / self.cell)
     Wc = int(W / self.cell)
     nodust = nodust.transpose(1, 2, 0)
     heatmap = np.reshape(nodust, [Hc, Wc, self.cell, self.cell])
     heatmap = np.transpose(heatmap, [0, 2, 1, 3])
     heatmap = np.reshape(heatmap, [Hc * self.cell, Wc * self.cell])
     xs, ys = np.where(heatmap >= self.conf_thresh)  # Confidence threshold.
     if len(xs) == 0:
         return np.zeros((3, 0)), None
     pts = np.zeros((3, len(xs)))  # Populate point data sized 3xN.
     pts[0, :] = ys
     pts[1, :] = xs
     pts[2, :] = heatmap[xs, ys]
     pts, _ = self.nms_fast(pts, H, W,
                            dist_thresh=self.nms_dist)  # Apply NMS.
     inds = np.argsort(pts[2, :])
     pts = pts[:, inds[::-1]]  # Sort by confidence.
     # Remove points along border.
     if self.border_remove != 0:
         bord = self.border_remove
         toremoveW = np.logical_or(pts[0, :] < bord, pts[0, :] >=
                                   (W - bord))
         toremoveH = np.logical_or(pts[1, :] < bord, pts[1, :] >=
                                   (H - bord))
         toremove = np.logical_or(toremoveW, toremoveH)
         pts = pts[:, ~toremove]
     # --- Process descriptor.
     # D = coarse_desc.shape[1]
     # if pts.shape[1] == 0:
     #     desc = np.zeros((D, 0))
     # else:
     #     # Interpolate into descriptor map using 2D point locations.
     #     samp_pts = torch.from_numpy(pts[:2, :].copy())
     #     samp_pts[0, :] = (samp_pts[0, :] / (float(W) / 2.)) - 1.
     #     samp_pts[1, :] = (samp_pts[1, :] / (float(H) / 2.)) - 1.
     #     samp_pts = samp_pts.transpose(0, 1).contiguous()
     #     samp_pts = samp_pts.view(1, 1, -1, 2)
     #     samp_pts = samp_pts.float()
     #     if self.cuda:
     #         samp_pts = samp_pts.cuda()
     #     desc = torch.nn.functional.grid_sample(coarse_desc, samp_pts)
     #     desc = desc.data.cpu().numpy().reshape(D, -1)
     #     desc /= np.linalg.norm(desc, axis=0)[np.newaxis, :]
     return pts, heatmap
Example #33
        if image.shape[0] > image.shape[1]:
            image = imutils.resize(image, width=450)
        else:
            image = imutils.resize(image, width=900)
        original_height = image.shape[0]
        original_width = image.shape[1]
        resized_image = utils.resize_image(image, model.WIDTH)
        resized_image = np.expand_dims(resized_image, 0)

        start = time.time()
        tf.logging.info('Predicting {}...'.format(
            os.path.basename(image_list[index])))
        detections = sess.run(predictions, feed_dict={inputs: resized_image})
        tf.logging.info('Performing non-maximum suppression...')
        result = utils.torch_non_max_suppression(
            detections=torch.from_numpy(detections),
            confidence_threshold=0.25,
            num_classes=model.NUM_CLASSES,
            nms_conf=0.4)
        end = time.time()
        tf.logging.info('Total time: {}'.format(end - start))

        if result is not None:
            for box in result:
                p1 = (int(box[1]), int(box[2]))
                p1 = utils.convert_to_original_size(p1, original_height,
                                                    original_width,
                                                    model.HEIGHT, model.WIDTH)
                p2 = (int(box[3]), int(box[4]))
                p2 = utils.convert_to_original_size(p2, original_height,
                                                    original_width,
Example #34
        for i in range(future):  # if we should predict the future
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs


if __name__ == '__main__':
    # set random seed to 0
    np.random.seed(0)
    torch.manual_seed(0)
    # load data and make training set
    data = torch.load('traindata.pt')
    input = Variable(torch.from_numpy(data[3:, :-1]), requires_grad=False)
    target = Variable(torch.from_numpy(data[3:, 1:]), requires_grad=False)
    test_input = Variable(torch.from_numpy(data[:3, :-1]), requires_grad=False)
    test_target = Variable(torch.from_numpy(data[:3, 1:]), requires_grad=False)
    # build the model
    seq = Sequence()
    seq.double()
    criterion = nn.MSELoss()
    # use LBFGS as optimizer since we can load the whole data to train
    optimizer = optim.LBFGS(seq.parameters(), lr=0.8)
    #begin to train
    for i in range(15):
        print('STEP: ', i)

        def closure():
            optimizer.zero_grad()
Example #35
    def __call__(self, sample):
        sample['img'] = torch.from_numpy(sample['img'])
        if sample['annot'] is not None:
            sample['annot'] = torch.from_numpy(sample['annot'])

        return sample
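A hypothetical use of the sample converter above (the class name SampleToTensor is assumed):

import numpy as np

sample = {'img': np.zeros((3, 32, 32), dtype=np.float32), 'annot': None}
out = SampleToTensor()(sample)            # SampleToTensor is an assumed class name
print(type(out['img']))                   # <class 'torch.Tensor'>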
Example #36
def subsequent_mask(size):
    "Mask out subsequent positions."
    attn_shape = (1, size, size)
    # Note: np.triu keeps the upper triangle of an array
    subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(subsequent_mask) == 0
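A quick check of the mask produced by subsequent_mask (assumes numpy and torch imports as in the surrounding examples):

print(subsequent_mask(3))
# lower triangle (including the diagonal) is 1/True, the rest 0/False:
# [[[1, 0, 0],
#   [1, 1, 0],
#   [1, 1, 1]]]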
Example #37
def get_data():

    x_train, x_test, labels = [], [], []

    data = np.genfromtxt("list_attr_celeba.csv",
                         dtype=float,
                         delimiter=',',
                         names=True)
    print(data.shape)

    train_labels = []
    test_labels = []
    train_paths = []
    test_paths = []

    n = 0
    for row in data:
        if n < 10000:
            labels = np.array([
                int(row['Eyeglasses']),
                int(row['Bangs']),
                int(row['Black_Hair']),
                int(row['Blond_Hair']),
                int(row['Brown_Hair']),
                int(row['Gray_Hair']),
                int(row['Male']),
                int(row['Pale_Skin']),
                int(row['Smiling']),
                int(row['Young'])
            ])
            train_labels.append(labels)
            train_paths.append(row['image_id'])
        elif n < 12000:
            labels = np.array([
                int(row['Eyeglasses']),
                int(row['Bangs']),
                int(row['Black_Hair']),
                int(row['Blond_Hair']),
                int(row['Brown_Hair']),
                int(row['Gray_Hair']),
                int(row['Male']),
                int(row['Pale_Skin']),
                int(row['Smiling']),
                int(row['Young'])
            ])
            test_labels.append(labels)
            test_paths.append(row['image_id'])
        else:
            break
        n = n + 1

    y_train = train_labels

    for row in train_paths:
        img = Image.open('./img_align_celeba/' + row)
        img = np.array(img.resize((48, 48)))
        x_train.append(img)

    x_train = torch.from_numpy(np.array(x_train) / 255)
    y_train = torch.from_numpy(np.array(y_train))

    print(x_train.size())
    print(y_train.size())

    pickle.dump(x_train, open("pic_tensor.p", "wb"))

    return x_train.to(device), y_train.to(device)
Example #38
print(model)
par_dict = {}
for par in model.named_parameters():
    print("--------------------------------------------")
    print("setting [",par[0],par[1].shape,"]")
    if "weight" in par[0] and len(par[0].split("."))==3 and len(par[1].shape)==4:
        layerid1 = int(par[0].split(".")[0])
        layerid2 = int(par[0].split(".")[1])
        vgglayername = "features.%d.weight"%(layerid2)
        try:
            np_vggweight = model_data[vgglayername].numpy()
        except:
            continue
        vggweight = np.transpose( np_vggweight, (2,3,1,0) )
        vggweight = torch.from_numpy( vggweight.reshape( (9,1,vggweight.shape[2],vggweight.shape[3]) ) ).to(device)
        print(par[0],par[1].shape,'-->',vgglayername,vggweight.shape)
        par_dict[par[0]] = vggweight[:,:,:par[1].shape[2],:par[1].shape[3]]
    else:
        par_dict[par[0]] = par[1]
        #print(par[1])

    if "weight" in par[0] and len(par[0].split("."))==3 and len(par[1].shape)==1:
        par_dict[par[0].replace("weight","running_var")] = torch.ones( (par[1].shape[0]) )
    if "bias" in par[0] and len(par[0].split("."))==3 and len(par[1].shape)==1:
        par_dict[par[0].replace("bias","running_mean")] = torch.zeros( (par[1].shape[0]) )
#print(par_dict)
#par_dict["2.running_mean"] = torch.zeros( (32) )
#par_dict["2.running_var"]  = torch.ones( (32) )
model.load_state_dict(par_dict)
    
Example #39
def load_tf_weights_in_electra(model,
                               config,
                               tf_checkpoint_path,
                               discriminator_or_generator="discriminator"):
    """ Load tf checkpoints in a pytorch model.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        original_name: str = name

        try:
            if isinstance(model, ElectraForMaskedLM):
                name = name.replace("electra/embeddings/",
                                    "generator/embeddings/")

            if discriminator_or_generator == "generator":
                name = name.replace("electra/", "discriminator/")
                name = name.replace("generator/", "electra/")

            name = name.replace("dense_1", "dense_prediction")
            name = name.replace("generator_predictions/output_bias",
                                "generator_lm_head/bias")

            name = name.split("/")
            # print(original_name, name)
            # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
            # which are not required for using the pretrained model
            if any(n in ["global_step", "temperature"] for n in name):
                logger.info("Skipping {}".format(original_name))
                continue
            pointer = model
            for m_name in name:
                if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                    scope_names = re.split(r"_(\d+)", m_name)
                else:
                    scope_names = [m_name]
                if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                    pointer = getattr(pointer, "weight")
                elif scope_names[0] == "output_bias" or scope_names[
                        0] == "beta":
                    pointer = getattr(pointer, "bias")
                elif scope_names[0] == "output_weights":
                    pointer = getattr(pointer, "weight")
                elif scope_names[0] == "squad":
                    pointer = getattr(pointer, "classifier")
                else:
                    pointer = getattr(pointer, scope_names[0])
                if len(scope_names) >= 2:
                    num = int(scope_names[1])
                    pointer = pointer[num]
            if m_name.endswith("_embeddings"):
                pointer = getattr(pointer, "weight")
            elif m_name == "kernel":
                array = np.transpose(array)
            try:
                assert pointer.shape == array.shape, original_name
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            print("Initialize PyTorch weight {}".format(name), original_name)
            pointer.data = torch.from_numpy(array)
        except AttributeError as e:
            print("Skipping {}".format(original_name), name, e)
            continue
    return model
Example #40
    def test(self):
        import onnx
        from ngraph_onnx.onnx_importer.importer import import_onnx_model
        import ngraph as ng
        global dim0, dim2, dim3

        torch.set_grad_enabled(False)

        epoch = self.optimizer.get_last_epoch() + 1
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(
            torch.zeros(1, len(self.loader_test), len(self.scale))
        )
        self.model.eval()

        timer_test = utility.timer()
        if self.args.save_results: self.ckp.begin_background()
        # print(self.loader_test)
        for idx_data, d in enumerate(self.loader_test):
            for idx_scale, scale in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                print('idx_scale={}'.format(idx_scale))
                # print("len: {}".format(len(d)))
                # for lr, hr, filename, _ in tqdm(d, ncols=80):
                for batch, (lr, hr, filename, _) in enumerate(d):
                    print('{} '.format(batch), end='', flush=True)
                    lr, hr = self.prepare(lr, hr)
                    print('test lr.size: {}'.format(lr.size()))
                    dim0 = lr.size()[0]
                    dim2 = lr.size()[2]
                    dim3 = lr.size()[3]
                    
                    showbug = False
                    if showbug: print('stage1', flush=True)
                    if self.args.ngraph:
                        
                        pytorch_model_name = self.args.ngraph
                        pytorch_edsr_model = torch.load(pytorch_model_name).cuda()
                        if showbug: print('stage2-1', flush=True)
                        # print(lr.size())
                        # dummy_input = torch.randn_like(lr, device='cuda')
                        if showbug: print('stage2-2', flush=True)
                        edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                        # print('Export to onnx model {}'.format(edsr_onnx_filename))
                        torch.onnx.export(pytorch_edsr_model, lr.to(torch.device('cuda')), edsr_onnx_filename, export_params=True, verbose=False, training=False)
                        if showbug: print('stage2-3', flush=True)

                        edsr_onnx_model = onnx.load(edsr_onnx_filename)
                        # print(onnx.helper.printable_graph(edsr_onnx_model.graph))

                        if showbug: print('stage2-4', flush=True)
                        ng_models = import_onnx_model(edsr_onnx_model)

                        # print('Convert to nGreph Model')

                        ng_model = ng_models[0]
                        if showbug: print('stage2-5', flush=True)
                        runtime = ng.runtime(backend_name='CPU')
                        if showbug: print('stage2-6', flush=True)
                        edsr_ng_model = runtime.computation(ng_model['output'], *ng_model['inputs'])
                        if showbug: print('stage2-7', flush=True)

                        sr = edsr_ng_model(lr, idx_scale)
                        if showbug: print('stage2-8', flush=True)
                        sr = torch.from_numpy(sr)
                        if showbug: print('stage2-9', flush=True)
                    elif self.args.tensorrt:
                        pytorch_model_name = self.args.tensorrt
                        pytorch_edsr_model = torch.load(pytorch_model_name)
                        
                        # lr_np = lr.numpy().astype(np.float32)
                        dummy_input = torch.randn_like(lr, device='cuda')
                        edsr_onnx_filename = '{}.onnx'.format(pytorch_model_name)
                        print('Export to onnx model {}'.format(edsr_onnx_filename))
                        torch.onnx.export(pytorch_edsr_model, dummy_input, edsr_onnx_filename, export_params=True, verbose=False, training=False)

                        import os
                        import onnx

                        edsr_onnx_model = onnx.load(edsr_onnx_filename)
                        # print(onnx.helper.printable_graph(edsr_onnx_model.graph))

                        import tensorrt
                        import onnx_tensorrt.backend as backend
                        import numpy as np

                        tensorrt_engine = backend.prepare(edsr_onnx_model, device='CUDA:0')
                        # lr_np = lr_np.to(torch.device("cuda:0"))
                        # lr.numpy().astype(np.float32)

                        sr = tensorrt_engine.run(lr.numpy().astype(np.float32))[0]
                        sr = torch.from_numpy(sr)

                        print('complete')
                    else:
                        sr = self.model(lr, idx_scale)

                    if showbug: print('stage3', flush=True)
                    sr = utility.quantize(sr, self.args.rgb_range)

                    if showbug: print('stage4', flush=True)
                    save_list = [sr]
                    if showbug: print('stage5', flush=True)
                    self.ckp.log[-1, idx_data, idx_scale] += utility.calc_psnr(
                        sr, hr, scale, self.args.rgb_range, dataset=d
                    )
                    if showbug: print('stage6', flush=True)
                    if self.args.save_gt:
                        save_list.extend([lr, hr])
                    if showbug: print('stage7', flush=True)

                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale)
                    if showbug: print('stage8', flush=True)

                self.ckp.log[-1, idx_data, idx_scale] /= len(d)
                best = self.ckp.log.max(0)
                psnr = self.ckp.log[-1, idx_data, idx_scale].numpy()
                print('')
                self.ckp.write_log(
                    '[{} x{}]\tPSNR: {:.3f} (Best: {:.3f} @epoch {})'.format(
                        d.dataset.name,
                        scale,
                        self.ckp.log[-1, idx_data, idx_scale],
                        best[0][idx_data, idx_scale],
                        best[1][idx_data, idx_scale] + 1
                    )
                )
                
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')

        if self.args.save_results:
            self.ckp.end_background()

        if not self.args.test_only:
            self.ckp.save(self, epoch, is_best=(best[1][0, 0] + 1 == epoch))

        self.ckp.write_log(
           'Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True
        )

        torch.set_grad_enabled(True)
        return psnr
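Both accelerator branches above follow the same round-trip: trace the PyTorch model to an ONNX file, hand the serialized graph to a backend, run it on NumPy inputs, and wrap the result back into a torch tensor. A minimal sketch of that pattern, using onnxruntime as a stand-in backend (an assumption; the example itself uses nGraph and onnx_tensorrt) and a hypothetical onnx_roundtrip helper:

import torch
import onnxruntime  # stand-in backend, not used by the example above

def onnx_roundtrip(model, example_input, onnx_path='model.onnx'):
    # Trace the model with a representative input and serialize it to ONNX.
    torch.onnx.export(model, example_input, onnx_path, export_params=True)
    # Run the exported graph on the backend, then mirror the
    # sr = torch.from_numpy(...) step above.
    session = onnxruntime.InferenceSession(onnx_path)
    input_name = session.get_inputs()[0].name
    out = session.run(None, {input_name: example_input.cpu().numpy()})[0]
    return torch.from_numpy(out)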
Example #41
0
        video_list = [(train[0][k],augment)
                       for k in random_indices[i:(batch_size+i)]]
        data = pool_threads.map(loadFrame,video_list)

        next_batch = 0
        for video in data:
            if video.size==0: # there was an exception, skip this
                next_batch = 1
        if(next_batch==1):
            continue

        x = np.asarray(data,dtype=np.float32)
        x = Variable(torch.FloatTensor(x)).cuda().contiguous()

        y = train[1][random_indices[i:(batch_size+i)]]
        y = torch.from_numpy(y).cuda()

        output = model(x)

        loss = criterion(output, y)
        optimizer.zero_grad()

        loss.backward()
        optimizer.step()
        
        prediction = output.data.max(1)[1]
        accuracy = (float(prediction.eq(y.data).sum()) / float(batch_size)) * 100.0
        if(epoch==0):
            print(i,accuracy)
        train_accu.append(accuracy)
    accuracy_epoch = np.mean(train_accu)
    real_boxes = np.array([[20, 30, 70, 80], [320, 23, 500, 120]], dtype=np.float32)
    boxes = []
    for box in real_boxes:
        bndbox = []
        for i, pt in enumerate(box):
            cur_pt = pt / 500.0  # both coordinates normalized by the 500-pixel image size
            bndbox.append(cur_pt)
        boxes.append(bndbox)
    boxes = np.array(boxes, dtype=np.float32)

    labels = np.array([[1], [7]], dtype=np.int64)
    print("before augment", boxes)
    imgs, boxes, labels = augment(imgs, boxes, labels)
    print("after augment", boxes)
    print(labels)
    boxes, labels = torch.from_numpy(boxes), torch.from_numpy(labels)

    conf_target, loc_target = encoder(boxes, labels)

    print(" [*] Encoding results")
    print(" [*] conf_target:")
    print(conf_target)
    print(" [*] loc_target:")
    print(loc_target)


    decoder = Decoder(input_size=300)

    print(" [*] Decoding result")
    conf_pred = np.stack([one_hot(target, 9).numpy() for target in conf_target])
    conf_pred = conf_pred.reshape(-1, 9)
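The one_hot helper called above is defined outside the excerpt; a plausible sketch (hypothetical, shaped to match the call one_hot(target, 9) and the .numpy() conversion of its result):

import torch

def one_hot(labels, num_classes):
    # hypothetical helper: map an integer label tensor to a one-hot float tensor
    labels = labels.long().view(-1, 1)
    out = torch.zeros(labels.size(0), num_classes)
    return out.scatter_(1, labels, 1.0)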
Example #43
0
                    # Sample the rest and keep the fixed one
                    _, con_noise = model.ginput_noise(n_con_samples)
                    z_noise = model.sample_fixed_noise(
                        'normal', 1, noise_dim=model.z_dim)
                    z_noise = z_noise.expand(
                        (n_con_samples, model.z_dim))
                    
                    # Generate an image
                    gen_x = model.g_forward(z_noise, con_noise).detach()
                    filename = 'evalImages_fixusual_r{0}'.format(str(repeat))
                    model.plot_traj_grid(gen_x, filename, model.test_dir,
                                          n=n_con_samples)
        
        # Plot original
        test_dataset = TrajDataset(path_to_data, device)
        ref_np = torch.from_numpy(
            test_dataset.get_subset(len(test_dataset), n_con_samples, reshape=False))
                
        filename = 'evalImages_original'
        model.plot_traj_grid(ref_np, filename, model.test_dir,
                              n=n_con_samples)

    # Evaluate the model
    if args.compute_prd:
        eval_config = config_file['eval_config']
        eval_config['filepath'] = eval_config['filepath'].format(args.config_name)

        compute_chpnt_prds = eval_config['compute_chpnt_prds']
        chnpt_list = eval_config['chnpt_list']
        
        print(eval_config)
        if not args.train:
Example #44
0
    def __init__(self, gender='female', num_betas=10):
        super(STAR, self).__init__()

        if gender not in ['male', 'female', 'neutral']:
            raise RuntimeError('Invalid Gender')

        if gender == 'male':
            path_model = cfg.path_male_star
        elif gender == 'female':
            path_model = cfg.path_female_star
        else:
            path_model = cfg.path_neutral_star

        if not os.path.exists(path_model):
            raise RuntimeError('Path does not exist %s' % (path_model))
        import numpy as np

        star_model = np.load(path_model, allow_pickle=True)
        J_regressor = star_model['J_regressor']
        rows, cols = np.where(J_regressor != 0)
        vals = J_regressor[rows, cols]
        self.num_betas = num_betas

        # Sparse joint regressor: regresses joint locations from the mesh
        self.register_buffer('J_regressor',
                             torch.cuda.FloatTensor(J_regressor))

        # Model skinning weights
        self.register_buffer('weights',
                             torch.cuda.FloatTensor(star_model['weights']))
        # Model pose corrective blend shapes
        self.register_buffer(
            'posedirs',
            torch.cuda.FloatTensor(star_model['posedirs'].reshape((-1, 93))))
        # Mean Shape
        self.register_buffer('v_template',
                             torch.cuda.FloatTensor(star_model['v_template']))
        # Shape corrective blend shapes
        self.register_buffer(
            'shapedirs',
            torch.cuda.FloatTensor(
                np.array(star_model['shapedirs'][:, :, :num_betas])))
        # Mesh triangles
        self.register_buffer(
            'faces', torch.from_numpy(star_model['f'].astype(np.int64)))
        self.f = star_model['f']
        # Kinematic tree of the model
        self.register_buffer(
            'kintree_table',
            torch.from_numpy(star_model['kintree_table'].astype(np.int64)))

        id_to_col = {
            self.kintree_table[1, i].item(): i
            for i in range(self.kintree_table.shape[1])
        }
        self.register_buffer(
            'parent',
            torch.LongTensor([
                id_to_col[self.kintree_table[0, it].item()]
                for it in range(1, self.kintree_table.shape[1])
            ]))

        self.verts = None
        self.J = None
        self.R = None
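The id_to_col / parent construction in __init__ recovers, for every non-root joint, the column of its parent in kintree_table. The same computation on a toy tree (illustrative values, not the real STAR kinematics):

import numpy as np

# Toy 2 x N table: row 0 holds parent joint ids, row 1 the joints' own ids.
# Joint 0 is the root (sentinel parent id); 1 and 2 hang off 0, 3 off 1.
kintree_table = np.array([[2**32 - 1, 0, 0, 1],
                          [0,         1, 2, 3]])
id_to_col = {kintree_table[1, i]: i for i in range(kintree_table.shape[1])}
parent = [id_to_col[kintree_table[0, i]] for i in range(1, kintree_table.shape[1])]
print(parent)  # [0, 0, 1]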
#         top5 = AverageMeter()
#
#         # load the best checkpoint
#         self.load_checkpoint(best=self.best)
#         self.model.eval()
#         for i, (images, labels) in enumerate(self.test_loader):
#             if self.use_gpu:
#                 images, labels = images.cuda(), labels.cuda()
#             images, labels = Variable(images), Variable(labels)
#
#             # forward pass
#             outputs = self.model(images)
#             loss = self.loss_fn(outputs, labels)
#
#             # measure accuracy and record loss
#             prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
#             losses.update(loss.item(), images.size()[0])
#             top1.update(prec1.item(), images.size()[0])
#             top5.update(prec5.item(), images.size()[0])
#
#         print(
#             '[*] Test loss: {:.3f}, top1_acc: {:.3f}%, top5_acc: {:.3f}%'.format(
#                 losses.avg, top1.avg, top5.avg)
#         )

import torch
import numpy
x = torch.from_numpy(numpy.array([[1, 2], [3, 4]]))

y = 1 * x + 2 * x[0, :]
print(y)
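# x[0, :] broadcasts across both rows of x, so this prints tensor([[3, 6], [5, 8]])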
def plot(model, likelihood):  
    model.eval()
    likelihood.eval()
    with torch.no_grad(): 
      model = model.double()    
      observed_pred = likelihood(model(test_x))
      # Plot:
      f, ax = plt.subplots(1, 1, figsize=(6, 5))
      pred_labels = observed_pred.mean.view(81, 81) 
      ax.scatter(x1_torch[0, :], x1_torch[1, :], marker='o')
      ax.scatter(x2_torch[0, :], x2_torch[1, :], marker='+')
      ax.contour(test_x1, test_x2, pred_labels.numpy()) 
      ax.contour(test_x1, test_x2, pred_labels.numpy(), [0.5], colors=['red'])

# Converting numpy data to torch:
train_x = torch.from_numpy(x)
train_x = train_x.double()
train_y = torch.from_numpy(y)
train_y = train_y.double()
test_x = torch.from_numpy(tgrid) 
test_x1 = torch.from_numpy(t1)
test_x2 = torch.from_numpy(t2)
x1_torch = torch.from_numpy(x1)
x2_torch = torch.from_numpy(x2)

# Manual parameters: [No training]
model = GPClassificationModel(train_x)
init_lengthscale = 0.5
init_sigmaf_sq = 10.0
model.covar_module.base_kernel.lengthscale = init_lengthscale
model.covar_module.outputscale = init_sigmaf_sq
def binary_loss(y_pred, y):
    y_pred = torch.from_numpy(y_pred)
    logits = (y * y_pred.clamp(1e-12).log() + (1 - y) *
              (1 - y_pred).clamp(1e-12).log()).mean()
    return -logits
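A quick numeric check of binary_loss (toy values; the clamp guards against log(0)):

import numpy as np
import torch

y_pred = np.array([0.9, 0.1, 0.8], dtype=np.float32)  # predicted probabilities
y = torch.tensor([1.0, 0.0, 1.0])                      # binary targets
print(binary_loss(y_pred, y))  # tensor(0.1446), the mean negative log-likelihood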
Example #48
0
def load_fc(buf, start, fc_model):
    num_w = fc_model.weight.numel()
    num_b = fc_model.bias.numel()
    fc_model.bias.data.copy_(torch.from_numpy(buf[start:start + num_b]))
    start = start + num_b
    # reshape the flat slice to the weight's shape; recent PyTorch versions
    # require copy_ sources to be broadcastable to the destination
    fc_model.weight.data.copy_(
        torch.from_numpy(buf[start:start + num_w]).view_as(fc_model.weight.data))
    start = start + num_w
    return start
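A usage sketch with a toy buffer: the bias is consumed first, then the weights, and the returned offset lets successive layers read from one flat array:

import numpy as np
import torch.nn as nn

fc = nn.Linear(4, 2)                   # 2 bias values followed by 8 weights
buf = np.arange(10, dtype=np.float32)  # flat parameter buffer
offset = load_fc(buf, 0, fc)
print(offset)        # 10: the whole buffer was consumed
print(fc.bias.data)  # tensor([0., 1.])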
Example #49
0
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='MNIST')
    parser.add_argument('--batch-size',
                        type=int,
                        default=16,
                        metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)'
                        )  # train itself 9221, test 3767
    parser.add_argument('--epochs',
                        type=int,
                        default=60,
                        metavar='N',
                        help='number of epochs to train (default: 60)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.3,
                        metavar='M',
                        help='SGD momentum (default: 0.3)')
    parser.add_argument('--weight_decay',
                        type=float,
                        default=0,
                        metavar='M',
                        help='weight decay (default: 0)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=2,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda:0" if use_cuda else "cpu")
    kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}

    #=================================Load Data=================================
    X_train, y_train = read_dataset('MNIST_X_train.csv', 'MNIST_y_train.csv')
    X_test, y_test = read_dataset('MNIST_X_test.csv', 'MNIST_y_test.csv')
    X_train, X_test = normalize_features(X_train, X_test)

    print('Train:', X_train.shape)
    print('Test:', X_test.shape)
    print(y_train.shape)
    print(y_test.shape)

    #==================================Pack Data================================
    train_data = torch.from_numpy(X_train).float()
    test_data = torch.from_numpy(X_test).float()

    trainset = torch.utils.data.TensorDataset(
        train_data, torch.from_numpy(y_train.ravel()))
    testset = torch.utils.data.TensorDataset(test_data,
                                             torch.from_numpy(y_test.ravel()))

    # Define data loader
    train_loader = torch.utils.data.DataLoader(dataset=trainset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(dataset=testset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)

    #=================================Design Net================================
    C_in = 1
    H1 = 4
    H2 = 256
    K1 = (3, 3)
    P1 = 2
    H0 = H1 * 13 * 13
    D_out = 10
    model = CNNLayerNet(C_in, H0, H1, H2, K1, P1, D_out).to(device)

    # optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           eps=1e-08,
                           weight_decay=args.weight_decay,
                           amsgrad=False)
    lr_adjust = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=20,
                                          gamma=0.5,
                                          last_epoch=-1)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, epoch, test_loader)
        lr_adjust.step()  # step the scheduler after the epoch's optimizer updates

    if (args.save_model):
        torch.save(model.state_dict(), "mnist_cnn.pt")
Example #50
0
    def __getitem__(self, index):

        #---------
        #  Image
        #---------

        img_path = self.img_files[index % len(self.img_files)].rstrip()
        img = np.array(Image.open(img_path))

        # Handles images with less than three channels
        while len(img.shape) != 3:
            index += 1
            img_path = self.img_files[index % len(self.img_files)].rstrip()
            img = np.array(Image.open(img_path))

        h, w, _ = img.shape
        dim_diff = np.abs(h - w)
        # Upper (left) and lower (right) padding
        pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2
        # Determine padding
        pad = ((pad1, pad2), (0, 0),
               (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))
        # Add padding
        input_img = np.pad(img, pad, 'constant', constant_values=128) / 255.
        padded_h, padded_w, _ = input_img.shape
        # Resize and normalize
        input_img = resize(input_img, (*self.img_shape, 3), mode='reflect')
        # Channels-first
        input_img = np.transpose(input_img, (2, 0, 1))
        # As pytorch tensor
        input_img = torch.from_numpy(input_img).float()

        #---------
        #  Label
        #---------

        label_path = self.label_files[index % len(self.img_files)].rstrip()

        labels = None
        if os.path.exists(label_path):
            labels = np.loadtxt(label_path).reshape(-1, 5)
            # Extract coordinates for unpadded + unscaled image
            x1 = w * (labels[:, 1] - labels[:, 3] / 2)
            y1 = h * (labels[:, 2] - labels[:, 4] / 2)
            x2 = w * (labels[:, 1] + labels[:, 3] / 2)
            y2 = h * (labels[:, 2] + labels[:, 4] / 2)
            # Adjust for added padding
            x1 += pad[1][0]
            y1 += pad[0][0]
            x2 += pad[1][0]
            y2 += pad[0][0]
            # Calculate ratios from coordinates
            labels[:, 1] = ((x1 + x2) / 2) / padded_w
            labels[:, 2] = ((y1 + y2) / 2) / padded_h
            labels[:, 3] *= w / padded_w
            labels[:, 4] *= h / padded_h
        # Fill matrix
        filled_labels = np.zeros((self.max_objects, 5))
        if labels is not None:
            filled_labels[range(
                len(labels))[:self.max_objects]] = labels[:self.max_objects]
        filled_labels = torch.from_numpy(filled_labels)

        return img_path, input_img, filled_labels
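The padding arithmetic above centers the shorter side so the image becomes square before resizing; a quick numeric check on a toy 300x500 image:

import numpy as np

h, w = 300, 500
dim_diff = np.abs(h - w)                              # 200
pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2  # 100, 100
pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))
padded = np.pad(np.zeros((h, w, 3)), pad, 'constant', constant_values=128)
print(padded.shape)  # (500, 500, 3): square, with the height padded on both sides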
Example #51
0
     "tau": args.tau,
     "n": args.n,
     "length": args.length,
     "device": device,
 }
 if model.is_attn:
     context_voc = Vocabulary()
     context_voc.load(model.params["context_voc"])
     to_input["context_voc"] = context_voc
 if model.params["use_ch"]:
     ch_voc = Vocabulary()
     ch_voc.load(model.params["ch_voc"])
     to_input["ch_voc"] = ch_voc
 for i in tqdm(range(len(dataset)), file=logfile):
     if model.is_w2v:
         to_input["input"] = torch.from_numpy(dataset.input_vectors[i])
     if model.is_ada:
         to_input["input"] = torch.from_numpy(
             dataset.input_adaptive_vectors[i])
     if model.is_attn:
         to_input["word"] = dataset.data[i][0][0]
         to_input["context"] = " ".join(dataset.data[i][2])
     if model.params["use_ch"]:
         to_input["CH_word"] = dataset.data[i][0][0]
     if model.params["use_seed"]:
         to_input["prefix"] = dataset.data[i][0][0]
     else:
         to_input["prefix"] = BOS
     defsave.write("Word: {0}\nContext: {1}\n".format(
         dataset.data[i][0][0], " ".join(dataset.data[i][2])))
     defsave.write(generate(**to_input) + "\n")
def test(args, shared_model):
    log = {}
    logger = setup_logger("test_log", "./logs/test_log")

    torch.manual_seed(args.seed)
    env = Tetris(50)

    model = agentNET()
    model.eval()

    test_time = 0
    reward_num = 0
    clean_sum = 0
    max_reward = -1

    while (1):
        model.load_state_dict(shared_model.state_dict())
        if args.gpu:
            model = model.cuda()
            cx = Variable(torch.zeros(1, 78).cuda())
            hx = Variable(torch.zeros(1, 78).cuda())
        else:
            cx = Variable(torch.zeros(1, 78))
            hx = Variable(torch.zeros(1, 78))

        state = env.reset()  # state shape: 50 x 100 x 3
        state = torch.from_numpy(state).float()

        while (1):
            if args.gpu:
                value, logit, (hx, cx) = model(
                    (Variable(state.unsqueeze(0)).cuda(), (hx, cx)))
            else:
                value, logit, (hx, cx) = model(
                    (Variable(state.unsqueeze(0)), (hx, cx)))

            prob = F.softmax(logit, dim=1)
            if args.gpu:
                action = prob.max(1)[1].data.cpu()
            else:
                action = prob.max(1)[1].data

            state, done, reward, clean = env.step(action.numpy()[0])
            state = torch.from_numpy(state).float()
            reward_num += reward
            clean_sum += clean.get('1', -1000)

            if done:
                #print('dead', test_time)
                test_time += 1
                break

        if test_time % 50 == 0:
            if reward_num > max_reward:
                if args.gpu:
                    model = model.cpu()
                state_to_save = model.state_dict()
                torch.save(state_to_save, "./tetris.dat")
                logger.info('save')
                max_reward = reward_num
                if args.gpu:
                    model = model.cuda()
            logger.info('reward = ' + str(reward_num / test_time))
            logger.info('cleaned = ' + str(clean_sum / test_time))
            test_time = 0
            reward_num = 0
            clean_sum = 0
Example #53
0
        # implement the depthwise horizontal layer as a grouped convolution, preserving the stride of the original convolution
        depthwise_horizontal_layer = \
               torch.nn.Conv2d(in_channels=horizontal.shape[1], \
                   out_channels=horizontal.shape[1],
                   kernel_size=(1, horizontal.shape[0]), stride=layer.stride,
                   padding=(0, layer.padding[0]),
                   dilation=layer.dilation, groups=horizontal.shape[1], bias=False)

        # implement the pointwise layer as a conv2d with a 1x1 kernel
        pointwise_r_to_t_layer = torch.nn.Conv2d(in_channels=last.shape[1], \
                   out_channels=last.shape[0], kernel_size=1, stride=1,
                   padding=0, dilation=layer.dilation, bias=False)

        # convert if using GPU
        if USE_GPU:
            horizontal = torch.from_numpy(horizontal).type(
                'torch.FloatTensor').cuda()
            vertical = torch.from_numpy(vertical).type(
                'torch.FloatTensor').cuda()
            first = torch.from_numpy(first).type('torch.FloatTensor').cuda()
            last = torch.from_numpy(last).type('torch.FloatTensor').cuda()
        else:
            horizontal = torch.from_numpy(horizontal).type('torch.FloatTensor')
            vertical = torch.from_numpy(vertical).type('torch.FloatTensor')
            first = torch.from_numpy(first).type('torch.FloatTensor')
            last = torch.from_numpy(last).type('torch.FloatTensor')

        # initialize weights of the layers constructed with the tensors obtained from the CP decomposition
        depthwise_horizontal_layer.weight.data = \
               torch.transpose(horizontal, 1, 0).unsqueeze(1).unsqueeze(1)
        depthwise_vertical_layer.weight.data = \
               torch.transpose(vertical, 1, 0).unsqueeze(1).unsqueeze(-1)
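The final assignment references a depthwise_vertical_layer constructed outside this excerpt; a plausible definition, mirroring the horizontal layer above (an assumption based on the symmetric CP-decomposition pattern, with vertical holding the rank-many vertical filters):

depthwise_vertical_layer = torch.nn.Conv2d(
    in_channels=vertical.shape[1],
    out_channels=vertical.shape[1],
    kernel_size=(vertical.shape[0], 1),  # (k, 1) kernel along the vertical axis
    stride=layer.stride,
    padding=(layer.padding[0], 0),
    dilation=layer.dilation,
    groups=vertical.shape[1],
    bias=False)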
Example #54
0
TRAIN_X = VECTORIZER.transform(TRAIN_X_RAW).todense()
for index, row in enumerate(TRAIN_X):
    if np.sum(row) < 1e-2:
        TRAIN_X[index, :] = np.ones((1, VOCABULARY_SIZE),dtype=np.float32)
TRAIN_X = TRAIN_X / TRAIN_X.sum(axis=1)
TRAIN_X = np.matmul(EMBEDDINGS, TRAIN_X.T)

DEV_X = VECTORIZER.transform(DEV_X_RAW).T
DEV_X = DEV_X / DEV_X.sum(axis=0)
DEV_X = np.matmul(EMBEDDINGS, DEV_X)

TEST_X = VECTORIZER.transform(TEST_X_RAW).T
TEST_X = TEST_X / TEST_X.sum(axis=0)
TEST_X = np.matmul(EMBEDDINGS, TEST_X)

TRAIN_DATA = data_utils.TensorDataset(torch.from_numpy(TRAIN_X.T.astype(np.float32)), torch.from_numpy(TRAIN_Y))
TRAIN_LOADER = data_utils.DataLoader(TRAIN_DATA, batch_size=173,
                                          shuffle=True, num_workers=2, drop_last=True)
DEV_DATA = data_utils.TensorDataset(torch.from_numpy(DEV_X.T.astype(np.float32)), torch.from_numpy(DEV_Y))
DEV_LOADER = data_utils.DataLoader(DEV_DATA, batch_size=1,
                                          shuffle=False, num_workers=2)
TEST_DATA = data_utils.TensorDataset(torch.from_numpy(TEST_X.T.astype(np.float32)), torch.from_numpy(TEST_Y))
TEST_LOADER = data_utils.DataLoader(TEST_DATA, batch_size=1,
                                          shuffle=False, num_workers=2)
##################################################################
# Define the network
##################################################################
class SentimentNet(nn.Module):
    def __init__(self, hidden_dim):
        super(SentimentNet, self).__init__()
        self.linear1 = nn.Linear(EMBEDDING_SIZE, hidden_dim)
Example #55
0
                     'crazing', 'inclusion', 'patches',
                     'pitted_surface', 'rolled-in_scale', 'scratches')
    fasterRCNN = vgg16(classes, pretrained=False, class_agnostic=False)
    fasterRCNN.create_architecture()
    fasterRCNN.cuda()
    fasterRCNN.eval()

    im_data = torch.FloatTensor(1).to(device)
    im_info = torch.FloatTensor(1).to(device)
    num_boxes = torch.LongTensor(1).to(device)
    gt_boxes = torch.FloatTensor(1).to(device)

    im_blob = np.random.random((1, args.img_size, args.img_size, 3))
    im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], 1.5]], dtype=np.float32)

    im_data_pt = torch.from_numpy(im_blob)
    im_data_pt = im_data_pt.permute(0, 3, 1, 2)
    im_info_pt = torch.from_numpy(im_info_np)
    # summary(net1, input_size=(3,300,300))
    im_data.resize_(im_data_pt.size()).copy_(im_data_pt)
    im_info.resize_(im_info_pt.size()).copy_(im_info_pt)
    gt_boxes.resize_(1, 1, 5).zero_()
    num_boxes.resize_(1).zero_()

    total = sum([param.nelement() for param in fasterRCNN.parameters()])

    print('  + fasterRCNN Number of params: %.2fM' % (total / 1e6))


    print_model_parm_flops(fasterRCNN,im_data,im_info,gt_boxes,num_boxes)
Example #56
0
def id_to_torch(speaker_id):
    if speaker_id is not None:
        speaker_id = np.asarray(speaker_id)
        speaker_id = torch.from_numpy(speaker_id).unsqueeze(0)
    return speaker_id
Example #57
0
def compute_hausdorff_distance(
    y_pred: Union[np.ndarray, torch.Tensor],
    y: Union[np.ndarray, torch.Tensor],
    include_background: bool = False,
    distance_metric: str = "euclidean",
    percentile: Optional[float] = None,
    directed: bool = False,
):
    """
    Compute the Hausdorff distance.

    Args:
        y_pred: input data to compute, typical segmentation model output.
            It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
            should be binarized.
        y: ground truth to compute mean the distance. It must be one-hot format and first dim is batch.
            The values should be binarized.
        include_background: whether to skip distance computation on the first channel of
            the predicted output. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
            the metric used to compute surface distance. Defaults to ``"euclidean"``.
        percentile: an optional float number between 0 and 100. If specified, the corresponding
            percentile of the Hausdorff Distance rather than the maximum result will be achieved.
            Defaults to ``None``.
        directed: whether to calculate directed Hausdorff distance. Defaults to ``False``.
    """

    if not include_background:
        y_pred, y = ignore_background(y_pred=y_pred, y=y)
    if isinstance(y, torch.Tensor):
        y = y.float()
    if isinstance(y_pred, torch.Tensor):
        y_pred = y_pred.float()

    if y.shape != y_pred.shape:
        raise ValueError(
            f"y_pred and y should have same shapes, got {y_pred.shape} and {y.shape}."
        )

    batch_size, n_class = y_pred.shape[:2]
    hd = np.empty((batch_size, n_class))
    for b, c in np.ndindex(batch_size, n_class):
        (edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])
        if not np.any(edges_gt):
            warnings.warn(
                f"the ground truth of class {c} is all 0, this may result in nan/inf distance."
            )
        if not np.any(edges_pred):
            warnings.warn(
                f"the prediction of class {c} is all 0, this may result in nan/inf distance."
            )

        distance_1 = compute_percent_hausdorff_distance(
            edges_pred, edges_gt, distance_metric, percentile)
        if directed:
            hd[b, c] = distance_1
        else:
            distance_2 = compute_percent_hausdorff_distance(
                edges_gt, edges_pred, distance_metric, percentile)
            hd[b, c] = max(distance_1, distance_2)
    return torch.from_numpy(hd)
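A usage sketch with synthetic one-hot masks (assuming the function and its helpers are available as in MONAI, where compute_hausdorff_distance lives under monai.metrics):

import torch

# batch of 1, two channels (background + one class), 8x8 masks
y_pred = torch.zeros(1, 2, 8, 8)
y = torch.zeros(1, 2, 8, 8)
y_pred[0, 1, 2:5, 2:5] = 1  # predicted square
y[0, 1, 3:6, 3:6] = 1       # ground-truth square shifted by one pixel
print(compute_hausdorff_distance(y_pred, y))  # shape [1, 1], roughly sqrt(2) here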
Example #58
0
    def forward(self, states):
        if not isinstance(states, torch.Tensor):
            states = torch.from_numpy(states).float().to(
                tu.global_device())
        dist = super().forward(states)
        return torch.softmax(dist, dim=-1)
Example #59
0
        if SAVE_MODEL and not LOAD_MODEL:
            torch.save(
                {
                    "epoch": epoch_i,
                    "hyper": hyper,
                    "loss": (l_mem_mean, l_mem_var),
                    "state_dict": diff_net.state_dict()
                }, f"./models/{filename}_loss_{i:01d}.torch")

        print("\n################################################")
        print("Evaluating Performance:\n")
        t0_batch = time.perf_counter()

        with torch.no_grad():
            # Convert NumPy samples to torch:
            x_torch = torch.from_numpy(x_test).float().to(diff_net.device)
            y_hat, dydx_hat = diff_net(x_torch)
            y_hat = y_hat.cpu().numpy()
            dydx_hat = dydx_hat.transpose(dim0=1,
                                          dim1=2).cpu().numpy().squeeze()

        t_batch = (time.perf_counter() - t0_batch) / (float(x_test.shape[0]))

        # Compute Errors:
        err_y = 1. / float(x_test.shape[0]) * np.sum((y_hat - y_test)**2)
        err_dydx = 1. / float(x_test.shape[0]) * np.sum(
            (dydx_hat - dydx_test)**2)
        print(f"Performance:")
        print(f"       y MSE = {err_y:.3e}")
        print(f"   dy/dx MSE = {err_dydx:.3e}")
Example #60
0
        iter = iter + 1

        if (loss.item() < best_loss):
            print('saving')
            best_loss = loss.item()
            torch.save(model, 'noise2.pkl')
        #Evalute model performance on test set
        if (epoch % 5 == 0):
            t = 0
            for a, b in train_test_image_folder:
                x = np.asarray(
                    a)  # there is some problem in the PIL-to-tensor conversion
                x = x[:, :, :1].reshape(
                    1, 1, 180, 180
                )  # greyscale conversion; the model needs input of shape (1, 1, 180, 180)

                test_img = torch.from_numpy(x / 255.0).float().cuda()
                std = np.random.uniform(20, 30)
                nse = torch.FloatTensor(test_img.size()).normal_(mean=0,
                                                                 std=std /
                                                                 255.0).cuda()
                #torch.sum(test_img**2).cpu().item()
                nssy_img = test_img + nse
                out = model(nssy_img)
                est_image = nssy_img - out

                print("PSNR of test image" + str(t) + " is " +
                      str(psnr(est_image, test_img).cpu().item()))
                t = t + 1
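The psnr helper used above is not part of the excerpt; a plausible sketch for images scaled to [0, 1] (an assumption consistent with the /255.0 normalization):

import torch

def psnr(estimate, target, max_val=1.0):
    # hypothetical helper: peak signal-to-noise ratio in dB
    mse = torch.mean((estimate - target) ** 2)
    return 10.0 * torch.log10(max_val ** 2 / mse)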