Example #1
def test(data_loader, Hnet, Rnet):
    print_log("---------- test begin ---------", opt.test_log)
    Hnet.eval()
    Rnet.eval()
    for i, data in enumerate(data_loader):
        all_pics = data
        this_batch_size = int(all_pics.size()[0] / 2)

        cover_img = all_pics[0:this_batch_size, :, :, :]
        secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
        concat_img = torch.cat([cover_img, secret_img], dim=1)

        if torch.cuda.is_available():
            cover_img = cover_img.cuda()
            secret_img = secret_img.cuda()
            concat_img = concat_img.cuda()

        concat_imgv = Variable(concat_img, requires_grad=False)
        cover_imgv = Variable(cover_img, requires_grad=False)
        secret_imgv = Variable(secret_img, requires_grad=False)

        stego = Hnet(concat_imgv)
        secret_rev = Rnet(stego)

        errH = loss(stego, cover_imgv)  # loss between cover and container
        errR = loss(secret_rev,
                    secret_imgv)  # loss between secret and revealed secret
        err_sum = errH + opt.beta * errR

        save_pic('test', cover_img, stego, secret_img, secret_rev,
                 opt.test_pics, 2, i)
        test_log = '%d: loss is %.6f' % (i, err_sum.item()) + '\n'
        print_log(test_log, opt.test_log)

    print_log("---------- test end ----------", opt.test_log)
    def validation(self, val_x, val_y):
        """
        Validation function used for internal testing during
        model training.
        :param val_x: validation data set.
        :param val_y: classes (labels) corresponding to the validation samples.
        :return: avg_loss, the average loss of the validation set
                 acc, the accuracy of the validation set (success/validation set length).
        """
        loss_sum = 0
        success = 0

        for x, y in zip(val_x, val_y):
            x = np.reshape(x, (1, x.shape[0]))
            z1, h1, z2 = self.feedforward(x)
            probs = utils.softmax(self.weights2, h1, self.bias2, CLASSES)
            loss_sum += utils.loss(probs[int(y)])
            # reuse the already-computed probabilities for the prediction
            y_hat = probs.argmax(axis=0)

            # count correct predictions
            if y == y_hat[0]:
                success += 1
        avg_loss = loss_sum / len(val_y)
        acc = success / len(val_y)
        return avg_loss, acc
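The enclosing network class is not shown in this listing; a hedged usage sketch of the `validation` method above, with `net`, `val_x` and `val_y` as placeholders:

# illustrative only: `net` stands in for an instance of the class this method belongs to
avg_loss, acc = net.validation(val_x, val_y)
print('validation loss: %.4f, accuracy: %.4f' % (avg_loss, acc))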
Example #3
def train_step(tokens, length, training, dropout):
    inputs = tokens[:, :-1]
    encoded = encoder(inputs=inputs,
                      length=length,
                      dropout=dropout,
                      attention_dropout=dropout,
                      use_2d=use_2d)
    logits = decoder(inputs=inputs,
                     encoded=encoded,
                     dropout=dropout,
                     attention_dropout=dropout,
                     use_2d=use_2d,
                     encoded_length=encoded_length)
    ln = utils.get_tensor_shape(tokens)[1]
    mask = tf.sequence_mask(length + 1, ln)
    loss = utils.loss(tokens, logits, mask, use_2d=use_2d)

    def true_fn():
        weights = get_weights()
        grads = tf.gradients(loss, weights)
        opt = optimizer.apply_gradients(zip(grads, weights))
        with tf.control_dependencies([opt]):
            opt = tf.zeros([], tf.bool)
        return opt

    def false_fn():
        opt = tf.zeros([], tf.bool)
        return opt

    opt = tf.cond(training, true_fn, false_fn)
    return loss, opt
Example #4
def main(args):
    # run the model forward with some initial conditions
    # these are the labels.
    f, g = get_initial_conditions(args)
    u = run_heat_forward(f, args)

    # create new initial conditions
    f_est, g_est = get_initial_conditions(args)

    for _ in range(args.num_inv_steps):
        u_est = run_heat_forward(f_est, args)

        # compute the error and gradients
        err = ((u_est - u.data)**2).sum()
        f_est.register_hook(save_grad('f_est'))
        err.backward()
        f_est = f_est - args.lr * grads['f_est']
        print('u_T error: {:.4f}'.format(err))
        f_err = loss(f_est, f)  # equivalent to ((f - f_est)**2).sum()
        print('f_err: {:.4f}'.format(f_err))

    if args.inverse_debug:
        img, fig = plot_image(f, title='True Initial Condition (f)')
        plt.figure(1)
        img, fig = plot_image(f_est,
                              title='Estimated Initial Condition (f_est)')
        plt.figure(2)
        plt.show()
    return f, f_est
Example #5
    def train(self, x_set, y_set):
        """
        Train function: trains the model by first splitting the training
        dataset into train and validation sets. For each epoch the original
        dataset is shuffled and split again; at the end of each epoch the
        validation function reports the accuracy and average loss for
        that epoch.
        :param x_set: the complete training dataset.
        :param y_set: the correlated classes (labels).
        """
        loss_sum = 0
        for i in range(EPOCHS):
            x_set, y_set = utils.shuffle(x_set, y_set)
            train_x, train_y, val_x, val_y = utils.split_validation(
                x_set, y_set, VALIDATION_SIZE)
            train_x, train_y = utils.shuffle(train_x, train_y)

            # running of each example from the train dataset.
            for x, y in zip(train_x, train_y):
                x = np.reshape(x, (1, x.shape[0]))
                z1, h1, z2 = self.feedforward(x)
                probs = utils.softmax(self.weights2, h1, self.bias2, CLASSES)
                loss = utils.loss(probs[int(y)])
                loss_sum += loss
                self.backprop(x, y, z1, h1, z2, probs)
            val_loss, acc = self.validation(val_x, val_y)
Example #6
def testStep(patchLR, patchHR, maskHR, model, optimizer, loss, metric,
             testLoss, testPSNR):
    predPatchHR = checkpoint.model(patchLR, training=False)
    loss = loss(patchHR, maskHR, predPatchHR)
    metric = metric(patchHR, maskHR, predPatchHR)

    testLoss(loss)
    testPSNR(metric)
Example #7
def cross_validation(classifier, train_data, train_target, n_folds=5):
    """
    Cross validation function.

    :param classifier:
    :param train_data:
    :param train_target:
    :param n_folds:
    :return:
    """
    cv_param_names = list(classifier.grid_param.keys())
    list_comb = [classifier.grid_param[name] for name in cv_param_names]

    # # Cross validation
    # Init the CV criteria
    best_cv_criteria = np.inf
    kf = KFold(n_splits=n_folds, shuffle=True)

    for current_comb in itertools.product(*list_comb):
        L = []
        clf_list = []

        for train_index, test_index in kf.split(train_data):
            param = {
                cv_param_names[i]: current_comb[i]
                for i in range(len(cv_param_names))
            }

            train_data_fold = train_data[train_index]
            train_target_fold = train_target[train_index]

            classifier(parameters=param)
            classifier.fit(train_data=train_data_fold,
                           train_target=train_target_fold)

            test_fold = train_data[test_index]
            pred = classifier.predict(test_data=test_fold)

            clf_param = classifier.get_params()
            clf_list.append(clf_param)

            test_fold_target = train_target[test_index]
            L.append(
                loss(real_targets=test_fold_target, predicted_targets=pred))

        # L = np.array(L, dtype=np.float)
        current_cv_criteria = np.mean(L)

        if current_cv_criteria < best_cv_criteria:
            position = L.index(min(L))
            best_clf_param = clf_list[position]
            best_cv_criteria = current_cv_criteria

    logger.debug('Loss: %f; Cross validated parameters: %s', best_cv_criteria,
                 best_clf_param)
    classifier(parameters=best_clf_param)
    classifier.fit(train_data=train_data, train_target=train_target)
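The snippet assumes a particular classifier-wrapper interface: the object exposes `grid_param`, can be called with `parameters=...` to re-parameterize itself, and provides `fit`, `predict` and `get_params`. A minimal sketch of such a wrapper, purely illustrative and not part of the original code (`SVC` is just a convenient stand-in):

from sklearn.svm import SVC


class ClassifierWrapper:
    """Illustrative wrapper matching the interface cross_validation expects."""

    grid_param = {'C': [0.1, 1.0, 10.0], 'kernel': ['linear', 'rbf']}

    def __init__(self):
        self._params = {}
        self._clf = None

    def __call__(self, parameters):
        # cross_validation re-parameterizes the wrapper by calling it
        self._params = dict(parameters)

    def fit(self, train_data, train_target):
        self._clf = SVC(**self._params)
        self._clf.fit(train_data, train_target)

    def predict(self, test_data):
        return self._clf.predict(test_data)

    def get_params(self):
        return dict(self._params)

With such a wrapper, the call would look like `cross_validation(ClassifierWrapper(), X, y, n_folds=5)`, with the `loss` function and `logger` referenced above in scope.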
Example #8
def test_okutama(data_loader, model, device, epoch):
    model.eval()
    
    actions_meter=AverageMeter()
    # activities_meter=AverageMeter()
    loss_meter=AverageMeter()
    num_boxes = 12
    B = 2
    T = 5
    epoch_timer=Timer()
    with torch.no_grad():
        for batch_data in data_loader:
            # prepare batch data
            batch_data=[b.to(device=device) for b in batch_data]
            batch_size=batch_data[0].shape[0]
            num_frames=batch_data[0].shape[1]
            
            actions_in=batch_data[2].reshape((batch_size,num_frames, num_boxes))
            # activities_in=batch_data[3].reshape((batch_size,num_frames))
            bboxes_num=batch_data[3].reshape(batch_size,num_frames)

            # forward
            actions_scores=model((batch_data[0],batch_data[1],batch_data[3]))
            actions_scores = torch.reshape(actions_scores, (B*T,num_boxes)).to(device=device)
            actions_in_nopad=[]
            

            actions_in=actions_in.reshape((batch_size*num_frames,num_boxes,))
            bboxes_num=bboxes_num.reshape(batch_size*num_frames,)
            for bt in range(batch_size*num_frames):
                N=bboxes_num[bt]
                actions_in_nopad.append(actions_in[bt,:N])

            loss = nn.MultiLabelMarginLoss()
            actions_loss = loss(actions_scores, actions_in)
            actions_loss = Variable(actions_loss, requires_grad = True)


            actions_correct=torch.sum(torch.eq(actions_scores.int(),actions_in.int()).float())

            
            # Get accuracy
            actions_accuracy=actions_correct.item()/(actions_scores.shape[0] * num_boxes)

            actions_meter.update(actions_accuracy, actions_scores.shape[0])

            # Total loss
            loss_meter.update(actions_loss.item(), batch_size)

    test_info={
        'time':epoch_timer.timeit(),
        'epoch':epoch,
        'loss':loss_meter.avg,
        'actions_acc':actions_meter.avg*100
    }

    return test_info
Example #9
    def __init__(self):
        self.img_path = './data/images'
        self.anno_path = './data/annotations'
        self.ft_path = './feature_maps/'
        self.model_path = './checkpoint/'
        self.model_name = 'segmentation.ckpt-285'
        self.model = os.path.join(self.model_path, self.model_name)

        # Parameters
        self.depth = 7
        self.classes = 1
        self.img_size = 32

        # Placeholders
        self.x = tf.placeholder(tf.float32,
                                shape=[None, None, None, self.depth],
                                name='input')
        self.y_true = tf.placeholder(tf.float32,
                                     shape=[None, None, None, self.classes],
                                     name='y_true')
        self.rate = tf.placeholder(tf.float32, name='dropout_rate')
        self.is_training = tf.placeholder(tf.bool, shape=())

        # Build network
        self.y01 = cvmodel.build_model(input=self.x,
                                       drop_rate=0,
                                       is_training=False)

        # Calculate loss + f1
        self.cost_reg, self.f1_vec, self.recall, \
        self.precision, self.specificity, self.accuracy = utils.loss(logits=[self.y01],
                                                      labels=self.y_true,
                                                      classes_weights=[2.])
        # Open session and restore model
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver()
        self.saver.restore(self.sess, self.model)

        # Load data
        self.img_names = utils.load_train(path=self.img_path)
        self.anno_names = utils.load_train(path=self.anno_path)
        self.imgs_ = utils.get_image_array(self.img_names, self.img_size)
        self.annos_ = utils.get_annotation_array(self.anno_names,
                                                 self.img_size)
        n = self.imgs_.shape[0]

        print('\nNumber of images:', n)
        # Get number of trainable variables
        v_nb = np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ])
        print('Number of trainable variables:', v_nb)
Example #10
def train_epoch(train_loader, models, optimizer, logr, epoch):
    model, bprox, mdn = models
    model = model.train()
    block_average_meter = utils.AverageMeter()
    average_meter = utils.AverageMeter()

    for i, batch_data in enumerate(train_loader):
        batch_data = {
            key: val.to(device)
            for key, val in batch_data.items() if val is not None
        }

        # the monocular depth network (mdn) predicts in the range (2, 1000), as in the paper;
        # scale its output to the depth range of the appropriate dataset.
        # the bilateral proxy was also trained to take images in the range (2, 1000)
        # and to output images in the range (2, 1000)
        if args.dataset == 'kitti':
            mdi = torch.clamp(
                F.interpolate(utils.DepthNorm(mdn(batch_data['rgb'])),
                              scale_factor=2), 2, 1000) / 1000 * 85
            inpainted = bprox(
                torch.cat([mdi / 85 * 1000, batch_data['d'] / 85 * 1000],
                          dim=1)) / 1000 * 85
        elif args.dataset == 'nyu_v2':
            mdi = torch.clamp(utils.DepthNorm(mdn(batch_data['rgb'])), 10,
                              1000) / 1000 * 10
            inpainted = bprox(
                torch.cat([mdi / 10 * 1000, batch_data['d'] / 10 * 1000],
                          dim=1)) / 1000 * 10
        else:
            print("invalid dataset")
            exit()

        batch_data['bproxi'] = inpainted
        batch_data['mdi'] = mdi

        pred = model(batch_data)
        loss = utils.loss(pred, batch_data, args.dataset)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        writer.add_scalar('data/train_loss',
                          loss.cpu().data.numpy(),
                          epoch * len(train_loader) + i)
        result = utils.Result()
        result.evaluate(pred, batch_data['gt'])
        block_average_meter.update(result)
        average_meter.update(result)
        if (i + 1) % 20 == 0:
            logr.print(i, epoch, args.lr, len(train_loader),
                       block_average_meter, average_meter)
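Written out with concrete numbers, the KITTI range conversion described in the comments above (values are illustrative only):

# the depth network predicts in (2, 1000); KITTI depths go up to ~85 m
d_net = 500.0                         # hypothetical network output
d_metres = d_net / 1000 * 85          # -> 42.5 m, KITTI scale
d_proxy_in = d_metres / 85 * 1000     # -> 500.0, back in the proxy's (2, 1000) range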
Example #11
    def test_loss(self):
        with self.assertRaises(AssertionError):
            loss([], [], L=0)

        with self.assertRaises(AssertionError):
            loss([], [], L=3)

        size = 10
        y = np.random.randint(2, size=size)
        yhat = np.copy(y)

        self.assertEqual(loss(yhat, y, L=1), 0)
        self.assertEqual(loss(yhat, y, L=2), 0)

        yhat = (y == 0).astype(int)
        self.assertEqual(loss(yhat, y, L=1), size)
        self.assertEqual(loss(yhat, y, L=2), size)

        y = np.array([1, 0, 0, 1, 1])
        yhat = np.array([.9, 0.2, 0.1, .4, .9])

        self.assertEqual(loss(yhat, y, L=1), 1.1)
        self.assertEqual(loss(yhat, y, L=2), 0.43)
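The `loss` function under test is not shown in this listing; one implementation consistent with the assertions above, assuming `L` selects the L1/L2 error and that the result is rounded to two decimals (both assumptions, not confirmed by the source):

import numpy as np

def loss(yhat, y, L):
    # only the L1 and L2 variants are supported, as the tests expect
    assert L in (1, 2)
    yhat = np.asarray(yhat, dtype=float)
    y = np.asarray(y, dtype=float)
    # sum of absolute (L=1) or squared (L=2) errors, rounded as the tests assume
    return np.round(np.sum(np.abs(yhat - y) ** L), 2)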
Example #12
def trainStep(patchLR, patchHR, maskHR, model, optimizer, loss, metric,
              trainLoss, trainPSNR):
    with tf.GradientTape() as tape:

        predPatchHR = model(patchLR, training=True)
        loss = loss(
            patchHR, maskHR, predPatchHR
        )  # Loss(patchHR: tf.Tensor, maskHR: tf.Tensor, predPatchHR: tf.Tensor)

    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    metric = metric(patchHR, maskHR, predPatchHR)
    trainLoss(loss)
    trainPSNR(metric)
Example #13
def forward_pass(x, c, theta, layers_count, regularization=0):
    sample_size, sample_count = x.shape
    labels_count = c.shape[0]
    xs = np.zeros((sample_size, sample_count, layers_count + 1))
    xs[:, :, 0] = x
    for k in range(1, layers_count + 1):
        w1, b = get_net_weights(theta, k - 1, sample_size)
        xs[:, :, k] = net(xs[:, :, k - 1], w1, b)
    theta_layer_size = sample_size + 1 * (sample_size**2)
    loss_weights_idx = layers_count * theta_layer_size
    b = theta[loss_weights_idx:loss_weights_idx + labels_count]
    w = theta[loss_weights_idx + labels_count:loss_weights_idx + labels_count +
              sample_size * labels_count].reshape(labels_count, sample_size).T
    loss_val = loss(xs[:, :, layers_count], c, w, b, regularization)
    probabilities = softmax(xs[:, :, layers_count], w, b)
    return probabilities, loss_val, xs
Example #14
def main():
    qanet = QANet(50)
    init1 = filter(lambda p: p.requires_grad and p.dim() >= 2,
                   qanet.parameters())
    init2 = filter(lambda p: p.requires_grad and p.dim() <= 2,
                   qanet.parameters())
    # Parameter initialization
    for param in init1:
        nn.init.xavier_uniform_(param)
    for param in init2:
        nn.init.normal_(param)

    train = SQuAD(TRAIN_JSON)
    val = SQuAD(DEV_JSON)

    # trainSet = DataLoader(dataset=train, batch_size=4, shuffle=True, collate_fn=collate)
    valSet = DataLoader(dataset=val,
                        batch_size=4,
                        shuffle=True,
                        collate_fn=collate)
    trainSet = DataLoader(dataset=train,
                          batch_size=4,
                          shuffle=True,
                          collate_fn=collate)

    print('length of dataloader', len(trainSet))

    optimizer = torch.optim.Adam(qanet.parameters(), lr=LEARNING_RATE)
    loss_list = []
    for epoch in range(10):
        print('epoch ', epoch)
        for i, (c, q, a) in enumerate(trainSet):
            y_pred = qanet(c, q)
            loss = utils.loss(y_pred, a)
            loss_list.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 200 == 0:
                print('loss ', loss.item())
        with open('your_file.txt', 'w') as f:
            for item in loss_list:
                f.write("%s\n" % item)
            print('loss file written.')
        torch.save(qanet, 'qanet')
        print('model saved.')
Example #15
def test_batch_with_labels(net, file, resize = False, batch_size = 10, image_size = 384, smooth = 1.0, lam = 1.0):
    '''
    Test on a validation dataset (here we only consider BCE loss instead of focal loss).
    No TTA or ensemble used at this case.
    Parameters:
        @net: the object for network model.
        @file: root directory of the validation dataset.
        @resize: boolean flag for image resize.
        @batch_size: batch size
        @image_size: the size that the image is converted to.
        @smooth: number added to both the numerator and denominator when computing the dice loss.
        @lam: weight to balance the dice loss in the final combined loss.
    Return: 
        average loss (BCE + dice) over batches
        F1 score of the test
    '''

    # keep original data
    data_augment = False
    rotate = False
    change_color = False
    test_dataset = utils.MyDataset(file, resize, data_augment, image_size, rotate, change_color)
    dataloader = utils_data.DataLoader(dataset = test_dataset, batch_size = batch_size, shuffle=False)
    epoch_loss = 0.0
    numer = 0.0
    denom = 0.0
    gamma = 0.0
    loss_type = 'bce'
    Loss = utils.loss(smooth, lam, gamma, loss_type)
    for i, batch in enumerate(dataloader):
        print('Test on batch %d'%i)
        image = utils.np_to_var(batch['image'])
        mask = utils.np_to_var(batch['mask'])
        pred = net.forward(image)
        
        loss = Loss.final_loss(pred, mask)
        epoch_loss += loss.data.item() * batch_size
        
        mask = utils.var_to_np(mask)
        pred = utils.var_to_np(pred)
        numer += (mask * (pred > 0.5)).sum()
        denom += mask.sum() + (pred > 0.5).sum()
        
    epoch_loss /= len(test_dataset)
    f1 = 2.0 * numer / denom
    return epoch_loss, f1
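`utils.loss` here is a loss object configured with `smooth`, `lam`, `gamma` and `loss_type` and used through `final_loss`. It is not shown in this listing; a minimal sketch consistent with the docstring (BCE plus a `lam`-weighted soft-dice term with `smooth` added to numerator and denominator), assuming `pred` is already a sigmoid probability map and ignoring the focal (`gamma`) branch:

import torch.nn as nn

class CombinedLoss:
    """Illustrative stand-in for utils.loss(smooth, lam, gamma, loss_type)."""
    def __init__(self, smooth=1.0, lam=1.0, gamma=0.0, loss_type='bce'):
        self.smooth = smooth
        self.lam = lam
        self.gamma = gamma          # focal-loss exponent, unused in this sketch
        self.loss_type = loss_type  # only 'bce' is sketched here
        self.bce = nn.BCELoss()

    def final_loss(self, pred, mask):
        bce = self.bce(pred, mask)
        intersection = (pred * mask).sum()
        dice = 1.0 - (2.0 * intersection + self.smooth) / (pred.sum() + mask.sum() + self.smooth)
        return bce + self.lam * dice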
Example #16
def train(net):
    ###read file
    train_dataset = mnistData(conf.root_path, train=True)
    dataloader = torch.utils.data.DataLoader(train_dataset,
                                             batch_size=conf.batch_size,
                                             shuffle=True,
                                             drop_last=True)
    #dataiter = iter(dataloader)

    ###optimizer
    #optimize = optim.SGD(net.parameters(),lr = conf.lr)
    optimize = optim.Adam(net.parameters(), lr=conf.lr)
    if conf.debug:
        for name, parameter in net.named_parameters():
            print name, parameter.shape
        raw_input("wait")
    for epoch in range(conf.epoch_num):
        for i, data in enumerate(dataloader, 0):
            images, labels = data
            if conf.cuda:
                images, labels = images.cuda(), labels.cuda()
            #print labels.type
            images, labels = Variable(images), Variable(labels)
            v = net(images)
            l = utils.loss(labels, v)
            if conf.visualize:
                conf.train_loss_win = visualize_loss(
                    epoch * len(dataloader) + i, l.data.cpu(),
                    conf.train_loss_env, conf.train_loss_win)
            optimize.zero_grad()
            l.backward()
            optimize.step()
            print "step is {},loss is {}".format(i, l.data[0])
        print "epoch is {},loss is {}".format(epoch, l.data[0])
        if epoch != 0 and epoch % 10 == 0:
            torch.save(net.state_dict(), conf.model_name + "_" + str(epoch))
import numpy as np

from test_utils import gradient_test
from utils import gradient_loss_theta
from utils import gradient_loss_x
from utils import loss

if __name__ == "__main__":
    sample_size = 5
    c = np.array([[1], [0], [0]])
    labels_count = c.shape[0]
    sample_count = c.shape[1]

    x = np.random.randn(sample_size, sample_count)
    w = np.random.randn(sample_size, labels_count)
    b = np.random.randn(labels_count, 1)

    dx = np.random.randn(sample_size, sample_count)
    dw = np.random.randn(sample_size, labels_count)
    db = np.random.randn(labels_count, 1)
    dtheta = np.concatenate((db.flatten('F'), dw.flatten('F'))) \
        .reshape(db.shape[0] * db.shape[1] + dw.shape[0] * dw.shape[1], 1)

    gradient_test(lambda e: loss(x + dx * e, c, w, b),
                  lambda e: e * np.matmul(dx.T, gradient_loss_x(x, c, w, b)).item(),
                  'Softmax gradient test w.r.t to x', 'q1_x')
    gradient_test(lambda e: loss(x, c, w + dw * e, b + db * e),
                  lambda e: e * np.matmul(dtheta.T, gradient_loss_theta(x, c, w, b)).item(),
                  'Softmax gradient test w.r.t to w', 'q1_w')
def func(coefs):
    return ut.loss(coefs, y0, knots, splrep, P, u, 0, 0, 0, 0)  
n = len(x)
coef0 = np.ones((K-1)*p + 1)

l1, l2, l3, l4 = [0, 0, 0, 0]

P = ut.get_projecting_matrix(knots)
splrep = ut.get_splrep(X, knots)

# =============================================================================
# Test objective_func and grad_func function 
# =============================================================================

y0 = np.arange(1,8,dx)
u=3.0
coefs = np.concatenate((-0.02*coef0, coef0))
objfunc = ut.loss(coefs, y0, knots, splrep, P, u, 0, 0, 0, 0)
#%%
print("objection function =",objfunc)
gradfunc = np.round(ut.grad_func(coefs, y0, knots, splrep, P, u),2)
print()
print("---- Test grad_func function by comparing it with approx_fprime function -----")
print("grad-func = ", gradfunc)
def func(coefs):
    return ut.loss(coefs, y0, knots, splrep, P, u, 0, 0, 0, 0)  
approx_fprime = np.round(optimize.approx_fprime(coefs, func, epsilon=1e-6),2)
print("approx-fprime = ", approx_fprime)
print("Is grad-func equal to approx-fprime? :", np.array_equal(gradfunc, approx_fprime))
#%%

print()
print("-----Test return_group_to_index--------")
Example #20
def train():
    source_data, target_data, test_data, word2id = utils.load_data()
    embeddings = utils.load_embeddings(word2id)

    random.seed(1)
    random.shuffle(target_data)

    cv_losses = []
    for k in range(1, 11):
        train_data, dev_data = utils.train_dev_split(target_data, k)
        model_file = FLAGS.model_file + str(k)
        print model_file

        print "训练集1数据大小:%d" % len(source_data)
        print "训练集2数据大小:%d" % len(train_data)
        print "验证集数据大小:%d" % len(dev_data)
        print "embedding大小:(%d,%d)" % (embeddings.shape[0],
                                       embeddings.shape[1])

        model_dir = '../model'
        graph = tf.Graph()
        sess = tf.Session(graph=graph)
        with graph.as_default():
            model = getattr(models, FLAGS.model_name)(embeddings)
            saver = tf.train.Saver(tf.global_variables())
            if FLAGS.restore == 1:
                saver.restore(sess, os.path.join(model_dir, FLAGS.model_file))
                print "Restore from pre-trained model"
            else:
                sess.run(tf.global_variables_initializer())
            print "Train start!"

            best_loss = 1e6
            best_epoch = 0
            not_improved = 0
            for epoch in range(FLAGS.max_epoch):

                print epoch, "================================================"
                train_loss = []
                ground_trues = []
                predicts = []

                for batch_data in utils.minibatches2(source_data,
                                                     train_data,
                                                     FLAGS.batch_size,
                                                     ratio=1,
                                                     mode='train'):
                    loss, predict = model.train_step(sess, batch_data[:3],
                                                     batch_data[3])
                    train_loss.extend(loss)
                    predicts.extend(predict)
                    ground_trues.extend(batch_data[2])
                train_loss = utils.loss(ground_trues, train_loss)
                p, r, f1 = utils.score(ground_trues, predicts)
                print "%d-fold Train epoch %d finished. loss:%.4f  p:%.4f r:%.4f f1:%.4f" % (
                    k, epoch, train_loss, p, r, f1)

                valid_loss = []
                ground_trues = []
                predicts = []
                for batch_data in utils.minibatches(dev_data,
                                                    FLAGS.batch_size,
                                                    mode='dev'):
                    loss, predict = model.valid_step(sess, batch_data, 2)
                    valid_loss.extend(loss)
                    predicts.extend(predict)
                    ground_trues.extend(batch_data[2])
                valid_loss = utils.loss(ground_trues, valid_loss)
                p, r, f1 = utils.score(ground_trues, predicts)
                print "%d-fold,Valid epoch %d finished. loss:%.4f  p:%.4f r:%.4f f1:%.4f" % (
                    k, epoch, valid_loss, p, r, f1)

                if valid_loss < best_loss:
                    best_loss = valid_loss
                    best_epoch = epoch
                    not_improved = 0
                    print "save model!"
                    saver.save(sess, os.path.join(model_dir, model_file))
                else:
                    not_improved += 1
                    if not_improved > 4:
                        print "停止训练!"
                        break
                print
            print "Best epoch %d  best loss %.4f" % (best_epoch, best_loss)
            print "#########################################################"
            cv_losses.append(best_loss)
    print "final cv loss: %.4f" % (sum(cv_losses) / len(cv_losses))
	def __init__(self, n_iterations=1000, activation_function=Sigmoid, loss=SquareLoss, learning_rate=0.01):
		self.n_iter = n_iterations
		self.activ_func = activation_function()
		self.loss = loss()
		self.learning_rate = learning_rate
Example #22
            for i in range(0, N, BATCH_SIZE):
                view_batch, pos_batch = cameras[i:i + BATCH_SIZE], positions[
                    i:i + BATCH_SIZE]
                x = torch.from_numpy(view_batch[:, 0:5, ...]).to(device)
                p = torch.from_numpy(pos_batch[:, 0:5, ...]).to(device)
                x_q = torch.from_numpy(view_batch[:, -1, ...]).to(device)
                p_q = torch.from_numpy(pos_batch[:, -1, ...]).to(device)
                output_image, priors, posteriors = qgn(x,
                                                       p,
                                                       x_q,
                                                       p_q,
                                                       global_step.float(),
                                                       training=True)

                global_step += 1
                total_loss, model_loss, dist_loss = loss(
                    output_image, x_q, priors, posteriors, NO_FEATURES)
                writer.add_scalar('total loss', total_loss, global_step)
                writer.add_scalar('model loss', model_loss, global_step)
                writer.add_scalar('dist loss', dist_loss, global_step)

                writer.add_image('output_image_train', output_image[0],
                                 global_step)
                writer.add_image('input_images_train', make_grid(x[0]),
                                 global_step)
                #writer.add_image('input_image_2', x[0][1], global_step)
                #writer.add_image('input_image_3', x[0][2], global_step)
                #writer.add_image('input_image_4', x[0][3], global_step)
                #writer.add_image('input_image_5', x[0][4], global_step)

                total_loss.backward()
                optimizer.step()
Example #23
and returns the average loss across the examples.
'''
st.code('''
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

def loss(model, x, y):
  loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True)
  y_ = model(x)
  return loss_object(y_true=y, y_pred=y_)

crossentropy_loss = loss(model, features, labels)
print(f"Loss test: {crossentropy_loss:.2f}")
''')

crossentropy_loss = loss(model, features, labels)
st.write(f"Loss test:  {crossentropy_loss:.2f}")
'''
```tf.GradientTape``` calculates gradients used to optimise the model.
'''
st.code('''
def grad(model, inputs, targets):
    with tf.GradientTape() as tape:
        loss_value = loss(model, inputs, targets)
    return loss_value, tape.gradient(loss_value, model.trainable_variables)
''')
'''
#### Create an optimiser

An *optimizer* applies the computed gradients to the model's variables
to minimize the ```loss``` function.
'''
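# The snippet is cut off at this point; a hedged sketch of the code cell this
# section leads into, assuming the standard Keras SGD optimizer (illustrative,
# not taken from the original source):
st.code('''
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
''')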
Example #24
no_epochs = 200

# train the model
feat_train = models.calc_features(x_train, choice=feat_type, param=param)
for epoch in range(no_epochs):

    # forward pass: calculate the model's prediction on training data
    y_pred_train = learning_model.forward(feat_train)

    # backward pass: train the linear regression model, using gradient descent
    learning_model.backward(y_train, y_pred_train, feat_train)

# compute loss on test data
feat_test = models.calc_features(x_test, choice=feat_type, param=param)
y_pred_test = learning_model.forward(feat_test)
loss_test_linear = utils.loss(y_test, y_pred_test)

#----------------------------------------------------------------------
# Polynomial
#----------------------------------------------------------------------

# load the polynomial parameters
feat_type = dict_hyper['poly']['feat_type']
param = dict_hyper['poly']['param']
lr = dict_hyper['poly']['lr']
weight_init = dict_hyper['poly']['weight_init']

# generate artificial data
(x_train, y_train, x_val, y_val, x_test, y_test, left_limit,
 right_limit) = utils.data_generator(utils.poly)
Example #25
import numpy as np
import matplotlib.pyplot as plt

#Define constants for utils
utils.BAUD_RATE = 960
utils.SAMP_RATE = 96000
utils.FREQ_LOW = 1e3
utils.FREQ_HIGH = 1e6

#randomly generate a true binary message
sig_len = 500
true_msg = np.concatenate(
    (np.array(5 * [0, 1]), np.random.randint(2, size=sig_len)))

#use utils to generate an FSK signal (with/wo noise)
meas_sig = utils.gen_fsk(true_msg, pre=0.1, post=0.1, noise=0.13)

#use utils to decode the measured FSK signal
meas_msg = utils.decode_fsk_zc(meas_sig, sig_thresh=0.5, zc_thresh=0.4)
#meas_msg = utils.decode_fsk_PA(meas_sig)

#Find the percent error between the two bit sequences
bit_err = utils.loss(meas_msg, true_msg)
print(str(bit_err * 100) + "% error")

print(meas_msg)
print(true_msg)

#plt.plot(meas_sig)
#plt.show()
Example #26
def main(args):

    # get arguments
    rate_num = args.rate_num
    use_side_feature = args.use_side_feature
    lr = args.lr
    weight_decay = args.weight_decay
    num_epochs = args.num_epochs
    hidden_dim = args.hidden_dim
    side_hidden_dim = args.side_hidden_dim
    out_dim = args.out_dim
    drop_out = args.drop_out
    split_ratio = args.split_ratio
    save_steps = args.save_steps
    log_dir = args.log_dir
    saved_model_folder = args.saved_model_folder
    use_data_whitening = args.use_data_whitening
    use_laplacian_loss = args.use_laplacian_loss
    laplacian_loss_weight = args.laplacian_loss_weight

    # mark and record the training file, save the training arguments for future analysis
    post_fix = '/' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    log_dir = log_dir + post_fix
    writer = SummaryWriter(log_dir=log_dir)
    f = open(log_dir + '/test.txt', 'a')
    f.write(str(vars(args)))
    f.close()

    print(log_dir)

    #get prepared data
    feature_u, feature_v, feature_dim, all_M_u, all_M_v, side_feature_u, side_feature_v, all_M, mask, user_item_matrix_train, user_item_matrix_test, laplacian_u, laplacian_v = prepare(
        args)

    if not os.path.exists(saved_model_folder):
        os.makedirs(saved_model_folder)
    weights_name = saved_model_folder + post_fix + '_weights'

    net = utils.create_models(feature_u, feature_v, feature_dim, hidden_dim,
                              rate_num, all_M_u, all_M_v, side_hidden_dim,
                              side_feature_u, side_feature_v, use_side_feature,
                              out_dim, drop_out)
    net.train()  # in train mode

    # create AMSGrad optimizer
    optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
    Loss = utils.loss(all_M, mask, user_item_matrix_train,
                      laplacian_loss_weight)
    iter_bar = tqdm(range(num_epochs), desc='Iter (loss=X.XXX)')
    for epoch in iter_bar:

        optimizer.zero_grad()

        score = net.forward()

        if use_laplacian_loss:
            loss = Loss.laplacian_loss(score, laplacian_u, laplacian_v)
        else:
            loss = Loss.loss(score)

        loss.backward()

        optimizer.step()

        with torch.no_grad():
            rmse = Loss.rmse(score)

            val_rmse = validate(score, rate_num, user_item_matrix_test)
            iter_bar.set_description(
                'Iter (loss=%5.3f, rmse=%5.3f, val_rmse=%5.5f)' %
                (loss.item(), rmse.item(), val_rmse.item()))

            #             writer.add_scalars('scalar',{'loss': loss.item(), 'rmse': rmse.item(), 'val_rmse':val_rmse.item(),},epoch)
            writer.add_scalars('scalar', {'loss': loss.item()}, epoch)

        if epoch % save_steps == 0:
            torch.save(net.state_dict(), weights_name)

    rmse = Loss.rmse(score)
    print('Final training RMSE: ', rmse.data.item())
    torch.save(net.state_dict(), weights_name)

    sm = nn.Softmax(dim=0)
    score = sm(score)
    score_list = torch.split(score, rate_num)
    pred = 0
    for i in range(rate_num):
        pred += (i + 1) * score_list[0][i]

    pred = utils.var_to_np(pred)

    #     pred = np.load('./prediction.npy')

    ### test the performance
    #     user_item_matrix_test = np.load('./processed_dataset/user_item_matrix_test.npy')
    test_mask = user_item_matrix_test > 0

    square_err = (pred * test_mask - user_item_matrix_test)**2
    mse = square_err.sum() / test_mask.sum()
    test_rmse = np.sqrt(mse)
    print('Test RMSE: ', test_rmse)
Example #27
def valid(data_loader, epoch, Hnet, Rnet):
    start_time = time.time()
    Hnet.eval()  # eval mode for validation
    Rnet.eval()

    val_Hlosses = AverageMeter()
    val_Rlosses = AverageMeter()

    # loop over batches
    for i, data in enumerate(data_loader):
        Hnet.zero_grad()
        Rnet.zero_grad()

        all_pics = data  # all pics contains cover images and secret images
        this_batch_size = int(all_pics.size()[0] /
                              2)  # get true batch size of this step

        # first half of images will become cover images, the rest are treated as secret images
        cover_img = all_pics[0:this_batch_size, :, :, :]  # batchsize,3,256,256
        secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]

        # concat cover images and secret images as input of H-net
        concat_img = torch.cat([cover_img, secret_img], dim=1)

        if opt.use_gpu:
            cover_img = cover_img.cuda()
            secret_img = secret_img.cuda()
            concat_img = concat_img.cuda()

        concat_imgv = Variable(concat_img.clone(),
                               requires_grad=False)  # no backpropagation
        cover_imgv = Variable(cover_img.clone(), requires_grad=False)
        secret_imgv = Variable(secret_img.clone(), requires_grad=False)

        stego = Hnet(concat_imgv)
        errH = loss(stego, cover_imgv)  # loss between cover and container
        val_Hlosses.update(errH.detach().item(), this_batch_size)

        secret_rev = Rnet(stego)
        errR = loss(secret_rev,
                    secret_imgv)  # loss between secret and revealed secret
        val_Rlosses.update(errR.detach().item(), this_batch_size)

    val_hloss = val_Hlosses.avg
    val_rloss = val_Rlosses.avg
    val_sumloss = val_hloss + opt.beta * val_rloss

    # save pictures and difference images
    if (epoch % opt.result_freq == 0) or (epoch == opt.epochs - 1):
        save_pic('valid', cover_img, stego, secret_img, secret_rev,
                 opt.validation_pics, opt.batch_size, epoch)

    # print log
    epoch_time = time.time() - start_time
    epoch_log = 'valid:' + '\n'
    epoch_log += "epoch %d/%d : " % (epoch, opt.epochs)
    epoch_log += "one epoch time is %.0fm %.0fs" % (epoch_time // 60,
                                                    epoch_time % 60) + "\n"
    epoch_log += "learning rate: optimizerH_lr = %.8f\t optimizerR_lr = %.8f" % (
        optimizerH.param_groups[0]['lr'],
        optimizerR.param_groups[0]['lr']) + "\n"
    # schedulerH.get_lr()[0] schedulerR.get_lr()[0]
    epoch_log += "Hloss=%.6f\t Rloss=%.6f\t sumLoss=%.6f" % (
        val_hloss, val_rloss, val_sumloss) + "\n"

    if epoch % opt.loss_freq == 0:
        print_log(epoch_log, logPath)
    else:
        print_log(epoch_log, logPath, console=False)

    return val_hloss, val_rloss, val_sumloss
Example #28
def train(data_loader, epoch, Hnet, Rnet):
    start_time = time.time()
    train_Hlosses = AverageMeter()
    train_Rlosses = AverageMeter()
    train_SumLosses = AverageMeter()

    # early stopping
    # patience = 7
    # early_stopping = EarlyStopping(patience=patience, verbose=True)

    Hnet.train()  # training mode
    Rnet.train()

    # loop over batches
    for i, data in enumerate(data_loader):
        Hnet.zero_grad()
        Rnet.zero_grad()

        all_pics = data  # allpics contains cover images and secret images
        this_batch_size = int(all_pics.size()[0] /
                              2)  # get true batch size of this step

        # first half of images will become cover images, the rest are treated as secret images
        cover_img = all_pics[0:this_batch_size, :, :, :]  # batchsize,3,256,256
        secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
        # concat cover images and secret images as input of H-net
        concat_img = torch.cat([cover_img, secret_img], dim=1)

        if opt.use_gpu:
            cover_img = cover_img.cuda()
            secret_img = secret_img.cuda()
            concat_img = concat_img.cuda()

        concat_imgv = Variable(concat_img.clone(), requires_grad=False)
        cover_imgv = Variable(cover_img.clone(), requires_grad=False)
        secret_imgv = Variable(secret_img.clone(), requires_grad=False)

        stego = Hnet(concat_imgv)
        errH = loss(stego, cover_imgv)  # loss between cover and container
        train_Hlosses.update(errH.item(), this_batch_size)

        secret_rev = Rnet(stego)
        errR = loss(secret_rev,
                    secret_imgv)  # loss between secret and revealed secret
        train_Rlosses.update(errR.item(), this_batch_size)

        err_sum = errH + opt.beta * errR  # sum_loss
        train_SumLosses.update(err_sum.item(), this_batch_size)

        err_sum.backward()
        optimizerH.step()
        optimizerR.step()

    # TODO: early stop
    # early_stopping(val_SumLosses.avg, Hnet)
    # early_stopping(val_SumLosses.avg, Rnet)

    # if early_stopping.early_stop:
    #     print("Early stopping")
    #     break

    # save result pictures
    if (epoch % opt.result_freq == 0) or (epoch == opt.epochs - 1):
        save_pic('train', cover_img, stego, secret_img, secret_rev,
                 opt.result_pics, opt.batch_size, epoch)
        # save_batch_pic('train', cover_img, stego, secret_img, secret_rev, opt.result_pics, opt.batch_size, epoch)

    # save model params
    if epoch % opt.checkpoint_freq == 0 or epoch == opt.epochs - 1:
        torch.save(
            Hnet.state_dict(),
            os.path.join(
                opt.checkpoint_path, 'H_epoch%04d_sumloss%.6f_lr%.6f.pth' %
                (epoch, train_SumLosses.avg,
                 optimizerH.param_groups[0]['lr'])))
        torch.save(
            Rnet.state_dict(),
            os.path.join(
                opt.checkpoint_path, 'R_epoch%04d_sumloss%.6f_lr%.6f.pth' %
                (epoch, train_SumLosses.avg,
                 optimizerR.param_groups[0]['lr'])))

    # print log
    epoch_time = time.time() - start_time
    epoch_log = 'train:' + '\n'
    epoch_log += "epoch %d/%d : " % (epoch, opt.epochs)
    epoch_log += "one epoch time is %.0fm %.0fs" % (epoch_time // 60,
                                                    epoch_time % 60) + "\n"
    epoch_log += "learning rate: optimizerH_lr = %.8f\t optimizerR_lr = %.8f" % (
        optimizerH.param_groups[0]['lr'],
        optimizerR.param_groups[0]['lr']) + "\n"
    # schedulerH.get_lr()[0] schedulerR.get_lr()[0]
    epoch_log += "Hloss=%.6f\t Rloss=%.6f\t sumLoss=%.6f" % (
        train_Hlosses.avg, train_Rlosses.avg, train_SumLosses.avg) + "\n"

    if epoch % opt.loss_freq == 0:
        print_log(epoch_log, logPath)
    else:
        print_log(epoch_log, logPath, console=False)
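As a quick shape check for the cover/secret concatenation used in the training and validation loops above (a batch of 4 is assumed purely for illustration):

import torch

cover_img = torch.zeros(4, 3, 256, 256)    # first half of the loaded batch (batchsize, 3, 256, 256)
secret_img = torch.zeros(4, 3, 256, 256)   # second half of the loaded batch
concat_img = torch.cat([cover_img, secret_img], dim=1)
print(concat_img.shape)                    # torch.Size([4, 6, 256, 256]) -- the 6-channel H-net input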
Example #29
 epoch_loss = 0
 train_loss = []
 # val_loss = []
 # epoch_train_loss = []
 # epoch_val_loss = []
 epoch_samples = 0
 #    print("Epoch Loss for Epoch : {} is {} ".format(epoch,epoch_loss))
 for batch_idx, batch in enumerate(train_loader):
     model.train()
     img = batch[0].to(device)
     mask1 = batch[1].to(device)
     mask2 = batch[2].to(device)
     # #          print("Shapes", img.shape, mask.shape)
     epoch_samples += img.size(0)
     pred_mask1, pred_mask2 = model(img)
     loss_model = loss(mask1, mask2, pred_mask1, pred_mask2)
     #          print(loss_model.item())
     #           #loss_model = criterion(pred_mask, mask)
     loss_model = loss_model.mean()
     epoch_loss += loss_model.item()
     optimizer.zero_grad()
     loss_model.backward()
     optimizer.step()
     # #          print("EPOCH:{0} || BATCH NO:{1} || LOSS:{2}".format(epoch,batch_idx,loss_model.item()))
     if batch_idx % 3000 == 0:
         torch.save(
             model.module.state_dict(),
             "../outputs12/checkpoints/ckpt_{}_{}.pth".format(
                 batch_idx, epoch))
         metrics["batch_idx"] = batch_idx
         metrics["epoch"] = epoch
Example #30
# *********************************************************************************************************
# textRCNN
# hidden_size = 128
# logits = cls_model.textRCNN(input_x, dropout_keep_prob, batch_size, sequence_length, vocab_size, embed_size,
#                             num_classes, hidden_size)
# *********************************************************************************************************

if multi_label_flag:
    print("going to use multi label loss.")
    loss_val = utils.loss_multilabel(tf.cast(input_y, tf.float32),
                                     logits,
                                     l2_lambda=l2_lambda)
else:
    print("going to use single label loss.")
    loss_val = utils.loss(tf.cast(input_y, tf.float32),
                          logits,
                          l2_lambda=l2_lambda)

predictions = tf.argmax(logits, 1, name="predictions")  # shape:[None,]
labels = tf.argmax(input_y, 1, output_type=tf.int32, name="labels")
correct_prediction = tf.equal(tf.cast(predictions, tf.int32),
                              labels)  # tf.argmax(logits, 1)-->[batch_size]

accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                          name="Accuracy")  # shape=()
learning_rate = tf.train.exponential_decay(
    learning_rate,
    tf.train.get_or_create_global_step(),
    decay_steps,
    decay_rate,
    staircase=True)