Code example #1
# Assumed imports (Best, Metrics, valid_model, export, Transformer and
# FastTransformer are project-local helpers defined elsewhere in the repository)
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.functional import softmax
from tqdm import tqdm

def train_model(args, model, train, dev, src, trg, teacher_model=None, save_path=None, maxsteps=None):

    if args.tensorboard and (not args.debug):
        from tensorboardX import SummaryWriter
        writer = SummaryWriter('{}{}'.format(args.event_path, args.prefix+args.hp_str))

    # optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    if args.optimizer == 'Adam':
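        # betas=(0.9, 0.98) and eps=1e-9 match the Adam settings from the
        # original Transformer paper (Vaswani et al., 2017)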
        opt = torch.optim.Adam(params, betas=(0.9, 0.98), eps=1e-9)
    else:
        raise NotImplementedError

    # if resume training
    if (args.load_from is not None) and (args.resume):
        with torch.cuda.device(args.gpu):   # very important.
            offset, opt_states = torch.load(os.path.join(args.model_path, args.load_from + '.pt.states'),
                                            map_location=lambda storage, loc: storage.cuda())
            opt.load_state_dict(opt_states)
    else:
        offset = 0

    # metrics
    if save_path is None:
        save_path = args.model_name

    best = Best(max, *['BLEU_dec{}'.format(ii+1) for ii in range(args.valid_repeat_dec)], \
                     'i', model=model, opt=opt, path=save_path, gpu=args.gpu, \
                     which=range(args.valid_repeat_dec))
    train_metrics = Metrics('train loss', *['loss_{}'.format(idx+1) for idx in range(args.train_repeat_dec)], data_type = "avg")
    dev_metrics = Metrics('dev loss', *['loss_{}'.format(idx+1) for idx in range(args.valid_repeat_dec)], data_type = "avg")
    if not args.no_tqdm:
        progressbar = tqdm(total=args.eval_every, desc='start training.')

    for iters, batch in enumerate(train):
        iters += offset

        if iters % args.save_every == 0:
            args.logger.info('save (back-up) checkpoints at iter={}'.format(iters))
            with torch.cuda.device(args.gpu):
                torch.save(best.model.state_dict(), '{}.pt'.format(args.model_name))
                torch.save([iters, best.opt.state_dict()], '{}.pt.states'.format(args.model_name))

        if iters % args.eval_every == 0:
            dev_metrics.reset()
            outputs_data = valid_model(args, model, dev, dev_metrics, teacher_model=None, print_out=True)

            if args.tensorboard and (not args.debug):
                for ii in range(args.valid_repeat_dec):
                    writer.add_scalar('dev/single/Loss_{}'.format(ii + 1), getattr(dev_metrics, "loss_{}".format(ii+1)), iters)
                    writer.add_scalar('dev/single/BLEU_{}'.format(ii + 1), outputs_data['bleu'][ii], iters)

                writer.add_scalars('dev/multi/BLEUs', {"iter_{}".format(idx+1):bleu for idx, bleu in enumerate(outputs_data['bleu']) }, iters)
                writer.add_scalars('dev/multi/Losses', \
                    { "iter_{}".format(idx+1):getattr(dev_metrics, "loss_{}".format(idx+1)) \
                     for idx in range(args.valid_repeat_dec) }, \
                     iters)

            if not args.debug:
                best.accumulate(*outputs_data['bleu'], iters)
                values = list( best.metrics.values() )
                args.logger.info("best model : {}, {}".format( "BLEU=[{}]".format(", ".join( [ str(x) for x in values[:args.valid_repeat_dec] ] ) ), \
                                                              "i={}".format( values[args.valid_repeat_dec] ), ) )
            args.logger.info('model:' + args.prefix + args.hp_str)

            # ---set-up a new progressor---
            if not args.no_tqdm:
                progressbar.close()
                progressbar = tqdm(total=args.eval_every, desc='start training.')

        if maxsteps is None:
            maxsteps = args.maximum_steps

        if iters > maxsteps:
            args.logger.info('reach the maximum updating steps.')
            break

        # --- training --- #
        model.train()
        # Step-decay schedule: divide args.lr by 5 every 50k steps, floored at 3e-5.
        # The commented-out alternative resembles the inverse-sqrt warmup schedule
        # from the Transformer paper.
        def get_learning_rate(i, lr0=0.1, disable=False):
            if not disable:
                return max(0.00003, args.lr / math.pow(5, math.floor(i / 50000)))
                # return lr0 * 10 / math.sqrt(args.d_model) * min(
                #         1 / math.sqrt(i), i / (args.warmup * math.sqrt(args.warmup)))
            return args.lr
        opt.param_groups[0]['lr'] = get_learning_rate(iters + 1, disable=args.disable_lr_schedule)
        opt.zero_grad()

        # prepare the data
        inputs, input_masks, \
        targets, target_masks, \
        sources, source_masks,\
        encoding, batch_size = model.quick_prepare(batch)

        #print(input_masks.size(), target_masks.size(), input_masks.sum())

        if type(model) is Transformer:
            decoder_inputs, decoder_masks = inputs, input_masks
        elif type(model) is FastTransformer:
            decoder_inputs, _, decoder_masks = \
                    model.prepare_initial(encoding, sources, source_masks, input_masks)
            initial_inputs = decoder_inputs

        if type(model) is Transformer:
            out = model(encoding, source_masks, decoder_inputs, decoder_masks)
            loss = model.cost(targets, target_masks, out)
            losses = [loss]  # keep a list so the metric/logging code below works for both model types
        elif type(model) is FastTransformer:
            losses = []
            for iter_ in range(args.train_repeat_dec):

                curr_iter = min(iter_, args.num_shared_dec-1)
                next_iter = min(curr_iter + 1, args.num_shared_dec-1)

                out = model(encoding, source_masks, decoder_inputs, decoder_masks, iter_=curr_iter)
                losses.append( model.cost(targets, target_masks, out=out, iter_=curr_iter) )

                logits = model.decoder[curr_iter].out(out)
                # choose the next decoder inputs: greedy argmax, or multinomial
                # sampling from the (detached) softmax distribution
                if args.use_argmax:
                    _, argmax = torch.max(logits, dim=-1)
                else:
                    logits = softmax(logits)
                    logits_sz = logits.size()
                    logits_ = Variable(logits.data, requires_grad=False)
                    argmax = torch.multinomial(logits_.contiguous().view(-1, logits_sz[-1]), 1)\
                            .view(*logits_sz[:-1])

                # embed the selected tokens with the next decoder iteration's
                # output-embedding matrix
                decoder_inputs = F.embedding(argmax, model.decoder[next_iter].out.weight *
                                             math.sqrt(args.d_model))
                if args.sum_out_and_emb:
                    decoder_inputs += out

                if args.diff_loss_w > 0 and (not args.diff_loss_dec1 or iter_ == 0):
                    # L2-normalize the decoder outputs, then penalize the (clamped)
                    # cosine similarity between adjacent positions
                    out_norm = out.div(out.norm(p=2, dim=-1, keepdim=True))
                    diff_loss = torch.mean((out_norm[:, 1:, :] * out_norm[:, :-1, :]).sum(-1).clamp(min=0)) * args.diff_loss_w

                    # add this regularizer to the list of losses
                    losses.append(diff_loss)

            loss = sum(losses)

        # accumulate the training metrics
        train_metrics.accumulate(batch_size, *losses, print_iter=None)

        # train the student
        loss.backward()
        if args.grad_clip > 0:
            # clip_grad_norm_ is the in-place variant; the older clip_grad_norm is deprecated
            total_norm = nn.utils.clip_grad_norm_(params, args.grad_clip)
        opt.step()

        info = 'training step={}, loss={}, lr={:.5f}'.format(
                    iters,
                    "/".join(["{:.3f}".format(export(ll)) for ll in losses]),
                    opt.param_groups[0]['lr'])

        if iters % args.eval_every == 0 and args.tensorboard and (not args.debug):
            for idx in range(args.train_repeat_dec):
                writer.add_scalar('train/single/Loss_{}'.format(idx+1), export(losses[idx]), iters)

        if args.no_tqdm:
            if iters % args.eval_every == 0:
                args.logger.info(train_metrics)
        else:
            progressbar.update(1)
            progressbar.set_description(info)
        train_metrics.reset()
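
The step-decay schedule above divides the base learning rate by 5 every 50k steps and floors it at 3e-5. A minimal standalone sketch of the same formula (assuming args.lr = 3e-4; step_decay_lr is a hypothetical helper, not part of the repository):

import math

def step_decay_lr(i, base_lr=3e-4):
    # identical shape to get_learning_rate above, with the args dependency removed
    return max(0.00003, base_lr / math.pow(5, math.floor(i / 50000)))

for step in [1, 50000, 100000, 200000]:
    print(step, step_decay_lr(step))
# 1 -> 3e-4, 50000 -> 6e-5, then the 3e-5 floor takes over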
Code example #2
# Assumed import (the checks below compare NumPy arrays)
import numpy as np

def main_test():
    print("----------START OF TESTS-----------")
    # Get configuration
    conf = config()
    
    ################################### Task 1.1: Parameter initialization
    
    from model import initialization
    params = initialization(conf)
    
    ################################### Task 1.2: Forward propagation
    
    # Import Activation functions [1.2a & 1.2b]
    from model import activation
    from model import softmax
    
    # Test Activation functions
    from tests import task_2a
    from tests import task_2b
    input_Z, expected_A = task_2a()
    A = activation(input_Z, 'relu')
    print('Activation valid?:',np.array_equal(expected_A, A))
    input_Z, expected_S = task_2b()
    S = softmax(input_Z)
    print('Softmax valid?:',np.array_equal(np.round(expected_S,decimals=3), np.round(S,decimals=3)))
    
    # Import Forward propagation [1.2c]
    from model import forward
    from tests import task_2c
    
    ### Test Forward propagation
    conf, X_batch, params, expected_Z_1, expected_A_1, expected_Z_2, expected_Y_proposed = task_2c()
    Y_proposed, features = forward(conf, X_batch, params, is_training=True)
    print('feature Z_1 valid?:',np.array_equal(expected_Z_1, np.round(features['Z_1'],decimals=8)))
    print('feature A_1 valid?:',np.array_equal(expected_A_1, np.round(features['A_1'],decimals=8)))
    print('feature Z_2 valid?:',np.array_equal(expected_Z_2, np.round(features['Z_2'],decimals=8)))
    print('proposed Y valid?:',np.array_equal(expected_Y_proposed, np.round(Y_proposed,decimals=8)))
    
    ################################### Task 1.3: Cross Entropy cost function
    
    # Import Cost function
    from model import cross_entropy_cost
    from tests import task_3
    
    ### Test Cost function
    Y_proposed, Y_batch, expected_cost_value, expected_num_correct = task_3()
    cost_value, num_correct = cross_entropy_cost(Y_proposed, Y_batch)
    print('Cost value valid?:',np.array_equal(np.round(expected_cost_value,decimals=4), np.round(cost_value,decimals=4)))
    print('Number of successes valid?:',np.array_equal(expected_num_correct, np.round(num_correct,decimals=4)))
    
    ################################### Task 1.4: Backward propagation
    
    # Import Derivative of the activation function [1.4a]
    from model import activation_derivative
    from tests import task_4a
    
    # Test Derivative of activation
    input_Z, expected_dg_dz = task_4a()
    dg_dz = activation_derivative(input_Z, "relu")
    print('Derivative function valid?:',np.array_equal(expected_dg_dz, np.round(dg_dz,decimals=4)))

    # Import Backward propagation [1.4b]
    from model import backward
    from tests import task_4b
    
    # Test Backward propagation
    (conf, Y_proposed, Y_batch, params, features,
     expected_grad_W_1, expected_grad_b_1, expected_grad_W_2, expected_grad_b_2) = task_4b()
    grad_params = backward(conf, Y_proposed, Y_batch, params, features)
    print('Grad_W_1 valid?:',np.array_equal(np.round(expected_grad_W_1,decimals=4), np.round(grad_params["grad_W_1"],decimals=4)))
    print('Grad_b_1 valid?:',np.array_equal(np.round(expected_grad_b_1,decimals=4), np.round(grad_params["grad_b_1"][:, np.newaxis],decimals=4)))
    print('Grad_W_2 valid?:',np.array_equal(np.round(expected_grad_W_2,decimals=4), np.round(grad_params["grad_W_2"],decimals=4)))
    print('Grad_b_2 valid?:',np.array_equal(np.round(expected_grad_b_2,decimals=4), np.round(grad_params["grad_b_2"][:, np.newaxis],decimals=4)))
    
    ################################### Task 1.5: Update parameters
    
    # Import Update
    from model import gradient_descent_update
    from tests import task_5
    
    # Test Update
    (conf, params, grad_params,
     expected_updated_W_1, expected_updated_b_1, expected_updated_W_2, expected_updated_b_2) = task_5()
    updated_params = gradient_descent_update(conf, params, grad_params)
    
    print('update of W_1 valid?:',np.array_equal(np.round(expected_updated_W_1,decimals=4), np.round(updated_params["W_1"],decimals=4)))
    print('update of b_1 valid?:',np.array_equal(np.round(expected_updated_b_1,decimals=4), np.round(updated_params["b_1"],decimals=4)))
    print('update of W_2 valid?:',np.array_equal(np.round(expected_updated_W_2,decimals=4), np.round(updated_params["W_2"],decimals=4)))
    print('update of b_2 valid?:',np.array_equal(np.round(expected_updated_b_2,decimals=4), np.round(updated_params["b_2"],decimals=4)))

    print("----------END OF TESTS-----------")
Code example #3
# Assumed imports (generator, discriminor, softmax, pretrain, gradient_penalty,
# syn_features and caculate are project-local helpers defined elsewhere)
import tensorflow as tf
from tqdm import tqdm

def wgan(att, train, unseen, seen, opt):
    # configuration #
    tf.random.set_seed(opt.random_seed)
    # configuration #

    # data loading #
    train_X = tf.transpose(train[0])
    train_y = train[1]
    train_label, train_inx = tf.unique(train_y)
    train_onehot = tf.one_hot(train_inx, depth=train_label.shape[0])
    cps_db = tf.data.Dataset.from_tensor_slices((train_X, train_onehot))
    cps_db = cps_db.shuffle(train_X.shape[0]).batch(opt.class_batch)

    # class labels are 1-indexed, so shift by 1 when gathering attribute columns
    train_att = tf.transpose(tf.gather(att, train_y - 1, axis=1))
    train_db = tf.data.Dataset.from_tensor_slices(
        (train_X, train_onehot, train_att))
    train_db = train_db.shuffle(train_X.shape[0]).batch(opt.train_batch)

    unseen_y = unseen[1]
    unseen_labels, _ = tf.unique(unseen_y)
    unseen_att = tf.gather(att, unseen_labels - 1, axis=1)
    unseen_att = tf.transpose(unseen_att)
    # data loading #

    # WGAN definition #
    generate = generator()
    discrim = discriminor()
    # the first argument is the number of unseen classes
    class_test = softmax(unseen_labels.shape[0], opt.class_regularizer)
    class_all_test = softmax(att.shape[1], opt.class_regularizer)

    zc_optimizer = tf.keras.optimizers.Nadam(opt.class_lr, beta_1=0.5)
    gzc_optimizer = tf.keras.optimizers.Nadam(opt.class_lr, beta_1=0.5)
    g_optimizer = tf.keras.optimizers.Nadam(opt.train_lr, beta_1=0.5)
    d_optimizer = tf.keras.optimizers.Nadam(opt.train_lr, beta_1=0.5)

    max_zsl = 0
    max_gu = 0
    max_gs = 0
    max_h = 0
    # WGAN definition #

    # pretrain the classifier #
    cps = pretrain(cps_db, train_label.shape[0], opt.pre_epoch,
                   opt.pre_class_lr, opt.pre_classifier_read, opt)
    # pretrain the classifier #

    # training #
    for epoch in range(opt.train_epochs):
        print("第", epoch + 1, "次迭代:")
        print("生成器和判别器训练:")

        # GAN training #
        for _, (x_b, y_b, att_b) in tqdm(enumerate(train_db)):
            for _ in range(5):  # five discriminator updates per generator update
                noise = tf.random.truncated_normal(att_b.shape)
                g_x = generate.call(att_b, noise)

                with tf.GradientTape() as tape1:
                    wd_real = tf.reduce_mean(discrim.call(x_b, att_b))
                    wd_fake = tf.reduce_mean(discrim.call(g_x, att_b))
                    gp = gradient_penalty(discrim, x_b, g_x, att_b)

                    loss_d = -wd_real + wd_fake + opt.gp_lambda * gp
                grads = tape1.gradient(loss_d, discrim.trainable_variables)
                d_optimizer.apply_gradients(
                    zip(grads, discrim.trainable_variables))

            with tf.GradientTape() as tape2:  # generator update
                noise = tf.random.truncated_normal(att_b.shape)
                g_x = generate.call(att_b, noise)

                wd_fake = tf.reduce_mean(discrim.call(g_x, att_b))
                pre_label = cps.call(g_x)
                loss_c = tf.reduce_mean(
                    tf.keras.losses.categorical_crossentropy(
                        y_b, pre_label, from_logits=False))

                loss_g = -wd_fake + loss_c * opt.cls_beita
            grads = tape2.gradient(loss_g, generate.trainable_variables)
            g_optimizer.apply_gradients(
                zip(grads, generate.trainable_variables))
        print("第%d次迭代:D->%f, G->%f" % (epoch + 1, loss_d, loss_g))
        # 训练GAN网路 #

        # synthesized data #
        generate_x, generate_y = syn_features(generate, unseen, att,
                                              opt.generate_num)  # synthesize unseen-class features
        generate_label, generate_inx = tf.unique(generate_y)
        generate_onehot = tf.one_hot(generate_inx,
                                     depth=generate_label.shape[0])
        generate_db = tf.data.Dataset.from_tensor_slices(
            (generate_x, generate_onehot))
        generate_db = generate_db.shuffle(generate_x.shape[0]).batch(
            opt.class_batch)

        all_x = tf.concat([train_X, generate_x], axis=0)
        all_y = tf.concat([train_y, generate_y], axis=0)
        all_label, all_inx = tf.unique(all_y)

        mask = [0] * all_label.shape[0]
        for label in train_label:
            loc = tf.where(all_label == label)
            mask[loc[0][0]] += 1
        mask = tf.convert_to_tensor(mask, dtype=tf.float32)

        all_onehot = tf.one_hot(all_inx, depth=all_label.shape[0])
        all_db = tf.data.Dataset.from_tensor_slices((all_x, all_onehot))
        all_db = all_db.shuffle(all_x.shape[0]).batch(opt.class_batch)
        # synthesized data #

        print("分类器训练:")
        mid_zsl = 0
        mid_H = 0
        mid_gu = 0
        mid_gs = 0

        # train classifiers and evaluate accuracy #
        for _ in tqdm(range(opt.valid_epoch)):
            for _, (test_x, test_y) in enumerate(generate_db):  # ZSL classifier training
                with tf.GradientTape() as tape3:
                    pre_y = class_test(test_x)
                    loss_regular = tf.add_n(class_test.losses)
                    loss_zc = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            test_y, pre_y, from_logits=False)) + loss_regular
                grads = tape3.gradient(loss_zc, class_test.trainable_variables)
                zc_optimizer.apply_gradients(
                    zip(grads, class_test.trainable_variables))

            for _, (all_x, all_y) in enumerate(all_db):  # GZSL classifier training
                with tf.GradientTape() as tape4:
                    pre_y = class_all_test(all_x)
                    loss_regular = tf.add_n(class_all_test.losses)
                    loss_gzc = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            all_y, pre_y, from_logits=False)) + loss_regular
                grads = tape4.gradient(loss_gzc,
                                       class_all_test.trainable_variables)
                gzc_optimizer.apply_gradients(
                    zip(grads, class_all_test.trainable_variables))

            details = caculate(class_test, unseen, generate_label)
            ac_unseen = tf.reduce_mean(details)
            gdetails = caculate(class_all_test, unseen, all_label, True,
                                opt.calibrate, mask)
            gac_unseen = tf.reduce_mean(gdetails)
            gac_seen = caculate(class_all_test, seen, all_label, True,
                                opt.calibrate, mask)
            gac_seen = tf.reduce_mean(gac_seen)
            H = (2 * gac_unseen * gac_seen) / (gac_seen + gac_unseen)
            if ac_unseen > mid_zsl:
                mid_zsl = ac_unseen
            if H > mid_H:
                mid_H = H
                mid_gu = gac_unseen
                mid_gs = gac_seen

        if mid_zsl > max_zsl:
            max_zsl = mid_zsl
            generate.save_weights('ckpt/generator.ckpt')
            discrim.save_weights('ckpt/discriminor.ckpt')
        if mid_H > max_h:
            max_h = mid_H
            max_gu = mid_gu
            max_gs = mid_gs
            generate.save_weights('ckpt/gzsl_generate.ckpt')
            discrim.save_weights('ckpt/gzsl_discrim.ckpt')

        print("第%d次迭代,ZSL精确度为:%f" % (epoch + 1, mid_zsl * 100))
        print("第%d次迭代,GZSL不可见类精确度为:%f" % (epoch + 1, mid_gu * 100))
        print("第%d次迭代,GZSL可见类精确度为:%f" % (epoch + 1, mid_gs * 100))
        print("第%d次迭代,GZSL综合指标精确度为:%f" % (epoch + 1, mid_H * 100))
        print("当前最大:ZSL->%f, GZSL-unseen->%f, GZSL-seen->%f, GZSL-H->%f" %
              (max_zsl * 100, max_gu * 100, max_gs * 100, max_h * 100))
        print("\n")
        # train classifiers and evaluate accuracy #
    # training #

    return (max_zsl, max_gu, max_gs, max_h)
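
The discriminator loss above calls a gradient_penalty helper that is not shown. A minimal sketch of what a WGAN-GP penalty presumably computes here (interpolate between real and generated features, then penalize the deviation of the critic's gradient norm from 1; gradient_penalty_sketch is a hypothetical stand-in):

import tensorflow as tf

def gradient_penalty_sketch(discrim, real_x, fake_x, att_b):
    # one random interpolation point per sample in the batch
    eps = tf.random.uniform([tf.shape(real_x)[0], 1], 0.0, 1.0)
    inter = eps * real_x + (1.0 - eps) * fake_x
    with tf.GradientTape() as tape:
        tape.watch(inter)
        score = discrim.call(inter, att_b)
    grads = tape.gradient(score, inter)
    norm = tf.norm(grads, axis=1)
    return tf.reduce_mean(tf.square(norm - 1.0))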
Code example #4
File: airun.py Project: robertchoi/autonomousCar
    while True:
        _, full_image = c.read()
        #full_image = cv2.resize(full_image, (320,240))
        # NOTE: scipy.misc.imresize was removed in SciPy 1.3; a cv2.resize
        # equivalent is sketched after this example.
        image = scipy.misc.imresize(full_image[cfg.modelheight:], [66, 200]) / 255.0
        image1 = scipy.misc.imresize(full_image[cfg.modelheight:], [66*2, 200*2])

        #cv2.imshow('original',full_image)
        #cv2.imshow("view of AI", cv2.cvtColor(image1, cv2.COLOR_RGB2BGR))
        cv2.imshow("view of AI", image1)


        wheel = model.y.eval(session=sess,feed_dict={model.x: [image], model.keep_prob: 1.0})
        cfg.wheel = np.argmax(wheel, axis=1)
        #print('wheel value:', cfg.wheel, wheel)
        print('wheel value:', cfg.wheel, model.softmax(wheel))

    
        k = cv2.waitKey(5)
        if k == ord('q'):  #'q' key to stop program
            break

        """ Toggle Start/Stop motor movement """
        if k == ord('a'): 
            if start_flag == False: 
                start_flag = True
            else:
                start_flag = False
            print('start flag:',start_flag)
   
        #to avoid collision when ultrasonic sensor is available
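
Since scipy.misc.imresize is gone from modern SciPy, the two resize calls can be replaced with cv2.resize; note that cv2.resize takes (width, height) rather than [height, width]. A hedged drop-in, assuming full_image and cfg from the loop above:

import cv2

image = cv2.resize(full_image[cfg.modelheight:], (200, 66)) / 255.0
image1 = cv2.resize(full_image[cfg.modelheight:], (200 * 2, 66 * 2))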
Code example #5
            model.istate: state,
        }

        fetch = [
            model.pi, model.mu1, model.mu2, model.sigma1, model.sigma2,
            model.rho, model.evt, model.eos, model.final_state
        ]
        [pi, mu1, mu2, sigma1, sigma2, rho, evt, eos,
         state] = sess.run(fetch, feed)

        #store internal outputs
        EOS.append(eos)
        pis.append(pi.squeeze())

        #keep track of fixation duration
        evt = softmax(evt)
        if np.argmax(evt) == 0:
            fix_len += 1
        else:
            fix_len = 0

        #sample from the generated gaussians
        idx = np.random.choice(pi.shape[1], p=pi[0])
        x1, x2 = sample_gaussian2d(mu1[0][idx], mu2[0][idx], sigma1[0][idx],
                                   sigma2[0][idx], rho[0][idx])

        #handle end of sequence (event) flag
        if (eos > 0.5) or (fix_len > max_fix_len):
            eos = 1
        else:
            eos = 0
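
The sampling step above relies on a sample_gaussian2d helper that is not shown. A plausible sketch, assuming the usual mixture-density parameterization (means mu1/mu2, standard deviations sigma1/sigma2, correlation rho; sample_gaussian2d as defined here is a hypothetical reconstruction):

import numpy as np

def sample_gaussian2d(mu1, mu2, s1, s2, rho):
    # assemble the 2x2 covariance from the std devs and correlation,
    # then draw a single sample from the bivariate normal
    mean = [mu1, mu2]
    cov = [[s1 * s1, rho * s1 * s2],
           [rho * s1 * s2, s2 * s2]]
    x1, x2 = np.random.multivariate_normal(mean, cov)
    return x1, x2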