Example #1
def train_reid_an_iter(config,
                       loaders,
                       base,
                       only_one,
                       target_loader_iter=None):

    if only_one:

        source_images, _, source_ids, source_cams = loaders.reid_source_train_iter.next_one()
        source_images, source_ids = source_images.to(
            base.device), source_ids.to(base.device)
        _, source_feature_vectors, source_cls_score = base.encoder(
            source_images, True, sl_enc=False)
        # losses: classification loss and triplet loss

        source_cls_loss = base.ide_creiteron(source_cls_score, source_ids,
                                             loaders.num_source_pids)
        triplet_loss_1 = base.triplet_creiteron(source_feature_vectors,
                                                source_ids)
        source_acc = accuracy(source_cls_score, source_ids, [1])[0]

    else:
        assert target_loader_iter is not None, "The labeled target loader should not be None"
        source_images, _, source_ids, source_cams = loaders.reid_source_train_iter.next_one()
        source_images, source_ids = source_images.to(
            base.device), source_ids.to(base.device)
        _, source_feature_vectors, source_cls_score = base.encoder(
            source_images, True, sl_enc=False)
        # losses: classification loss and triplet loss
        source_cls_loss = base.ide_creiteron(source_cls_score, source_ids,
                                             loaders.num_source_pids)
        triplet_loss_1 = base.triplet_creiteron(source_feature_vectors,
                                                source_ids)
        source_acc = accuracy(source_cls_score, source_ids, [1])[0]

        target_images, _, target_ids, ir_cams = target_loader_iter.next_one()
        target_images, target_ids = target_images.to(
            base.device), target_ids.to(base.device)
        # two losses for generated dataset
        _, target_feature_vectors, target_cls_score = base.encoder(
            target_images, True, sl_enc=False)

        target_cls_loss = base.ide_creiteron(target_cls_score, target_ids)
        triplet_loss_2 = base.triplet_creiteron(target_feature_vectors,
                                                target_ids)
        triplet_loss = (triplet_loss_1 + triplet_loss_2) / 2.0
        cls_loss = (source_cls_loss + target_cls_loss) / 2.0
        target_acc = accuracy(target_cls_score, target_ids, [1])[0]
        acc = torch.Tensor([source_acc, target_acc])
        return cls_loss, triplet_loss, acc

    return source_cls_loss, triplet_loss_1, source_acc
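
A hypothetical driver for the function above, showing how a caller might combine the returned losses and run the optimizer step outside; base.optimizer and loaders.reid_target_train_iter are assumed names, not taken from the original repository.

# hypothetical usage sketch; the names below are assumptions
cls_loss, triplet_loss, acc = train_reid_an_iter(
    config, loaders, base, only_one=False,
    target_loader_iter=loaders.reid_target_train_iter)
loss = cls_loss + triplet_loss
base.optimizer.zero_grad()
loss.backward()
base.optimizer.step()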
Example #2
def train_an_epoch(config, base, loaders):

    base.set_train()
    meter = MultiItemAverageMeter()

    ### we treat 200 iterations as an epoch
    for _ in range(200):

        ### load a batch data
        imgs, pids, _ = loaders.train_iter.next_one()
        imgs, pids = imgs.to(base.device), pids.to(base.device)

        ### forward
        logits_list, embeddings_list = base.model(imgs)

        ### loss
        ide_loss, avg_logits = base.compute_ide_loss(logits_list, pids)
        source_acc = accuracy(avg_logits, pids, [1])[0]

        ### optimize
        base.optimizer.zero_grad()
        ide_loss.backward()
        base.optimizer.step()

        ### record
        meter.update({'ide_loss': ide_loss, 'acc': source_acc})

    return meter.get_val(), meter.get_str()
Example #3
def train_an_epoch(config, base, loaders):

    base.set_train()
    meter = MultiItemAverageMeter()

    ### we treat 200 iterations as an epoch
    for _ in range(200):

        ### load a batch data
        imgs, pids, _ = loaders.train_iter.next_one()
        imgs, pids = imgs.to(base.device), pids.to(base.device)

        ### forward
        features, cls_score = base.model(imgs)

        ### loss
        ide_loss = base.ide_creiteron(cls_score, pids)
        triplet_loss = base.triplet_creiteron(features, features, features,
                                              pids, pids, pids)
        loss = ide_loss + triplet_loss
        acc = accuracy(cls_score, pids, [1])[0]

        ### optimize
        base.optimizer.zero_grad()
        loss.backward()
        base.optimizer.step()

        ### record
        meter.update({
            'ide_loss': ide_loss.data,
            'triplet_loss': triplet_loss.data,
            'acc': acc
        })

    return meter.get_val(), meter.get_str()
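
These snippets call an accuracy helper as accuracy(logits, targets, [1])[0] or with topk=(1, 5); a minimal top-k sketch consistent with both call styles (an assumption, not the actual tools implementation of any repository here):

import torch

def accuracy(output, target, topk=(1,)):
    # percentage of samples whose target appears among the top-k predictions
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)  # (N, maxk)
    correct = pred.eq(target.view(-1, 1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:, :k].float().sum()
        res.append(correct_k * (100.0 / target.size(0)))
    return res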
Example #4
def train_running():
    with tf.name_scope('input'):

        train_batch, train_label_batch, _ = input_data.get_batch(train_txt, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        val_batch, val_label_batch, _ = input_data.get_batch(val_txt, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

    model = models.model(x, N_CLASSES)
    model.AlexNet()
    logits = model.fc3

    loss = tools.loss(logits, y_)
    acc = tools.accuracy(logits, y_)
    train_op = tools.optimize(loss, LEARNING_RATE)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
        val_writer = tf.summary.FileWriter(logs_val_dir, sess.graph)

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break

                tra_images, tra_labels = sess.run([train_batch, train_label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, acc],
                                                feed_dict={x: tra_images, y_: tra_labels})

                if step % 50 == 0:
                    print('Step %d, train loss = %.4f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc))
                    summary_str = sess.run(summary_op, feed_dict={x: tra_images, y_: tra_labels})
                    train_writer.add_summary(summary_str, step)
                #
                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    val_images, val_labels = sess.run([val_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, acc],
                                                 feed_dict={x: val_images, y_: val_labels})

                    print('**  Step %d, val loss = %.4f, val accuracy = %.2f%%  **' % (step, val_loss, val_acc))
                    summary_str = sess.run(summary_op, feed_dict={x: val_images, y_: val_labels})
                    val_writer.add_summary(summary_str, step)
                    #
                if step % 2000 == 0 or (step + 1) == MAX_STEP:
                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
Example #5
def train(trainloader, model, warmup_scheduler, criterion, criterion1,
          optimizer, epoch, use_cuda):

    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    losses1 = AverageMeter()
    losses2 = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    bar = Bar('Processing', max=len(trainloader))
    for batch_index, (images, labels) in enumerate(trainloader):
        if epoch <= args.warm:
            warmup_scheduler.step()

        images = Variable(images)
        labels = Variable(labels)

        labels = labels.cuda()
        images = images.cuda()

        optimizer.zero_grad()
        outputs, features = model(images)
        loss1 = criterion(outputs, labels)
        loss = loss1  #+ 0.2*loss2

        prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        losses1.update(loss1.item(), images.size(0))
        losses2.update(loss1.item(), images.size(0))  # loss2 is disabled above; tracks loss1 as a placeholder
        top1.update(prec1.item(), images.size(0))
        top5.update(prec5.item(), images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s |  Loss_softmax: {loss1:.4f} |  Loss_aj: {loss2:.4f} |top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_index + 1,
            size=len(trainloader),
            data=data_time.avg,
            bt=batch_time.avg,
            loss1=losses1.avg,
            loss2=losses2.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
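
Several of the examples above and below depend on an AverageMeter with update(val, n) and an .avg attribute; a minimal sketch in the style of the standard PyTorch ImageNet example (assumed here, since the meters ship with each repository; some variants also take a display name and format string, as in AverageMeter('Time', ':.3f')):

class AverageMeter:
    """Tracks the latest value and a running average of a scalar metric."""

    def __init__(self, name='', fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count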
Example #6
def train_an_epoch(config, base, loaders, epoch=None):

	base.set_train()
	meter = MultiItemAverageMeter()

	### we treat config.steps iterations as an epoch
	base.lr_scheduler.step(epoch)
	for _ in range(config.steps):

		### load a batch data
		imgs, pids, _ = loaders.train_iter.next_one()
		imgs, pids = imgs.to(base.device), pids.to(base.device)

		if 'res' in config.cnnbackbone:
			### forward
			features, cls_score = base.model(imgs)
			### loss
			ide_loss = base.ide_creiteron(cls_score, pids)
			triplet_loss = base.triplet_creiteron(features, features, features, pids, pids, pids)
			loss = ide_loss + triplet_loss
			acc = accuracy(cls_score, pids, [1])[0]
			### optimize
			base.optimizer.zero_grad()
			loss.backward()
			base.optimizer.step()
			### record
			meter.update({'ide_loss': ide_loss.data, 'triplet_loss': triplet_loss.data, 'acc': acc})
		elif config.cnnbackbone == 'osnetain':
			### forward
			if epoch < 10:
				cls_score = base.model(imgs, fixed_cnn=True)
			else:
				cls_score = base.model(imgs, fixed_cnn=False)
			### loss
			ide_loss = base.ide_creiteron(cls_score, pids)
			acc = accuracy(cls_score, pids, [1])[0]
			### optimize
			base.optimizer.zero_grad()
			ide_loss.backward()
			base.optimizer.step()
			### record
			meter.update({'ide_loss': ide_loss.data, 'acc': acc})

	return meter.get_val(), meter.get_str()
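
The ReID snippets record metrics through a MultiItemAverageMeter with update(dict), get_val() and get_str(); a plausible sketch of that interface, assuming scalar values (not the original class):

class MultiItemAverageMeter:

    def __init__(self):
        self.content = {}

    def update(self, val_dict):
        # accumulate each named scalar (tensors are converted via .item())
        for key, val in val_dict.items():
            value = val.item() if hasattr(val, 'item') else float(val)
            entry = self.content.setdefault(key, {'sum': 0.0, 'count': 0})
            entry['sum'] += value
            entry['count'] += 1

    def get_val(self):
        return {k: v['sum'] / v['count'] for k, v in self.content.items()}

    def get_str(self):
        return '; '.join('{}: {:.4f}'.format(k, v)
                         for k, v in self.get_val().items())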
Example #7
def test_batch():

    with tf.Graph().as_default():

        tra_image_batch, tra_label_batch = get_batch(img_batch_list, lab_batch_list)

        logits, _ = vgg.vgg_16(tra_image_batch, num_classes=5, is_training=False)

        loss = slim.losses.softmax_cross_entropy(logits, tra_label_batch)
        accuracy = tools.accuracy(logits, tra_label_batch)

        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(model_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')

                num_step = int(math.floor(408 / 32))
                num_sample = num_step * 32
                step = 0
                total_correct = 0
                total_loss = 0
                while step < num_step and not coord.should_stop():
                    batch_accuracy = sess.run(accuracy)
                    batch_loss = sess.run(loss)
                    total_correct += np.sum(batch_accuracy)
                    total_loss += np.sum(batch_loss)
                    step += 1
                    print(batch_accuracy)
                    print(batch_loss)
                print('Total testing samples: %d' % num_sample)
                print('Average accuracy: %.2f%%' % (total_correct / step))
                print('Average loss: %.2f' % (total_loss / step))

            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
Example #8
def train_reid_an_iter(config, loaders, base):

    ### load data
    rgb_images, rgb_ids, rgb_cams, _ = loaders.reid_rgb_train_iter.next_one()
    ir_images, ir_ids, ir_cams, _ = loaders.reid_ir_train_iter.next_one()
    rgb_images, ir_images = rgb_images.to(base.device), ir_images.to(
        base.device)
    rgb_ids, ir_ids = rgb_ids.to(base.device), ir_ids.to(base.device)
    assert torch.equal(rgb_ids, ir_ids)

    ### compute feature
    _, rgb_feature_vectors, rgb_cls_score = base.encoder(rgb_images,
                                                         True,
                                                         sl_enc=False)
    _, ir_feature_vectors, ir_cls_score = base.encoder(ir_images,
                                                       True,
                                                       sl_enc=False)

    ### compute loss
    rgb_cls_loss = base.ide_creiteron(rgb_cls_score, rgb_ids)
    ir_cls_loss = base.ide_creiteron(ir_cls_score, ir_ids)
    cls_loss = (rgb_cls_loss + ir_cls_loss) / 2.0

    triplet_loss_1 = base.triplet_creiteron(rgb_feature_vectors,
                                            ir_feature_vectors,
                                            ir_feature_vectors, rgb_ids,
                                            ir_ids, ir_ids)
    triplet_loss_2 = base.triplet_creiteron(ir_feature_vectors,
                                            rgb_feature_vectors,
                                            rgb_feature_vectors, ir_ids,
                                            rgb_ids, rgb_ids)
    triplet_loss = (triplet_loss_1 + triplet_loss_2) / 2.0

    ### acc
    rgb_acc = accuracy(rgb_cls_score, rgb_ids, [1])[0]
    ir_acc = accuracy(ir_cls_score, ir_ids, [1])[0]
    acc = torch.Tensor([rgb_acc, ir_acc])

    return cls_loss, triplet_loss, acc
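
The six-argument triplet_creiteron above receives anchor, positive and negative embeddings together with their identity labels; one plausible batch-hard reading of that interface (a sketch under the assumption that every anchor has at least one positive and one negative in the batch, not the original loss):

import torch
import torch.nn.functional as F

def triplet_creiteron(emb_a, emb_p, emb_n, ids_a, ids_p, ids_n, margin=0.3):
    dist_ap = torch.cdist(emb_a, emb_p)                      # (Na, Np)
    dist_an = torch.cdist(emb_a, emb_n)                      # (Na, Nn)
    same = ids_a.view(-1, 1).eq(ids_p.view(1, -1)).float()   # same-identity mask
    diff = ids_a.view(-1, 1).ne(ids_n.view(1, -1)).float()   # different-identity mask
    # hardest positive: largest distance among same-identity pairs
    hardest_pos = (dist_ap * same - (1.0 - same) * 1e6).max(dim=1).values
    # hardest negative: smallest distance among different-identity pairs
    hardest_neg = (dist_an + (1.0 - diff) * 1e6).min(dim=1).values
    return F.relu(hardest_pos - hardest_neg + margin).mean()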
Example #9
def validate(val_loader, backbone, model, acc_prefixes, args):
    batch_time = AverageMeter('Time', ':.3f')

    # switch to evaluate mode
    model.eval()

    # TODO: Aniruddha
    pred_var_stack, labels_var_stack = [torch.Tensor()]*5, torch.Tensor()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

            # compute output
            features = backbone(images)
            outputs = model(features)

            if not i:
                acc_meters = [
                    NoBatchAverageMeter('', ':11.2f')
                    for i in range(len(outputs))
                ]
                progress = NoTabProgressMeter(
                    len(val_loader),
                    [batch_time, *acc_meters],
                    prefix='Test: ')

            # measure accuracy
            for output, acc_meter in zip(outputs, acc_meters):
                acc1, _ = accuracy(output, target, topk=(1, 5))
                acc_meter.update(acc1[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0 or i == len(val_loader)-1:
                line = progress.display(i)
                len_prefixes = len(acc_prefixes) * len(acc_prefixes[0])
                prefix_line = ' ' * (len(line) - len_prefixes)
                prefix_line += ''.join(acc_prefixes)
                logger.info(prefix_line)
                logger.info(line)
            
            for layer_id in range(5):
                pred_var_stack[layer_id] = torch.cat((pred_var_stack[layer_id], outputs[layer_id].cpu()), dim=0)
            labels_var_stack = torch.cat((labels_var_stack, target.cpu()), dim=0)

    return acc_meters, pred_var_stack, labels_var_stack
Example #10
def train(train_loader, backbone, model, optimizer, acc_prefixes, epoch, args):
    batch_time = AverageMeter('B', ':.2f')
    data_time = AverageMeter('D', ':.2f')

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        with torch.no_grad():
            features = backbone(images)
        outputs = model(features)

        if not i:
            acc_meters = [
                NoBatchAverageMeter('', ':>11.2f')
                for i in range(len(outputs))
            ]
            progress = NoTabProgressMeter(
                len(train_loader),
                [batch_time, data_time, *acc_meters],
                prefix="Epoch: [{}]".format(epoch))

        # measure accuracy
        optimizer.zero_grad()
        for output, acc_meter in zip(outputs, acc_meters):
            loss = F.cross_entropy(output, target)
            loss.backward()
            acc1, _ = accuracy(output, target, topk=(1, 5))
            acc_meter.update(acc1[0], images.size(0))
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            line = progress.display(i)
            len_prefixes = len(acc_prefixes) * len(acc_prefixes[0])
            prefix_line = ' ' * (len(line) - len_prefixes)
            prefix_line += ''.join(acc_prefixes)
            logger.info(prefix_line)
            logger.info(line)
Example #11
def NaiveBayesModelMain():
    # create the train set, train labels, test set and test labels from tools
    train, trainLabel, test, testLabel = tl.createDataSet()
    # declare the NaiveBayes model and fit its parameters with the train set and labels
    NaiveBayesModel = NaiveBayes(train, trainLabel)
    # test the model on the test data
    predictionLabel = NaiveBayesModel.prediction(test)
    # compute the accuracy, recall and F-value of the predictions against testLabel
    acc = tl.accuracy(predictionLabel, testLabel)
    rec = tl.recall(predictionLabel, testLabel)
    F = tl.Fvalue(predictionLabel, testLabel)
    # print the acc, rec and F
    print('NaiveBayesModel Accuracy : ' + str(acc))
    print('NaiveBayesModel Recall   : ' + str(rec))
    print('NaiveBayesModel F-value  : ' + str(F))
Example #12
def eval_training(testloader, model, criterion, epoch, use_cuda):
    global best_acc

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    bar = Bar('Processing', max=len(testloader))

    for batch_index, (images, labels) in enumerate(testloader):
        images = Variable(images)
        labels = Variable(labels)

        images = images.cuda()
        labels = labels.cuda()

        outputs, _ = model(images)
        loss = criterion(outputs, labels)
        prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(prec1.item(), images.size(0))
        top5.update(prec5.item(), images.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
            batch=batch_index + 1,
            size=len(testloader),
            data=data_time.avg,
            bt=batch_time.avg,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            top1=top1.avg,
            top5=top5.avg,
        )
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
Example #13
def evaluate(self):
    self.model.eval()
    Loss = CrossEntropyLoss(weight=torch.Tensor([0.9, 1, 1])).to(
        self.device)
    loss = 0.0
    acc = 0.0
    with torch.no_grad():
        for i in range(10):
            images, labels = next(self.test_generator)
            images = images.to(self.device)
            labels = labels.to(self.device)
            logits = self.model(images)
            loss += Loss(logits, labels)
            acc += accuracy(logits, labels)
    self.model.train()
    return loss / 10, acc / 10
Example #14
def run(batch_size=300, learning_rate=0.01):
    #region create network
    data, label = CifarInput.read_cifar10(
        r"C:\Projects\Programming\CsDiscount\cifar-10-binary", True,
        batch_size, True)
    logit = CapsNet.CapsNet(data, batch_size)
    reconstruction = tools.decoder(logit)
    reconstruction_p = tf.placeholder(dtype=tf.float32,
                                      shape=[batch_size, 32, 32, 3])
    print("Network Created")
    #endregion

    #region create optimizer
    global_step = tf.Variable(0, trainable=False, name="global_step")
    loss = tools.loss(logit, label, data, reconstruction_p, batch_size)
    accuracy = tools.accuracy(logit, label)
    train_op = tools.optimize(loss, learning_rate, global_step)
    print("Optimizer Created")
    #endregion

    #region create sessions, queues and savers
    sess = tf.Session()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()
    train_summary_writer = tf.summary.FileWriter(train_log_dir)
    sess.run(init)
    print("Sessions, Queues and Savers Created")
    #endregion

    for x in range(1000):
        print(x)
        reconstruction_run = sess.run(reconstruction)
        sess.run(train_op, feed_dict={reconstruction_p: reconstruction_run})
        if x % 5 == 0:
            mainwindow.newimg(reconstruction_run[0])

        if x % 100 == 0:
            print(sess.run(accuracy))
            checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
            saver.save(sess, save_path=checkpoint_path, global_step=x)
Example #15
def evaluate(self):

    self.model.eval()
    loss = 0.0
    acc = 0.0

    with torch.no_grad():

        for i in range(2):

            images, Offset, Judge1, Judge2, true_box_coors = next(self.test_generator)

            images, Offset, Judge1, Judge2 = images.to(self.device), Offset.to(self.device), Judge1.to(self.device), Judge2.to(self.device)

            class_sum, loc_sum = self.model(images)
            loss_l, loss_c = self.Loss(class_sum, loc_sum, Offset, Judge1, Judge2)
            loss += (10 * loss_l + loss_c)
            acc += accuracy(class_sum, loc_sum, true_box_coors)

    self.model.train()
    return loss / 2, acc / 2
Example #16
def DecisionTreeModelMain():
    # extract the training set, test set and their labels from fruit.txt
    trainSet, labels, testSet, testLabels = dp.createDataSet()
    # min-max scaling
    #tl.maxminScalar(trainSet)
    #tl.maxminScalar(testSet)

    # build the tree with the Gini criterion and prune it
    Tree = buildDecisionTree(trainSet, evaluationFunc=tl.gini)
    tl.pruneTree(Tree, 0.3, evaluationFunc=tl.gini)


    # plot the decision tree and save it as fruit.png
    res = dp.plot(Tree)
    dot_data = dp.dotgraph(Tree)
    graph = pydotplus.graph_from_dot_data(dot_data)
    # if the graphViz module is missing, fall back to the pre-rendered png in ./DecisionTreeResultPng/
    if graph is None or dot_data is None:
        fruitPngBackup = cv2.imread("./DecisionTreeResultPng/fruit7.png")
        cv2.imshow("fruitPngBackup.png", fruitPngBackup)
        cv2.waitKey(4000)
    else:
        graph.write_png("fruit.png")

    # read fruit.png
    # display fruit.png; the window closes automatically after 4 seconds
    fruitPng = cv2.imread("./fruit.png")
    cv2.imshow('fruitPng.png', fruitPng)
    cv2.waitKey(4000)

    # test step
    accu = tl.accuracy(testSet, testLabels, Tree)
    rec  = tl.recall(testSet, testLabels, Tree, len(trainSet) + len(testSet))
    F    = tl.fValue(testSet, testLabels, Tree, len(trainSet) + len(testSet))
    
    #print the acc, rec and F
    print('DecisionTree Accuracy : ' + str(accu))
    print('DecisionTree Recall   : ' + str(rec))
    print('DecisionTree F-value  : ' + str(F))
Example #17
def validation(loader, model, writer):
    model.eval()
    total_loss = cuda.FloatTensor([0.])
    total_accu = cuda.FloatTensor([0.])

    widgets = [
        "processing: ",
        progressbar.Percentage(),
        " ",
        progressbar.ETA(),
        " ",
        progressbar.FileTransferSpeed(),
    ]
    bar = progressbar.ProgressBar(widgets=widgets,
                                  max_value=len(loader)).start()
    for i, batch in enumerate(loader):
        bar.update(i)
        batch_data, batch_label = batch

        batch_data = Variable(batch_data).cuda()
        batch_label = Variable(batch_label).cuda()
        logits = model(batch_data)

        loss = model.criterion(logits, batch_label)
        accu = tools.accuracy(logits=logits, targets=batch_label).data
        total_accu.add_(accu)

        total_loss.add_(loss.data)

    mean_loss = total_loss.cpu().numpy() / (i + 1)
    mean_accu = total_accu.cpu().numpy() / (i + 1)
    writer.add_scalar('val/loss', scalar_value=mean_loss, global_step=EPOCH)
    writer.add_scalar('val/accu', scalar_value=mean_accu, global_step=EPOCH)

    print('')
    print("validation-->epoch:{},mean_loss:{}, mean_accuracy:{}".format(
        EPOCH, mean_loss, mean_accu))
    bar.finish()
Example #18
def infer(valid_queue, model, criterion,args):
  objs = tools.AvgrageMeter()
  top1 = tools.AvgrageMeter()
  top5 = tools.AvgrageMeter()
  model.eval()
  
  for step, (input, target) in enumerate(valid_queue):
    input = Variable(input, volatile=True).cuda()
    target = Variable(target, volatile=True).cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7

    logits = model(input)
    loss = criterion(logits, target)

    prec1, prec5 = tools.accuracy(logits, target, topk=(1, 5))
    n = input.size(0)
    objs.update(loss.item(), n)
    top1.update(prec1.item(), n)
    top5.update(prec5.item(), n)

    if step % args.report_freq == 0:
      logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

  return top1.avg, objs.avg
Example #19
def test_running():
    with tf.Graph().as_default():

        mnist = input_data.read_data_sets('../MNIST_data/', one_hot=True)

        x = tf.placeholder(tf.float32, shape=[None, 784])
        x_reshape = tf.reshape(x, [-1, 28, 28, 1])
        y_ = tf.placeholder(tf.float32, [None, num_classes])

        model = models.Model(x_reshape, num_classes)
        model.lenet5()
        logits = model.logits

        acc = tools.accuracy(logits, y_)

        with tf.Session() as sess:

            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(model_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            test_acc = sess.run(acc,
                                feed_dict={
                                    x: mnist.test.images,
                                    y_: mnist.test.labels
                                })
            print('test accuracy: %.2f%%' % (test_acc))
Example #20
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr,args):
  objs = tools.AvgrageMeter()
  top1 = tools.AvgrageMeter()
  top5 = tools.AvgrageMeter()

  for step, (input, target) in enumerate(train_queue):
    model.train()
    n = input.size(0)

    input = Variable(input, requires_grad=False).cuda()
    target = Variable(target, requires_grad=False).cuda(non_blocking=True)

    # get a random minibatch from the search queue with replacement
    input_search, target_search = next(iter(valid_queue))
    input_search = Variable(input_search, requires_grad=False).cuda()
    target_search = Variable(target_search, requires_grad=False).cuda(non_blocking=True)

    architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
    
    optimizer.zero_grad()
    logits = model(input)
    loss = criterion(logits, target)

    loss.backward()
    nn.utils.clip_grad_norm_(model.weight_parameters(), args.grad_clip)
    optimizer.step()

    prec1, prec5 = tools.accuracy(logits, target, topk=(1, 5))
    objs.update(loss.item(), n)
    top1.update(prec1.item(), n)
    top5.update(prec5.item(), n)

    if step % args.report_freq == 0:
      logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)

  return top1.avg, objs.avg
Example #21
def train():
    pre_trained_weights = './/vgg16_pretrain//vgg16.npy'
    data_dir = './/data//cifar-10-batches-bin//'
    train_log_dir = './/logs//train//'
    val_log_dir = './/logs//val//'

    with tf.name_scope('input'):
        tra_image_batch, tra_label_batch = input_data.read_cifar10(
            data_dir=data_dir,
            is_train=True,
            batch_size=BATCH_SIZE,
            shuffle=True)
        val_image_batch, val_label_batch = input_data.read_cifar10(
            data_dir=data_dir,
            is_train=False,
            batch_size=BATCH_SIZE,
            shuffle=False)

    logits = VGG.VGG16N(tra_image_batch, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, tra_label_batch)
    accuracy = tools.accuracy(logits, tra_label_batch)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])
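    # note: loss and accuracy above are built directly on the queue batches,
    # so the values fed through x and y_ below never reach the computed graph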

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # load the parameter file, assign the parameters, skip the specific layers
    tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run(
                [tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

                summary_str = sess.run(summary_op)
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
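
The TF1 snippets repeatedly call tools.accuracy(logits, labels) and print the result as a percentage; a minimal sketch consistent with that usage, assuming one-hot labels as in the example above (an assumption, not the actual tools module):

import tensorflow as tf

def accuracy(logits, labels):
    # percentage of samples whose argmax prediction matches the one-hot label
    with tf.name_scope('accuracy') as scope:
        correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        acc = tf.reduce_mean(tf.cast(correct, tf.float32)) * 100.0
        tf.summary.scalar(scope + 'accuracy', acc)
    return acc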
Example #22
]

for epoch in range(training_epochs):
    model.train()

    for meter in [train_accuracy, test_accuracy, train_loss, test_loss]:
        meter.reset()

    for index, (data_in, label) in enumerate(train_dataloader):
        output = model(data_in)
        loss = loss_fn(output, label)
        train_loss.update_with_weight(loss.item(), label.shape[0])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        top1 = accuracy(output, label)[0]
        train_accuracy.update_with_weight(top1, label.shape[0])

    model.eval()

    for data_in, label in test_dataloader:
        output = model(data_in)
        loss = loss_fn(output, label)
        test_loss.update_with_weight(loss.item(), label.shape[0])
        top1 = accuracy(output, label)[0]
        test_accuracy.update_with_weight(top1, label.shape[0])

    logger.info(" | ".join([
        f'Epoch[{epoch}/{training_epochs}]',
        f'Train Loss:{train_loss.avg: .3f}',
        f'Train Accuracy:{train_accuracy.avg: .3f}%',
Example #23
def train(train_img, given_dataset_labels, noise_ratio, class_no=0, random_seed=1, dataset='cifar10', batch_size=128, epochs=45,
          test_data='cifar10_data/test_images.npy', test_lb='cifar10_data/test_labels.npy'):

    if dataset == 'cifar10':
        #synthesize alpha-increments or baseline
        noisy_lb = manipulate_labels(given_dataset_labels, dataset,
                                     noise_ratio, class_no, random_seed)

        make_determine()
        train_data = cifar10_train(train_img, noisy_lb,
                                   no_rc_transform_train(dataset),
                                   transform_target)
        train_loader = torch.utils.data.DataLoader(train_data,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=2,
                                                   drop_last=False)
        data_length = len(train_data)

        make_determine()
        test_data = cifar10_test(test_data, test_lb, transform_test(dataset),
                                 transform_target)
        test_loader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=2,
                                                  drop_last=False)

        make_determine()
        model = cifar_10_CNN()
        model.fc_layer_2.train(False)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if (torch.cuda.device_count() > 1):
        model = nn.DataParallel(model)
    optimizer = optim.SGD(model.parameters(),
                          lr=0.01,
                          momentum=0.9,
                          weight_decay=1e-4)
    start_epoch = 0

    criterion = nn.CrossEntropyLoss()
    model = model.to(device)

    N = 1280
    no_LID_sequences = 50
    LID_file = dataset + '_size_' + str(data_length) + '_indices.csv'
    if not os.path.isfile(LID_file):
        with open(LID_file, 'a', newline='') as csvFile:
            writer = csv.writer(csvFile)
            for i in range(no_LID_sequences):
                random.seed(i)
                idx = random.sample(range(data_length), N)
                writer.writerow(np.array(idx))
        csvFile.close()
    N_indices = pd.read_csv(LID_file, header=None, index_col=None)
    if dataset == 'cifar10':
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=[40, 80],
                                                   gamma=0.1)
        data_points = {}
    for row in range(len(N_indices.index)):
        data_points[row] = np.array(N_indices.iloc[row]).astype(int)

    if noise_ratio[class_no] < 100:
        record_file = './record/' + str(dataset) + '_' + str(
            noise_ratio[class_no]) + '_seed_' + str(
                random_seed) + '_record.csv'  #for initialization
    else:
        record_file = './record/' + str(dataset) + '_bl_seed_' + str(
            random_seed) + '_record.csv'
    header = [
        'epoch', 'train loss', 'train acc', 'train time', 'test loss',
        'test acc', 'test time'
    ]
    with open(record_file, 'a', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(header)
    csvFile.close()

    for epoch in range(start_epoch, epochs):
        #print('--------epoch: {}/{}--------'.format(epoch, epochs))
        #training
        record = [
            int(epoch),
        ]
        train_acc = 0.0
        train_loss = 0.0
        train_data_len = 0
        model.train()
        tr_start_time = time.time()

        for i, (X_train, y_train) in enumerate(train_loader):
            X_train = X_train.to(device)
            y_train = y_train.to(device)
            train_data_len += y_train.size(0)
            predictions, _ = model(X_train)
            loss = criterion(predictions, y_train)
            acc = accuracy(predictions, y_train)
            train_acc += acc
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        record.extend([
            train_loss / float(train_data_len),
            train_acc / float(train_data_len),
            time.time() - tr_start_time
        ])

        #test
        if epoch % 20 == 0 or epoch == (epochs - 1):
            test_acc = 0.0
            test_loss = 0.0
            test_data_len = 0
            test_start_time = time.time()
            with torch.no_grad():
                model.eval()
                for i, (X_test, y_test) in enumerate(test_loader):
                    X_test = X_test.to(device)
                    y_test = y_test.to(device)
                    test_data_len += y_test.size(0)
                    predictions, _ = model(X_test)
                    loss = criterion(predictions, y_test)
                    acc = accuracy(predictions, y_test)
                    test_acc += acc
                    test_loss += loss.item()
            record.extend([
                test_loss / float(test_data_len),
                test_acc / float(test_data_len),
                time.time() - test_start_time
            ])

        with open(record_file, 'a', newline='') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(np.array(record))
        csvFile.close()

        scheduler.step()

        #at the end of each epoch, compute lid scores
        lid_sequences = []
        LID_path = os.path.join('./log', str(random_seed))
        if not os.path.exists(LID_path):
            os.makedirs(LID_path)
        final_file_name = LID_path + '/lid_%s_%s_%s.csv' % \
                        (dataset, random_seed, noise_ratio[0])

        for key in data_points:
            lids = []
            if dataset == 'cifar10':
                lid_data = cifar10_LID(train_img, data_points[key],
                                       transform_test(dataset))
                lid_loader = torch.utils.data.DataLoader(lid_data,
                                                         batch_size=len(
                                                             data_points[key]),
                                                         shuffle=True,
                                                         num_workers=2,
                                                         drop_last=False)
            model.train()

            for i, X_train in enumerate(lid_loader):
                X_train = X_train.to(device)
                with torch.no_grad():
                    _, X_act = model(X_train)
                    X_act = np.asarray(X_act.cpu().detach(),
                                       dtype=np.float32).reshape(
                                           (X_act.shape[0], -1))

            s = int(X_train.shape[0] / batch_size)
            for ss in range(s):
                lid_batch = np.zeros(shape=(batch_size, 1))
                lid_batch[:, 0] = mle_batch(
                    X_act[ss * batch_size:(ss + 1) * batch_size],
                    X_act[ss * batch_size:(ss + 1) * batch_size])
                lids.extend(lid_batch)
            lids = np.asarray(lids, dtype=np.float32)
            lid_sequences.append(np.mean(lids))

        with open(final_file_name, 'a', newline='') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(np.array(lid_sequences))
        csvFile.close()
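
The LID bookkeeping above hinges on mle_batch; a common maximum-likelihood LID estimator consistent with its two-argument call (the default k and the implementation are assumptions, not the original helper):

import numpy as np
from scipy.spatial.distance import cdist

def mle_batch(data, batch, k=20):
    # MLE estimate of local intrinsic dimensionality:
    # lid(x) = -k / sum_i log(d_i(x) / d_k(x)) over the k nearest neighbours
    k = min(k, len(data) - 1)
    dists = np.sort(cdist(batch, data), axis=1)[:, 1:k + 1]  # drop the self-distance
    return -k / np.sum(np.log(dists / dists[:, -1:]), axis=1)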
Example #24
            coord.request_stop()

        coord.join(threads)


if __name__ == '__main__':
    with tf.Graph().as_default():

        with tf.device('/cpu:0'):
            train_batch, train_label_batch = cifar10_input.read_cifar10(
                data_path=data_path,
                is_train=True,
                batch_size=batch_size,
                shuffle=True)
            val_batch, val_label_batch = cifar10_input.read_cifar10(
                data_path=data_path,
                is_train=False,
                batch_size=batch_size,
                shuffle=False)

        xs = tf.placeholder(tf.float32, shape=[batch_size, img_h, img_w, 3])
        ys = tf.placeholder(tf.int32, shape=[batch_size, num_classes])
        keep_prob = tf.placeholder(tf.float32)

        model = VGG16(xs, num_classes, keep_prob)
        logits = model.logits

        loss = tools.losses(logits, ys)
        acc = tools.accuracy(logits, ys)
        train_op = tools.optimizer(loss, learning_rate)
        train_running()
Example #25
        confusion_matrix.add(output_conf, target_conf)
        loss1 = criterion1(output, lbl_batch.long())
        loss2 = criterion2(rec, img_batch.float())
        loss = tools.to_cuda(0.9 * loss1 + 0.1 * loss2)
        loss.backward()
        train_losses.append(loss.item())
        optimizer.step()

        if iter_ % 100 == 0:
            pred = np.argmax(output.data.cpu().numpy()[0], axis=0)
            gt = lbl_batch.data.cpu().numpy()[0]
            print(
                'Train (epoch {}/{}) [{}/{} ({:.0f}%)]\tLoss_SEGM: {:.6f}\tLoss_REC: {:.6f}\tAccuracy: {}'
                .format(epoch, epochs, i, len(mydataset),
                        100. * i / len(mydataset), loss1.item(), loss2.item(),
                        tools.accuracy(pred, gt)))
            train_acc = tools.accuracy(pred, gt)

        iter_ += 1
        del (img_batch, lbl_batch, loss)

    train_acc = (np.trace(confusion_matrix.conf) /
                 float(np.ndarray.sum(confusion_matrix.conf))) * 100
    print('TRAIN_LOSS: ', '%.3f' % np.mean(train_losses), 'TRAIN_ACC: ',
          '%.3f' % train_acc)
    confusion_matrix.reset()

    with torch.no_grad():
        model.eval()
        mydataset_val = DataLoader(val_dataset, batch_size=14, shuffle=True)
        val_losses = []
Example #26
def train():
    print('loading data............')

    # load the data
    with tf.name_scope('input'):
        train, train_label, test, test_label = Process.get_data(
            train_path, test_path)
        train_batch, train_label_batch = Process.get_batch(
            train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        test_batch, test_label_batch = Process.get_batch(
            test, test_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

    print('loading batch data complete.......')

    # create placeholders for the inputs and labels
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASS])

    # define the model
    logits = vgg.VGG16N(x, N_CLASS, IS_PRETRAIN)
    # define the loss
    loss = tools.loss(logits, y_)
    # compute the accuracy
    accuracy = tools.accuracy(logits, y_)
    # global step
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    # gradient descent
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    # saver for the training checkpoints
    saver = tf.train.Saver(tf.global_variables())
    #summary_op = tf.summary.merge_all()
    # global variable initializer
    init = tf.global_variables_initializer()
    # create the session
    sess = tf.Session()
    # run the initializer
    sess.run(init)
    # start the coordinator
    coord = tf.train.Coordinator()
    # start the queue runners
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # TensorBoard visualization ops; commented out for now because they caused problems
    #  tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    #  val_summary_writer = tf.summary.FileWriter(test_log_dir, sess.graph)

    print('all init has been done! start training')

    try:
        for step in np.arange(MAX_STEP):
            print('step ' + str(step) + ' is now')
            if coord.should_stop():
                break
            # fetch a batch from the queue
            tra_images, tra_labels = sess.run([train_batch, train_label_batch])
            # compute the loss and accuracy
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: tra_images,
                                                y_: tra_labels
                                            })

            # every 10 steps, print the training accuracy on the current batch
            if step % 10 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, tra_loss, tra_acc))
            # summary_str = sess.run(summary_op)
            # tra_summary_writer.add_summary(summary_str, step)

            # every 200 steps, evaluate loss and accuracy on a batch of test data
            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [test_batch, test_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

            #  summary_str = sess.run(summary_op)
            #   val_summary_writer.add_summary(summary_str, step)

            # every 2000 steps, save a checkpoint
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #27
def train():
    pre_trained_weights = './vgg16_pretrain/vgg16.npy'
    train_data_dir = './data/train/scene_train_images_20170904/'
    train_label_json = './data/train/scene_train_annotations_20170904.json'
    val_data_dir = './data/val/scene_validation_images_20170908/'
    val_label_json = './data/val/scene_validation_annotations_20170908.json'
    train_log_dir = './logs/train/'
    val_log_dir = './logs/val/'

    with tf.name_scope('input'):

        tra_images, tra_labels = input_data.get_files(train_label_json,
                                                      train_data_dir)

        tra_image_batch, tra_label_batch = input_data.get_batch(
            tra_images, tra_labels, IMG_W, IMG_H, BATCH_SIZE, CAPACITY,
            N_CLASSES)

        val_images, val_labels = input_data.get_files(val_label_json,
                                                      val_data_dir)
        val_image_batch, val_label_batch = input_data.get_batch(
            val_images, val_labels, IMG_W, IMG_H, BATCH_SIZE, CAPACITY,
            N_CLASSES)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])
    keep_prob = tf.placeholder(tf.float32)

    # %%
    logits = VGG.VGG16N(x, N_CLASSES, keep_prob, IS_PRETRAIN)
    # #%%
    # import ResNet
    # resnet = ResNet.ResNet()
    # _, logits = resnet.build(x, N_CLASSES, last_layer_type="softmax")
    # #%%
    # import InceptionV4
    # inception = InceptionV4.InceptionModel(x, [BATCH_SIZE, IMG_W, IMG_H, 3], [BATCH_SIZE, N_CLASSES], keep_prob,
    #                                        ckpt_path='train_model/model', model_path='saved_model/model')
    # logits = inception.define_model()
    # print('shape{}'.format(logits.shape))
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    #    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # load the parameter file, assign the parameters, skip the specific layers
    # tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    #    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    #    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            train_images, train_labels = sess.run(
                [tra_image_batch, tra_label_batch])
            # print(str(train_images.get_shape()))
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={
                                                x: train_images,
                                                y_: train_labels,
                                                keep_prob: 0.2
                                            })
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                #                _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                #                                                feed_dict={x: train_images, y_: train_labels})
                print('Step: %d, loss: %.3f, accuracy: %.3f%%' %
                      (step, tra_loss, tra_acc))
            # summary_str = sess.run(summary_op)
            #                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                validation_images, validation_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: validation_images,
                                                 y_: validation_labels,
                                                 keep_prob: 1
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

            # summary_str = sess.run(summary_op)
            #                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #28
def mytrain():
    # pre_trained_weights = './VGG16_pretrain/vgg16.npy'
    data_dir = '/content/data/'
    train_log_dir = './logs2/train/'
    val_log_dir = './logs2/val/'

    with tf.name_scope('input'):
        train_image_batch, train_label_batch = input_data.read_cifar10(
            data_dir, is_train=True, batch_size=BATCH_SIZE, shuffle=True)

        val_image_batch, val_label_batch = input_data.read_cifar10(
            data_dir, is_train=False, batch_size=BATCH_SIZE, shuffle=False)

    logits = VGG.Myvgg(train_image_batch, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, train_label_batch)
    accuracy = tools.accuracy(logits, train_label_batch)
    my_global_step = tf.Variable(0, trainable=False, name='global_step')
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    x = tf.placeholder(dtype=tf.float32, shape=[BATCH_SIZE, IMG_H, IMG_W, 3])
    y_ = tf.placeholder(dtype=tf.int32, shape=[BATCH_SIZE, N_CLASSES])
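    # note: as in the VGG16N example above, loss and accuracy consume the queue
    # batches directly, so the feed_dict values for x and y_ have no effect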

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # load pretrain weights
    # tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    train_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            train_images, train_labels = sess.run(
                [train_image_batch, train_label_batch])
            #print(train_images.shape,train_labels)
            _, train_loss, train_accuracy = sess.run(
                [train_op, loss, accuracy],
                feed_dict={
                    x: train_images,
                    y_: train_labels
                })

            if step % 128 == 0 or (step + 1) == MAX_STEP:
                print("Step: %d, loss: %.8f, accuracy: %.4f%%" %
                      (step, train_loss, train_accuracy))

                summary_str = sess.run(summary_op)
                train_summary_writer.add_summary(summary_str, step)

            if step % 128 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_accuracy = sess.run([loss, accuracy],
                                                  feed_dict={
                                                      x: val_images,
                                                      y_: val_labels
                                                  })
                print("** Step: %d, loss: %.8f, test_accuracy: %.4f%%" %
                      (step, val_loss, val_accuracy))
                summary_str = sess.run(summary_op)
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, save_path=checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example #29
def test(test_dir, checkpoint_dir='./checkpoint/'):
    import json
    # predict the result
    test_images = os.listdir(test_dir)
    features = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    labels = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])
    # one_hot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=80)
    # train_step, cross_entropy, logits, keep_prob = network.inference(features, one_hot_labels)
    resnet = ResNet.ResNet()
    _, logits = resnet.build(features, N_CLASSES, last_layer_type="softmax")
    loss = tools.loss(logits, labels)
    accuracy = tools.accuracy(logits, labels)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)
    values, indices = tf.nn.top_k(logits, 3)

    keep_prob = tf.placeholder(tf.float32)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print('Restore the model from checkpoint %s' %
                  ckpt.model_checkpoint_path)
            # Restores from checkpoint
            saver.restore(sess, ckpt.model_checkpoint_path)
            start_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        else:
            raise Exception('no checkpoint found')

        result = []
        test_imglist = []
        for test_image in test_images:
            test_imgpath = os.path.join(test_dir, test_image)
            test_imglist.append(test_imgpath)
        image = tf.cast(test_imglist, tf.string)

        # make an input queue; shuffle=False keeps the queue in filename order
        input_queue = tf.train.slice_input_producer([image], shuffle=False)

        image_contents = tf.read_file(input_queue[0])
        image = tf.image.decode_jpeg(image_contents, channels=3)

        #################################################
        # data augmentation should go here
        #################################################
        image = tf.image.resize_image_with_crop_or_pad(image, IMG_W, IMG_H)

        image = tf.image.per_image_standardization(image)
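        # per_image_standardization (above) rescales each image to zero mean and
        # unit variance, which should match the preprocessing used at training time.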
        # tf.train.batch with a single thread preserves file order, so each
        # dequeued image lines up with its entry in test_imglist;
        # shuffle_batch would scramble that pairing.
        image_batch = tf.train.batch([image],
                                     batch_size=1,
                                     num_threads=1,
                                     capacity=CAPACITY)
        image_batch = tf.cast(image_batch, tf.float32)

        # the queue runners must be started, otherwise sess.run blocks forever
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for test_image in test_images:
            # pull the next preprocessed image from the input queue
            x = sess.run(image_batch)[0]

            temp_dict = {}

            predictions = np.squeeze(sess.run(indices,
                                              feed_dict={
                                                  features:
                                                  np.expand_dims(x, axis=0),
                                                  keep_prob:
                                                  1
                                              }),
                                     axis=0)
            temp_dict['image_id'] = test_image
            temp_dict['label_id'] = predictions.tolist()
            result.append(temp_dict)
            print('image %s is %d,%d,%d' %
                  (test_image, predictions[0], predictions[1], predictions[2]))

        # stop the input threads before writing out the results
        coord.request_stop()
        coord.join(threads)

        with open('submit.json', 'w') as f:
            json.dump(result, f)
            print('write result json, num is %d' % len(result))
Exemple #30
0
def train():
    data_dir = '/home/xinlong/Tensorflow_workspace/canjian_AlexNet/JPG/trainval/'
    train_log_dir = '/home/xinlong/Tensorflow_workspace/canjian_AlexNet/log/train/'
    val_log_dir = '/home/xinlong/Tensorflow_workspace/canjian_AlexNet/log/val/'

    with tf.name_scope('input'):
        train, train_label, val, val_label = input_trainval.get_files(data_dir, 0.2)
        train_batch, train_label_batch = input_trainval.get_batch(train, train_label,
                                                                  IMG_H, IMG_W,
                                                                  BATCH_SIZE,
                                                                  CAPACITY)
        val_batch, val_label_batch = input_trainval.get_batch(val, val_label,
                                                              IMG_H, IMG_W,
                                                              BATCH_SIZE,
                                                              CAPACITY)

        x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_H, IMG_W, 3])
        y_ = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
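        # note: y_ holds sparse class indices (shape [BATCH_SIZE]) rather than
        # one-hot vectors, so tools.loss is expected to handle sparse labels.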

        logits = model_structure.AlexNet(x, 5)
        loss = tools.loss('loss', y_, logits)
        accuracy = tools.accuracy('accuracy', y_, logits)

        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = tools.optimize('optimize', loss, LEARNING_RATE, my_global_step)

        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()


        with tf.Session() as sess:
            sess.run(init)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
            val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

            try:
                for step in np.arange(MAX_STEP):
                    if coord.should_stop():
                        break

                    tra_images, tra_labels = sess.run([train_batch, train_label_batch])

                    _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                                    feed_dict={x: tra_images, y_: tra_labels})

                    if step % 10 == 0 or (step + 1) == MAX_STEP:
                        print('Step: %d, loss: %.4f, accuracy: %.4f' % (step, tra_loss, tra_acc))
                        summary_str = sess.run(summary_op,
                                               feed_dict={x: tra_images, y_: tra_labels})
                        tra_summary_writer.add_summary(summary_str, step)
                    
                    if step % 20 == 0 or (step + 1) == MAX_STEP:
                        valid_images, valid_labels = sess.run([val_batch, val_label_batch])
                        valid_loss, valid_acc = sess.run([loss, accuracy],
                                                         feed_dict={x: valid_images, y_: valid_labels})
                        print('** step: %d,  loss: %.4f,  accuracy: %.4f' % (step, valid_loss, valid_acc))
                        summary_str = sess.run(summary_op,
                                               feed_dict={x: valid_images, y_: valid_labels})
                        val_summary_writer.add_summary(summary_str, step)


                    if step % 2000 == 0 or (step + 1) == MAX_STEP:
                        checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                        saver.save(sess, checkpoint_path, global_step=step)


            except tf.errors.OutOfRangeError:
                print('Done training -- epoch limit reached')
            finally:
                coord.request_stop()

            coord.join(threads)
Exemple #31
0
def train():

    pre_trained_weights = './/vgg-face.mat'
    data_dir = '/home/hadoop/Desktop/My-TensorFlow-tutorials-master/VGG face segmentation  recognition/data/segmentation/training/'
    train_log_dir = './/logss/train_shuffle/'
    val_log_dir = './/logss/va_shuffle/'

    image, label = notMNIST_input.get_file(data_dir)
    X = np.array(image)
    Y = np.array(label)
    kf = KFold(n_splits=10, shuffle=False)
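    # 10-fold cross-validation: each iteration trains on 9 folds and
    # validates on the held-out fold.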
    total_acc = 0
    for train, test in kf.split(X, Y):
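        # rebuild the graph from scratch for every fold so that variables and
        # queues from the previous fold do not leak into this one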
        tf.reset_default_graph()
        image_batch, label_batch = notMNIST_input.get_batch(X[train],
                                                            Y[train],
                                                            IMG_W,
                                                            IMG_H,
                                                            BATCH_SIZE,
                                                            capacity,
                                                            shuffle=True)
        image_batch_validate, label_batch_validate = notMNIST_input.get_batch(
            X[test],
            Y[test],
            IMG_W,
            IMG_H,
            BATCH_SIZE,
            capacity,
            shuffle=False)
        print("X[train] size:", len(X[train]))
        print("Y[train] size:", len(Y[train]))
        print("X[test] size:", len(X[test]))
        print("Y[test] size:", len(Y[test]))

        # cast float32 labels such as (1.8, 3.4) to int64 class indices (1, 3)

        x = tf.placeholder(tf.float32,
                           shape=[BATCH_SIZE, IMG_W, IMG_H, 3],
                           name='place_x')
        y_ = tf.placeholder(tf.int64, shape=[
            BATCH_SIZE,
        ], name='place_y')
        logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
        print("****logits shape is ", logits.shape)

        loss = tools.loss(logits, y_)

        print("label_batch is ", y_.shape)
        accuracy = tools.accuracy(logits, y_)

        my_global_step = tf.Variable(0, name='global_step', trainable=False)
        # learning_rate = tf.train.exponential_decay(starter_learning_rate, my_global_step,
        #                                            2200, 0.96, staircase=True)
        train_op = tools.optimize(loss, starter_learning_rate, my_global_step)

        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()

        sess = tf.Session()

        sess.run(init)

        # load the parameter file, assign the parameters, skip the specific layers
        tools.load_with_skip(pre_trained_weights, sess, ['fc6', 'fc7', 'fc8'])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
        val_summary_writer = tf.summary.FileWriter(val_log_dir)
        max_acc = 0
        total_time = 0

        try:
            for step in np.arange(MAX_STEP):
                if coord.should_stop():
                    break
                start_time = time.time()
                x_train_a, y_train_a = sess.run([image_batch, label_batch])
                _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                                feed_dict={
                                                    x: x_train_a,
                                                    y_: y_train_a
                                                })

                if step % 10 == 0 or (step + 1) == MAX_STEP:
                    feed_dict = {x: x_train_a, y_: y_train_a}
                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    tra_summary_writer.add_summary(summary_str, step)
                    time_elapsed = time.time() - start_time
                    print(
                        'Step:%d , loss: %.2f, accuracy: %.2f%%(%.2f sec/step)'
                        % (step, tra_loss, tra_acc * 100, time_elapsed))

                    total_time = total_time + time_elapsed
                    if step % 50 == 0:
                        print('total time is :%.2f' % (total_time))

                if step % 200 == 0 or (step + 1) == MAX_STEP:
                    # fetch a validation batch only when we actually evaluate
                    x_test_a, y_test_a = sess.run(
                        [image_batch_validate, label_batch_validate])
                    val_loss, val_acc = sess.run([loss, accuracy],
                                                 feed_dict={
                                                     x: x_test_a,
                                                     y_: y_test_a
                                                 })
                    summary_str = sess.run(summary_op,
                                           feed_dict={x: x_test_a, y_: y_test_a})
                    val_summary_writer.add_summary(summary_str, step)

                    print(
                        '************ validation: Step:%d, loss: %.2f, accuracy: %.2f%% (%.2f sec/step)'
                        % (step, val_loss, val_acc * 100, time_elapsed))
                    if val_acc > max_acc:
                        max_acc = val_acc
                        checkpoint_path = os.path.join(train_log_dir,
                                                       'model.ckpt')
                        saver.save(sess, checkpoint_path, global_step=step)
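            # after all steps of this fold, keep only the best model across folds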
            if max_acc > total_acc:
                total_acc = max_acc
                checkpoint_path = os.path.join(val_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()

        coord.join(threads)
        sess.close()
Exemple #32
0
def train():
    
    pre_trained_weights = './/vgg16_pretrain//vgg16.npy'
    data_dir = './/data//cifar-10-batches-bin//'
    train_log_dir = './/logs//train//'
    val_log_dir = './/logs//val//'
    
    with tf.name_scope('input'):
        tra_image_batch, tra_label_batch = input_data.read_cifar10(data_dir=data_dir,
                                                 is_train=True,
                                                 batch_size= BATCH_SIZE,
                                                 shuffle=True)
        val_image_batch, val_label_batch = input_data.read_cifar10(data_dir=data_dir,
                                                 is_train=False,
                                                 batch_size= BATCH_SIZE,
                                                 shuffle=False)
    
    # Build the network on the placeholders so the feed_dicts below actually
    # drive the graph; wiring logits to tra_image_batch directly would make
    # the x/y_ feeds (including validation) silently evaluate training data.
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])

    logits = VGG.VGG16N(x, N_CLASSES, IS_PRETRAIN)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)
    
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()   
       
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    
    # load the parameter file, assign the parameters, skip the specific layers
    tools.load_with_skip(pre_trained_weights, sess, ['fc6','fc7','fc8'])   


    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)    
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)
    
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run([tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc = sess.run([train_op, loss, accuracy],
                                            feed_dict={x: tra_images, y_: tra_labels})
            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' % (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op,
                                       feed_dict={x: tra_images, y_: tra_labels})
                tra_summary_writer.add_summary(summary_str, step)
                
            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run([val_image_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={x: val_images, y_: val_labels})
                print('**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' % (step, val_loss, val_acc))
                summary_str = sess.run(summary_op,
                                       feed_dict={x: val_images, y_: val_labels})
                val_summary_writer.add_summary(summary_str, step)
                    
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
Exemple #33
0
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))

    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))

num_steps = 5001

t0 = time()
with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    saver = tf.train.Saver()
    print('Initialized')
    for step in range(num_steps):
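        # wrap the offset so minibatches cycle repeatedly through the training set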
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if step % 50 == 0:
            print('Minibatch loss at step %d: %f' % (step, l))
            print('Minibatch accuracy: %.1f%%' % tools.accuracy(predictions, batch_labels))
            print('Validation accuracy: %.1f%%' % tools.accuracy(valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % tools.accuracy(test_prediction.eval(), test_labels))
    saver.save(session, 'test-06', global_step=step)
t1 = time()
print('Time: ', (t1 - t0))