Example #1
    def run(self):
        # Data loading code
        print("Loading data")
        test_dir = self.config['Dataset']['TestPath']
        pred_result_dir = os.path.join(test_dir, 'pred_result_info')
        utils.mkdir(pred_result_dir)

        dataset_test = torchvision.datasets.ImageFolder(
            test_dir, transforms.Compose([transforms.ToTensor()]))
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

        data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                       batch_size=1,
                                                       sampler=test_sampler,
                                                       num_workers=1,
                                                       pin_memory=True)

        print("Creating model")
        model = getModels(model_name=self.config['Model']['Name'],
                          num_classes=self.config['Model']['NumClass'])
        model.to(self.device)

        checkpoint = torch.load(os.path.join(
            self.output_model_path, self.config['Misc']['BestModelName']),
                                map_location='cpu')
        model.load_state_dict(checkpoint['model'])
        correct_1 = 0.0
        model.eval()
        print(dataset_test.class_to_idx)
        with torch.no_grad():
            for i, (image, label) in enumerate(data_loader_test):
                image = image.to(self.device, non_blocking=True)
                label = label.to(self.device, non_blocking=True)
                output = model(image)

                _, pred = output.topk(1, 1, largest=True, sorted=True)
                label = label.view(label.size(0), -1).expand_as(pred)

                correct = pred.eq(label)
                correct_1 += correct[:, :1].sum().item()

                # Map the class indices back to class-name strings (batch size is 1)
                label_idx = label[0, 0].item()
                pred_idx = pred[0, 0].item()
                for key in dataset_test.class_to_idx:
                    if dataset_test.class_to_idx[key] == label_idx:
                        label_info = key
                    if dataset_test.class_to_idx[key] == pred_idx:
                        pred_info = key

                src_img = cv2.imread(dataset_test.imgs[i][0], 1)
                cv2.putText(src_img, 'label: {}'.format(label_info), (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
                cv2.putText(src_img, 'pred: {}'.format(pred_info), (20, 60),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 20), 1)
                if label_info != pred_info:
                    shutil.move(dataset_test.imgs[i][0], pred_result_dir)
                    # cv2.imwrite(os.path.join(pred_result_dir, '{}.png'.format(i)), src_img)
                cv2.imshow('img', src_img)
                cv2.waitKey(33)

        print("acc: ", correct_1 / len(data_loader_test.dataset))
Example #2
def export_as_tf():
    print "exporting"
    from keras import backend as K
    from keras.models import load_model
    autoencoder, encoder, decoder = getModels()
    autoencoder.load_weights(modelsPath + modelName)
    saver = tf.train.Saver()
    sess = K.get_session()
    save_path = saver.save(sess, "Models/tf_modi")
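Restoring the checkpoint written by export_as_tf() is not shown in the source. Here is a minimal sketch, under the assumption that the code runs on TensorFlow 1.x and that the same graph is first rebuilt with getModels():

# Hypothetical restore counterpart to export_as_tf(); assumes TF 1.x and that
# getModels() rebuilds the same graph before the variables are restored.
import tensorflow as tf
from keras import backend as K

autoencoder, encoder, decoder = getModels()
sess = K.get_session()
saver = tf.train.Saver()
saver.restore(sess, "Models/tf_modi")  # checkpoint path used in export_as_tf()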
Example #3
def testModel(stream_output=False):
    # Create models
    print("Creating Autoencoder, Encoder and Generator...")
    autoencoder, encoder, decoder = getModels()

    # Load Autoencoder weights
    print("Loading weights...")
    autoencoder.load_weights(modelsPath + modelName)

    # Load dataset to test
    print("Loading dataset...")
    if "aug" in modelName:
        X_train, X_test, Y_train = loadDatasetAugmented(imSize)
    else:
        X_train, X_test, Y_train = loadDatasetLabelled(imSize)
    name_list = np.unique(Y_train)
    print(X_train.shape)
    #    print(name_list)
    # Visualization functions
    #visualizeReconstructedImages(X_train[:180],X_test[:20], autoencoder)
    #     computeTSNEProjectionOfPixelSpace(X_test[:1000], display=True)
    #     computeTSNEProjectionOfLatentSpace(X_train[:1000], encoder, display=True)
    lkst = likeliest
    scrub = 0
    seed = (id * 20) + gesture_selection[likeliest]
    #    seed = (id*20) + (likeliest*4)
    scrub_destination = seed + 4
    name = Y_train[seed]
    ri = getInterpolatedFrames(X_train[randint(0, X_train.shape[0])],
                               X_train[randint(0, X_train.shape[0])],
                               encoder,
                               decoder,
                               save=False,
                               nbSteps=nsteps)
    #    ri = getInterpolatedFrames(X_train[seed], X_train[scrub_destination], encoder, decoder, save=False, nbSteps=nsteps)
    #    return
    while likeliest == lkst:
        osc_server.recv(10)
        if (smudge_enabled):
            if (scrub_input.shape[0] > 0):
                frame_buffer = [ri[sc] for sc in scrub_input]
                img_out = layer_images(frame_buffer, smudge_amt)
            else:
                continue
        else:
            img_out = ri[int(math.floor(scrub_position * (nsteps - 1)))]
        if (stream_output):
            png_bytes = pyImageStreamer.get_jpeg_image_bytes(img_out)
            if len(cl) > 0: cl[-1].write_message(png_bytes, binary=True)
        else:
            cv2.imshow('Moving_Digits', img_out)
            # A single waitKey both refreshes the window and polls for 'q' to quit
            if cv2.waitKey(2) & 0xFF == ord('q'):
                return
    testModel(stream_output)
Example #4
def testModel():
    # Create models
    print("Creating Autoencoder, Encoder and Generator...")
    autoencoder, encoder, decoder = getModels()

    # Load Autoencoder weights
    print("Loading weights...")
    autoencoder.load_weights(modelsPath + modelName)

    # Load dataset to test
    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Visualization functions
    visualizeReconstructedImages(X_train[:16],
                                 X_test[:16],
                                 autoencoder,
                                 save=True)
Example #5
def testModel():
    # Create models
    print("Creating Autoencoder, Encoder and Generator...")
    autoencoder, encoder, decoder = getModels()

    # Load Autoencoder weights
    print("Loading weights...")
    autoencoder.load_weights(modelsPath+modelName)

    # Load dataset to test
    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Visualization functions
    #visualizeReconstructedImages(X_train[:16],X_test[:16], autoencoder)
    # computeTSNEProjectionOfPixelSpace(X_test[:1000], display=True)
    # computeTSNEProjectionOfLatentSpace(X_test[:1000], encoder, display=True)
    # while 1: visualizeInterpolation(X_test[randint(0,X_test.shape[0])], X_test[randint(0,X_test.shape[0])], encoder, decoder, save=False, nbSteps=5)
    while 1:
        visualizeArithmetics(X_test[randint(0, X_test.shape[0])],
                             X_test[randint(0, X_test.shape[0])],
                             X_test[randint(0, X_test.shape[0])],
                             encoder, decoder)
Example #6
    def __init__(self):
        # Load the configuration file
        self.config = utils.loadYaml('../config/config.yaml')
        # Directory where training results are saved
        self.output_model_path = os.path.join('../output/', self.config['Misc']['OutputFolderName'])
        # GPU selection
        self.device = utils.set_gpu(self.config)
        self.img_shape = (256, 256)
        self.overlap_piexl = 200
        self.imgs_index_dict, self.rois_start_xy_index_dict, self.rois_xyxy_index_dict = self.getRefInfo(
            ref_img_path='/home/pi/Desktop/df1b_dataset/20191024/ref_deploy')
        print(self.imgs_index_dict)
        print("Creating model")
        self.model = getModels(model_name=self.config['Model']['Name'],
                               num_classes=self.config['Model']['NumClass']).to(self.device)

        checkpoint = torch.load(os.path.join(self.output_model_path, self.config['Misc']['BestModelName']),
                                map_location='cpu')
        self.model.load_state_dict(checkpoint['model'])
        self.model.eval()
Example #7
def trainModel(startEpoch=0):
    # Create models
    print("Creating Autoencoder...")
    autoencoder, _, _ = getModels()
    autoencoder.compile(optimizer=RMSprop(lr=0.00025), loss="mse")

    # From which we start
    if startEpoch > 0:
        # Load Autoencoder weights
        print("Loading weights...")
        autoencoder.load_weights(modelsPath + modelName)

    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Compute number of batches
    nbBatch = int(X_train.shape[0] / batchSize)

    # Train the Autoencoder on dataset
    print(
        "Training Autoencoder for {} epochs with {} batches per epoch and {} samples per batch."
        .format(nbEpoch, nbBatch, batchSize))
    print("Run id: {}".format(runID))

    # Debug utils writer
    writer = tf.summary.FileWriter("/tmp/logs/" + runID)
    batchTimes = [0. for i in range(5)]

    # For each epoch
    for epoch in range(startEpoch, nbEpoch):
        # For each batch
        for batchIndex in range(nbBatch):
            batchStartTime = time.time()
            # Get batch
            X = X_train[batchIndex * batchSize:(batchIndex + 1) * batchSize]

            # Train on batch
            autoencoderLoss = autoencoder.train_on_batch(X, X)
            trainingSummary = tf.Summary.Value(
                tag="Loss", simple_value=float(autoencoderLoss))

            # Compute ETA
            batchTime = time.time() - batchStartTime
            batchTimes = batchTimes[1:] + [batchTime]
            eta = getETA(
                sum(batchTimes) / len(batchTimes), nbBatch, batchIndex,
                nbEpoch, epoch)

            # Save reconstructions on train/test samples
            if batchIndex % 2 == 0:
                visualizeReconstructedImages(X_train[:16],
                                             X_test[:16],
                                             autoencoder,
                                             save=True,
                                             label="{}_{}".format(
                                                 epoch, batchIndex))

            # Validation & Tensorboard Debug
            if batchIndex % 20 == 0:
                validationLoss = autoencoder.evaluate(X_test[:512],
                                                      X_test[:512],
                                                      batch_size=256,
                                                      verbose=0)
                validationSummary = tf.Summary.Value(
                    tag="Validation Loss", simple_value=float(validationLoss))
                summary = tf.Summary(
                    value=[trainingSummary, validationSummary])
                print(
                    "Epoch {}/{} - Batch {}/{} - Loss: {:.3f}/{:.3f} - ETA:".
                    format(epoch + 1, nbEpoch, batchIndex + 1, nbBatch,
                           autoencoderLoss, validationLoss), eta)
            else:
                print(
                    "Epoch {}/{} - Batch {}/{} - Loss: {:.3f} - ETA:".format(
                        epoch + 1, nbEpoch, batchIndex + 1, nbBatch,
                        autoencoderLoss), eta)
                summary = tf.Summary(value=[
                    trainingSummary,
                ])
            writer.add_summary(summary, epoch * nbBatch + batchIndex)

        # Save the model every epoch
        print("Saving autoencoder...")
        autoencoder.save_weights(modelsPath + modelName, overwrite=True)
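Example #7 also depends on a getETA() helper that the listing does not include. The sketch below is inferred only from the call site (average batch time, batches per epoch, current batch index, total epochs, current epoch); its formatting and logic are assumptions.

# Hypothetical getETA(): estimates the remaining training time from the
# rolling-average batch duration and the number of batches still to run.
def getETA(avgBatchTime, nbBatch, batchIndex, nbEpoch, epoch):
    remainingBatches = (nbEpoch - epoch) * nbBatch - batchIndex
    remainingSeconds = int(avgBatchTime * remainingBatches)
    hours, rest = divmod(remainingSeconds, 3600)
    minutes, seconds = divmod(rest, 60)
    return "{:d}h{:02d}m{:02d}s".format(hours, minutes, seconds)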
Example #8
    def run(self):

        dataset = Datalayer(self.config,
                            transform=tf.Compose([
                                tf.ToTensor(),
                            ]))
        data_loader = DataLoader(
            dataset=dataset,
            shuffle=True,
            batch_size=self.config['Dataset']['BatchSize'],
            num_workers=self.config['Dataset']['NumWorkers'],
            pin_memory=True)
        # Create the model
        model = getModels(self.config['Model']['Name'],
                          num_classes=self.config['Model']['NumClass'],
                          pretrained=self.config['Model']['IsPretrained']).to(
                              self.device)
        # Create the loss function
        criterion = getLossFuns()
        # Create the optimizer and learning-rate scheduler
        optimizer = make_optimizer(cfg=self.config, model=model)
        lr_scheduler = GeneralLR_Scheduler(optimizer,
                                           self.config,
                                           max_iter=len(data_loader) *
                                           self.config['Dataset']['Epochs'])
        start_epoch = 0
        # Resume training from a saved checkpoint
        if self.config['Model']['IsResume']:
            checkpoint = torch.load(os.path.join(
                self.output_model_path, self.config['Misc']['BestModelName']),
                                    map_location='cpu')
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            start_epoch = checkpoint['epoch'] + 1
        # Start training
        print("Start training")
        for epoch in range(start_epoch, self.config['Dataset']['Epochs']):

            self.train_one_epoch(model,
                                 criterion,
                                 optimizer,
                                 lr_scheduler,
                                 data_loader,
                                 self.device,
                                 epoch,
                                 print_freq=10)

            if self.output_model_path:
                checkpoint = {
                    'model': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'epoch': epoch
                }

                utils.save_on_master(
                    checkpoint,
                    os.path.join(self.output_model_path,
                                 self.config['Misc']['BestModelName']))
                if epoch % self.config['Model']['OutputFreq'] == 0:
                    utils.save_on_master(
                        checkpoint,
                        os.path.join(self.output_model_path,
                                     'model_{}.pth'.format(epoch)))
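The config.yaml read in Examples #1, #6 and #8 is not part of the listing either. The dictionary below sketches what utils.loadYaml() would need to return, reconstructed only from the keys those snippets access; every concrete value is a placeholder, not the project's real setting.

# Keys reconstructed from the config lookups in Examples #1, #6 and #8.
# All values are placeholders.
config = {
    'Model': {
        'Name': 'resnet18',            # placeholder
        'NumClass': 2,                 # placeholder
        'IsPretrained': True,
        'IsResume': False,
        'OutputFreq': 10,
    },
    'Dataset': {
        'TestPath': '/path/to/test',   # placeholder
        'BatchSize': 32,
        'NumWorkers': 4,
        'Epochs': 100,
    },
    'Misc': {
        'OutputFolderName': 'exp1',
        'BestModelName': 'model_best.pth',
    },
}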