Example 1
def main():
    config = arg_config()
    if config.run_type == 'train':
        print("-Loading dataset ...")
        DataSet = My_dataset('train')
        train_loader = DataLoader(dataset=DataSet, shuffle=False,
                                  batch_size=config.batch_size,
                                  drop_last=True, collate_fn=collate_fn)
        Dev_DataSet = My_dataset('dev', voc=DataSet.voc)
        # build the dev loader from the dev split rather than the training set
        dev_loader = DataLoader(dataset=Dev_DataSet, shuffle=False,
                                batch_size=config.batch_size,
                                drop_last=True, collate_fn=collate_fn)
        if config.model_type == 'gru':
            model = network.GRU_Encoder_Decoder(config, DataSet.voc)
            optimizer = optim.Adam(model.parameters(),
                                   lr=config.lr,
                                   betas=(0.9, 0.98),
                                   eps=1e-09)
        elif config.model_type == 'trans':
            model = network.Transformer(config, DataSet.voc.n_words)
            optimizer = ScheduledOptim(
                optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09),
                config.lr, config.embedding_size, config.n_warmup_steps)
        if torch.cuda.is_available() and config.use_gpu:
            print('**work with solo-GPU **')
            network.Global_device = torch.device("cuda:0")
            model.to(network.Global_device)
        else:
            print('**work with CPU **')
        # for n,p in model.named_parameters(): print(n)
        model.train(train_loader, dev_loader, optimizer)
    else:
        test_model(config)
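Note: ScheduledOptim is not defined in this snippet. Below is a minimal sketch, assuming it implements the usual inverse-square-root warmup schedule used for Transformer training; the class body is illustrative, not the example's actual implementation, and only the argument order mirrors the call above.

class ScheduledOptim:
    """Hypothetical warmup wrapper: lr = lr_mul * d_model**-0.5 *
    min(step**-0.5, step * n_warmup_steps**-1.5), applied before each step."""

    def __init__(self, optimizer, lr_mul, d_model, n_warmup_steps):
        self._optimizer = optimizer
        self.lr_mul = lr_mul
        self.d_model = d_model
        self.n_warmup_steps = n_warmup_steps
        self.n_steps = 0

    def zero_grad(self):
        self._optimizer.zero_grad()

    def step_and_update_lr(self):
        # recompute the learning rate for the current step, then update the weights
        self.n_steps += 1
        scale = (self.d_model ** -0.5) * min(
            self.n_steps ** -0.5, self.n_steps * self.n_warmup_steps ** -1.5)
        for group in self._optimizer.param_groups:
            group['lr'] = self.lr_mul * scale
        self._optimizer.step()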
Example 2
def quoraTF_default(flags_path, tf_path, out_dir=None, init_embeddings=None):
    flags = read_flags(flags_path)
    num_epochs = flags.num_epochs
    evaluate_epochs = flags.evaluate_epochs

    for i in range(0, num_epochs, evaluate_epochs):

        # Train n epochs and then evaluate the system
        if not out_dir:
            out_dir = train_siamese_fromtf(tf_path,
                                           flags,
                                           evaluate_epochs,
                                           init_embeddings=init_embeddings)
        else:
            train_siamese_fromtf(tf_path,
                                 flags,
                                 evaluate_epochs,
                                 out_dir,
                                 init_embeddings=init_embeddings)

        # dev_step(tf_path, out_dir, flags_path, i)
    print(' -----------------> ', out_dir)
    copyfile(flags_path, join(out_dir, 'flags.config'))
    print(' -----------------> ', out_dir)
    test_model(tf_path, out_dir, flags_path)
Example 3
    def train(self, inputX, targetY, epochs, valX, valY, X, Y):
        init_time = time.time()
        for epoch in range(epochs):
            start = time.time()
            total_loss = 0
            total_batch = inputX.shape[0] // self.batch_sz
            #print(total_batch)

            for batch in range(total_batch):
                index = batch * self.batch_sz
                input_ = inputX[index:index + self.batch_sz, :, :, :, :]
                target = targetY[index:index + self.batch_sz, :, :, :, :]

                # print(input_.shape, target.shape)

                batch_loss = self.__train_step(input_, target)
                total_loss += batch_loss

            # saving (checkpoint) the model every 10 epochs
            if epoch % 10 == 0:
                self.checkpoint.save(file_prefix=self.checkpoint_prefix)
                val_loss = self.evaluate(valX, valY)
                print('Epoch {} Evaluation Loss {:.4f}'.format(
                    epoch + 1, val_loss))
                # if epoch % 50 == 0:
                test_model(self, X, Y)
                if (time.time() - init_time) / 3600.0 > 8:
                    break

            total_batch += 1
            print('Epoch {} Loss {:.4f}'.format(epoch + 1,
                                                total_loss / total_batch))
            print('Time taken for 1 epoch {} sec\n'.format(time.time() -
                                                           start))
Example 4
def main():
    args = get_args()
    data = Data(args.train_path, n_gram=args.n_gram)
    classes = Classes(args.classes_path)
    training_data = train.TrainingData(data)
    model = train.train_classification_model(training_data, classes.number_of_classes, dimension=300, learning_rate=0.05, epoch=10)
    test.test_model(classes, training_data.g2i, model, args.test_path, n_gram=args.n_gram)
Example 5
def main():
    # Parse args and split data into training and testing
    print('Parsing args...')
    pssm_list, tm_align_list, fasta_list, pssm_dir, tm_align_dir, fasta_dir = parse_args()
    print('Splitting into test and training...')
    pssm_train = sample(pssm_list, int(0.75 * len(pssm_list)))
    pssm_test = [pssm for pssm in pssm_list if pssm not in pssm_train]

    # Train the model
    gradient_descent(pssm_train, pssm_dir, fasta_dir, tm_align_dir)

    # Test the model
    test_model(pssm_test, pssm_dir, fasta_dir, tm_align_dir)
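Note: the test split above rebuilds the complement with a linear `not in` scan over the training list. The same 75/25 split with a set-based membership test scales better when pssm_list is large; a sketch with an illustrative helper name:

from random import sample

def split_train_test(items, train_frac=0.75):
    # random training subset, then its complement via an O(1) set lookup
    train = sample(items, int(train_frac * len(items)))
    train_set = set(train)
    test = [item for item in items if item not in train_set]
    return train, test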
Example 6
def main():
    #Get a new model, or load an existing one if possible.
    model, using_existing_model = get_model()
    #Train the model.
    training_datagen = get_image_generator("training")
    validation_datagen = get_image_generator("validation")
    model = train_model(model, using_existing_model, training_datagen,
                        validation_datagen)
    #Plot the training and validation loss for each training epoch.
    plot_loss()
    #Test the model.
    testing_datagen = get_image_generator("testing")
    test_model(model, testing_datagen)
Example 7
def train_or_test(arglist):
    env = make_env(arglist)
    model = set_model(arglist)

    if arglist.model_exist:
        saver = tf.train.Saver()
        saver.restore(model.sess,
                      tf.train.latest_checkpoint(arglist.save_path))
        test_model(env, model, max_episode=100)
    else:
        train_model(env,
                    model,
                    arglist.save_path + arglist.model_name,
                    max_episode=arglist.max_episode)
Example 8
def main():
    
    X, Y = load_dataset("../input/nexraddata/", 'data.npy')
    model = EncoderDecoder(
        2,
        [64, 48], [(3, 3), (3, 3)],
        16,
        (X.shape[2], X.shape[3], X.shape[4]),
        './training_checkpoints'
    )
    # model.restore()
    model.train(X[:700], Y[:700], 400, X[700:800], Y[700:800])

    test_model(model, X, Y)
Example 9
def main():
    
    X, Y = load_dataset("../input/mnistreshape/", 'mnist-reshape.npy')

    # defines the model
    model = VideoPrediction(
        num_layers=5, d_model=64, num_heads=16, dff=128, filter_size=(3, 3),
        image_shape=X.shape[2:-1], pe_input=10, pe_target=10, out_channel=X.shape[-1],
        loss_function='bin_cross'
    )

    # training on first 1000 samples
    model.train(X[:1000, :5], X[:1000, 5:], X, Y, 100, 8)

    test_model(model, X, Y, 8)
Example 10
def run_test_before_finetune():
    log_path = "log_test_before_finetune"

    model = torchvision.models.vgg16_bn(True)

    if torch.cuda.is_available():
        model.cuda()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    acc = 1

    while acc > 0.5:

        with torch.no_grad():
            for param in model.parameters():
                # draw the noise on the parameter's device so this also works on GPU
                param.add_(torch.randn(param.size(), device=param.device) * 0.001)

        config1 = ConfigTestImagenet()

        test_dataloader1 = config1.test_loader_imagenet

        acc = test_model(model, test_dataloader1, log_path, device, False, 224)

    torch.save(model, "modified_model")
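Note: torch.save(model, ...) pickles the whole module object, which ties the checkpoint to the exact class definition it was created from. A sketch of the more portable state_dict variant, reusing model and the file name from the example:

import torch
import torchvision

# save only the parameters and rebuild the architecture at load time
torch.save(model.state_dict(), "modified_model")

reloaded = torchvision.models.vgg16_bn()
reloaded.load_state_dict(torch.load("modified_model"))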
Example 11
def main():
    fix_random_seed(RANDOM_SEED)
    models = [('vgg', vgg16()), ('resnet18', resnet18()),
              ('resnet50', resnet50())]
    remove_parentheses('/home/m_ulyanov/data/splits/PKLot',
                       ['all.txt', 'train.txt', 'test.txt', 'val.txt'])

    for model_name, model in models:
        model = model.to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer_ft = SGD(model.parameters(), lr=0.001, momentum=0.9)
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                               step_size=7,
                                               gamma=0.1)

        # train annotation file, val annotation file, full annotation file, base data dir
        cross_val_sets = [
            (TRAIN_CNRPARK_EXTRA_ANNOTATION, VAL_CNRPARK_EXTRA_ANNOTATION,
             TRAIN_CNRPARK_EXTRA_ANNOTATION, CNRPARK_EXTRA_DATA_DIR),
            (TRAIN_PKLOT_ANNOTATION, VAL_PKLOT_ANNOTATION,
             TRAIN_PKLOT_ANNOTATION, PKLOT_DATA_DIR)
        ]
        for index, (first_set,
                    second_set) in enumerate(permutations(cross_val_sets)):
            x_train, y_train = extract_annotation_file(first_set[0],
                                                       first_set[-1])
            x_val, y_val = extract_annotation_file(first_set[1], first_set[-1])

            plot_name = f'./logs/{model_name}_{index}.png'
            logs_path = f'./logs/{model_name}_{index}.txt'
            trained_model = train_model(model,
                                        criterion,
                                        optimizer_ft,
                                        exp_lr_scheduler,
                                        x_train,
                                        y_train,
                                        x_val,
                                        y_val,
                                        plot_path=plot_name,
                                        save=False,
                                        num_epochs=7,
                                        log_path=logs_path)

            x_test, y_test = extract_annotation_file(second_set[2],
                                                     second_set[-1])
            test_log_path = f'./logs/{model_name}_{index}.csv'
            test_model(trained_model, x_test, y_test, log_path=test_log_path)
Example 12
def erase_masks(fpath):
	x, im = downsize_file(fpath)
	masks = get_masks(x, im)
	if masks.ndim == 3:
		compiled_mask = np.amax(masks, axis=0)
	else:
		compiled_mask = masks
	compiled_mask = expand_masks(compiled_mask, 21) #convolve with a 11 x 11 kernel to expand masks for inpainting
	compiled_mask = np.array([compiled_mask for _ in range(3)])
	compiled_mask = np.moveaxis(compiled_mask, 0, -1)
	#compiled_mask = compiled_mask * 255. / np.amax(compiled_mask)
	compiled_mask = compiled_mask.astype(int)

	print(compiled_mask.shape)
	print(im.shape)
	cv2.imwrite("mask.png", compiled_mask)
	test_model(im, compiled_mask)
Example 13
def main():
    """

    Returns
    -------

    """
    args = parse_cmd_args()

    df_train = pd.read_csv(filepath_or_buffer=args.train, header=None)
    df_test = pd.read_csv(filepath_or_buffer=args.eval, header=None)

    with open(args.config, 'rt') as ipf:
        indices = json.load(ipf)
    id_indices = eval(indices['id_indices'])
    x_indices = eval(indices['x_indices'])
    y_indices = eval(indices['y_indices'])

    X_train = df_train.iloc[:, x_indices].values
    y_train = df_train.iloc[:, y_indices].values
    X_test = df_test.iloc[:, x_indices].values
    y_test = df_test.iloc[:, y_indices].values
    ids_test = df_test.iloc[:, id_indices]

    final_model = train_model(
        X_train,
        y_train,
        model_type='regressor',
        cv_fold=5
    )

    # final_model = train_dropout_nn(
    #     X_train,
    #     y_train,
    #     model_type='regressor',
    #     cv_fold=5
    # )

    predictions = test_model(X_test, y_test, model=final_model)
    output = ids_test.assign(target=y_test, pred=predictions)

    with open(args.output, 'wt') as opf:
        output.to_csv(opf, float_format='%.2f', index=False, header=False)

    # refit the final model to the whole data set
    X_whole = np.concatenate((X_train, X_test))
    y_whole = np.concatenate((y_train, y_test))
    final_model.fit(X_whole, y_whole)

    # write final model to disk
    with open(args.model, 'wb') as opf:
        pickle.dump(final_model, opf)
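Note: eval() on the config strings executes arbitrary code. If id_indices, x_indices and y_indices are stored as plain Python literals such as "[0, 1, 2]" (an assumption about the config format), ast.literal_eval is a safer drop-in:

from ast import literal_eval

# parses literal lists/tuples of ints; it will not evaluate expressions
# such as slice(...) or range(...)
id_indices = literal_eval(indices['id_indices'])
x_indices = literal_eval(indices['x_indices'])
y_indices = literal_eval(indices['y_indices'])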
Example 14
def main(args):
    config = parse_config(args.config)

    out_dir = Path(
        config['training']['save_dir']) / config['training']['save_context']
    out_dir.mkdir(exist_ok=True, parents=True)
    config_out = out_dir / f"{config['training']['save_context']}_config.yaml"
    with config_out.open('w') as wf:
        yaml.dump(config, wf)
    print(f'Training config saved to {config_out}.')

    tb_logdir = out_dir / 'logdir'
    writer = SummaryWriter(log_dir=tb_logdir)

    model = build_model_from_config(config)
    dataloaders, classes = get_data_loaders_from_config(config)
    if config['datasets']['viz']:
        viz_to_tb(dataloaders['train'], writer,
                  config['datasets']['classes']['num_classes'])
    losses = get_train_val_losses()
    optimizer = get_optim_from_config(model.parameters(), config)
    scheduler = get_scheduler_from_config(optimizer, config)

    model, best_acc, best_loss, best_epoch, total_epoch = train_model(
        model, dataloaders, losses, optimizer, scheduler, writer, config)

    weights_dir = out_dir / 'weights'
    weights_dir.mkdir(parents=True, exist_ok=True)
    save_path = weights_dir / f"{config['training']['save_context']}_bestval_loss{best_loss:0.3f}_acc{best_acc:0.3f}_ep{best_epoch}of{total_epoch}.pth"
    torch.save(model.state_dict(), save_path)
    print(f'Best val weights saved to {save_path}')

    conf_mat, report, _, _, _ = test_model(model,
                                           dataloaders['test'],
                                           config,
                                           classes=classes)

    test_dir = out_dir / 'test'
    test_dir.mkdir(exist_ok=True, parents=True)
    test_out = test_dir / f"{config['training']['save_context']}_clfreport.log"
    with test_out.open('w') as wf:
        wf.write(report)

    sn_plot = sn.heatmap(conf_mat,
                         annot=True,
                         fmt='g',
                         xticklabels=classes,
                         yticklabels=classes)
    test_out_cm = test_dir / f"{config['training']['save_context']}_confmat.jpg"
    sn_plot.get_figure().savefig(test_out_cm)
Example 15
def main():

    inp = load_dataset("../input/ucf101bas20/", 'ucf_reshaped_bas_20.npy')
    X = tf.concat([inp[:300, :5], inp[:300, 10:15]], axis=0)
    Y = tf.concat([inp[:300, 5:10], inp[:300, 15:]], axis=0)
    print(X.shape, Y.shape)

    testX = inp[:, :10]
    testY = inp[:, 10:15]
    print(testX.shape, testY.shape)

    # defines the model
    model = VideoPrediction(num_layers=5,
                            d_model=128,
                            num_heads=16,
                            dff=128,
                            filter_size=(3, 3),
                            image_shape=X.shape[2:-1],
                            pe_input=10,
                            pe_target=10,
                            out_channel=X.shape[-1])

    model.train(X, Y, testX, testY, 125, 8)
    test_model(model, testX, testY, 8)
Example 16
def main():
    names, races = load_data(sys.argv[1])
    X = featurize(names)
    y = races

    Xtr, Xte, ytr, yte = train_test_split(
        X, y, test_size=0.33, random_state=RANDOM_STATE
    )
    clf = RandomForestClassifier(
        n_estimators=100, min_samples_split=2, random_state=RANDOM_STATE
    )

    clf.fit(Xtr, ytr)
    serialize_model("model", clf)

    score = test_model(clf, (Xte, yte))
    print(f"Score: {score}")
    top_features = np.argsort(clf.feature_importances_)[::-1][:10]
    print(f"Key feature ids: {top_features}")
Example 17
def test_acc_matches_stored_test_acc():
    results_path = '../results'
    batch_size = 32
    img_class_map = [[0, 3, 4, 5, 6, 7, 8], [1,2]]
    output_size = len(img_class_map)
    norm_params = (126.04022903600975, 29.063149797089494)

    model_manager = ModelsManager(results_path)

    dataset = dataset_real()

    model_type = 'mini'
    model_id = 'test_mini'
    model_params = {'norm_params': norm_params,
                    'output_size': output_size,
                    'lr': 0.01}

    model_manager.new_model(model_type,
                            model_id,
                            **model_params,
                            )

    model_class = model_manager.get_model(model_id)

    input_shape = model_class.input_shape

    iterations_per_epoch=256
    max_epochs=2

    train_model(dataset,
                model_class,
                batch_size,
                iterations_per_epoch,
                max_epochs,
                avg_grad_stop=False,
                )

    epochs, train_accs, test_accs = model_class.session_stats()

    for i, epoch in enumerate(epochs):
        epoch_model = model_class.load_model(epoch)
        test_acc = test_model(dataset, epoch_model)
        print('test_acc:', test_accs[i], test_acc)
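Note: the loop above only prints the stored and recomputed accuracies and never asserts. If the intent is an actual check, the final loop could end with a tolerance comparison; a sketch using pytest.approx, with an arbitrarily chosen tolerance:

import pytest

for i, epoch in enumerate(epochs):
    epoch_model = model_class.load_model(epoch)
    test_acc = test_model(dataset, epoch_model)
    assert test_acc == pytest.approx(test_accs[i], rel=1e-6)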
Example 18
def test_endpoint():
    testFiles = request.files.getlist("testFiles")

    name = request.form['model']

    if not os.path.exists(name):
        os.mkdir(name)

    if not os.path.exists(name + '/test'):
        os.mkdir(name + '/test')

    for i, file in enumerate(testFiles):
        file.save(
            os.path.join(
                name + '/test',
                'test.' + str(i + 1) + '.' + file.mimetype.split('/')[1]))

    SVM_result, MLP_result, MV5_result = test.test_model(name)

    return jsonify({'svm': SVM_result, 'mlp': MLP_result, 'mv5': MV5_result})
Example 19
def getCodeStr(driver):
    # driver = IEdriverbrowser()
    '''
    Read the captcha image from the page and return the recognized string
    :param picPath:
    :return:
    '''
    picDir = os.path.abspath('.') + r'\Utils\captcha_recognition2\data'
    screenPicPath = picDir + r'\screen.png'              # path of the full-page screenshot
    codePicPath = picDir + r'\CSCA.png'     # path of the cropped captcha image
    print(screenPicPath)
    print(codePicPath)
    #driver.maximize_window()
    isget = driver.get_screenshot_as_file(screenPicPath)

    if isget:
        locationStr = 'image'
        element = driver.find_element_by_id(locationStr)

        pagePic = Image.open(screenPicPath)  # load the screenshot
        elementLocation = element.location  # element position
        elementSize = element.size  # element size
        x_start = elementLocation['x']
        y_start = elementLocation['y']
        x_end = x_start + elementSize['width']
        y_end = y_start + elementSize['height']
        elementPic = pagePic.crop((x_start, y_start, x_end, y_end))  # crop the captcha element from the screenshot
        if os.path.exists(codePicPath):
            os.remove(codePicPath)
        elementPic.save(codePicPath) # overwrite the captcha image

        if os.path.exists(screenPicPath):  # clean up the screenshot
            os.remove(screenPicPath)

        # recognize the captcha and return it as a string
        codeStr = test.test_model(use_gpu=False, verification=True, extend_format='png', folder=codePicPath)
        return codeStr
    else:
        print('Failed to capture a screenshot')
Example 20
def main():
    args = parser.parse_args()
    # REPRODUCIBILITY
    torch.manual_seed(0)
    np.random.seed(0)

    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # Retrieve views candidates and right number of views
    if args.case == '1':
        args.vcand = np.load('view_candidates/vcand_case1.npy')
        args.nview = 12
    elif args.case == '2':
        args.vcand = np.load('view_candidates/vcand_case2.npy')
        args.nview = 20
    elif args.case == '3':
        args.vcand = np.load('view_candidates/vcand_case3.npy')
        args.nview = 160

    # Names for the saved checkpoints
    args.fname_best = 'rotationnet{}_model_best{}.pth.tar'.format(args.nview,
                                                                  datetime.now().strftime("%d_%b_%Y_%H_%M_%S"))
    args.fname = 'rotationnet{}_model{}.pth.tar'.format(args.nview, datetime.now().strftime("%d_%b_%Y_%H_%M_%S"))

    logger.debug("Number of view candidates: {}".format(np.shape(args.vcand)[0]))
    logger.debug("Number of views: {}".format(args.nview))

    if torch.cuda.is_available():
        args.device = torch.device('cuda')
    else:
        args.device = torch.device('cpu')
    logger.debug("PyTorch is using {}".format(args.device))

    # The mini batch is used for a single gradient update, so its size needs to be divisible
    # by the number of views, otherwise one or more classifications are incomplete
    if args.batch_size % args.nview != 0:
        logger.error('Batch size should be a multiple of the number of views, {}'.format(args.nview))
        exit(1)

    # Get number of classes
    logger.debug("Number of classes: {}".format(args.num_classes))

    # Create RotationNet model based on the given architecture.
    # The output size is (num_classes + wrong_view class) * the number of views
    model = RotationNet(args.arch, args.pretrained, (args.num_classes + 1) * args.nview, args.feature_extraction,
                        args.depth)

    # Multi GPUs
    if torch.cuda.device_count() > 1:
        logger.debug("Using multiple GPUs")
        model = torch.nn.DataParallel(model)
    # Send model to GPU or keep it to the CPU
    model = model.to(device=args.device)

    # Define loss function (criterion) and optimizer
    # Sending the loss to CUDA is unnecessary because the loss function is not stateful
    # TODO test if it works without sending the loss to the GPU
    criterion = nn.CrossEntropyLoss().to(device=args.device)

    if args.optimizer == "ADAM":
        optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), args.learning_rate,
                         weight_decay=args.weight_decay)
    elif args.optimizer == "ADAGRAD":
        optimizer = Adagrad(filter(lambda p: p.requires_grad, model.parameters()), args.learning_rate,
                            weight_decay=args.weight_decay)
    else:
        # If we use feature extraction (features weights are frozen), we need to keep only differentiable params
        optimizer = SGD(filter(lambda p: p.requires_grad, model.parameters()), args.learning_rate,
                        momentum=args.momentum, weight_decay=args.weight_decay)

    # https://stackoverflow.com/questions/58961768/set-torch-backends-cudnn-benchmark-true-or-not
    # gives some boost when the network does not change
    # not useful here because the cluster does not have cuDNN
    # cudnn.benchmark = True

    logger.info("Model args: {}".format(args))

    if args.train_type == 'k-fold':
        logger.debug("K-fold training")
        train_k_fold(model, criterion, optimizer, args)
    elif args.train_type == 'hold-out':
        logger.debug("Hold-out training")
        train_hold_out(model, criterion, optimizer, args)
    elif args.train_type == 'full':
        logger.debug("Full training")
        train_all(model, criterion, optimizer, args)
    elif args.train_type == 'evaluate':
        logger.debug("Start evaluation on test set")
        test_model(model, criterion, args)
    elif args.train_type == 'aligned':
        logger.debug("Hold-out training on aligned set")
        train_hold_out_aligned(model, criterion,optimizer, args)
    elif args.train_type == "test":
        logger.debug("Start real time test")
        threshold_evaluation(model, args)
Example 21
def mode_def(mode, device, PATH, class_names):

    #Loading transformed dataset
    image_datasets, dataloaders, dataset_sizes = data_loader()

    if mode == 'train' or mode == 'test':

        if config.loaded_model == None:
            # Loading pretrained Resnet151 model
            model = transfer_learn()

        # If a loaded model exists
        elif config.model_ft_mode == None:
            model = config.loaded_model

        if mode == 'train':
            config.model_ft_mode, config.optimizer_ft_mode = train(
                dataloaders, dataset_sizes, model, device)
            print(
                "Training completed. Do you want to save the model now? Y to save, N to skip"
            )
            choice = str(input("Enter your Choice: Y or N\t "))

            if choice in ('Y', 'y'):
                #Mapping dataset classes from our dataset to model
                model.category_index = image_datasets['train'].class_to_idx
                # Saving Model
                save_model(PATH, config.model_ft_mode,
                           config.optimizer_ft_mode)
                config.flag = 1
                print("Model Saved")

        elif mode == 'test':

            if config.model_ft_mode:
                test_model(config.model_ft_mode, dataloaders, device)
                print("Testing is complete")

            elif config.loaded_model:
                test_model(config.loaded_model, dataloaders, device)
                print("Testing is complete")

    elif mode == 'save':

        if config.flag == 1:
            print("The model is already saved... Do you want to save again?")
            choice = str(
                input("Enter your choice Y to save again or N to skip"))
            if choice in ('Y', 'y'):
                model.category_index = image_datasets['train'].class_to_idx
                save_model(PATH, config.model_ft_mode,
                           config.optimizer_ft_mode)
                config.flag = 1
                print("Model Saved")

        elif path.exists(PATH):
            print("A saved model already exists")

        else:
            model.category_index = image_datasets['train'].class_to_idx
            save_model(PATH, config.model_ft_mode, config.optimizer_ft_mode)
            config.flag = 1
            print("Model Saved")

    elif mode == 'load':
        #Loading the saved model
        config.loaded_model, config.category_index = load_checkpoint(
            PATH, device)
        if config.loaded_model == None and config.category_index == None:
            print("Model not found, so cannot be loaded")
        else:
            #Fetching classification
            config.classifications = {
                values: key
                for key, values in config.category_index.items()
            }  #swapping key and values for the classes
            print("Loading model is complete")

    elif mode == 'inference':
        # map each required object to its current value so missing ones are reported
        errorinf = {
            "loaded_model": config.loaded_model,
            "class_names": class_names,
            "config.classifications": config.classifications
        }
        for error in errorinf.keys():
            if errorinf[error] is None:
                print(error + " Not Declared properly")

        if ((config.loaded_model != None) and (class_names != None)
                and (config.classifications != None)):
            inference(config.loaded_model, class_names, config.classifications,
                      device)

    else:
        print("Wrong Choice")
Example 22
def main():
    global args
    global mse_policy
    parser = define_args()
    args = parser.parse_args()
    if not args.end_to_end:
        assert args.pretrained == False
    if args.clas:
        assert args.nclasses == 4
    if args.val_batch_size is None:
        args.val_batch_size = args.batch_size
    

    # Check GPU availability
    if not args.no_cuda and not torch.cuda.is_available():
        raise Exception("No gpu available for usage")
    torch.backends.cudnn.benchmark = args.cudnn

    # Define save path
    save_id = 'Mod_{}_opt_{}_loss_{}_lr_{}_batch_{}_end2end_{}_chol_{}_lanes_{}_pretrain{}_clas{}_mask{}_flip_on{}_activation_{}' \
            .format(args.mod, args.optimizer,
                    args.loss_policy,
                    args.learning_rate,
                    args.batch_size,
                    args.end_to_end,
                    args.use_cholesky,
                    args.nclasses,
                    args.pretrained,
                    args.clas,
                    args.mask_percentage,
                    args.flip_on,
                    args.activation_layer)
    

    train_loader, valid_loader, valid_idx = get_loader(args.num_train,
                                                       args.json_file, 'Labels/lanes_ordered.json',
                                                       args.image_dir, 
                                                       args.gt_dir,
                                                       args.flip_on, args.batch_size, args.val_batch_size,
                                                       shuffle=True, num_workers=args.nworkers,
                                                       end_to_end=args.end_to_end,
                                                       resize=args.resize,
                                                       nclasses=args.nclasses,
                                                       split_percentage=args.split_percentage)

    test_loader = get_testloader(args.test_dir, args.val_batch_size, args.nworkers)

    # Define network
    model = Net(args)
    define_init_weights(model, args.weight_init)

    if not args.no_cuda:
        # Load model on gpu before passing params to optimizer
        model = model.cuda()

    # Define optimizer and scheduler
    optimizer = define_optim(args.optimizer, model.parameters(),
                             args.learning_rate, args.weight_decay)
    scheduler = define_scheduler(optimizer, args)


    # Define loss criteria for multiple tasks
    criterion, criterion_seg = define_loss_crit(args)
    criterion_horizon = nn.BCEWithLogitsLoss().cuda()
    criterion_line_class = nn.BCEWithLogitsLoss().cuda()

    # Name
    global crit_string
    if args.loss_policy == 'area' and args.end_to_end:
        crit_string = 'AREA**2' 
    elif args.loss_policy == 'backproject' and args.end_to_end:
        crit_string = 'MSE' 
    else:
        crit_string = 'ENTROPY' 
    if args.clas:
        crit_string = 'TOT LOSS' 

    # Logging setup
    best_epoch = 0
    lowest_loss = np.inf
    losses_valid = np.inf
    highest_score = 0
    log_file_name = 'log_train_start_0.txt'
    args.save_path = os.path.join(args.save_path, save_id)
    mkdir_if_missing(args.save_path)
    mkdir_if_missing(os.path.join(args.save_path, 'example/'))
    mkdir_if_missing(os.path.join(args.save_path, 'example/train'))
    mkdir_if_missing(os.path.join(args.save_path, 'example/valid'))
    mkdir_if_missing(os.path.join(args.save_path, 'example/pretrain'))
    mkdir_if_missing(os.path.join(args.save_path, 'example/testset'))

    # Computes the file with lane data of the validation set
    validation_set_path = os.path.join(args.save_path , 'validation_set.json')
    load_valid_set_file_all(valid_idx, validation_set_path, args.image_dir) 
    global valid_set_labels
    global val_set_path
    global ls_result_path
    valid_set_labels = [json.loads(line) for line in open(validation_set_path).readlines()]
    val_set_path = os.path.join(args.save_path, 'validation_set_dst.json')
    ls_result_path = os.path.join(args.save_path, 'ls_result.json')

    # Train, evaluate or resume
    args.resume = first_run(args.save_path)
    if args.resume and not args.test_mode and not args.evaluate:
        path = os.path.join(args.save_path, 'checkpoint_model_epoch_{}.pth.tar'.format(
            int(args.resume)))
        if os.path.isfile(path):
            log_file_name = 'log_train_start_{}.txt'.format(args.resume)
            # Redirect stdout
            sys.stdout = Logger(os.path.join(args.save_path, log_file_name))
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(path)
            args.start_epoch = checkpoint['epoch']
            lowest_loss = checkpoint['loss']
            best_epoch = checkpoint['best epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            log_file_name = 'log_train_start_0.txt'
            # Redirect stdout
            sys.stdout = Logger(os.path.join(args.save_path, log_file_name))
            print("=> no checkpoint found at '{}'".format(path))

    # Only evaluate
    elif args.evaluate:
        skip = get_flags()
        files = glob.glob(os.path.join(args.save_path, 'model_best*'))
        if len(files) == 0:
            print('No checkpoint found!')
        else:
            best_file_name = files[0]
            if os.path.isfile(best_file_name):
                sys.stdout = Logger(os.path.join(args.save_path, 'Evaluate.txt'))
                print("=> loading checkpoint '{}'".format(best_file_name))
                checkpoint = torch.load(best_file_name)
                model.load_state_dict(checkpoint['state_dict'])
            else:
                print("=> no checkpoint found at '{}'".format(best_file_name))

        # validate(valid_loader, model, criterion, criterion_seg, 
                # criterion_line_class, criterion_horizon)

        if args.clas:
            test_model(test_loader, model, 
                       criterion, 
                       criterion_seg, 
                       criterion_line_class, 
                       criterion_horizon, args)
        return

    # Start training from clean slate
    else:
        # Redirect stdout
        sys.stdout = Logger(os.path.join(args.save_path, log_file_name))

    # INIT MODEL
    print(40*"="+"\nArgs:{}\n".format(args)+40*"=")
    print("Init model: '{}'".format(args.mod))
    print("Number of parameters in model {} is {:.3f}M".format(
        args.mod.upper(), sum(tensor.numel() for tensor in model.parameters())/1e6))

    # Define activation for classification branch
    if args.clas:
        Sigm = nn.Sigmoid()

    # Start training and validation for nepochs
    for epoch in range(args.start_epoch, args.nepochs):
        print("\n => Start train set for EPOCH {}".format(epoch + 1))
        print("Saving to: ", args.save_path)
        # Adjust learning rate
        if args.lr_policy is not None and args.lr_policy != 'plateau':
            scheduler.step()
            lr = optimizer.param_groups[0]['lr']
            print('lr is set to {}'.format(lr))

        skip = get_flags(epoch)

        # Define container objects to keep track of multiple losses/metrics
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        rmse_metric = AverageMeter()
        losses_skip = AverageMeter()

        # Specify operation mode
        model.train()

        # compute timing
        end = time.time()

        # Start training loop
        for i, (input, gt, lanes, idx, gt_line, gt_horizon, valid_points) in tqdm(enumerate(train_loader)):
            # Time dataloader
            data_time.update(time.time() - end)

            # Reset coordinates
            x_cal0, x_cal1, x_cal2, x_cal3 = [None]*4

            # Put inputs on gpu if possible
            if not args.no_cuda:
                input, lanes = input.cuda(), lanes.cuda()
                valid_points = valid_points.cuda()
                gt = gt.cuda().squeeze(1)
            assert lanes.size(1) == 4
            gt0, gt1, gt2, gt3 = lanes[:, 0, :], lanes[:, 1, :], lanes[:, 2, :], lanes[:, 3, :]

            # Skip LSQ layer to make sure matrix cannot be singular
            # TODO check if this is really necessary
            if skip:
                output_net = model(input, gt_line, args.end_to_end, early_return=True)
                loss = criterion_seg(output_net, gt)
                # Setup backward pass
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses_skip.update(loss.item(), input.size(0))
                # Plot
                if (i + 1) % args.save_freq == 0:
                    img = input[0].permute(1, 2, 0).data.cpu().numpy()
                    gt_orig = gt[0].data.cpu().numpy()
                    _, out = torch.max(output_net[0], dim=0)
                    out = out.data.cpu().numpy()
                    img = np.clip(img, 0, 1)
                    fig = plt.figure()
                    ax1 = fig.add_subplot(311)
                    ax2 = fig.add_subplot(312)
                    ax3 = fig.add_subplot(313)
                    ax1.imshow(img)
                    ax2.imshow(gt_orig)
                    ax3.imshow(out)
                    fig.savefig(args.save_path + '/example/pretrain/idx-{}_batch-{}'.format(0, i))
                    plt.clf()
                    plt.close(fig)

                # Skip rest
                continue

            # Run model
            try:
                beta0, beta1, beta2, beta3, weightmap_zeros, \
                output_net, outputs_line, outputs_horizon, output_seg = model(input, gt_line, args.end_to_end, gt=gt)
            except RuntimeError as e:
                print("Batch with idx {} skipped due to singular matrix".format(idx.numpy()))
                print(e)
                continue

            # Compute losses on parameters or on segmentation
            if args.end_to_end:
                loss_left, x_cal0 = criterion(beta0, gt0, valid_points[:, 0])
                loss_right, x_cal1 = criterion(beta1, gt1, valid_points[:, 1])
                if args.nclasses > 3:
                    # add losses of further lane lines
                    loss_left1, x_cal2 = criterion(beta2, gt2, valid_points[:, 2])
                    loss_right1, x_cal3 = criterion(beta3, gt3, valid_points[:, 3])
                    loss_left += loss_left1
                    loss_right += loss_right1
                # average loss over lanes
                loss = (loss_left + loss_right) / args.nclasses
            else:
                loss = criterion_seg(output_net, gt)
                with torch.no_grad():
                    loss_left, x_cal0 = criterion(beta0, gt0, valid_points[:, 0])
                    loss_right, x_cal1 = criterion(beta1, gt1, valid_points[:, 1])
                    if args.nclasses > 3:
                        # add losses of further lane lines
                        loss_left1, x_cal2 = criterion(beta2, gt2, valid_points[:, 2])
                        loss_right1, x_cal3 = criterion(beta3, gt3, valid_points[:, 3])
                        loss_left += loss_left1
                        loss_right += loss_right1
                    loss_metric = (loss_left + loss_right) / args.nclasses
                    rmse_metric.update(loss_metric.item(), input.size(0))

            # Horizon task & Line classification task
            if args.clas:
                gt_horizon, gt_line = gt_horizon.cuda(), \
                                      gt_line.cuda()
                loss_horizon = criterion_horizon(outputs_horizon, gt_horizon).double()
                loss_line = criterion_line_class(outputs_line, gt_line).double()
                loss = loss*args.weight_fit + (loss_line + loss_horizon)*args.weight_class
            else:
                line_pred = gt_line

            # Update loss
            losses.update(loss.item(), input.size(0))

            # Clip gradients (useful for instabilities or mistakes in the ground truth)
            if args.clip_grad_norm != 0:
                nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)

            # Setup backward pass
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Time the training iteration
            batch_time.update(time.time() - end)
            end = time.time()

            # Print info
            if (i + 1) % args.print_freq == 0:
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.8f} ({loss.avg:.8f})\t'
                      'rmse_metric {rmse.val:.8f} ({rmse.avg:.8f})'.format(
                       epoch+1, i+1, len(train_loader), batch_time=batch_time,
                       data_time=data_time, loss=losses, rmse=rmse_metric))

            # Plot weightmap and curves
            if (i + 1) % args.save_freq == 0:
                save_weightmap('train', weightmap_zeros, x_cal0, x_cal1, x_cal2, x_cal3,
                               gt0, gt1, gt2, gt3, gt, 0, i, input,
                               args.no_ortho, args.resize, args.save_path, args.nclasses, args.no_mapping)

        print("===> Average {}-loss on training set is {:.8f}".format(crit_string, losses.avg))
        if not skip:
            losses_valid, acc_hor_tot, acc_line_tot, rmse_metric_valid = validate(valid_loader,
                                                                                  model, criterion,
                                                                                  criterion_seg, 
                                                                                  criterion_line_class,
                                                                                  criterion_horizon,
                                                                                  epoch)
            print("===> Average {}-loss on validation set is {:.8f}".format(crit_string, losses_valid))
        else:
            print("===> Average segmentation-loss on training set is {:.8f}".format(losses_skip.avg))
        if not args.end_to_end and not skip:
            print("===> Average rmse on training set is {:.8f}".format(rmse_metric.avg))
            print("===> Average rmse on validation set is {:.8f}".format(rmse_metric_valid))

        if args.clas and len(valid_loader) != 0 :
            print("===> Average HORIZON ACC on val is {:.8}".format(acc_hor_tot))
            print("===> Average LINE ACC on val is {:.8}".format(acc_line_tot))

        print("===> Last best {}-loss was {:.8f} in epoch {}".format(
            crit_string, lowest_loss, best_epoch))

        total_score = losses_valid

        # TODO get acc
        if args.clas:
            metric = test_model(test_loader, model, 
                       criterion, 
                       criterion_seg, 
                       criterion_line_class, 
                       criterion_horizon, args)
            total_score = metric


        # Adjust learning_rate if loss plateaued
        if args.lr_policy == 'plateau':
            scheduler.step(total_score)
            lr = optimizer.param_groups[0]['lr']
            print('LR plateaued, hence is set to {}'.format(lr))

        # File to keep latest epoch
        with open(os.path.join(args.save_path, 'first_run.txt'), 'w') as f:
            f.write(str(epoch))
        # Save model
        to_save = False
        if total_score > highest_score:
            to_save = True
            best_epoch = epoch+1
            highest_score = total_score
        save_checkpoint({
            'epoch': epoch + 1,
            'best epoch': best_epoch,
            'arch': args.mod,
            'state_dict': model.state_dict(),
            'loss': lowest_loss,
            'optimizer': optimizer.state_dict()}, to_save, epoch)
Example 23
def main():
    '''
    Run as: (python ./part2/main.py 2>&1) | tee /home/hdd/logs/openimg/$(date +'%y%m%d%H%M%S').txt
    '''
    # save the experiment time
    start_time = strftime("%y%m%d%H%M%S", localtime())

    # checks and logs
    pwd = os.getcwd()
    assert os.getcwd().endswith('VehicleRecognition')
    assert os.path.exists('./part2/experiments/')

    print(f'Working dir: {pwd}')
    # fix the random seed
    seed = 13
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # paths to dataset
    train_data_dir = '/home/nvme/data/openimg/train/train/'
    test_data_dir = '/home/nvme/data/openimg/test/testset/'

    # Number of classes in the dataset
    num_classes = len(os.listdir(train_data_dir))

    # define the paths
    save_pred_path = None
    save_pred_path = f'./part2/experiments/{start_time}.csv'
    save_best_model_path = f'/home/hdd/logs/openimg/{start_time}/best_model.pt'

    # back up the working directory
    workdir_copy(pwd, os.path.split(save_best_model_path)[0])

    # resnet, alexnet, vgg, squeezenet, densenet, inception
    # 'resnext50_32x4d', 'resnext101_32x8d', 'resnext101_32x48d_wsl', 'resnext101_32x32d_wsl'
    # 'resnext101_32x16d_wsl
    model_name = "resnext101_32x16d_wsl"
    # Flag for feature extracting. When False, we finetune the whole model,
    #   when True we only update the reshaped layer params
    feature_extract = False
    # hyper parameters
    device = torch.device("cuda:0")
    valid_size = 0.10
    if model_name.startswith('resnext'):
        lr = 5e-7
        # batch_size = 32
        batch_size = 8
    elif model_name.startswith('densenet'):
        lr = 1e-5
        batch_size = 32
    else:
        lr = 1e-4
        batch_size = 64
    num_workers = 16
    pin_memory = True
    weighted_train_sampler = False
    weighted_loss = False
    num_epochs = 20

    # preventing pytorch from allocating some memory on default GPU (0)
    torch.cuda.set_device(device)

    # Initialize the model for this run
    model_ft, input_size = initialize_model(
        model_name, num_classes, feature_extract, use_pretrained=True)

    # Data augmentation and normalization for training
    # Just normalization for validation
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    data_transforms = {
        'train': transforms.Compose([
            transforms.Resize(input_size),
            transforms.RandomCrop(input_size),
            transforms.RandomHorizontalFlip(),
            ImgAugTransform(input_size, 0.25),
            transforms.ToPILImage(),
            transforms.ToTensor(),
            transforms.Normalize(means, stds),
        ]),
        'valid': transforms.Compose([
            transforms.Resize(input_size),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize(means, stds)
        ]),
    }

    train_loader, valid_loader = get_train_valid_loader(
        train_data_dir, batch_size, data_transforms, seed, weighted_train_sampler,
        valid_size=valid_size, shuffle=True, show_sample=True, num_workers=num_workers,
        pin_memory=pin_memory
    )

    test_loader = get_test_loader(
        test_data_dir, batch_size, data_transforms, num_workers=num_workers, pin_memory=pin_memory)

    dataloaders_dict = {
        'train': train_loader,
        'valid': valid_loader,
        'test': test_loader
    }

    # Send the model to GPU
    model_ft = model_ft.to(device)

    # Gather the parameters to be optimized/updated in this run. If we are
    #  finetuning we will be updating all parameters. However, if we are
    #  doing feature extract method, we will only update the parameters
    #  that we have just initialized, i.e. the parameters whose requires_grad
    #  is True.
    params_to_update = model_ft.parameters()
    print("Params to learn:")
    if feature_extract:
        params_to_update = []
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)
    else:
        for name, param in model_ft.named_parameters():
            if param.requires_grad:
                print("\t", name)

    # Observe that all parameters are being optimized
    optimizer_ft = optim.Adam(params_to_update, lr=lr)

    # Setup the loss fxn
    if weighted_loss:
        print('Weighted Loss')
        # {0: 0.010101, 1: 0.006622, 2: 0.0008244, 3: 0.00015335, 4: 0.0006253, 5: 0.00019665,
        # 6: 0.02631, 7: 0.00403, 8: 0.001996, 9: 0.01818, 10: 0.0004466, 11: 0.008771, 12: 0.01087,
        # 13: 0.006493, 14: 0.0017, 15: 0.000656, 16: 0.001200}
        cls_to_weight = train_loader.dataset.cls_to_weight
        weights = torch.FloatTensor([cls_to_weight[c] for c in range(num_classes)]).to(device)
    else:
        weights = torch.FloatTensor([1.0 for c in range(num_classes)]).to(device)

    criterion = nn.CrossEntropyLoss(weights)

    # print some things here so it will be seen in terminal for longer time
    print(f'Timestep: {start_time}')
    print(f'using model: {model_name}')
    print(f'Using optimizer: {optimizer_ft}')
    print(f'Device {device}')
    print(f'Batchsize: {batch_size}')
    print(f'Transforms: {data_transforms}')

    # Train and evaluate
    model_ft, hist = train_model(
        model_ft, dataloaders_dict, criterion, optimizer_ft, device, save_best_model_path,
        num_epochs=num_epochs, is_inception=(model_name == "inception")
    )

    # do test inference
    if save_pred_path is not None:
        test_model(model_ft, dataloaders_dict, device, save_pred_path,
                   is_inception=(model_name == "inception"))
Example 24
print("Compiling Data")
malaria = MalariaData()
dataloaders = malaria.compile_dataloaders(img_size=IMG_SIZE,
                                          batch_size=BATCH_SIZE,
                                          use_grayscale=USE_GRAYSCALE)

print("Starting Training")

trained_model, val_acc_history = train_model(net,
                                             dataloaders,
                                             loss_function,
                                             optimizer,
                                             device=device,
                                             num_epochs=EPOCHS)

loss, acc = test_model(trained_model, dataloaders["test"], optimizer,
                       loss_function, device)

torch.save(
    trained_model.state_dict(), "trained_networks/{}.pt".format(''.join(
        random.choices(string.ascii_uppercase + string.digits, k=10))))

# "w+" would truncate best.json before it can be read, so read the previous
# best first and only overwrite it when the new accuracy is higher
try:
    with open("trained_networks/best.json", encoding='utf-8') as f:
        f_json = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
    f_json = {"acc": 0}
if acc > f_json["acc"]:
    with open("trained_networks/best.json", "w", encoding='utf-8') as f:
        json.dump({"acc": acc.item(), "model_name": MODEL_NAME}, f)
Example 25
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated


if __name__ == '__main__':
    # Get a batch of training data
    inputs, classes = next(iter(dataloaders['train']))
    out = torchvision.utils.make_grid(inputs)
    imshow(out)
    print(classes)

    # Define the model
    model_ft = FCN()
    model_ft = model_ft.to(device)
    train_model(model_ft,
                torch.nn.CrossEntropyLoss(),
                torch.optim.SGD(params=model_ft.parameters(),
                                lr=0.0001,
                                momentum=0.9),
                num_epochs=2,
                dataloaders=dataloaders,
                device=device,
                dataset_sizes=dataset_sizes)
    test_model(model_ft, dataloaders, device, dataset_sizes)
Example 26
            # validate
            true_ys, pred_ys = predict(model, valid_loader, device)
            # scheduler
            if scheduler is not None:
                scheduler.step()

            # remember stuff
            epoch_valid_loss = loss_function_valid(pred_ys, true_ys)
            train_loss.append(cumulative_epoch_train_loss / len(train_dataset))
            valid_loss.append(epoch_valid_loss)

            logger_wrapper.logger.info(f'Epoch: {epoch}, train loss: {train_loss[-1]}, valid loss: {epoch_valid_loss}')

            if epoch_valid_loss < min_valid_loss:
                logger_wrapper.logger.info("Saving model")
                torch.save(model.state_dict(), best_model_path)
                min_valid_loss = epoch_valid_loss

        save_history(train_loss, valid_loss, fold_subdirectory)

        # testing on the test set
        # load the best version of the model, then repack data and run the test function
        model.load_state_dict(torch.load(best_model_path))
        model.eval()  # set dropout and batch normalization layers to evaluation mode before running inference
        # train gets new loader without shuffling so the order of smiles is OK # FIXME this is not ideal
        data = ((DataLoader(train_dataset, batch_size=model_config[optimizer_section]['batch_size']), train_smiles),
                (valid_loader, valid_smiles), (test_loader, test_smiles))
        test_model(model, data, device, fold_subdirectory,
                   calculate_parity=data_config[utils_section]["calculate_parity"],
                   calculate_rocauc=data_config[utils_section]["calculate_rocauc"])
Example 27
import record
import train
import test

print("enter your choice \n1. New User \n2. Existing User")
ch = input()

if ch == "1":
    record.face_record()
    train.train_model()
    mar = input("Want to mark your attendance? (y/n) ")
    if mar == "y" or mar == "Y":
        test.test_model()
    else:
        exit()

elif ch == "2":
    test.test_model()
Example 28
def main():
    map_name = 'DefeatRoaches'
    envs_num = 8
    max_windows = 1
    total_updates = -1
    env_args = dict(
        map_name=map_name,
        battle_net_map=False,
        players=[sc2_env.Agent(sc2_env.Race.terran)],
        agent_interface_format=sc2_env.parse_agent_interface_format(
            feature_screen=32,
            feature_minimap=32,
            rgb_screen=None,
            rgb_minimap=None,
            action_space=None,
            use_feature_units=False,
            use_raw_units=False),
        step_mul=8,
        game_steps_per_episode=None,
        disable_fog=False,
        visualize=False)
    vis_env_args = env_args.copy()
    vis_env_args['visualize'] = True
    num_vis = min(envs_num, max_windows)
    env_fns = [partial(make_sc2env, **vis_env_args)] * num_vis
    num_no_vis = envs_num - num_vis
    if num_no_vis > 0:
        env_fns.extend([partial(make_sc2env, **env_args)] * num_no_vis)
    envs = SubprocVecEnv(env_fns)
    # a random agent implementation, kept for debugging
    '''agents=[]
    for i in range(envs_num):
        agent=RandomAgent()
        agents.append(agent)'''
    '''observation_spec = envs.observation_spec()
    action_spec = envs.action_spec()
    processor = pro(observation_spec)
    for agent,obs_spec,act_spec in zip(agents,observation_spec,action_spec):
        agent.setup(obs_spec[0],act_spec[0])
    try:
        while True:
            num_frames=0
            timesteps= envs.reset()
            for a in agents:
                a.reset()
            while True:
                num_frames+=1
                last_timesteps=timesteps
                actions= [agent.step(timestep) for agent,timestep in zip(agents,timesteps)]
                timesteps=envs.step(actions)
                obs=processor.preprocess_obs(timesteps)
                a=1
    except KeyboardInterrupt:
        pass'''
    while True:
        test_mark = 0
        better = 0
        agent = PPO(envs)
        agent.reset()
        # agent.net.load_state_dict(torch.load('./save/episode311_score36.2.pkl'))
        #try:
        while True:
            agent.train()
            if agent.sum_episode % 120 < 60:
                test_mark = 0
            if agent.sum_episode % 120 >= 60 and not test_mark:
                # print("###### I'm in!")
                test_mark = 1
                mean_score, _ = test_model(agent)
                if mean_score > 36 + better:
                    better += 1
                    if better > 70:
                        better = 70
                    torch.save(
                        agent.net.state_dict(),
                        './save/episode' + str(agent.sum_episode) + '_score' +
                        str(mean_score) + '.pkl')
                if mean_score < 20 + 0.01 * agent.sum_episode:
                    print("############################\n\n\n")
                    break

    #except :
    #print(agent.last_obs['available_actions'])

    envs.close()
Example 29
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    print('X_train shape:', X_train.shape)
    # np_utils.to_categorical converts integer labels to one-hot encodings,
    # turning the label vector into a matrix here
    Y_train = np_utils.to_categorical(y_train, 40)
    Y_val = np_utils.to_categorical(y_val, 40)
    Y_test = np_utils.to_categorical(y_test, 40)

    model = model.cnn_model()
    # train the model
    train.train_model(model, X_train, Y_train, X_val, Y_val, epochs)
    # test the model
    score = test.test_model(model, X_test, Y_test)
    print(score)
    # load the trained model weights
    model.load_weights('model_weights.h5')
    # compute the predicted classes
    classes = model.predict_classes(X_test, verbose=0)
    # compute the accuracy
    test_accuracy = np.mean(np.equal(y_test, classes))
    print("last accuracy:", test_accuracy)
    error_num = 0
    for i in range(0, 40):
        if y_test[i] != classes[i]:
            error_num += 1
            print(y_test[i], 'was misclassified as', classes[i])
    print("In total " + str(error_num) + " images were misclassified")
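Note: model.predict_classes was removed in recent Keras releases. On current versions the same class indices can be recovered from predict, assuming a softmax output layer:

import numpy as np

# argmax over the class axis replaces the removed predict_classes helper
classes = np.argmax(model.predict(X_test, verbose=0), axis=-1)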
Example 30
import sys
from train import create_training_data, load_training_data, train_model, load_trained_model
from model import create_model
from test import create_testing_data, load_testing_data, test_model

# Why the fudge doesn't python have pattern matching

if len(sys.argv) == 1:
    # sys.argv always contains the script name, so length 1 means no argument was given
    print("Please provide an argument that relates to one of the methods.")
    sys.exit()
elif len(sys.argv) > 2:
    print("Please provide one argument.")
    sys.exit()

if sys.argv[1] == "create_training_data":
    create_training_data()
elif sys.argv[1] == "create_testing_data":
    create_testing_data()
elif sys.argv[1] == "train_new_model":
    training_data = load_training_data()
    train_model(create_model(training_data), training_data, 2)
elif sys.argv[1] == "train_old_model":
    training_data = load_training_data()
    train_model(load_trained_model(), training_data, 2)
elif sys.argv[1] == "test_model":
    testing_data = load_testing_data()
    test_model(load_trained_model(), testing_data)
else:
    print("Your argument doesn't match any available method.")
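Note: regarding the comment about pattern matching, Python 3.10+ can express this dispatch with match/case and fold in the argument-count checks; a sketch assuming the same imports as above:

match sys.argv[1:]:
    case []:
        print("Please provide an argument that relates to one of the methods.")
    case [_, _, *_]:
        print("Please provide one argument.")
    case ["create_training_data"]:
        create_training_data()
    case ["create_testing_data"]:
        create_testing_data()
    case ["train_new_model"]:
        training_data = load_training_data()
        train_model(create_model(training_data), training_data, 2)
    case ["train_old_model"]:
        training_data = load_training_data()
        train_model(load_trained_model(), training_data, 2)
    case ["test_model"]:
        testing_data = load_testing_data()
        test_model(load_trained_model(), testing_data)
    case _:
        print("Your argument doesn't match any available method.")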