Example #1
def extract_list():

    # Load options
    parser = argparse.ArgumentParser(description='Attribute Learner')
    parser.add_argument('--config', type=str, help='Path to config .opt file. Leave blank if loading from opts.py')
    parser.add_argument('--pth', type=str, help='Path to model checkpoint. Leave blank if testing bestmodel')
    parser.add_argument('--input_list', type=str, help='Path to list with image paths')
    parser.add_argument('--output_list', type=str, help='Path to list where to store results')
    conf = parser.parse_args()

    opt = torch.load(conf.config) if conf.config else get_opts()
    opt.ngpu = 1
    opt.batch_size = 16
    print('Loading model ...')
    M = Model(opt)
    checkpoint = torch.load(conf.pth)

    # Strip the 'module.' prefix that nn.DataParallel adds to parameter names;
    # a raw state_dict (no 'state_dict' key) is loaded unchanged.
    if 'state_dict' in checkpoint:
        checkpoint = {key.replace('module.', ''): value
                      for key, value in checkpoint['state_dict'].items()}

    M.model.load_state_dict(checkpoint)
    M.model.eval()

    test_loader = datasets.generate_loader(opt, 'test', conf.input_list)

    torch.set_grad_enabled(False)
    with open(conf.output_list, 'w') as out_f:
        for batch_idx, (data, target) in tqdm(enumerate(test_loader)):
            # print(f'Extracting batch #{batch_idx} ...')
            data = data.to(M.device)
            output = M.model(data)
            output = torch.cat(output, 1).detach().cpu().numpy()
            log_str = '\n'.join(','.join(map(str, row)) for row in output) + '\n'
            out_f.write(log_str)

    print('Extracting done!')
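
The fallback get_opts() comes from the project's opts.py and is not shown. A minimal sketch of what it might return, assuming only the fields this snippet and the training entry point below actually read (the values are illustrative placeholders, not the project's real defaults):

import argparse

def get_opts():
    # Hypothetical defaults; ngpu and batch_size are overwritten above,
    # manual_seed and out_path are read by the training entry point below.
    opt = argparse.Namespace()
    opt.ngpu = 1
    opt.batch_size = 16
    opt.manual_seed = 42
    opt.out_path = './output'
    return opt
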
Example #2

def main():

    # Load options
    parser = argparse.ArgumentParser(description='Attribute Learner')
    parser.add_argument(
        '--config',
        type=str,
        help='Path to config .opt file. Leave blank if loading from opts.py')

    conf = parser.parse_args()
    opt = torch.load(conf.config) if conf.config else get_opts()

    print('=== Options ===')
    for k, v in vars(opt).items():
        print(k, ':', v)

    # Fix seed
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)
    torch.cuda.manual_seed_all(opt.manual_seed)
    cudnn.benchmark = True

    # Create working directories
    try:
        os.makedirs(opt.out_path)
        os.makedirs(os.path.join(opt.out_path, 'checkpoints'))
        os.makedirs(os.path.join(opt.out_path, 'log_files'))
        print('Directory {} was successfully created.'.format(opt.out_path))

    except OSError:
        print('Directory {} already exists.'.format(opt.out_path))

    # Training
    M = Model(opt)
    M.train()
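
Since main() loads its options with torch.load(conf.config) and then walks vars(opt), any picklable object with named attributes will do. A sketch of producing a compatible .opt file; option names beyond manual_seed and out_path would depend on the project's Model class:

import argparse
import torch

# Save a Namespace that main() can later load with --config run1.opt.
opt = argparse.Namespace(manual_seed=42, out_path='./experiments/run1')
torch.save(opt, 'run1.opt')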
Example #3
def main(config):
    # device
    device = torch.device("cuda" if config.cuda else "cpu")

    # model
    if config.model == 'VAE':
        model = VAE().to(device)
    else:
        raise ValueError('Unsupported model: {}'.format(config.model))
    model = Model(model, config)

    # data
    data = MNISTDataModule(config)
    data.prepare_data()
    data.setup()
    train_loader = data.train_dataloader()
    val_loader = data.val_dataloader()
    test_loader = data.test_dataloader()

    # samples, _ = next(iter(test_loader))
    # samples = samples[:8, :]

    # logger
    wandb_logger = WandbLogger(project=config.project, name=config.model)

    # trainer
    trainer = pl.Trainer(
        logger=wandb_logger,  # W&B integration
        log_every_n_steps=10,  # set the logging frequency
        gpus=-1,  # use all GPUs
        max_epochs=config.epochs,  # number of epochs
        deterministic=True,  # keep it deterministic
        #callbacks=[ImagePredictionLogger(samples)]
    )

    # fit the model
    trainer.fit(model, train_loader, val_loader)

    wandb.finish()

    # save last model
    if config.save_model:
        torch.save(model.state_dict(), "VAE.pt")
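
The VAE class itself is imported from elsewhere in the project. For reference, a minimal MNIST-sized module that would satisfy VAE().to(device) could look like the sketch below; the architecture and layer sizes are assumptions, not the project's actual model:

import torch
import torch.nn as nn

class VAE(nn.Module):
    # Hypothetical stand-in: a small fully connected VAE for flattened 28x28 inputs.
    def __init__(self, latent_dim=20):
        super().__init__()
        self.encoder = nn.Sequential(nn.Flatten(), nn.Linear(784, 400), nn.ReLU())
        self.fc_mu = nn.Linear(400, latent_dim)
        self.fc_logvar = nn.Linear(400, latent_dim)
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 400), nn.ReLU(),
            nn.Linear(400, 784), nn.Sigmoid())

    def forward(self, x):
        h = self.encoder(x)
        mu, logvar = self.fc_mu(h), self.fc_logvar(h)
        # Reparameterization trick: z = mu + sigma * eps.
        z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)
        return self.decoder(z), mu, logvar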
Example #4
def run():
    train_file = "train_sentiment.txt"
    test_file = "test_sentiment.txt"
    vocab_file = "sentiment.vocab"

    train_set = dataset.TextFileDataset()
    train_set.load_vocab(train_file, 5000)
    train_set.input_length = 30
    train_set.save_vocab(vocab_file)
    train_set.load(train_file)
    test_set = dataset.TextFileDataset()
    test_set.reload_vocab(vocab_file)
    test_set.load(test_file)

    trainer = Model()
    trainer.max_epoch = 10
    trainer.model = gen_lstm(train_set.vocab_size(), train_set.input_length,
                             train_set.num_classes())
    trainer.train(train_set, test_set, batch_size=32, verbose=True)
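
gen_lstm builds the network from the vocabulary size, input length, and class count, but the underlying framework is not shown here. The Keras sketch below is one plausible shape for such a builder; the layer sizes and optimizer are assumptions:

from tensorflow import keras

def gen_lstm(vocab_size, input_length, num_classes):
    # Hypothetical builder: embedding -> LSTM -> softmax classifier.
    model = keras.Sequential([
        keras.layers.Input(shape=(input_length,)),
        keras.layers.Embedding(vocab_size, 128),
        keras.layers.LSTM(64),
        keras.layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model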
Example #5
        if result[0][v] == max_prob:
            print(classifiction[v])
            print("vehicle classified")
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img_final, classifiction[v],
                        (Y_new[i] + q1, X_new[i] + p1), font, 1, color, 2,
                        cv2.LINE_AA)
            cv2.putText(img_final_1, classifiction[v],
                        (Y_new[i] + q1, X_new[i] + p1), font, 1, color, 2,
                        cv2.LINE_AA)


if __name__ == '__main__':
    #cap = cv2.VideoCapture(0)
    #cascade_path = "/home/antara/Downloads/opencv-2.4.10/data/haarcascades/haarcascade_frontalface_alt.xml"
    model = Model()
    model_final = Model_final()
    model_classification = Model_classification()
    model.load()
    model_final.load()
    model_classification.load()
    i = 0
    key = 0
    count = 0
    car_count = 0
    while count == 0:

        path = input('Enter the image path: ')
        img = cv2.imread(path)
        img_final_1 = cv2.imread(path)
        img_final = cv2.imread(path)
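
Both this example and the next recover the index of the winning class by scanning result[0] for the entry equal to max_prob. Assuming result[0] is a 1-D array or list of class probabilities, the scan collapses to a single argmax call:

import numpy as np

# Equivalent to looping over result[0] and comparing against max(result[0]).
key = int(np.argmax(result[0]))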
Example #6
# -*- coding:utf-8 -*-
import cv2
from trainer import Model
from trainer import Model_final


labels = {0: "Background", 1: "Car"}

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    #cascade_path = "/home/antara/Downloads/opencv-2.4.10/data/haarcascades/haarcascade_frontalface_alt.xml"
    model = Model()
    model_final = Model_final()
    model.load()
    model_final.load()
    i = 0 
    key = 0
    
    img1 = cv2.imread('1.png')
    img2 = cv2.imread('7.png')
    result = model.predict(img1)
    max_prob = max(result[0])
    for i in range(len(result[0])):
        if result[0][i] == max_prob:
            key = i


    try:
        print(labels[key])
        # cv2.putText(frame, labels[key], tuple(rect[0:2]), font, 1, color, 2, cv2.LINE_AA)
    except KeyError:
        pass

Example #7

            os.remove(video)
        except Exception:
            failed.append(video)
    print("9")
    path_video_files = dppvm.DEST / 'videos'
    path_meta = dppvm.DEST / 'metadata' / 'all_meta.json'
    all_meta = pd.read_json(path_meta).T
    all_meta["path"] = path_video_files + r'/' + all_meta.index
    # Train the model
    print("10")
    val_msk = int(len(all_meta) * 0.9)
    gener = ppf.DataGenerator(all_meta[:val_msk].index,
                              video_path=all_meta[:val_msk].path,
                              meta=all_meta[:val_msk])
    val = ppf.DataGenerator(all_meta[val_msk:].index,
                            video_path=all_meta[val_msk:].path,
                            meta=all_meta[val_msk:])
    print("11")
    model = Model.make_model(n_frames, dims, channels)
    model.compile(optimizer=optimizer, loss=binloss, metrics=[acc])
    print("12")
    Model.train_and_evaluate(gener,
                             callbacks=callbacks_list,
                             validation_data=val,
                             use_multiprocessing=True,
                             workers=-1,
                             verbose=1,
                             epochs=500)

    # Make predictions
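
The example stops at the prediction step. Assuming the compiled model follows the Keras Model API implied by the compile call above, the continuation might be a single predict over the held-out generator (a sketch, not the project's actual code):

    # Hypothetical continuation: score the validation generator with the trained model.
    predictions = model.predict(val, verbose=1)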