Example #1
def autonomous_driver_server(conn, model_file_name=None, linear=False):
    if model_file_name is None:
        return

    try:
        # Try to load a fully saved model first.
        model = tf.keras.models.load_model(model_file_name)
    except Exception:
        # Fall back to rebuilding the architecture and loading only the weights.
        model = trainer.get_model(linear=linear)
        model.load_weights(model_file_name)

    while True:
        img = conn.recv()
        if img is None:
            conn.send(8)
            continue

        img = np.array(img)
        input_shape = model.layers[0].input_shape
        rows = input_shape[1]

        processed_img = trainer.proc_img(img,
                                         rows_removed=30 - rows,
                                         break_point=0.5)
        m = np.array(processed_img)
        raw_prediction = model.predict(m.reshape((1, rows, 30, 1)))
        (_, c) = raw_prediction.shape
        if c == 1:
            prediction = np.round(raw_prediction, 0)
        else:
            prediction = np.argmax(raw_prediction, axis=1)[0]
        prediction += 1

        conn.send(prediction)
Example #2
def run_vcl(args, device, labels_list):
    task_final_accs = np.zeros((5, 5))  # Test accuracy after each task ends
    all_accs = []  # Test accuracy after every epoch

    coresets = []  # Coreset examples accumulated across tasks
    coreset_method = Coreset.rand_from_batch

    # Pretraining
    print("=============Pretraining ==================")
    model, name = trainer.get_model(args, device, mle=True)
    loss_fn = trainer.get_loss_fn(args, device, model)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    dataset = get_dataset(args, 0, 'trainval')
    fit(args, model, device, optimizer, loss_fn, coresets, dataset,
        labels_list, 0)

    # train all tasks incrementally
    for task_id, labels in enumerate(labels_list):
        print("===========TASK {}: {}=============".format(
            task_id + 1, labels))
        # Model
        old_model = model
        model, model_name = trainer.get_model(args, device)
        model.load_state_dict(old_model.state_dict())
        # Loss Function and Optimizer
        loss_fn = trainer.get_loss_fn(args, device, model, old_model)
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

        # Dataset and Coreset
        dataset = get_dataset(args, task_id, 'trainval')
        if coreset_method is not None:
            coresets, dataset = coreset_method(coresets, dataset,
                                               args.coreset_size)

        # Fit
        best_state, test_accs = fit(args, model, device, optimizer, loss_fn,
                                    coresets, dataset, labels_list, task_id)
        task_final_accs[task_id] = test_accs[-1]
        all_accs.extend(test_accs)

        # save model
        path = trainer.get_model_path(labels, model_name)
        torch.save(best_state, path)

    return task_final_accs, all_accs
Example #3
def choose_model(load):
    root = Tk()
    root.lift()
    if load.lower() == 'y':
        path = askopenfilename()
        root.destroy()
        return (path, get_model(path, create=False))
    else:
        print('Choose model save path (empty for no saving)')
        path = askdirectory()
        root.destroy()

        if path is None or path.replace(' ', '') == '':
            print('Empty path...')
            return

        if not os.path.exists(path):
            os.makedirs(path)

        return (path, get_model(path + '/', create=True))
Example #4
def run_vcl(args, device, labels_list):
    # TODO: Add classifier accuracy
    all_vis_images = []

    # train all tasks incrementally
    model, _ = trainer.get_model(args, device)
    for task_id, labels in enumerate(labels_list):
        print ("===========TASK {}: {}=============".format(task_id + 1, labels))
        # Model
        old_model = model
        model, model_name = trainer.get_model(args, device, task_id=task_id)
        model.load_state_dict(old_model.state_dict(), strict=False)
        
        # Loss Function and Optimizer
        loss_fn = trainer.get_loss_fn(args, device, model, old_model)
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
        
        # Dataset
        dataset = get_dataset(args, task_id, 'trainval')

        # Fit
        best_state = fit(args, model, device, optimizer, loss_fn, dataset, labels_list, task_id)
        
        # Generate some images
        ims = test_all_tasks(args, model, device, loss_fn, labels_list, task_id, split=True)
        for test_task_id, im in enumerate(ims):
            utils.save_generated_ims(im, 'mnist_gen', task_id, test_task_id)
        all_vis_images.extend(select_and_pad_ims(ims))
        
        # save model
        path = trainer.get_model_path(labels, model_name)
        torch.save(best_state, path)
    
    # Save Final Image
    final_vis_im = torch.cat(all_vis_images)
    utils.save_generated_ims(final_vis_im, 'mnist_gen', 'final', 'all')
    
    return
Example #5
    def fit_coreset(coreset, labels, args, model, device):
        # Get Inference Model
        final_model, _ = trainer.get_model(args, device)
        final_model.load_state_dict(model.state_dict())
        final_model.set_range(labels)

        # Get Loss Function and Optimizer and Dataloader
        loss_fn = trainer.get_loss_fn(args, device, final_model, model)
        optimizer = optim.Adam(final_model.parameters(), lr=args.lr)
        coreset_loader = trainer.get_loader(coreset, args, device, 'coreset')

        for epoch in range(args.coreset_epochs):
            trainer.train(args,
                          final_model,
                          device,
                          coreset_loader,
                          optimizer,
                          epoch,
                          loss_fn,
                          verbose=False)

        return final_model
Example #6
def main():
    
    #train()

    parser = argparse.ArgumentParser(description='Controls a lego car autonomously.')
    parser.add_argument('-r', '--record', help='The directory in which the replay is to be stored')
    parser.add_argument('--show', help='Opens a window that shows what the car sees', action='store_true')
    parser.add_argument('model')
    parser.add_argument('--linear', help='Needed if the model loaded is linear and its weights are stored.', action='store_true')
    args = parser.parse_args()
    rec = None
    if args.record is not None:
        rec = Recorder(args.record)

    # Serial device used to send drive commands to the car.
    fd = os.open("/dev/ttyACM0", os.O_WRONLY | os.O_SYNC)
    cap = cv2.VideoCapture(0)

    try:
        # Try to load a fully saved model first.
        model = tf.keras.models.load_model(args.model)
    except Exception:
        # Fall back to rebuilding the architecture and loading only the weights.
        model = get_model(linear=args.linear)
        model.load_weights(args.model)
    
    first_run = True
    while True:
        input_shape = model.layers[0].input_shape
        if input_shape[1] > 30:
            # Flattened input: the first layer takes rows * 30 features.
            rows = int(input_shape[1] / 30)
        else:
            rows = input_shape[1]

        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        proper = cv2.resize(gray, (30, 30))
        vec = proc_img(proper, rows_removed=30-rows)
        
        if rec is not None:
            rec.store(vec.reshape(rows, 30) * 255)

        if args.show:
            cv2.imshow('wind', vec.reshape(rows, 30))


        if input_shape[1] > 30:
            # The model expects a flattened image (kept for backwards compatibility).
            raw_prediction = model.predict(vec.reshape(1, rows * 30))
        else:
            m = np.array(vec)
            raw_prediction = model.predict(m.reshape((1, rows, 30, 1)))

        (_, c) = raw_prediction.shape
        if c == 1:
            prediction = np.round(raw_prediction, 0)
        else:
            prediction = np.argmax(raw_prediction, axis=1)[0]
        prediction += 1
        print(prediction)
        if first_run:
            # Send command 17 once on the first iteration, before the regular predictions.
            os.write(fd, bytes(str(17) + "\x0a\x0d", 'ASCII'))
            first_run = False
        os.write(fd, bytes(str(prediction) + "\x0a\x0d", 'ASCII'))
        # Exit on 'q' so the cleanup below is reachable.
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break

    cap.release()
    os.close(fd)
    if args.show:
        cv2.destroyAllWindows()
Example #7
            #image, feat = to_img(image), to_img(feat)
            ax.imshow(out, interpolation='bicubic')
            ax.axis('off')
            _pred = validset.classes[pred]
            _label = validset.classes[label]
            ax.set_title(
                f'Pred: {_pred}, Actual: {_label}, \nProb: {label_prob:.2f}')

        return axs


if __name__ == "__main__":
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model = trainer.get_model(False)

    model.load_state_dict(torch.load('classify/saved_models/model_6.pt'))
    model = model.to(device)

    valid_transform = transforms.Compose([
        transforms.CenterCrop(447),
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    trainset = datasets.ImageFolder('data/recycle_classify/train',
                                    valid_transform)

    validset = datasets.ImageFolder('data/recycle_classify/valid',
                                    valid_transform)
Example #8
    print(dev_str)
    device = torch.device(dev_str)

    return_label = True
    return_snp = False
    unsampled_test = True
    is_divergent_set = 'divergent_set' in model_path

    gen = SkelTrainer(fname=fname,
                      is_divergent_set=is_divergent_set,
                      return_label=return_label,
                      return_snp=return_snp,
                      unsampled_test=unsampled_test)

    model = get_model(model_name, gen.num_classes, gen.embedding_size)
    model = model.to(device)

    assert set_type in model_path
    state = torch.load(model_path, map_location=dev_str)
    model.load_state_dict(state['state_dict'])
    model.eval()

    #aw = np.load(model_path.replace('.pth.tar', '.npy'))

    gen.test()
    test_indexes = gen.valid_index

    all_res = []
    with torch.no_grad():
        pbar = tqdm.tqdm(test_indexes)