def find_by_regression(modelfile, input_npy_file):
        print('running')
        resized_npy, ori_size = BoundryFinder._resize_npy(input_npy_file)

        # Load the regression network and its trained weights.
        net_t = ModelLoader.load("FindBoundaryRegressioNet64128128")
        net_t = torch.nn.DataParallel(net_t)
        net_t = net_t.eval()
        state = torch.load(modelfile)
        net_t.load_state_dict(state)
        net_t = net_t.cuda()

        # Clip intensities to [-300, 500], then standardize to zero mean and
        # unit variance over the whole volume.
        imgc = resized_npy.copy()
        imgc[imgc > 500] = 500
        imgc[imgc < -300] = -300
        newimg = (imgc - np.mean(imgc)) / np.std(imgc)

        newimg = newimg.astype('float32')
        # Add batch and channel dims; run inference without gradient tracking
        # (replaces the deprecated volatile=True Variable).
        with torch.no_grad():
            newimgth = torch.from_numpy(newimg[np.newaxis, np.newaxis]).cuda()
            output = net_t(newimgth)
        outputc = output.view(-1)
        outputc_np = outputc.data.cpu().numpy()
        # The network predicts both box corners in normalized coordinates;
        # scale them back to the original volume size.
        pt1 = outputc_np[0:3] * ori_size
        pt2 = outputc_np[3:6] * ori_size

        # Pad the box by BUFFER voxels and clamp it to the volume bounds.
        pt1 = np.maximum(pt1 - BUFFER, 0)
        pt2 = np.minimum(pt2 + BUFFER, ori_size)

        pt1 = pt1.astype(int)
        pt2 = pt2.astype(int)
        print('DONE!')

        return pt1, pt2
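
A minimal usage sketch: the file paths and the crop step below are illustrative assumptions, and it assumes find_by_regression is a static method of BoundryFinder; only the return convention (the pt1, pt2 corners) comes from the code above.

# Hypothetical usage: find the bounding box, then crop the volume with it.
import numpy as np

pt1, pt2 = BoundryFinder.find_by_regression('400_net_params.pkl', 'scan.npy')
volume = np.load('scan.npy')
crop = volume[pt1[0]:pt2[0], pt1[1]:pt2[1], pt1[2]:pt2[2]]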
Example #2
    def __call__(self, parser, args, values, option_string=None):
        data = ModelLoader.model_info(name=values[0])
        if data is not None:
            # Right-align keys to the width of the longest key.
            left_col_width = max(len(t[0]) for t in data)
            for k, v in data:
                if not isinstance(v, list):
                    print("{1:>{0}}: {2}".format(left_col_width, k, v))
                else:
                    if len(v) <= 1:
                        continue

                    # Print the key once, then align continuation lines under it.
                    print("{1:>{0}}: {2}".format(left_col_width, k, v[0]))
                    for i in range(1, len(v)):
                        print("{1:>{0}}  {2}".format(left_col_width, "", v[i]))
        exit()
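
This is a custom argparse action. A sketch of how it might be wired into a parser; the class name ModelInfoAction is an assumption, the rest follows argparse's standard Action protocol:

# Hypothetical registration of the action above (class name assumed).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--info', action=ModelInfoAction, nargs=1,
                    metavar='MODEL', help='Print details for the named model')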
Example #3
    parser.add_argument('--no_segmentation_images', action='store_true', default=False, help='Reduce output files to minimum')

    args = parser.parse_args()

    # Title
    print("RootNav 2.0")
    sys.stdout.flush()

    # Input and output directory are required
    if not args.input_dir or not args.output_dir:
        parser.print_help()
        exit()

    # Check the CUDA configuration and warn if CUDA was requested but is unavailable
    if not torch.cuda.is_available() and not args.no_cuda:
        print("Cuda is not available, switching to CPU")
    use_cuda = torch.cuda.is_available() and not args.no_cuda

    # Check CRF flag
    use_crf = not args.no_crf

    # Load the model
    try:
        model_data = ModelLoader.get_model(args.model, gpu=use_cuda)
    except Exception as ex:
        print(ex)
        exit()

    # Process
    run_rootnav(model_data, use_cuda, use_crf, args.input_dir, args.output_dir, args.no_segmentation_images)
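
The CUDA handling above reduces to one rule: use the GPU only when it is both available and not disabled by the flag. An illustrative helper capturing that rule (the function name is an assumption):

# Illustrative helper equivalent to the CUDA check above.
import torch

def resolve_use_cuda(no_cuda_flag):
    # Warn only when the user wanted CUDA but none is present.
    available = torch.cuda.is_available()
    if not available and not no_cuda_flag:
        print("Cuda is not available, switching to CPU")
    return available and not no_cuda_flag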
Example #4
    def __call__(self, parser, args, values, option_string=None):
        # Build a (name, description) table of every available model.
        table_data = [("Model", "Description")] + [
            (name, desc) for name, desc in ModelLoader.list_models(True)
        ]
        print_table(table_data)
        exit()
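
print_table is not defined in this snippet. A minimal sketch, assuming it takes a list of (name, description) pairs with the header as the first row:

# Minimal print_table compatible with the call above (assumed API).
def print_table(rows):
    # Pad the first column to the widest name so descriptions align.
    width = max(len(name) for name, _ in rows)
    for name, desc in rows:
        print("{0:<{1}}  {2}".format(name, width, desc))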
Example #5
def prepare(config):
    if 'hardmining' in config.prepare and config.prepare['hardmining']:
        from datasets import myDataset2 as myDataset
    else:
        from datasets import myDataset
    from datasets import myDataset as myDataset_val
    env_utils.setEnvironment(config)
    if config.debug:
        model = ModelLoader.load(config.net["model"], config=config, abn=1)
    elif config.jit:
        model = ModelLoader.load(config.net["model"], config=config, abn=0)
    else:
        model = ModelLoader.load(config.net["model"], config=config)

    loss = LossLoader.load(config.net["rpnloss"], config)
    # Load each additional loss/metric named in config.net['em'].
    em_names = config.net['em']
    em_list = [LossLoader.load(ems, config) for ems in em_names]
    netio = netIOer(config)

    if config.half:
        model = model.half()
        model = BN_convert_float(model)

    if config.net["load_weight"] != '':
        model, config = netio.load_file(model, config.net["load_weight"])
    # optimizer = optim.SGD(model.parameters(), lr= config.train['lr_arg'], momentum=0.9,
    #                       weight_decay=config.train['weight_decay'])

    model = model.cuda()
    if config.jit:
        netio.trace(model)
        sys.exit()

    loss = loss.cuda()
    warp = warpLoss(model, loss, config.prepare['margin'])
    if not config.debug:
        warp = DataParallel(warp)

    trainer = Trainer(warp, config, netio, emlist=em_list)

    train_data = myDataset(config, 'train')
    if config.valtrain:
        val_data = myDataset_val(config, 'valtrain')
    else:
        val_data = myDataset_val(config, 'val')

    print(config.augtype)
    print(config.env['cpu_num'])
    train_loader = DataLoader(train_data,
                              batch_size=config.train['batch_size'],
                              shuffle=True,
                              num_workers=config.env['cpu_num'],
                              drop_last=True,
                              pin_memory=True,
                              worker_init_fn=np.random.seed)

    val_loader = DataLoader(val_data,
                            batch_size=1,
                            shuffle=False,
                            num_workers=5,
                            pin_memory=True,
                            collate_fn=lambda x: x)
    return config, model, loss, warp, trainer, train_data, val_data, train_loader, val_loader
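
A sketch of how the returned tuple might be consumed; only the return order comes from prepare() above, while trainer.train's signature is an assumption:

# Hypothetical driver; Trainer's training entry point is assumed.
(config, model, loss, warp, trainer,
 train_data, val_data, train_loader, val_loader) = prepare(config)
trainer.train(train_loader, val_loader)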
Example #6
def train(model_name):
    # Data parameters
    #data_dir = r'C:\Users\Lorenz\Documents\Coding\data\jester_hand_gestures'
    data_dir = r'/Users/benja/code/jester/data'
    seq_length = 40
    n_videos = {'train': 1000, 'validation': 100}
    image_size = (50, 88)

    # Training parameters
    n_epochs = 50
    batch_size = 8
    steps_per_epoch = n_videos['train'] // batch_size

    # Load data generators
    data = CNN3DDataLoader(data_dir,
                           seq_length=seq_length,
                           n_videos=n_videos,
                           labels=labels_want)
    train_gen = data.sequence_generator('train', batch_size, image_size)
    validation_gen = data.sequence_generator('validation', batch_size,
                                             image_size)

    # optimizer = keras.optimizers.SGD(lr=0.1, momentum=0.9, decay=1e-6, nesterov=True)
    # Load model
    optimizer = keras.optimizers.Adadelta()
    ml = ModelLoader(data.n_labels,
                     data.seq_length,
                     model_name,
                     image_size=image_size,
                     optimizer=optimizer)
    model = ml.model

    # Define callbacks
    checkpointer = ModelCheckpoint(filepath=model_name +
                                   '-{epoch:03d}-{loss:.3f}.hdf5',
                                   verbose=1,
                                   save_best_only=True)
    tb = TensorBoard(log_dir='./models/logs')
    early_stopper = EarlyStopping(patience=2)
    csv_logger = CSVLogger('./models/logs/' + model_name + '-training-' +
                           str(time.time()) + '.log')

    callbacks = [tb, early_stopper, csv_logger, checkpointer]

    # Training
    print('Starting training')

    history = model.fit_generator(
        generator=train_gen,
        steps_per_epoch=steps_per_epoch,
        epochs=n_epochs,
        verbose=1,
        callbacks=callbacks,
        validation_data=validation_gen,
        validation_steps=10,
    )

    model.save('./my_model.h5')

    save_history(history, "c3d")
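
save_history is not shown here. A minimal sketch, assuming it serializes the Keras history.history metrics dict; the file layout is illustrative:

# Minimal save_history (assumed behavior: dump metrics to JSON).
import json

def save_history(history, name):
    with open('./models/logs/{}_history.json'.format(name), 'w') as f:
        json.dump(history.history, f)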
Example #7
    def _seg(self, net_name, model_file):
        self.cut_npy()
        print("running")
        # Load the segmentation network and its trained weights.
        net_t = ModelLoader.load(net_name)
        net_t = torch.nn.DataParallel(net_t)
        net_t = net_t.eval()
        state = torch.load(model_file)
        cropsize = np.array([64, 320, 320])
        stride_z = 64

        net_t.load_state_dict(state)
        net_t = net_t.cuda()

        sigmoid = torch.nn.Sigmoid()
        img = self.modified_npy

        # Rescale intensities from [-300, 500] to [0, 1].
        img = (img + 300.0) / 800.0
        img[img > 1] = 1.0
        img[img < 0] = 0.0

        # Accumulator for predicted probabilities, same shape as the volume.
        pred = np.zeros_like(img)

        lpt1 = self.pt1
        lpt2 = self.pt2

        expand = np.array([10, 10, 10])
        lpt1, lpt2 = self._expandPt(lpt1, lpt2, expand, img.shape)

        # Number of z-slabs needed to cover the volume (ceiling division).
        num = (img.shape[0] + stride_z - 1) // stride_z

        for i in range(num):

            lpt2_e = lpt2.copy()
            lpt2_e[1] = lpt1[1] + cropsize[1]
            lpt2_e[2] = lpt1[2] + cropsize[2]
            if lpt2_e[1] > img.shape[1]:
                lpt2_e[1] = img.shape[1]
                lpt1[1] = lpt2_e[1] - cropsize[1]
            if lpt2_e[2] > img.shape[2]:
                lpt2_e[2] = img.shape[2]
                lpt1[2] = lpt2_e[2] - cropsize[2]

            start_z = lpt1[0] + stride_z * i
            end_z = lpt1[0] + stride_z * (i + 1)
            if end_z > img.shape[0]:
                end_z = img.shape[0]
                start_z = end_z - cropsize[0]

            imgc = img[start_z:end_z, lpt1[1]:lpt2_e[1], lpt1[2]:lpt2_e[2]]

            print("imshape:", imgc.shape)

            imgc = imgc.astype("float32")
            # Add batch and channel dims; run inference without gradient
            # tracking (replaces the deprecated volatile=True Variable).
            with torch.no_grad():
                imgcth = torch.from_numpy(imgc[np.newaxis, np.newaxis]).cuda()
                output = net_t(imgcth)
                Sigmoidout = sigmoid(output)
            prob5_np = Sigmoidout.cpu().numpy()

            prob_np = prob5_np[0][0]

            pred[start_z:end_z, lpt1[1]:lpt2_e[1],
                 lpt1[2]:lpt2_e[2]] += prob_np

        print("pred", pred.shape)

        # Binarize the accumulated probabilities at a 0.15 threshold.
        pred[pred > 0.15] = 1
        pred[pred <= 0.15] = 0
        pred = self._cleanNoisePred(pred)
        return pred
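
_cleanNoisePred is not shown. A common choice for this post-processing step is to keep only the largest connected component of the binary mask; the sketch below assumes that behavior:

    # Hypothetical _cleanNoisePred: keep the largest connected component.
    import numpy as np
    from scipy import ndimage

    def _cleanNoisePred(self, pred):
        labels, n = ndimage.label(pred)
        if n == 0:
            return pred
        sizes = ndimage.sum(pred, labels, range(1, n + 1))
        largest = np.argmax(sizes) + 1
        return (labels == largest).astype(pred.dtype)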