Example #1
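# Round-trip test: names saved with save_names are read back unchanged by read_names.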
def test_save_names_readback(self):
    fname = make_temp_filename(text=True)
    self.data.save_names(fname, "w")
    names = set(self.data.names)
    rb = read_names(fname)
    self.assertEqual(names, rb)
    os.remove(fname)
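
These examples come from different projects, so the exact behavior of read_names varies. The round-trip test in Example #1, however, implies an implementation along these lines; a minimal sketch, assuming one name per line, with the set return type inferred from the set comparison in that test:

def read_names(fname):
    # Sketch only: read newline-separated names from a text file.
    # Returned as a set because Example #1 compares the result directly
    # against set(self.data.names); other projects may return a list.
    with open(fname) as f:
        return {line.strip() for line in f if line.strip()}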
Example #2
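# Split the data, persist the train/test file lists, build the training DataSet, and train.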
def main():
    params = read_params(cfg_file_path)
    trainfiles, testfiles = filesplit(DATADIR)
    savefilenames(SAVEPATH + 'filenames/', trainfiles, testfiles)
    # re-read the persisted split so training uses the saved file list
    trainfiles = read_names('../../scratch/bd_lstm/filenames/trainfiles.txt')
    trainset = DataSet(root_dir=DATADIR,
                       files=trainfiles,
                       normalize=False,
                       seq_len=params['slice_size'],
                       stride=params['stride'])
    train(SAVEPATH + 'trainstats/', trainset, params)
Example #3
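# Evaluate a trained model on the persisted test-file list.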
def main():
    params = read_params(cfg_file_path)
    TESTIOPATH = '../../scratch/bd_lstm/'
    testfiles = read_names('../../scratch/bd_lstm/filenames/testfiles.txt')
    evaluate(TESTIOPATH, testfiles, params)
Example #4

# load model
test_video = True
tiny_model = True
class_name_file = 'model/coco_names.txt'
if tiny_model:
    anchor_file = './model/tiny_yolo_anchors.txt'
    weights_file = 'model/yolo-tiny.h5'
else:
    anchor_file = './model/yolo_anchors.txt'
    weights_file = 'model/yolo.h5'

class_name = read_names(class_name_file)
anchors = read_anchors(anchor_file)
yolov3 = YoloV3(input_shape=(416, 416, 3),
                num_classes=len(class_name),
                anchors=anchors,
                training=False,
                tiny=tiny_model)
yolov3.model.load_weights(weights_file)

if not test_video:
    # test img
    detect_img(yolov3.predict_img, 'Fallout4-1024x576.jpg', class_name, yolov3.input_shape[:2])
else:
    # test video
    video_path = 'E:\\data\\180301_03_A_CausewayBay_06.mp4'
Example #5
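# Item-based collaborative filtering: compute and persist item similarities, then recommend movies.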
if __name__ == '__main__':

    ratings = read_ratings(MOVIE_RATINGS_PATH)
    ratings = pd.DataFrame(data=ratings, columns=['user', 'movie', 'rating'])
    ratings = ratings.astype(int)

    print(ratings.head())

    # compute item-item similarities from the ratings and persist them
    similarities = compute_similarities(ratings)
    similarities.to_csv(SIMILARITIES_PATH, index=False)
    print('Done computing similarities!')

    similarities = load_item_similarities(SIMILARITIES_PATH)
    movie_names = read_names(MOVIE_NAMES_PATH)

    print(similarities.head())

    # this pair is immediately overwritten by the existing pair below
    user_id = 381
    movie_id = 12

    # existing (user, item) pair:
    user_id = 196
    movie_id = 242

    # recommend
    recommendations = recommend(movie_id,
                                similarities,
                                movie_names,
                                min_cooccurrence=MIN_OCCURRENCES,
Example #6
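# Pick the test file whose rotational velocity is closest to user input and evaluate the LSTM on it.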
def evaluate_any_file():
    #os.system(scp )
    filepath = '../../original/processed_data/'
    weightpath = '../../scratch/bd_lstm/trainstats/weights_middle.pth'
    demoweights = '../../scratch/bd_lstm/trainstats/demoweights.pth'
    weightpath = demoweights
    parampath = '../../code/bdrnn/conf_model.cfg'
    filenamepath = '../../scratch/bd_lstm/filenames/testfiles.txt'
    minmaxdatapath = '../../original/minmaxdata/'

    # read the test-file names and pick the one closest to the requested velocity
    filenames = read_names(filenamepath)
    print(len(filenames))
    filenamedict = make_dict(filenames)
    velocity = float(
        input(
            'Enter a rotational velocity between 4 Hz and 18 Hz; the closest available file will be used for evaluation.\n'
        ))
    filename, velocity = find_closest(filenamedict, velocity)
    files = [filename]

    # read parameters
    params = read_params(parampath)

    # init the dataset with the selected file, and build the model
    dataset = DataSet(root_dir=filepath,
                      files=files,
                      normalize=False,
                      seq_len=params['slice_size'],
                      stride=1000)

    loader = DataLoader(dataset,
                        batch_size=int(params['batch_size']),
                        shuffle=True)

    model = LSTM_layers(input_size=int(params['input_size']),
                        hidden_size=int(params['hidden_size']),
                        num_layers=int(params['n_layers']),
                        dropout=float(params['dropout']),
                        output_size=int(params['output_size']),
                        batch_first=True,
                        bidirectional=True)
    # On a CPU-only machine torch.load raises "Attempting to deserialize
    # object on a CUDA device", so map_location='cpu' maps the storages to the CPU.

    model.load_state_dict(torch.load(weightpath, map_location='cpu'))
    model.to(device)
    model.eval()
    losses = []

    for idx, sample in enumerate(loader):
        y = sample[:, :, :2].clone().detach().requires_grad_(True).to(device)
        x = sample[:, :, 2:].clone().detach().requires_grad_(True).to(device)
        h0 = model.init_hidden(int(params['batch_size']), None).to(device)
        c0 = model.init_cell(int(params['batch_size'])).to(device)

        # forward pass and loss
        output = model.forward(x, (h0, c0))
        loss = F.mse_loss(output, y)
        losses.append(loss.item())

        output, y = scale_seqs(output, y, filename, minmaxdatapath)

        if (idx % 3) == 0:
            save_this_plot(0, 2763, output[0], y[0], loss.item(), velocity)
    print("Avg loss:", np.mean(losses))
Example #7
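# Waterfall-plot data: FFT of predicted vs. true sequences across files in ascending velocity order.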
def waterfall():
    filepath = '../../original/processed_data/'
    minmaxdatapath = '../../original/minmaxdata/'
    filenamepath = '../../scratch/bd_lstm/filenames/testfiles.txt'
    weightpath = '../../scratch/bd_lstm/trainstats/weights_middle.pth'
    parampath = './conf_model.cfg'

    filenames = read_names(filenamepath)

    filenamedict = make_dict(filenames)

    vels = ascendingorder_wf(filenames)
    num_files = len(vels)

    params = read_params(parampath)
    model = LSTM_layers(input_size=int(params['input_size']),
                        hidden_size=int(params['hidden_size']),
                        num_layers=int(params['n_layers']),
                        dropout=float(params['dropout']),
                        output_size=int(params['output_size']),
                        batch_first=True,
                        bidirectional=True)

    model.load_state_dict(torch.load(weightpath, map_location='cpu'))
    model.to(device)
    model.eval()
    arr = None
    hack_idx = 0
    for velocity in vels:
        filename, velocity = find_closest(filenamedict, velocity)

        files = [filename]
        dataset = DataSet(root_dir=filepath,
                          files=files,
                          normalize=False,
                          seq_len=seq_len,
                          stride=max_stride)
        loader = DataLoader(dataset,
                            batch_size=int(params['batch_size']),
                            shuffle=True)
        for idx, sample in enumerate(loader):
            y = sample[:, :, :2].clone().detach().requires_grad_(True).to(device)
            x = sample[:, :, 2:].clone().detach().requires_grad_(True).to(device)
            h0 = model.init_hidden(int(params['batch_size']), None).to(device)
            c0 = model.init_cell(int(params['batch_size'])).to(device)

            # forward pass, then FFT of predicted and true sequences
            output = model.forward(x, (h0, c0))
            frq_pred, Y_pred, frq_true, Y_true = fft(output, y, velocity,
                                                     seq_len, filename,
                                                     minmaxdatapath)
            vel_pred = np.ones(len(frq_pred)) * velocity
            break  # one batch per file is enough for the waterfall
        # stack (velocity, frequency, amplitude) rows; limit caps the file count
        if hack_idx == 0:
            arr_pred = np.vstack((vel_pred, frq_pred, Y_pred))
            arr_true = np.vstack((vel_pred, frq_true, Y_true))
        else:
            arr2_pred = np.vstack((vel_pred, frq_pred, Y_pred))
            arr2_true = np.vstack((vel_pred, frq_true, Y_true))
            arr_pred = np.hstack((arr_pred, arr2_pred))
            arr_true = np.hstack((arr_true, arr2_true))
        if hack_idx > limit:
            break
        else:
            hack_idx += 1
        print(velocity, hack_idx, '/', num_files)
    return arr_pred, arr_true