Example #1
experiment_parameters['n_col'] = 6
experiment_parameters['network_type'] = 'DVAE'
experiment_parameters['save_interval'] = 5
experiment_parameters['fps'] = 25
experiment_parameters['n_clayers'] = 5
experiment_parameters['kernel1_size'] = 4

# Setting up data folder
resultdir = './results/cows/'
if not (path.exists(resultdir)):
    os.mkdir(resultdir)
datestring = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
os.mkdir(resultdir + datestring)

# Saving parameter file
helper.print_params('./results/cows/' + datestring + '/PARAMETERS.txt',
                    experiment_parameters)

# Running the experiments
files = glob.glob('./data/cows/*mp4')
files.sort()
vids = []
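# Read every cow video, rescaled to 224x224, and collect the frame arrays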
for f_i, file in enumerate(files):
    cv, _ = helper.readvid(file, scale=(224, 224))
    vids.append(cv)
dpref = resultdir + datestring

fpref = dpref + '/' + 'cows'
print('Processing now:', fpref)
print()
vid_pairs = []
for V in vids:
Example #2
                        multiproc=True,
                        ncores=12,
                        filename=os.path.join(save_path, 'explore_perf.hdf5'))

traj = env.v_trajectory
pp.add_parameters(traj, parameter_dict)

explore_dict = pypet.cartesian_product(
    explore_dict, tuple(explore_dict.keys())
)  # if not every entry of the dict should be explored via the cartesian product, replace tuple(...) with a tuple of only the relevant keys
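# For illustration only (hypothetical keys): to vary 'lr' and 'batch' jointly
# and take the cartesian product only against 'seed', one could instead call
#   pypet.cartesian_product(explore_dict, (('lr', 'batch'), 'seed'))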

explore_dict['name'] = pp.set_run_names(explore_dict, parameter_dict['name'])
traj.f_explore(explore_dict)
""" launch simulation with pypet for parameter exploration """
tic = time.time()
env.f_run(pp.launch_exploration, images_train, labels_train, images_test,
          labels_test, save_path)
toc = time.time()
""" save parameters to file """
helper.print_params(print_dict, save_path, runtime=toc - tic)
""" plot results """
name_best = pp.plot_results(folder_path=save_path)
pp.faceting(save_path)

print('\nrun name:\t' + parameter_dict['name'])
print('start time:\t' + time.strftime("%a, %d %b %Y %H:%M:%S",
                                       time.localtime(tic)))
print('end time:\t' + time.strftime("%a, %d %b %Y %H:%M:%S",
                                     time.localtime(toc)))
print('train time:\t' + str(datetime.timedelta(seconds=toc - tic)))
Example #3
experiment_parameters['n_col'] = 6
experiment_parameters['network_type'] = 'incomplete'
experiment_parameters['save_interval'] = 5
experiment_parameters['fps'] = 25
experiment_parameters['n_clayers'] = 5
experiment_parameters['kernel1_size'] = 8

# Setting up data folder
resultdir = './results/playing_incomplete/'
if not (path.exists(resultdir)):
    os.mkdir(resultdir)
datestring = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
os.mkdir(resultdir + datestring)

# Saving parameter file
helper.print_params('./results/playing_incomplete/' + datestring + '/PARAMETERS.txt',
                    experiment_parameters)

# Running the experiments
V, _ = helper.readvid('./data/playing_incomplete/playing_incomplete.avi')
M, _ = helper.readvid('./data/playing_incomplete/playing_mask.avi')
dpref = resultdir + datestring

fpref = dpref + '/' + 'playing_incomplete'
print('Processing now:', fpref)
print()
vid_pairs = []
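# Pair consecutive frames: the first three channels hold frame t, the last three frame t+1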
V_ = np.zeros((V.shape[0]-1, 6, V.shape[2], V.shape[3]))
V_[:, 0, :, :] = V[:-1, 0]
V_[:, 1, :, :] = V[:-1, 1]
V_[:, 2, :, :] = V[:-1, 2]
V_[:, 3, :, :] = V[1:, 0]
Example #4
os.mkdir(resultdir + datestring)

# Running the experiments
files = glob.glob('./data/stgconv_data/*.avi')
files.sort()
for f_i, file in enumerate(files):
    V, fr = helper.readvid(file, scale=(128, 128))
    os.mkdir(resultdir + datestring + '/v' + str(f_i))
    dpref = resultdir + datestring + '/v' + str(f_i)
    fpref = dpref + '/vid' + str(f_i)
    print('Processing now:', fpref)
    print()
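    # Pair consecutive frames: the first three channels hold frame t, the last three frame t+1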
    V_ = np.zeros((V.shape[0] - 1, 6, V.shape[2], V.shape[3]))
    V_[:, 0, :, :] = V[:-1, 0]
    V_[:, 1, :, :] = V[:-1, 1]
    V_[:, 2, :, :] = V[:-1, 2]
    V_[:, 3, :, :] = V[1:, 0]
    V_[:, 4, :, :] = V[1:, 1]
    V_[:, 5, :, :] = V[1:, 2]

    experiment_parameters['fps'] = fr
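    # Scale epochs inversely with video length so the total number of frame
    # presentations stays roughly constant (~62500), with about 20 intermediate saves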
    experiment_parameters['n_epochs'] = 62500 // V.shape[0]
    experiment_parameters['save_interval'] = experiment_parameters['n_epochs'] // 20
    helper.print_params(dpref + '/PARAMETERS.txt', experiment_parameters)
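    # Map pixel values (assumed to lie in [0, 1]) to [-1, 1] and build a float tensor for training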
    npdata = V_ * 2 - 1
    traindata = torch.from_numpy(npdata).float()
    ret = helper.run_experiment(traindata, fpref, experiment_parameters)
    if not ret:
        print('Could not synthesize', fpref, 'due to runtime error')
Example #5
    total_err += np.abs(err)

    print(
        '(' + "({0:2d},{1:2d}) -> ({2:2d},{3:2d})".format(
            d[n][0], d[n][1], d[n][2], d[n][3]) + ') = ' +
        "{0:8.2f} / {1:8.2f}   diff: {2:8.2f} 1/cm".format(d[n][4], fit, err))

print("\nTotal error/no of lines = {0:6.5f}\n".format(total_err / len(d)))

###############################################
# calculate predictions
###############################################

print()
print_params(result)

(Yg, Ye) = make_params_dunham(result)

#print_matrix(Yg)
#print_matrix(Ye)

(Ug, Ue) = reduce_dunham(Yg, Ye)

print_matrix(Ug)
print_matrix(Ue)
make_report(Ug, Ue, vmax=1, Jmax=1, save_filename='lines_dunham.txt')

#print_matrix(Ug)
#print_matrix(Ue)