def main(cx,
         cy,
         obstacle_path,
         model,
         start=None,
         goal=None,
         num_evals=1,
         eval_mode=False,
         plotopt=False):
    """eval_mode is True when function is
        used for testing performance of the network
        When False, function takes in specified start and goal
        and generates a path
    """
    object_c = format_obstacles(load_poly=obstacle_path)
    valid_counter = 0
    path_set = []
    for i in range(num_evals):

        if eval_mode:
            start, goal = generate_samples(cx,
                                           cy,
                                           load_polys=obstacle_path,
                                           num_samples=2)

        start_ext = start[np.newaxis, np.newaxis, ...]
        goal_ext = goal[np.newaxis, np.newaxis, ...]
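        # stack start and goal along the last axis to form the network input,
        # shape (1, 1, 2*dim) for dim-dimensional points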
        s_pred = np.concatenate((start_ext, goal_ext), axis=-1)
        s_pred_var = Variable(torch.from_numpy(s_pred)).type(FloatTensor)
        goal_var = Variable(torch.from_numpy(goal_ext)).type(FloatTensor)
        # goal_var = Variable(torch.from_numpy(goal)).type(FloatTensor)

        s_path = [start]
        num_points = 0
        tstart = time.time()

        while True:
            out1_var, _ = model(s_pred_var, None, force=True)
            out1 = out1_var.data.cpu().numpy()
            s_path.append(out1[0, 0, :])
            s_pred_var = torch.cat((out1_var, goal_var), dim=-1)

            num_points += 1
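            # stop once the prediction is within unit distance of the goal,
            # or after 50 waypoints to avoid an endless rollout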
            if np.linalg.norm(out1 - goal) < 1 or num_points > 50:
                break
        tend = time.time()
        # print('time elapsed for generated path: ', tend-tstart)
        s_path = np.asarray(s_path)
        path_set.append(s_path)

        if plotopt:
            plt.plot(s_path[:, 0], s_path[:, 1], 'k')
            plt.plot(start[0], start[1], 'g.', markersize=10)
            plt.plot(goal[0], goal[1], 'r.', markersize=10)

        if path_validity(s_path, object_c):
            valid_counter += 1
    return path_set, valid_counter / num_evals
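
For a single planning query, a minimal usage sketch; the workspace size (10, 10), the file name 'obstacles.json', and the in-scope trained `model` are illustrative assumptions, not names from the original project:

import numpy as np

# hypothetical invocation: plan one path between a fixed start and goal
start = np.array([1.0, 1.0])
goal = np.array([9.0, 9.0])
paths, valid_ratio = main(10, 10, 'obstacles.json', model,
                          start=start, goal=goal, plotopt=True)
print('path has {} waypoints, validity ratio {:.2f}'.format(
    len(paths[0]), valid_ratio))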
Example no. 2
def main():
    ''''''
    # generate original samples
    gs.generate_samples(SAMPLE_DATA)

    # generate groups of features
    feature_dir = os.path.join(FEATURE_DATA_DIR, TIME_NOW)
    if not os.path.exists(feature_dir):
        os.mkdir(feature_dir)
    gf.generate_features(SAMPLE_DATA, feature_dir, FEATURE_DATA_PREFIX, FEATURE_CREATORS)

    # combine samples
    print('Combine Samples')
    combined_file = '%s.cmb' % SAMPLE_DATA
    cs.combine_samples(SAMPLE_DATA, feature_dir, combined_file, FEATURE_DATA_PREFIX)

    # split samples
    print('Split Samples into train, test, and predict sets')
    train_file, test_file, predict_file = \
            ss.split_samples(combined_file, TEST_SAMPLES_RATIO, FORMATTED_DIR, ORIGINAL_PREFIX)

    # format samples as liblinear
    print('Format Samples')
    train_fmt_file = '%s.fmt' % train_file
    test_fmt_file = '%s.fmt' % test_file
    predict_fmt_file = '%s.fmt' % predict_file 
    fmt.format_liblinear(train_file, train_fmt_file)
    fmt.format_liblinear(test_file, test_fmt_file)
    fmt.format_liblinear(predict_file, predict_fmt_file)

    print('Train Model')
    mod_file = train(train_fmt_file)

    if not NEED_CROSS_VALIDATION:
        print('Test')
        predicted_file = test_model(test_fmt_file, mod_file)
        auc.calc_auc(predicted_file)
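
For reference, the LIBLINEAR input format that format_liblinear targets puts one sample per line: a label followed by sparse index:value pairs with ascending 1-based indices, for example:

+1 1:0.708 3:1 7:0.25
-1 2:0.5 4:1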
Example no. 3
# define the model as the NVIDIA CNN architecture to be used for training
model = architecture()
num_epochs = 20
batch_size = 128
# define the input data for model.fit_generator
print("number of training samples: {}".format(len(train_samples)))
samples_per_epoch = len(train_samples) - (len(train_samples) % batch_size)
print('samples_per_epoch', samples_per_epoch)
print("number of validation samples: {}".format(len(validation_samples)))
nb_val_samples = len(validation_samples) - (len(validation_samples) %
                                            batch_size)
print('nb_val_samples', nb_val_samples)
print('number of epochs:', num_epochs)
print('Calling model.fit_generator')
# training pipeline with Keras using the self-defined generator generate_samples
history = model.fit_generator(generate_samples(train_samples),
                              samples_per_epoch=samples_per_epoch,
                              nb_epoch=num_epochs,
                              validation_data=generate_samples(
                                  validation_samples, augment=False),
                              nb_val_samples=nb_val_samples)
# save the model only when the validation loss improves (val_best is assumed to be initialized earlier)
val_loss = history.history['val_loss'][0]
if val_loss < val_best:
    val_best = val_loss
    save_model("model")

print('Model fit generator finished')
print(history.history.keys())  # print the keys of the history dictionary
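
For context, model.fit_generator (Keras 1 API, matching the samples_per_epoch / nb_epoch / nb_val_samples arguments above) expects a generator that yields (X, y) batches indefinitely. A minimal sketch of such a generator; load_image and the per-sample steering attribute are illustrative assumptions, not names from the original source:

import numpy as np
from sklearn.utils import shuffle

def generate_samples_sketch(samples, batch_size=128, augment=True):
    """Yield (X, y) batches forever, as fit_generator requires."""
    while True:
        samples = shuffle(samples)
        for offset in range(0, len(samples), batch_size):
            batch = samples[offset:offset + batch_size]
            images = [load_image(s) for s in batch]  # load_image is hypothetical
            angles = [s.steering for s in batch]     # assumed sample attribute
            # augmentation (training only, cf. augment=False above) is
            # omitted in this sketch
            yield np.array(images), np.array(angles)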

# ================================================================================================================
Example no. 4
            "Specify a different folder or delete the folder.")

        with open(samples_metadata_file) as f:
            samples_metadata = json.load(f)

        assert (config["samples_generation_args"] ==
                samples_metadata["generation_args"]), (
                    "The samples were not generated by the same argument. "
                    "Specify a different folder or clean-up first")

    else:

        print("Generating the samples first...")
        generate_samples(
            cmudir=args.cmudir,
            output_dir=samples_dir,
            **config["samples_generation_args"],
        )

    # create output directory
    if not os.path.exists(config["dir"]):
        os.mkdir(config["dir"])

    all_args = generate_arguments(args.config)

    # Create all the rooms in parallel
    """
    results = []
    for arg in all_args:
        results.append(create_mixture(arg))
    """
Example no. 5
# get training samples
sample_labels = []
sample_pairs = []
sample_recipes = []
for name in train_names:
    ing_path = os.path.join(DATA_ROOT, "train", "ing_entity", name + ".ient")
    ins_path = os.path.join(DATA_ROOT, "train", "instruct_entity",
                            name + ".entity")
    link_path = os.path.join(DATA_ROOT, "train", "link", name + ".link")
    atts_path = os.path.join(DATA_ROOT, "train", "atts", name + ".atts")
    with open(ing_path) as ing, \
         open(ins_path) as ins, \
         open(link_path) as link, \
         open(atts_path) as atts:
        recipe = PreprocessedRecipe(ing, ins, link, atts)
        samples_in_recipe = generate_samples(recipe)
        for label, pair in samples_in_recipe:
            sample_labels.append(label)
            sample_pairs.append(pair)
            sample_recipes.append(recipe)

# Extract Features
features = extractor.extract_features(sample_pairs, sample_recipes)

# Training
cls = LinearSVMClassifier()
cls.train(sample_labels, features)

# Dev Testing
dev_labels = []
dev_pairs = []
Example no. 6
import numpy as np
from keras.utils import to_categorical  # one-hot encode integer labels

# `gen` is the project's sample-generator module (not shown in this snippet)
def prepare_data(n):
    data = gen.generate_samples(n, True, n_contributors=3)
    X = np.reshape(data[0], (n, 2, 10, 1))  # (samples, 2, 10, 1) tensors
    y = to_categorical(data[1])             # one-hot encode the labels
    return X, y
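
A minimal usage sketch; the 80/20 split and the compiled Keras model are illustrative, not from the original source:

X, y = prepare_data(1000)
split = int(0.8 * len(X))  # hold out 20% for validation
model.fit(X[:split], y[:split],
          validation_data=(X[split:], y[split:]))  # model: a compiled Keras net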
Example no. 7
# Model setup
rnnNet = RNN2D.partial(L=L,units=rnnUnits)
_,params = rnnNet.init_by_shape(random.PRNGKey(0),[(1,L,L)])
rnnModel = nn.Model(rnnNet,params)

# Optimizer setup
optimizer = flax.optim.Adam(learning_rate=learningRate, beta1=beta1, beta2=beta2).create(rnnModel)

# Generate data
print("*** Generating samples")
numTestSamples=500000
if numTestSamples < numSamples:
    numTestSamples = numSamples
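# the two integer literals below are the training and test RNG seeds
# (cf. seed_training / seed_test in the variant in Example no. 10)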
trainData, trainEnergies, testData, testEnergies =\
    generate_samples(numTestSamples,T,L,1234,3412) 
trainData = trainData[:numSamples]
trainEnergies = trainEnergies[:numSamples]
print("*** done.")

# RNN works with spin up/down = 1/0
trainData[trainData==-1]=0
testData[testData==-1]=0

# Reshape data into 2D configurations and training data into batches
trainData = np.reshape(trainData, (numSamples // batchSize, batchSize, L, L))
testData = np.reshape(testData, (testData.shape[0], L, L))
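# note: the batched reshape above requires numSamples to be a multiple of batchSize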

# Compute physical properties of the ensemble
S = physics.compute_entropy(L,T)
F = physics.compute_free_energy(L,T)
Example no. 8
def main():
    """ Main Training funtion. Parses inputs, inits logger, trains, and then generates some samples. """

    # torch.autograd.set_detect_anomaly(True)

    # Logger init
    logger.remove()
    logger.add(sys.stdout,
               colorize=True,
               format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | " +
               "<level>{level}</level> | " +
               "<light-black>{file.path}:{line}</light-black> | " +
               "{message}")

    # Parse arguments
    opt = get_arguments().parse_args()
    opt = post_config(opt)

    # Init wandb
    run = wandb.init(project="mario",
                     tags=get_tags(opt),
                     config=opt,
                     dir=opt.out)
    opt.out_ = run.dir

    # Init game specific inputs
    replace_tokens = {}
    sprite_path = opt.game + '/sprites'
    if opt.game == 'mario':
        opt.ImgGen = MarioLevelGen(sprite_path)
        replace_tokens = MARIO_REPLACE_TOKENS
        downsample = special_mario_downsampling
    elif opt.game == 'mariokart':
        opt.ImgGen = MariokartLevelGen(sprite_path)
        replace_tokens = MARIOKART_REPLACE_TOKENS
        downsample = special_mariokart_downsampling
    else:
        raise NameError("Name of --game not recognized. Supported: mario, mariokart")

    # Read level according to input arguments
    real = read_level(opt, None, replace_tokens).to(opt.device)

    # Train!
    generators, noise_maps, reals, noise_amplitudes = train(real, opt)

    # Generate Samples of same size as level
    logger.info("Finished training! Generating random samples...")
    in_s = None
    generate_samples(generators,
                     noise_maps,
                     reals,
                     noise_amplitudes,
                     opt,
                     in_s=in_s)

    # Generate samples of smaller size than level
    logger.info("Generating arbitrary sized random samples...")
    scale_v = 0.8  # Arbitrarily chosen scales
    scale_h = 0.4
    real_down = downsample(1, [[scale_v, scale_h]], real, opt.token_list)
    real_down = real_down[0]
    # necessary for correct input shape
    in_s = torch.zeros(real_down.shape, device=opt.device)
    generate_samples(generators,
                     noise_maps,
                     reals,
                     noise_amplitudes,
                     opt,
                     in_s=in_s,
                     scale_v=scale_v,
                     scale_h=scale_h,
                     save_dir="arbitrary_random_samples")
Example no. 9
def run_one(T,n,r,kappa,if_show_plot,sce='gauss',w=0.0,mixed_truth=0):
    params = wrap_params(T,n,r,kappa,w)
    start_time = time.time()    
    ### Define parameters and functions     
    log_nDelta_min = -3
    log_nDelta_max = 9
    num_nDelta = 50
    ## number of trials
    # T = 50      
    ## sample size
    # n = 100      
    ## rank
    # r = 20 or r = 40      
    ## condition number 
    # kappa = 50 or kappa = 10
    d = 2*r
    eigen = np.zeros(d)
    #eigen2 = np.zeros(n)
    eigen[:r] = np.sqrt(kappa)
    #eigen2[:r] = 1./10
    eigen[r:2*r] = 1.
    #eigen2[r:2*r] = 1.
    cov = np.diag(eigen)
    #cov2 = np.diag(eigen2)
        
    #Run simulations
    
    err_val = []
    err_newton = []
    err_grad = []
    err_plug = []
    err_oracle = []
    
    dev_val = []
    dev_newton = []
    dev_grad = []
    dev_plug = []
    dev_oracle = []
    
    nDelta_values = []
    count = 0

    for nDelta in np.logspace(log_nDelta_min, log_nDelta_max, num_nDelta, base=2):
        test_val = []
        test_newton = []
        test_grad = []
        test_plug = []
        test_oracle = []
        theta = np.zeros(d)
        #theta[r+1] = sqrt_Delta
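        # give theta a single nonzero coordinate of size sqrt(nDelta/n),
        # so that n * ||theta||_2^2 = nDelta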
        delta = np.sqrt(nDelta/n)
        theta[r+1] = delta # np.sqrt(delta) # c * np.sqrt( 2.* r) / n
        nDelta_values.append(nDelta)
        
        for t in range(T):
            Y0,Y1,X0,X1,H0,H1 = generate_samples(n,cov,theta)
            
            if sce=='mixture': # mix the two
                if mixed_truth:
                    Y0,X0,H0 = mix_samples(w,n,Y1,Y0,X1,X0,H1,H0)
                else:
                    Y1,X1,H1 = mix_samples(w,n,Y0,Y1,X0,X1,H0,H1)
            
            test_val.append(eval_test_val(Y0,Y1))
            test_newton.append(eval_test_newton(H0,X0,H1,X1))
            test_grad.append(eval_test_grad(Y0,Y1,X0,X1))
            test_plug.append(eval_test_plug(Y0,Y1,X0,X1,H0,H1))
            test_oracle.append(eval_test_oracle(Y0,Y1,X0,X1,theta))
        
        err_val.append(np.mean(test_val))
        err_newton.append(np.mean(test_newton))
        err_grad.append(np.mean(test_grad))
        err_plug.append(np.mean(test_plug))
        err_oracle.append(np.mean(test_oracle))
        
        dev_val.append(np.std(test_val)/np.sqrt(T))        
        dev_newton.append(np.std(test_newton)/np.sqrt(T))
        dev_grad.append(np.std(test_grad)/np.sqrt(T))
        dev_plug.append(np.std(test_plug)/np.sqrt(T))
        dev_oracle.append(np.std(test_oracle)/np.sqrt(T))
        
        count+=1
        if count%10==0:
            print(count)

    elapsed = time.time() - start_time
    print("Elapsed: %.0f sec" %elapsed)
    # Save results
    
    fpath = make_path(sce,T,n,r,kappa,w)
    os.makedirs(fpath,exist_ok=True)
    
    np.savetxt(fpath+'nDelta.txt',nDelta_values)

    np.savetxt(fpath+'err_val.txt',err_val)
    np.savetxt(fpath+'err_newton.txt',err_newton)
    np.savetxt(fpath+'err_grad.txt',err_grad)
    np.savetxt(fpath+'err_plug.txt',err_plug)
    np.savetxt(fpath+'err_oracle.txt',err_oracle)
    
    np.savetxt(fpath+'dev_val.txt',dev_val)
    np.savetxt(fpath+'dev_newton.txt',dev_newton)
    np.savetxt(fpath+'dev_grad.txt',dev_grad)
    np.savetxt(fpath+'dev_plug.txt',dev_plug)
    np.savetxt(fpath+'dev_oracle.txt',dev_oracle)
    
    # Plot results
    plot_one(fpath,params,if_show_plot)
    
    return fpath, params
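
A minimal invocation using the parameter values documented in the comments above (illustrative call, not from the original source):

fpath, params = run_one(T=50, n=100, r=20, kappa=50, if_show_plot=False)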
Example no. 10
# Optimizer setup
optimizer = flax.optim.Adam(learning_rate=learningRate,
                            beta1=beta1,
                            beta2=beta2).create(rnnModel)

# Load data
if inParameters['Training data']['training_data'] == "generate":
    print("*** Generating samples")
    numTestSamples = inParameters['Training data']['number_of_test_samples']
    if numTestSamples < numSamples:
        numTestSamples = numSamples
    trainData, trainEnergies, testData, testEnergies =\
        generate_samples(numTestSamples,T,L,
                        inParameters['Training data']['seed_training'],
                        inParameters['Training data']['seed_test'],
                        outDir=None, bc=bc, numSweeps=-1,
                        samplerType=inParameters['Training data']['sampler'])
    trainData = trainData[:numSamples]
    trainEnergies = trainEnergies[:numSamples]
    print("*** done.")
else:
    with open(trainDataFileName, 'rb') as dataFile:
        data = np.load(dataFile)
        trainData = data['trainSample'][:numSamples]
        testData = data['testSample']
        trainEnergies = data['trainEnergies'][:numSamples]
        testEnergies = data['testEnergies']

r = int(len(testEnergies)**(1. / 3))
R = len(testEnergies) // r