Code Example #1
def paint_dmp(self):
    # Fit a 100-step DMP to the recorded trajectory and export the result.
    # ax = plt.subplot(projection='3d')
    dmp_traj = dmp_process(100, self.full_traj, self.dfull_traj, self.ddfull_traj)
    # Optional 3D comparison: original trajectory in red, DMP rollout in green.
    # ax.scatter(self.full_traj[0], self.full_traj[1], self.full_traj[2], c='r')
    # ax.scatter(dmp_traj[0], dmp_traj[1], dmp_traj[2], c='g')
    # plt.show()
    generate_data(dmp_traj, self.name)
Code Example #2
    def create_letter(self):
        seg = Segmentation(self.full_traj, self.dfull_traj, self.ddfull_traj)
        strokes = seg.segmentate_two()

        # Create the letter 'p'. Indexing with enumerate() avoids
        # strokes.index(stroke), which is an O(n) lookup per iteration and
        # ambiguous if two strokes compare equal.
        for i, stroke in enumerate(strokes):
            vel, acc = get_vel_and_acc(stroke, self.freq)
            if i == 0:
                dmp_stroke = dmp_process(200, stroke, vel, acc)
                generate_data(dmp_stroke, self.name + "1")
            if i == 1:
                # Rerun the DMP with a shifted goal and graft its warped
                # z channel onto the unshifted stroke.
                z_change = dmp_process(200, stroke, vel, acc, 0, 0.15)
                dmp_stroke = dmp_process(200, stroke, vel, acc)
                dmp_stroke[2] = z_change[2]
                generate_data(dmp_stroke, self.name + "2")
            # plt.plot([-x for x in stroke[0]], stroke[2], label='stroke %d' % i, linewidth=5, alpha=0.6)
            plt.plot([-x for x in dmp_stroke[0]], dmp_stroke[2], label='DMP_stroke %d' % i)

        # Create the letter 'B' (alternative, currently disabled):
        # for i, stroke in enumerate(strokes):
        #     vel, acc = get_vel_and_acc(stroke, self.freq)
        #     if i == 0:
        #         dmp_stroke = dmp_process(200, stroke, vel, acc)
        #     if i == 1:
        #         z_change = dmp_process(200, stroke, vel, acc, 0, 0.15)
        #         dmp_stroke = dmp_process(200, stroke, vel, acc)
        #         dmp_stroke[2] = z_change[2]
        #     plt.plot([-x for x in dmp_stroke[0]], dmp_stroke[2], label='DMP_stroke %d' % i)
        #
        # # Third stroke of the 'B': shift the start of the z channel instead.
        # zz_change = dmp_process(200, strokes[-1], vel, acc, -0.15, 0)
        # dmp_stroke2 = dmp_process(200, strokes[-1], vel, acc)
        # dmp_stroke2[2] = zz_change[2]
        # plt.plot([-x for x in dmp_stroke2[0]], dmp_stroke2[2], label='DMP_stroke 3')

        # Create the letter 'D' (alternative, currently disabled):
        # for i, stroke in enumerate(strokes):
        #     vel, acc = get_vel_and_acc(stroke, self.freq)
        #     zdmp_stroke = dmp_process(200, stroke, vel, acc, 0.0, -0.1)
        #     xdmp_stroke = dmp_process(200, stroke, vel, acc, 0.0, 0.1)
        #     plt.plot([-x for x in xdmp_stroke[0]], zdmp_stroke[2], label='DMP_stroke %d' % i)

        plt.xticks(np.arange(0.35, 0.70, step=0.05))
        plt.yticks(np.arange(0.10, 0.40, step=0.05))
        plt.xlabel('x(m)')
        plt.ylabel('y(m)')
        # plt.title('New letter created via DMP from the writing of %s' % self.name[2:])
        plt.legend(loc='upper right', frameon=False)
        # plt.savefig('/home/jingwu/Desktop/CS8803/Project/Dec/%s_new_letter.png' % self.name)
        # plt.savefig('/home/jingwu/Desktop/CS8803/Project/Dec/%s_B.png' % self.name)
        plt.show()
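Note that dmp_process itself is not shown on this page. Judging from the call sites in Examples #1 and #2, it takes a step count, a trajectory with its velocity and acceleration, and two optional offsets that shift the rollout's start and goal. The stub below is a hypothetical sketch of that inferred interface, not the project's actual implementation:

def dmp_process(n_steps, traj, vel, acc, start_offset=0.0, end_offset=0.0):
    # Hypothetical signature inferred from the calls above: fit a DMP to
    # (traj, vel, acc), roll it out for n_steps with the start/goal shifted
    # by the given offsets, and return the generated trajectory.
    raise NotImplementedError("placeholder; the real implementation is not shown here")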
Code Example #3
File: eval_kernel.py Project: yux94/opt-mmd
def gen_data(n=None, dtype=learn_kernel.floatX):
    # Draw a fresh sample of size n; note that `n or args.n_test` also falls
    # back to args.n_test when n == 0, not only when n is None.
    return generate.generate_data(args, n or args.n_test, dtype=dtype)
Code Example #4
File: db_thread.py Project: yaesemin/for_all
from threading import Thread
from generate import generate_data, call_insert_into_table
import datetime

print('start generate data', str(datetime.datetime.now())[0:19])
x = generate_data(500000)
print('start insert data  ', str(datetime.datetime.now())[0:19])
# Single-threaded version:
# call_insert_into_table(x)
# print('end insert data    ', str(datetime.datetime.now())[0:19])

# Pass the callable and its argument separately; target=call_insert_into_table(x)
# would run the insert immediately in the main thread instead of in a new one.
t1 = Thread(target=call_insert_into_table, args=(x,))
t2 = Thread(target=call_insert_into_table, args=(x,))

t1.start()
t2.start()
t1.join()
t2.join()

print('end insert data    ', str(datetime.datetime.now())[0:19])
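As fixed above, both threads insert the same 500,000 generated rows, so the timing measures two concurrent inserts of identical data. If the intent was to split the work instead, a sketch (assuming call_insert_into_table accepts any slice of the generated rows):

half = len(x) // 2
t1 = Thread(target=call_insert_into_table, args=(x[:half],))
t2 = Thread(target=call_insert_into_table, args=(x[half:],))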
Code Example #5
File: db.py Project: yaesemin/for_all
from app import *
from generate import generate_data, call_insert_into_table
import datetime

print('start generate data', str(datetime.datetime.now())[0:19])
x = generate_data(1000000)
print('start insert data  ', str(datetime.datetime.now())[0:19])
call_insert_into_table(x)
print('end insert data    ', str(datetime.datetime.now())[0:19])

#print('x', x)

Code Example #6
File: learn_kernel.py Project: yangerkun/opt-mmd
def main():
    import argparse
    parser = argparse.ArgumentParser(description='''
        Learn a kernel function to maximize the power of a two-sample test.
        '''.strip())
    net = parser.add_argument_group('Kernel options')
    g = net.add_mutually_exclusive_group()
    g.add_argument('--net-version',
                   choices=sorted(net_versions),
                   default='nothing',
                   help="How to represent the values before putting them in "
                   "the kernel. Options defined in this file; "
                   "default '%(default)s'.")
    g.add_argument('--net-file',
                   help="A Python file containing a net_custom function "
                   "that does the representation; see existing options "
                   "for examples. (Same API: net_custom(in_p, in_q) "
                   "needs to return net_p, net_q, reg_term.)")
    g = net.add_mutually_exclusive_group(required=True)
    g.add_argument('--max-ratio',
                   '-r',
                   dest='criterion',
                   action='store_const',
                   const='ratio',
                   help="Maximize the t-statistic estimator.")
    g.add_argument('--max-mmd',
                   '-m',
                   dest='criterion',
                   action='store_const',
                   const='mmd',
                   help="Maximize the MMD estimator.")
    g.add_argument('--max-hotelling',
                   dest='criterion',
                   action='store_const',
                   const='hotelling',
                   help="Maximize the Hotelling test statistics; only works "
                   "with a linear kernel.")

    g = net.add_mutually_exclusive_group()
    g.add_argument('--rbf-kernel',
                   action='store_false',
                   dest='linear_kernel',
                   help="Use an RBF kernel; true by default.")
    g.add_argument('--linear-kernel', default=False, action='store_true')

    g = net.add_mutually_exclusive_group()
    g.add_argument('--biased-est',
                   default=True,
                   action='store_true',
                   help="Use the biased quadratic MMD estimator.")
    g.add_argument('--unbiased-est',
                   dest='biased_est',
                   action='store_false',
                   help="Use the unbiased quadratic MMD estimator.")
    g.add_argument('--streaming-est',
                   default=False,
                   action='store_true',
                   help="Use the streaming estimator for the MMD; faster "
                   "but much less powerful.")

    net.add_argument('--hotelling-reg',
                     type=float,
                     default=0,
                     help="Regularization for the inverse in the Hotelling "
                     "criterion; default %(default)s.")

    g = net.add_mutually_exclusive_group()
    g.add_argument('--opt-sigma',
                   default=False,
                   action='store_true',
                   help="Optimize the bandwidth of an RBF kernel; "
                   "default don't.")
    g.add_argument('--no-opt-sigma', dest='opt_sigma', action='store_false')

    g = net.add_mutually_exclusive_group()

    def context_eval(s):
        return eval(s)

    g.add_argument('--sigma',
                   default='1',
                   type=context_eval,
                   help="The initial bandwidth. Evaluated as Python, so you "
                   "could do e.g. --sigma 'np.random.lognormal()'.")
    g.add_argument('--init-sigma-median',
                   action='store_true',
                   default=False,
                   help="Initialize the bandwidth as the median of pairwise "
                   "distances between representations of the training "
                   "data.")

    g = net.add_mutually_exclusive_group()
    g.add_argument('--opt-log',
                   default=True,
                   action='store_true',
                   help="Optimize the log of the criterion; true by default.")
    g.add_argument('--no-opt-log', dest='opt_log', action='store_false')

    opt = parser.add_argument_group('Optimization')
    opt.add_argument('--num-epochs', type=int, default=10000)
    opt.add_argument('--batchsize', type=int, default=200)
    opt.add_argument('--val-batchsize', type=int, default=1000)
    opt.add_argument('--opt-strat', default='adam')
    opt.add_argument('--learning-rate', type=float, default=.01)
    opt.add_argument('--opt-args', type=ast.literal_eval, default={})

    data = parser.add_argument_group('Data')
    generate.add_problem_args(data)
    data.add_argument('--n-train', type=int, default=500)
    data.add_argument('--n-test', type=int, default=500)

    test = parser.add_argument_group('Testing')
    test.add_argument('--null-samples', type=int, default=1000)

    parser.add_argument('--seed', type=int, default=np.random.randint(2**31))
    g = parser.add_mutually_exclusive_group()
    g.add_argument('--log-params',
                   default=False,
                   action='store_true',
                   help="Log the network parameters at every iteration. Only "
                   "do this if you really want it; parameters are always "
                   "saved at the end.")
    g.add_argument('--no-log-params', dest='log_params', action='store_false')
    parser.add_argument('outfile',
                        help="Where to store the npz file of results.")

    args = parser.parse_args()
    if args.linear_kernel and (args.opt_sigma or args.sigma != 1):
        parser.error("Linear kernel and sigma are incompatible")
    if (not args.linear_kernel) and args.criterion == 'hotelling':
        parser.error("Hotelling criterion only available for linear kernel")

    n_train = args.n_train
    n_test = args.n_test
    np.random.seed(args.seed)
    X, Y = generate.generate_data(args, n_train + n_test, dtype=floatX)
    is_train = np.zeros(n_train + n_test, dtype=bool)
    is_train[np.random.choice(n_train + n_test, n_train, replace=False)] = True
    X_train = X[is_train]
    Y_train = Y[is_train]
    X_test = X[~is_train]
    Y_test = Y[~is_train]

    if args.net_file:
        # This should be a Python file with a net_custom function that takes
        # in_p, in_q and returns net_p, net_q, reg_term.
        with open(args.net_file) as f:
            code = f.read()
        register_custom_net(code)
        args.net_version = 'custom'
        args.net_code = code

    params, param_names, get_rep, value_log, sigma = train(
        X_train,
        Y_train,
        X_test,
        Y_test,
        criterion=args.criterion,
        biased=args.biased_est,
        hotelling_reg=args.hotelling_reg,
        streaming_est=args.streaming_est,
        linear_kernel=args.linear_kernel,
        opt_log=args.opt_log,
        init_log_sigma=np.log(args.sigma),
        init_sigma_median=args.init_sigma_median,
        opt_sigma=args.opt_sigma,
        net_version=args.net_version,
        num_epochs=args.num_epochs,
        batchsize=args.batchsize,
        val_batchsize=args.val_batchsize,
        opt_strat=args.opt_strat,
        log_params=args.log_params,
        **args.opt_args)

    print("Testing...", end='')
    sys.stdout.flush()
    try:
        p_val, stat, null_samps = eval_rep(
            get_rep,
            X_test,
            Y_test,
            linear_kernel=args.linear_kernel,
            sigma=sigma,
            hotelling=args.criterion == 'hotelling',
            null_samples=args.null_samples)
        print("p-value: {}".format(p_val))
    except ImportError as e:
        print()
        print("Couldn't import shogun:\n{}".format(e), file=sys.stderr)
        p_val, stat, null_samps = None, None, None

    to_save = dict(null_samps=null_samps,
                   test_stat=stat,
                   sigma=sigma,
                   p_val=p_val,
                   params=params,
                   param_names=param_names,
                   X_train=X_train,
                   X_test=X_test,
                   Y_train=Y_train,
                   Y_test=Y_test,
                   value_log=value_log,
                   args=args)

    try:
        dirname = os.path.dirname(args.outfile)
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)
        np.savez(args.outfile, **to_save)
    except Exception as e:
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            name = tmp.name
        msg = "Couldn't save to {}:\n{}\nSaving to {} instead"
        print(msg.format(args.outfile, e, name), file=sys.stderr)
        np.savez(name, **to_save)
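For reference, a minimal invocation consistent with the parser above: one of the criterion flags (--max-ratio / --max-mmd / --max-hotelling) is required and outfile is positional. The output filename here is illustrative:

python learn_kernel.py --max-ratio results.npz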
Code Example #7
File: main.py Project: LR0455/ParameterValueVector
import threading
# import local helper modules
import generate as gn
import pvv_model as pvv

if __name__ == '__main__':
    print("get...")

    data = gn.generate_data()

    pvv_model = pvv.ParameterValueVector(2, 1, 64, 2, 100, 300, 2048)
    pvv_model.pvv_model_train(data)

    data = gn.generate_data()
    pvv_model.pvv_model_predict(data)
Code Example #8
File: main.py Project: xdbdilab/ppdrl
            csvfile = open(config.result_dir, "w", newline="")
            writer = csv.writer(csvfile)
            writer.writerows(pointer)
            csvfile.close()
            print("WRITED IN SUIJI CSV !")
            #saver.save(sess, config.save_to + "/model_over.ckpt")
            print('==================================== reinforcement completed =====================================')



if __name__ == "__main__":
    #tfe.enable_eager_execution()
    node = '10'
    config, _ = get_config()
    config.node_num = int(node)
    rootdir = 'testmore/'+node
    calattr = Calattr()
    training_set = SC_DataGenerator()
    for num in range(5):
        for listFile in os.listdir(rootdir):
            evaluations_number = 0
            print(listFile)
            config.ist_nodeset = node + '/' + listFile
            #print('nodeset: ',config.ist_nodeset)
            config.temp_best_dir = 'testlog/new/calattar'+node+'_'+listFile+'_'+str(num)+'.txt'
            calattr.init(config.node_num, config.train_from, config.ist_nodeset)
            generate.generate_data(calattr,config)
            inputs, num, count = training_set.train_batch(config.input_dimension, config.train_from, config.ist_nodeset)
            config.all_node_num = count
            main(config,calattr,inputs,num,evaluations_number)