Example #1
import os
import warnings

import numpy as np
import pandas

import training_data as td


def load_datalist(path, option, class_map=None):
    """
    Load a list of dataset. All lines starting with '#' will be ignored. Each line of the list file contains the
    folder path w.r.t the list file and the device placement.
    
    :param path: path to the list file.
    :param option: An instant of td.TrainingDataOption.
    :param class_map: a dictionary represents the class map. If None, the class map will be built.
    :return: Nxd array of feature vectors, Nx1 integer array of labels, Nxc of responses and the class map.
    """
    root_dir = os.path.dirname(path)
    with open(path) as f:
        dataset_list = [s.strip('\n') for s in f.readlines()]
    feature_all = []
    label_all = []
    responses_all = []
    build_classmap = False
    if class_map is None:
        class_map = {}
        build_classmap = True
    imu_columns = [
        'gyro_x', 'gyro_y', 'gyro_z', 'linacce_x', 'linacce_y', 'linacce_z'
    ]
    for dataset in dataset_list:
        if len(dataset) > 0 and dataset[0] == '#':
            continue
        info = dataset.split(',')
        if len(info) != 2:
            warnings.warn('Line ' + dataset +
                          ' has the wrong format. Skipped.')
            continue
        data_path = root_dir + '/' + info[0] + '/processed/data.csv'
        if not os.path.exists(data_path):
            warnings.warn('File ' + data_path + ' does not exist. Skipped.')
            continue
        print('Loading dataset ' + data_path + ', type: ' + info[1])
        if info[1] not in class_map:
            if build_classmap:
                class_map[info[1]] = len(class_map)
            else:
                warnings.warn('Class %s not found in the class map. Skipped' %
                              info[1])
                continue
        data_all = pandas.read_csv(data_path)
        extra_args = {'target_smooth_sigma': 30.0, 'feature_smooth_sigma': 2.0}
        feature, target = td.get_training_data(data_all=data_all,
                                               imu_columns=imu_columns,
                                               option=option,
                                               extra_args=extra_args)
        feature_all.append(feature)
        responses_all.append(target)
        label = class_map[info[1]]
        label_all.append(np.array([label for _ in range(feature.shape[0])]))
    feature_all = np.concatenate(feature_all, axis=0)
    label_all = np.concatenate(label_all, axis=0)
    responses_all = np.concatenate(responses_all, axis=0)
    return feature_all, label_all, responses_all, class_map
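
A usage sketch, assuming a hypothetical datalist.txt and a default-constructed td.TrainingDataOption (both assumptions, not taken from the source):

# datalist.txt -- one "folder,placement" pair per line; '#' lines are skipped:
#   walking_01,handheld
#   walking_02,leg_pocket
option = td.TrainingDataOption()  # assumed default constructor
features, labels, responses, class_map = load_datalist('datalist.txt', option)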
Example #2
    def gen(path):
        bin_generator = NNUEBinData(path)
        for data in bin_generator:
            board, move, result, score = data
            try:
                #x, x2, x3 = get_training_data(board)
                x, x2 = get_training_data(board)
            except Exception:
                continue

            #yield (x, x2, x3), score
            yield (x, x2), score
Example #3
    def gen(path):
        bin_generator = NNUEBinData(path)
        for data in bin_generator:
            board, move, result, score = data
            y = float(score) / 100
            try:
                x, x1 = get_training_data(board)
            except Exception:
                continue

            yield (x, x1), y
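
Generators in this shape plug straight into tf.data; a minimal sketch, assuming the NNUE features are flat float vectors (the real shapes depend on get_training_data and are not shown in the excerpt):

import tensorflow as tf

dataset = tf.data.Dataset.from_generator(
    lambda: gen('training.bin'),  # 'training.bin' is a hypothetical path
    output_signature=(
        (tf.TensorSpec(shape=(None,), dtype=tf.float32),   # x
         tf.TensorSpec(shape=(None,), dtype=tf.float32)),  # x1
        tf.TensorSpec(shape=(), dtype=tf.float32),         # y
    ),
).batch(256)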
Example #4
def train_model(nb_epochs=0):
    model_y, model_acc = get_model()[:2]
    train_data = td.get_training_data()
    x_train = train_data['x_train']
    y_train = train_data['y_train']
    x_test = train_data['x_test']
    y_test = train_data['y_test']
    if nb_epochs > 0:
        model_y.fit(x_train, y_train, batch_size=24, epochs=nb_epochs)
    eval_train = model_y.evaluate(x_train, y_train, verbose=2)
    predict_train = tf.math.argmax(model_y.predict(x_train), 1)
    conf_train = tf.math.confusion_matrix(y_train, predict_train)
    #(conf_train[0, 0] + conf_train[1, 1] + conf_train[2, 2]) / np.sum(conf_train)

    eval_test = model_y.evaluate(x_test, y_test, verbose=2)
    predict_test = tf.math.argmax(model_y.predict(x_test), 1)
    conf_test = tf.math.confusion_matrix(y_test, predict_test)
    #(conf_test[0, 0] + conf_test[1, 1] + conf_test[2, 2]) / np.sum(conf_test)
    model_y.save('data/model.h5')
    nb_epochs_tot = nb_epochs
    if model_acc.shape[0] > 0:
        nb_epochs_tot += model_acc['nb_epochs'].iloc[-1]
    model_acct = pd.DataFrame(
        data={
            'loss_train': [eval_train[0]],
            'accuracy_train': [eval_train[1]],
            'nb_train': [x_train.shape[0]],
            'loss_test': [eval_test[0]],
            'accuracy_test': [eval_test[1]],
            'nb_test': [x_test.shape[0]],
            'nb_epochs': [nb_epochs_tot]
        })
    model_acc = pd.concat([model_acc, model_acct], sort=False, ignore_index=True)
    mu.mutex_update(model_acc, 'data/model.pkl')
    return {'conf_train': conf_train, 'conf_test': conf_test}
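
The commented-out expressions above compute overall accuracy as the trace of the confusion matrix over its sum; a short sketch of that calculation on the returned matrices (eager mode assumed):

import numpy as np

conf = train_model(0)['conf_test']      # a tf.math.confusion_matrix result
conf = np.asarray(conf)                 # tensor -> ndarray in eager mode
accuracy = np.trace(conf) / conf.sum()  # (conf[0,0] + conf[1,1] + conf[2,2]) / total for 3 classes
print('test accuracy: %.3f' % accuracy)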
"""
linear_regression.py

Run linear regression to learn a linear function from
features on a pair of consecutive repetitions to the
interval of time between the repetitions.

Sheila Ramaswamy <*****@*****.**>
"""
import numpy as np
from sklearn import linear_model
import training_data

(x, y) = training_data.get_training_data(1000)
x_train = x[:-200]
y_train = y[:-200]
x_test = x[-200:]
y_test = y[-200:]


regr = linear_model.LinearRegression()
regr.fit(x_train, y_train)

print('Coefficients:')
print(regr.coef_)
print('Residual sum of squares: %.2f' % np.mean((regr.predict(x_test) - y_test) ** 2))
print('Variance score (test): %.2f' % regr.score(x_test, y_test))
print('Variance score (train): %.2f' % regr.score(x_train, y_train))
Example #6
  images_A,landmarks_A = load_images( images_A[:minImages] ) 
  images_B,landmarks_B = load_images( images_B[:minImages] )

  print('Images A', images_A.shape)
  print('Images B', images_B.shape)

  images_A = images_A/255.0
  images_B = images_B/255.0

  # match the mean of A's color channels to B's (channels are the last axis)
  images_A[..., :3] += images_B[..., :3].mean( axis=(0,1,2) ) - images_A[..., :3].mean( axis=(0,1,2) )

  print( "press 'q' to stop training and save model" )

  batch_size = 32

  warped_A, target_A, mask_A = get_training_data( images_A,  landmarks_A,landmarks_B, batch_size )
  warped_B, target_B, mask_B  = get_training_data( images_B, landmarks_B,landmarks_A, batch_size )


  print(warped_A.shape, target_A.shape, mask_A.shape)

  figWarped = numpy.stack([warped_A[:6],warped_B[:6]],axis=0 )
  figWarped = numpy.clip( figWarped * 255, 0, 255 ).astype('uint8')
  figWarped = stack_images( figWarped )
  cv2.imshow( "w", figWarped )

  print(warped_A.shape)
  print(target_A.shape)

  from model import autoencoder_A
  from model import autoencoder_B
Example #7
optimizer_1 = optim.Adam([{'params': model.encoder.parameters()},
                          {'params': model.decoder_A.parameters()}]
                         , lr=5e-5, betas=(0.5, 0.999))
optimizer_2 = optim.Adam([{'params': model.encoder.parameters()},
                          {'params': model.decoder_B.parameters()}]
                         , lr=5e-5, betas=(0.5, 0.999))

# count the parameters in the model
# s = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Number of params: %d' % s)

if __name__ == "__main__":
    print('Start training, press \'q\' to stop')
    # training Encoder, Decoder
    # get deep fake images figure
    for epoch in range(start_epoch, args.epochs):
        batch_size = args.batch_size
        warped_A, target_A = get_training_data(images_A, batch_size)
        warped_B, target_B = get_training_data(images_B, batch_size)
        warped_A, target_A = toTensor(warped_A), toTensor(target_A)
        warped_B, target_B = toTensor(warped_B), toTensor(target_B)


        if args.cuda:
            warped_A = warped_A.cuda()
            target_A = target_A.cuda()
            warped_B = warped_B.cuda()
            target_B = target_B.cuda()

        warped_A, target_A, warped_B, target_B = Variable(warped_A.float()), Variable(target_A.float()), \
                                                 Variable(warped_B.float()), Variable(target_B.float())

        optimizer_1.zero_grad()
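
The excerpt stops right after optimizer_1.zero_grad(); a hedged sketch of how the step typically completes, borrowing the decoder-selection call model(x, 'A') from Example #9 and an L1 loss as in Example #14 (both are assumptions for this particular excerpt):

import torch.nn as nn

criterion = nn.L1Loss()
output_A = model(warped_A, 'A')         # decoder selection as in Example #9
loss_A = criterion(output_A, target_A)
loss_A.backward()
optimizer_1.step()

optimizer_2.zero_grad()
output_B = model(warped_B, 'B')
loss_B = criterion(output_B, target_B)
loss_B.backward()
optimizer_2.step()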
Example #8
m1 = Input(shape=(128, 128, 1))
sumModel = Model([input_warped, input_examples, m1], [x])
print(sumModel.summary())
try:
    sumModel.load_weights('weights.dat')
except Exception:
    print('Weights not found')

from keras_contrib.losses import DSSIMObjective

DSSIM = DSSIMObjective()

sumModel.compile(optimizer=optimizer, loss=['mae'])

for n in range(900000):
    imagesAGen = get_training_data(images_A, landmarks_A, batch_size)
    imagesBGen = get_training_data(images_B, landmarks_B, batch_size)
    imagesCGen = get_training_data(images_C, landmarks_C, batch_size)
    imagesDGen = get_training_data(images_D, landmarks_D, batch_size)

    xa, xae, ya, ma = next(imagesAGen)
    xb, xbe, yb, mb = next(imagesBGen)
    xc, xce, yc, mc = next(imagesCGen)
    xd, xde, yd, md = next(imagesDGen)

    xl = numpy.concatenate((xa, xb, xc, xd), axis=0)
    xel = numpy.concatenate((xae, xbe, xce, xde), axis=0)
    yl = numpy.concatenate((ya, yb, yc, yd), axis=0)
    ml = numpy.concatenate((ma, mb, mc, md), axis=0)

    indices = numpy.random.choice(range(0, xl.shape[0]),
Example #9
optimizer_1 = optim.Adam([{'params': model.encoder.parameters()},   # optimizer for A: a list of param-group dicts,
                          {'params': model.decoder_A.parameters()}]  # lr = 5e-5; betas are the coefficients for the
                         , lr=5e-5, betas=(0.5, 0.999))              # running averages of the gradient and its square
optimizer_2 = optim.Adam([{'params': model.encoder.parameters()},    # optimizer for B
                          {'params': model.decoder_B.parameters()}]
                         , lr=5e-5, betas=(0.5, 0.999))


if __name__ == "__main__":

    print('Start training, press \'q\' to stop')

    for epoch in range(start_epoch, args.epochs):
        batch_size = args.batch_size

        warped_A, target_A = get_training_data(images_A, batch_size)  # warped holds the augmented images; each needs a matching target image (the two must not be identical)
        warped_B, target_B = get_training_data(images_B, batch_size)

        warped_A, target_A = toTensor(warped_A), toTensor(target_A)  # convert to tensors
        warped_B, target_B = toTensor(warped_B), toTensor(target_B)

        if args.cuda:
            warped_A = warped_A.to(device).float()  # copy the tensors to the GPU selected by device; later ops run there
            target_A = target_A.to(device).float()  # .float() casts the tensor to float
            warped_B = warped_B.to(device).float()  # Variable was a wrapper around FloatTensor
            target_B = target_B.to(device).float()  # it seems the tensor is already converted to a Variable here?

        optimizer_1.zero_grad()
        optimizer_2.zero_grad()

        warped_A = model(warped_A, 'A')  # train A with decoder A
Example #10
    #                                                position=position_tango,
    #                                                orientation=orientation)
    # predicted_local_speed = gaussian_filter1d(predicted_local_speed, sigma=10.0, axis=0)
    # predicted_local_speed = predicted_local_speed[constraint_ind]

    # regress the local speed
    predicted_local_speed = np.empty([constraint_ind.shape[0], 3], dtype=float)
    for i in range(3):
        model_path = '../../../models/model_direct_local_speed_w200_s10_{}_cv.yml'.format(
            i)
        # regressor = joblib.load(model_path)
        # cv2.ml.SVM_load reads the model straight from the path
        regressor = cv2.ml.SVM_load(model_path)
        print(model_path + ' loaded')
        print('Predicting channel ', i)
        test_feature, _ = td.get_training_data(data_all, imu_columns,
                                               options, constraint_ind)
        predicted_local_speed[:, i] = regressor.predict(
            test_feature.astype(np.float32))[1][:, 0]

    # NOTICE: the values inside the cos_array are not all valid (the speed direction is undefined for static camera).
    #         Therefore it is necessary to construct a separate constraint index array
    constraint_ind_angle = constraint_ind[valid_array]
    predicted_cos_array = predicted_cos_array[valid_array]

    # write predicted speed to file for c++ optimizer
    # with open(FLAGS.dir + '/processed/speed_magnitude.txt', 'w') as f:
    #     f.write('{:d}\n'.format(constraint_ind.shape[0]))
    #     for i in range(constraint_ind.shape[0]):
    #         f.write('{:d} {:.8f}\n'.format(constraint_ind[i], predicted_speed_margnitude[i]))
    #
    # with open(FLAGS.dir + '/processed/vertical_speed.txt', 'w') as f:
Example #11
            ckpt = tf.train.get_checkpoint_state(saved_ckpt_path +
                                                 'decoder_B/')
            if ckpt and ckpt.model_checkpoint_path:
                saver_decoder_B.restore(sess, ckpt.model_checkpoint_path)
                print("Model saver_decoder_B restored...")
    #    saver = tf.train.import_meta_graph(saved_ckpt_path + '66700model-66700.meta')
    #    saver.restore(sess,tf.train.latest_checkpoint(saved_ckpt_path))

    #    train_summaryA_writer = tf.summary.FileWriter(saved_summary_train_path, sess.graph)
    #    train_summaryB_writer = tf.summary.FileWriter(saved_summary_train_path, sess.graph)

        print("press 'q' to stop training and save model")

        for epoch in range(start_step, training_epochs):
            # TRAIN ENCODER + DECODER_A WITH BATCH_A
            batch_images = sess.run(train_image_batch_A)
            warped_A, target_A = get_training_data(batch_images, batch_size)

            feeds = {X: warped_A, Y: target_A}
            sess.run(optimizer_A, feed_dict=feeds)

            # TRAIN ENCODER + DECODER_B WITH BATCH_B
            batch_images = sess.run(train_image_batch_B)
            warped_B, target_B = get_training_data(batch_images, batch_size)

            feeds = {X: warped_B, Y: target_B}
            sess.run(optimizer_B, feed_dict=feeds)

            print("Epoch %02d/%02d average cost: %.4f"
                   % (epoch, training_epochs, sess.run(loss_all_B, \
                                                       feed_dict={X: warped_B,\
                                                                  Y: target_B})))
Example #12
import model as mod
import download as dl
import training_data as td

#scores = dl.get_all_data()
tdata = td.get_training_data()

conf = mod.train_model(0)
print(conf)
mm = mod.get_model()[0]
mm.summary()

mod.model_predict()

for i in range(200):
    conf = mod.train_model(50)
    print(conf)
    print(mod.get_model()[1])
    mod.model_predict()

# print(mod.get_model()[1])
mod.model_predict()
Example #13
    grads = grads / tf.reduce_mean(tf.abs(grads), [1, 2, 3], keepdims=True)
    past_grads = m * past_grads + grads
    return past_grads


images_A = get_image_paths("data/trump")
images_B = get_image_paths("data/cage")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0

images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

print("press 'q' to stop training and save model")

batch_size = 14
w1, target_A, w2, w3 = get_training_data(images_A, batch_size)
warped_B, target_B, _, _ = get_training_data(images_B, batch_size)

tga = target_A.copy()

# iter_num = 255
past_grads = numpy.zeros_like(target_A)
m = 0.9
alpha = 0.01
epsilon = 0.2
num_iter = 31
beta_1, beta_2, accum_s, accum_g = tf.cast(0.9, tf.float64), tf.cast(
    0.999, tf.float64), numpy.zeros_like(target_A), numpy.zeros_like(target_A)

# print(target_A.shape)
# n = []
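
The constants above (m, alpha, epsilon, num_iter) together with the normalize-and-accumulate fragment at the top of this example suggest an MI-FGSM-style iterative update; a self-contained numpy sketch of one step (the function name and the final clipping are assumptions, and the gradient source is not shown in the excerpt):

import numpy

def momentum_step(adv, grads, past_grads, m=0.9, alpha=0.01):
    # normalize each gradient by its mean absolute value, accumulate with
    # momentum (the rule in the fragment above), then take a signed step
    grads = grads / numpy.mean(numpy.abs(grads), axis=(1, 2, 3), keepdims=True)
    past_grads = m * past_grads + grads
    return adv + alpha * numpy.sign(past_grads), past_grads

# each updated adv is then typically clipped into [tga - epsilon, tga + epsilon]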
Example #14
if args.cuda:
    model.cuda()
    cudnn.benchmark=True
criterion = nn.L1Loss()
optimizer_1 = torch.optim.Adam([{'params': model.encoder.parameters()},
                                {'params': model.decoder_a.parameters()}],
                               lr=5e-5, betas=(0.5, 0.999))
optimizer_2 = torch.optim.Adam([{'params': model.encoder.parameters()},
                                {'params': model.decoder_b.parameters()}],
                               lr=5e-5, betas=(0.5, 0.999))
if __name__=="__main__":
    files=open('log.txt','a+')
    batch_size=args.batch_size
    start=0
    for epoch in range(start,args.epochs):
        wrap_a,target_a=get_training_data(images_a,batch_size)
        wrap_b,target_b=get_training_data(images_b,batch_size)
        
        wrap_a,target_a=toTensor(wrap_a),toTensor(target_a)
        wrap_b,target_b=toTensor(wrap_b),toTensor(target_b)

        if args.cuda:
            wrap_a=wrap_a.cuda()
            wrap_b=wrap_b.cuda()
            target_a=target_a.cuda()
            target_b=target_b.cuda()

        wrap_a,target_a=Variable(wrap_a.float()),Variable(target_a.float())
        wrap_b,target_b=Variable(wrap_b.float()),Variable(target_b.float())

        optimizer_1.zero_grad()
Example #15
    decoder_A.save_weights( "models/decoder_A.h5" )
    decoder_B.save_weights( "models/decoder_B.h5" )
    print( "save model weights" )

images_A = get_image_paths( "data/trump" )
images_B = get_image_paths( "data/cage"  )
images_A = load_images( images_A ) / 255.0
images_B = load_images( images_B ) / 255.0

images_A += images_B.mean( axis=(0,1,2) ) - images_A.mean( axis=(0,1,2) )

print( "press 'q' to stop training and save model" )

for epoch in range(1000000):
    batch_size = 64
    warped_A, target_A = get_training_data( images_A, batch_size )
    warped_B, target_B = get_training_data( images_B, batch_size )

    loss_A = autoencoder_A.train_on_batch( warped_A, target_A )
    loss_B = autoencoder_B.train_on_batch( warped_B, target_B )
    print( loss_A, loss_B )

    if epoch % 100 == 0:
        save_model_weights()
        test_A = target_A[0:14]
        test_B = target_B[0:14]

    figure_A = numpy.stack([
        test_A,
        autoencoder_A.predict( test_A ),
        autoencoder_B.predict( test_A ),
Example #16
    training_set_all = []

    for data in datasets:
        if len(data) == 0:
            continue
        if data[0] == '#':
            continue
        info = data.strip('\n').split(',')
        data_path = data_root + '/' + info[0] + '/processed/data.csv'
        if not os.path.exists(data_path):
            warnings.warn('File ' + data_path + ' does not exist. Skipping this folder.')
            continue

        print('Loading ' + info[0])
        data_all = pandas.read_csv(data_path)
        training_set_all.append(td.get_training_data(data_all, imu_columns=imu_columns, option=options))

    training_set_all = np.concatenate(training_set_all, axis=0)
    print('Brief: training sample: {}, feature dimension: {}'
          .format(training_set_all.shape[0], training_set_all.shape[1]-1))

    # Train the gaussian process
    regressor = gaussian_process.GaussianProcessRegressor(alpha=args.alpha)
    print('Fitting Gaussian Process...')
    start_t = time.perf_counter()
    regressor.fit(training_set_all[:, :-1], training_set_all[:, -1])
    print('Done in {:.2f} seconds'.format(time.perf_counter() - start_t))

    if len(args.output) > 0:
        joblib.dump(regressor, args.output)
        print('Model saved at ' + args.output)
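
Once dumped with joblib, the regressor can be reloaded for prediction, as the commented-out line in Example #10 hints; a short sketch, where 'model.pkl' and features are placeholders:

import joblib  # older scikit-learn versions exposed this as sklearn.externals.joblib

regressor = joblib.load('model.pkl')  # the path passed as args.output above
speed_mean, speed_std = regressor.predict(features, return_std=True)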