def get_compensate_force(raw_plan, part1_index, part2_index, model='linear'):
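    """Compensation with the classical friction model: select the speed,
    gravity-torque, temperature and position features, normalize them, run the
    fitted linear or nonlinear friction model on each trajectory part, then
    denormalize, clip to the force limit and recompose the full series."""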
    #Take out what we need:
    raw_data_X = np.array([
        raw_plan['axc_speed_%s' % args.axis_num],
        raw_plan['axc_torque_ffw_gravity_%s' % args.axis_num],
        raw_plan['Temp'], raw_plan['axc_pos_%s' % args.axis_num]
    ])
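    #Prepend a constant-1 row that acts as the bias feature for the friction model.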
    raw_data_X = np.insert(raw_data_X, 0, 1, axis=0)
    raw_data_Y = raw_plan['need_to_compensate'].values

    #Normalize data:
    normer = data_stuff.normalizer(raw_data_X)
    normed_data_X, normed_data_Y = normer.normalize_XY(raw_data_X, raw_data_Y)

    #Get model:
    params_part1 = get_part1_model_params(model)
    params_part2 = get_part2_model_params(model)

    #Forward to get output:
    inputs = normed_data_X
    if model == 'linear':
        c0, c1, c2, c3, c4 = params_part1[:5]
        output_part1 = classical_model.linear_friction_model(
            inputs[:, part1_index], c0, c1, c2, c3, c4)
        c0, c1, c2, c3, c4 = params_part2[:5]
        output_part2 = classical_model.linear_friction_model(
            inputs[:, part2_index], c0, c1, c2, c3, c4)
    elif model == 'nonlinear':
        c0, v_brk, F_brk, F_C, c1, c2, c3, c4 = params_part1[:8]
        output_part1 = classical_model.nonlinear_friction_model(
            inputs[:, part1_index], c0, v_brk, F_brk, F_C, c1, c2, c3, c4)
        c0, v_brk, F_brk, F_C, c1, c2, c3, c4 = params_part2[:8]
        output_part2 = classical_model.nonlinear_friction_model(
            inputs[:, part2_index], c0, v_brk, F_brk, F_C, c1, c2, c3, c4)
    else:
        #Fail fast: otherwise output_part1/output_part2 would be undefined below.
        raise ValueError("model must be either 'linear' or 'nonlinear'")
    reference = normer.denormalize_Y(normed_data_Y)
    compensate_part1 = normer.denormalize_Y(output_part1)
    compensate_part2 = normer.denormalize_Y(output_part2)

    #Safety restriction: clip the denormalized compensation forces:
    compensate_part1 = np.clip(compensate_part1, -args.max_force,
                               args.max_force)
    compensate_part2 = np.clip(compensate_part2, -args.max_force,
                               args.max_force)

    #Compose together:
    #TODO: solve the switch point.
    compensate_full_series = np.zeros(len(raw_plan))
    compensate_full_series[part1_index] = compensate_part1.reshape(-1)
    compensate_full_series[part2_index] = compensate_part2.reshape(-1)
    reference_full_series = raw_plan['need_to_compensate'].values
    return compensate_full_series, reference_full_series
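
#The calls above assume classical_model.linear_friction_model(X, c0, c1, c2, c3, c4)
#maps the five normalized feature rows (bias, speed, gravity torque, Temp, position)
#to a compensation force. A minimal hypothetical sketch consistent with that call
#signature is shown below; the repository's actual implementation may differ.
def linear_friction_model_sketch(X, c0, c1, c2, c3, c4):
    #X has shape (5, N): constant-1 row, speed, gravity feed-forward torque,
    #temperature and position, matching the features built above.
    return c0 * X[0] + c1 * X[1] + c2 * X[2] + c3 * X[3] + c4 * X[4]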
Example #2
def get_compensate_force(raw_plan, part1_index, part2_index):
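    """NN-based compensation: stack speed/position/torque features for all six
    axes plus Temp (25 rows), normalize, forward the two trajectory parts
    through their networks, then denormalize, clip and recompose the series."""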
    #Take out what we need:
    raw_data_X = np.empty(shape=(0, len(raw_plan)))
    for i in [0, 1, 2, 3, 4, 5]:
        raw_data_X = np.vstack((raw_data_X, [
            raw_plan['axc_speed_%s' % i], raw_plan['axc_pos_%s' % i],
            raw_plan['axc_torque_ffw_gravity_%s' % i],
            raw_plan['axc_torque_ffw_%s' % i]
        ]))
    raw_data_X = np.vstack((raw_data_X, raw_plan['Temp']))
    raw_data_Y = raw_plan['need_to_compensate'].values

    #Normalize data:
    normer = data_stuff.normalizer(raw_data_X)
    normed_data_X, normed_data_Y = normer.normalize_XY(raw_data_X, raw_data_Y)

    #Get model:
    model_part1 = get_part1_model()
    model_part2 = get_part2_model()

    #Forward to get output:
    inputs = torch.FloatTensor(normed_data_X.T)
    output_part1 = model_part1.cpu()(inputs[part1_index]).detach().numpy()
    output_part2 = model_part2.cpu()(inputs[part2_index]).detach().numpy()
    reference = normer.denormalize_Y(normed_data_Y)
    compensate_part1 = normer.denormalize_Y(output_part1)
    compensate_part2 = normer.denormalize_Y(output_part2)

    #Save for Production:
    to_C(model_part1, model_part2, inputs)

    #Safety restriction: clip the denormalized compensation forces:
    compensate_part1 = np.clip(compensate_part1, -args.max_force,
                               args.max_force)
    compensate_part2 = np.clip(compensate_part2, -args.max_force,
                               args.max_force)

    #Compose together:
    #TODO: solve the switch point.
    compensate_full_series = np.zeros(len(raw_plan))
    compensate_full_series[part1_index] = compensate_part1.reshape(-1)
    compensate_full_series[part2_index] = compensate_part2.reshape(-1)
    reference_full_series = raw_plan['need_to_compensate'].values
    return compensate_full_series, reference_full_series
def get_data_one(args, raw_plan, mode):
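    """Return normalized (X, Y) arrays for one axis together with the fitted
    normalizer, after checking the raw features against the secure range."""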
    #Take out what we need:
    #Make inputs:
    raw_data_X = np.empty(shape=(0, len(raw_plan)))
    input_columns_names = []
    #data are indexed from 0, thus:
    local_axis_num = args.axis_num - 1
    input_columns_names += ['axc_pos_%s' % local_axis_num]
    input_columns_names += ['axc_speed_%s' % local_axis_num]
    input_columns_names += ['axc_torque_ffw_gravity_%s' % local_axis_num]
    input_columns_names += ['axc_torque_ffw_%s' % local_axis_num]
    input_columns_names += ['Temp']
    raw_data_X = raw_plan[input_columns_names].values.T
    raw_data_Y = raw_plan['need_to_compensate'].values

    #Normalize data:
    normer = data_stuff.normalizer(raw_data_X, raw_data_Y, args)
    normer.get_statistics(raw_data_X.shape[1])
    raw_secure_range = normer.get_raw_secure()
    judge_features_secure(args, raw_data_X, raw_secure_range,
                          input_columns_names)
    normed_data_X, normed_data_Y = normer.normalize_XY(raw_data_X, raw_data_Y)
    return normed_data_X, normed_data_Y, normer
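
#(Fragment) __init__ of a model-wrapper class: it rebuilds the NeuralNet
#architecture, loads trained weights from model_name, and prepares a normalizer
#sized to the expected input buffer.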
    def __init__(self, model_name, time_num=None):
        self.model = NN_model.NeuralNet(input_size=25, hidden_size=25,
                                        hidden_depth=3, output_size=1,
                                        device=torch.device(device_type))
        self.model.load_state_dict(torch.load(model_name).state_dict())
        #self.model = torch.load(model_name, map_location=torch.device(device_type))
        self.normer = data_stuff.normalizer(np.zeros((input_dim, buffer_length)))
        print("Model initialized:", model_name)
Example #5
def main():
    parser = argparse.ArgumentParser(description='Friction.')
    parser.add_argument('--learning_rate', '-LR', type=float, default=5e-2)
    parser.add_argument('--test_ratio', '-TR', type=float, default=0.2)
    parser.add_argument('--max_epoch', '-E', type=int, default=12)

    parser.add_argument('--hidden_width_scaler', type=int, default=5)
    parser.add_argument('--hidden_depth', type=int, default=3)
    parser.add_argument('--axis_num', type=int, default=4)
    parser.add_argument('--Cuda_number', type=int, default=0)
    parser.add_argument('--num_of_batch', type=int, default=100)
    parser.add_argument('--log_interval', type=int, default=100)
    parser.add_argument('--VISUALIZATION',
                        "-V",
                        action='store_true',
                        default=False)
    parser.add_argument('--NO_CUDA', action='store_true', default=False)
    parser.add_argument('--Quick_data',
                        "-Q",
                        action='store_true',
                        default=False)
    parser.add_argument('--mode',
                        type=str,
                        choices=["acc_uniform", "low_high"],
                        required=True)
    parser.add_argument('--further_mode',
                        type=str,
                        choices=["acc", "uniform", "low", "high", "all"],
                        required=True)
    parser.add_argument('--finetune', "-F", action='store_true', default=False)
    args = parser.parse_args()
    if not args.finetune:
        args.pool_name = "data-j%s" % args.axis_num
    else:
        print(
            "Running as finetune/restart; ignore the validation loss, which is not meaningful with so few validation samples"
        )
        args.pool_name = "finetune_path/"
        args.learning_rate *= 0.1
        args.test_ratio = 0.05
        args.restart_model_path = "../models/NN_weights_best_all_%s" % args.axis_num
    args.rated_torque = [5.7, 5.7, 1.02, 0.318, 0.318,
                         0.143][args.axis_num - 1]

    print("Start...%s" % args)
    if not args.NO_CUDA:
        device = torch.device("cuda", args.Cuda_number)
        print("Using GPU")
    else:
        device = torch.device("cpu")
        print("Using CPU")
    #-------------------------------------Do NN--------------------------------:
    #Get data:
    mode = ['train', args.mode]
    raw_data, part1_index, part2_index = data_stuff.get_data(args, mode)
    if args.further_mode == "acc" or args.further_mode == "low":
        this_part = part1_index
    elif args.further_mode == "uniform" or args.further_mode == "high":
        this_part = part2_index
    else:  # args.further_mode=="all"
        this_part = part1_index + part2_index

    #Take the variables we care about:
    #Make inputs:
    print("PART start...%s" % args.further_mode)
    raw_data_part = raw_data.iloc[this_part]
    data_X = np.empty(shape=(0, len(raw_data_part)))
    input_columns_names = []
    #data is indexed from 0, thus:
    local_axis_num = args.axis_num - 1
    input_columns_names += ['axc_pos_%s' % local_axis_num]
    input_columns_names += ['axc_speed_%s' % local_axis_num]
    input_columns_names += ['axc_torque_ffw_gravity_%s' % local_axis_num]
    input_columns_names += ['axc_torque_ffw_%s' % local_axis_num]
    input_columns_names += ['Temp']
    output_columns_names = ['need_to_compensate']
    data_X = raw_data_part[input_columns_names].values.T
    data_Y = raw_data_part[output_columns_names].values
    print("Shape of all input: %s, shape of all output: %s" %
          (data_X.shape, data_Y.shape))
    #import my_analyzer
    #my_analyzer.show_me_input_data(raw_data_part[input_columns_names])

    #Normalize data:
    normer = data_stuff.normalizer(data_X, data_Y, args)
    if not args.finetune:
        normer.generate_statistics()
    normer.get_statistics(data_X.shape[1])
    normer.generate_raw_secure()
    X_normed, Y_normed = normer.normalize_XY(data_X, data_Y)

    #MUST CHECK INPUT DISTRIBUTION!!!!!!
    print("Checking distribution of all data...")
    plot_utils.check_distribution(X_normed, input_columns_names, args)
    plot_utils.check_distribution(Y_normed.T, output_columns_names, args)

    #Manually split the dataset.  Don't push the dataset onto CUDA yet; do so after forming the DataLoader.
    X_train, Y_train, X_val, Y_val, raw_data_train, raw_data_val = data_stuff.split_dataset(
        args, X_normed, Y_normed, raw_data_part)
    nn_X_train = torch.autograd.Variable(torch.FloatTensor(X_train.T))
    nn_Y_train = torch.autograd.Variable(torch.FloatTensor(Y_train)).reshape(
        -1, 1)
    nn_X_val = torch.autograd.Variable(torch.FloatTensor(X_val.T))
    nn_Y_val = torch.autograd.Variable(torch.FloatTensor(Y_val)).reshape(-1, 1)
    #Just to show the high/low region
    _ = evaluate.evaluate_error_rate(args,
                                     Y_val.reshape(-1) * 0,
                                     Y_val,
                                     normer,
                                     raw_data_val,
                                     showup=args.VISUALIZATION)

    #Form pytorch dataset:
    train_dataset = Data.TensorDataset(nn_X_train, nn_Y_train)
    validate_dataset = Data.TensorDataset(nn_X_val, nn_Y_val)
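    #Derive the batch size so each epoch is split into roughly num_of_batch batches.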
    _batch_size = int(len(train_dataset) / args.num_of_batch)
    train_loader = Data.DataLoader(dataset=train_dataset,
                                   batch_size=_batch_size,
                                   shuffle=True,
                                   drop_last=False,
                                   num_workers=4,
                                   pin_memory=True)
    validate_loader = Data.DataLoader(dataset=validate_dataset,
                                      batch_size=_batch_size,
                                      shuffle=True,
                                      drop_last=False,
                                      num_workers=4,
                                      pin_memory=True)
    #Model:
    input_size = nn_X_train.shape[1]
    hidden_size = nn_X_train.shape[1] * args.hidden_width_scaler
    hidden_depth = args.hidden_depth
    output_size = nn_Y_train.shape[1]
    model = NN_model.NeuralNetSimple(input_size, hidden_size, hidden_depth,
                                     output_size, device)
    if args.finetune:
        print("Loading resume model:", args.restart_model_path)
        model.load_state_dict(torch.load(args.restart_model_path).state_dict())
        #embed()
    optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)
    if not args.finetune:
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            'min',
            patience=max(int(args.max_epoch / 10), 3),
            factor=0.7)
    else:
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         'min',
                                                         patience=1e9,
                                                         factor=0.7)
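    #During finetuning the huge patience effectively disables the plateau
    #scheduler, so the (already reduced) learning rate stays fixed.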
    print(model)

    #embed()
    #Train and Validate:
    print("Now training...")
    plt.ion()
    train_loss_history = []
    validate_loss_history = []
    train_error_history = []
    validate_error_history = []
    history_error_ratio_val = []
    overall_error_history = []
    #This is for saving the gif:
    if args.VISUALIZATION:
        plt.figure(figsize=(14, 8))
    for epoch in range(int(args.max_epoch + 1)):
        print("Epoch: %s" % epoch, "TEST AND SAVE FIRST")
        #Test first:
        #Push ALL data through the network at once; doing this on GPU would exhaust memory, so run it on CPU.
        predicted_train = np.array(model.cpu()(
            nn_X_train.cpu()).detach().cpu()).reshape(-1)
        predicted_val = np.array(model.cpu()(
            nn_X_val.cpu()).detach().cpu()).reshape(-1)
        _, _, error_ratio_train = evaluate.evaluate_error_rate(args,
                                                               predicted_train,
                                                               nn_Y_train,
                                                               normer,
                                                               raw_data_train,
                                                               showup=False)
        _, _, error_ratio_val = evaluate.evaluate_error_rate(args,
                                                             predicted_val,
                                                             nn_Y_val,
                                                             normer,
                                                             raw_data_val,
                                                             showup=False)
        train_error_history.append(error_ratio_train)
        validate_error_history.append(error_ratio_val)
        overall_error_history.append(
            (error_ratio_train + error_ratio_val).mean())
        scheduler.step(error_ratio_val)
        print("Using lr:", optimizer.state_dict()['param_groups'][0]['lr'])
        model.eval()
        print("Train set error ratio:", error_ratio_train)
        print("Validate set error ratio:", error_ratio_val)
        if not args.finetune:
            if epoch >= 1:
                if overall_error_history[-1] < np.array(
                        overall_error_history[:-1]).min():
                    torch.save(
                        model.eval(), "../models/NN_weights_best_%s_%s" %
                        (args.further_mode, args.axis_num))
                    print("***MODEL SAVED***")
        else:
            if epoch > 0:
                if train_error_history[-1] < np.array(
                        train_error_history[:-1]).min():
                    #the validation loss is not used here since the validation set is very small.
                    torch.save(
                        model.eval(),
                        "../models/NN_weights_best_%s_%s_finetune" %
                        (args.further_mode, args.axis_num))
                    print("***MODEL SAVED***")
        pd.DataFrame(np.vstack(
            (predicted_val, np.array(nn_Y_val.detach().cpu()).reshape(-1))).T,
                     columns=['predicted', 'target']).to_csv(
                         "../output/best_val_predicted_vs_target.csv",
                         index=None)
        #Always save figure:
        #plot_utils.visual(nn_Y_val, predicted_val, 'NN', args, title=error_ratio_val, epoch=epoch)
        history_error_ratio_val.append(error_ratio_val)
        #Train/Val then:
        train_loss, train_outputs, train_targets = train(
            args, model, device, train_loader, optimizer, epoch)
        validate_loss, validate_outputs, validate_targets = validate(
            args, model, device, validate_loader)
        #Infos:
        train_loss_history.append(train_loss)
        validate_loss_history.append(validate_loss)
        print("Train set  Average loss: {:.8f}".format(train_loss))
        print('Validate set Average loss: {:.8f}'.format(validate_loss))
    if args.VISUALIZATION:
        plt.close()
        plt.ioff()
        plt.clf()
        plt.plot(train_loss_history, label='train loss')
        plt.plot(validate_loss_history, label='val loss')
        plt.title("Train/Val loss history")
        plt.xlabel("Epoch")
        plt.ylabel("Loss")
        plt.legend()
        #plt.draw()
        #plt.pause(2)
        plt.show()
        plt.close()

    names_note = "NN weights"
    print("Names note:", names_note)
    print("NN:", "NONE")
    if not args.finetune:
        print("Error rate:",
              np.array(history_error_ratio_val).min(), "at index",
              np.array(history_error_ratio_val).argmin())
    else:
        pass
Example #6
    plt.figure(figsize=(14, 8))
    for _idx_, this_part in enumerate([part1_index, part2_index]):
        print("PART start...")
        raw_data_part = raw_data.iloc[this_part]
        data_Y = raw_data_part['need_to_compensate'].values

        #Take the variables we care about:
        data_X = np.array([
            raw_data_part['axc_speed_%s' % args.axis_num],
            raw_data_part['axc_torque_ffw_gravity_%s' % args.axis_num],
            raw_data_part['Temp'], raw_data_part['axc_pos_%s' % args.axis_num]
        ])
        data_X = np.insert(data_X, 0, 1, axis=0)

        #Normalize data:
        normer = data_stuff.normalizer(data_X)
        X_normed, Y_normed = normer.normalize_XY(data_X, data_Y)
        #Manually split the dataset:
        X_train, Y_train, X_val, Y_val, raw_data_part_train, raw_data_part_val = data_stuff.split_dataset(
            args, X_normed, Y_normed, raw_data_part)

        #Train linear model:
        print("Training on linear...")
        linear_names_note = ['c0', 'c1', 'c2', 'c3', 'c4']
        linear_weights, linear_cov = curve_fit(
            classical_model.linear_friction_model,
            X_train,
            Y_train,
            maxfev=5000000)
        #Train nonlinear model:
        print("Training on nonlinear...")
    axis_num = 4
    axis_num = axis_num - 1
    data_path = "../data/planning.csv"

    #Get data:
    raw_data = data_stuff.get_planning_data(data_path, axis_num)
    raw_data_X = np.array([
        raw_data['axc_speed_%s' % axis_num],
        raw_data['axc_torque_ffw_gravity_%s' % axis_num], raw_data['Temp'],
        raw_data['axc_pos_%s' % axis_num]
    ])  #TODO: this is axis 4
    raw_data_X = np.insert(raw_data_X, 0, 1, axis=0)
    raw_data_Y = raw_data['need_to_compensate'].values

    #Normalize data:
    normer = data_stuff.normalizer(raw_data_X)
    normed_data_X, normed_data_Y = normer.normalize_XY(raw_data_X, raw_data_Y)

    #Get model:
    params = get_model_params()

    #Forward to get output:
    c0, c1, c2, c3, c4 = params[:5]
    inputs = normed_data_X
    output = classical_model.linear_friction_model(inputs, c0, c1, c2, c3, c4)

    #Safety restriction: clip the denormalized compensation force:
    compensate = normer.denormalize_Y(output)
    compensate = np.clip(compensate, -max_force, max_force)

    print("Done")