Example #1
import time

import torch
from torch.utils.data import DataLoader, Dataset


def train(dataset: Dataset, epochs: int = 10):
    loader = DataLoader(dataset, batch_size=2, shuffle=True)

    model = NNModel(n_input=2, n_output=3)  # NNModel: 2-input, 3-class network defined elsewhere in the project
    # model.to(device='cpu')

    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()

    start_tm = time.time()
    for epoch in range(1, epochs + 1):
        train_loss = 0.0
        train_acc = 0
        for x, y in loader:
            optimizer.zero_grad()

            y_pred = model(x)
            # labels arrive one-hot with an extra length-1 dim; convert them to
            # class indices, which CrossEntropyLoss expects
            y = torch.max(torch.squeeze(y, dim=1), dim=1).indices

            loss = criterion(y_pred, y)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            train_acc += (y_pred.argmax(1) == y).sum().item()
        print(f'[epoch {epoch:02d}]\tloss:{train_loss:.4f}\tcorrect:{train_acc}/{len(dataset)}')
    finish_tm = time.time()
    print(f'train finished. ({finish_tm - start_tm:.1f}s)')
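A minimal invocation sketch for the train() helper above. It assumes NNModel's forward accepts (batch, 2) float inputs, and it builds labels one-hot with an extra length-1 dimension, since that is the layout the squeeze/max conversion implies; the toy tensors are illustrative, not from the source.

import torch
from torch.utils.data import TensorDataset

X = torch.randn(8, 2)               # 8 samples, 2 features
labels = torch.randint(0, 3, (8,))  # 3 classes
Y = torch.nn.functional.one_hot(labels, num_classes=3).float().unsqueeze(1)  # shape (8, 1, 3)
train(TensorDataset(X, Y), epochs=3)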
Example #2
File: gym_task.py Project: xynmxy/ZOOpt
    def new_nnmodel(self, layers):
        """
        Generate a new model

        :param layers: layer information
        :return: no return
        """
        # initialize NN model as policy
        self.__policy_model = NNModel()
        self.__policy_model.construct_nnmodel(layers)

        return
Example #3
    def __init__(self, args, rate=1):
        self.node = rospy.init_node(self.node_name)  # note: rospy.init_node returns None
        self.joint_sub = JointSubscriber(self.joint_topic)
        self.img_sub = ImageSubscriber(self.img_topic)
        self.rate = rate

        self.gen_motion = GenMotion(self.joint_service, self.gripper_service, rate=rate) 
        self.model = NNModel(args)

        self.app = QApplication([])
        self.window = GoalWidget(self.model.goal_img)

        # a 260x260 crop window, presumably ((row_start, row_end), (col_start, col_end))
        self.crop_size = ((36, 36 + 260), (250, 250 + 260))
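A small sketch of how a crop window in this form would typically be applied to an image array; frame is a hypothetical placeholder, not from the source.

import numpy as np

(y0, y1), (x0, x1) = ((36, 36 + 260), (250, 250 + 260))
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder camera frame
crop = frame[y0:y1, x0:x1]                       # 260x260 region of interest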
Example #4
    def make_predictions(self, hotel_name, platforms):
        all_reviews = self.read_from_platforms(hotel_name, platforms)
        reviews_df = self.make_dataframe(all_reviews)

        nnm = NNModel()
        predicted_df = nnm.generate_predictions(reviews_df)

        print(predicted_df.shape)

        # note: hard-coded absolute output path from the original project
        predicted_df.to_csv(
            'C:/Users/acfelk/Documents/IIT_Files/final year/FYP/fyp_workfiles/final_project/backend/predicted_data/'
            + hotel_name + '_' + platforms + '_predicted.csv')

        return predicted_df
Example #5
import os

import pandas as pd

# NNModel is project-local (cf. "from nn_model import NNModel" in Example #13)


def check_predict_ratings():
    data = pd.read_csv('test_data/test_revs.csv')

    nn_model = NNModel()

    predicted_df = nn_model.generate_predictions(data)

    predicted_df.to_csv('test_data/predicted_revs.csv')

    data = pd.read_csv('test_data/predicted_revs.csv')

    # a pandas column is never None, so the original `is not None` check could
    # not fail; test the predicted values themselves instead
    if 'pred_rating' in data.columns and data['pred_rating'].notna().all():
        print('Testing passed - Reviews Predicted Successfully!')
        os.remove('test_data/predicted_revs.csv')
    else:
        print('Review Prediction has failed')
Example #6
import torch


def main():
    X = torch.randn(1, 1, 32, 32)    # one 1-channel 32x32 image, the classic LeNet input size
    y = torch.randn(10).view(1, -1)  # random (1, 10) regression target for MSELoss

    model = NNModel()  # project-local network; its output must be (1, 10) to match y
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    optimizer.zero_grad()
    y_pred = model(X)
    loss = criterion(y_pred, y)

    print(y_pred)
    print(loss)

    loss.backward()
    optimizer.step()

    print('DONE')
Example #7
from datetime import datetime

import numpy as np
import tensorflow as tf
# the size/env_dict/sample API below matches the cpprb library;
# Simulation and NNModel are project-local
from cpprb import ReplayBuffer


def main():
    s_dim = 4
    a_dim = 2
    batch_size = 64

    env = "../envs/point_mass2d.xml"
    sim = Simulation(env, s_dim, a_dim, None, False)

    length = 500
    rb = ReplayBuffer(length,
                      env_dict={"obs": {"shape": (s_dim, 1)},
                                "act": {"shape": (a_dim, 1)},
                                "rew": {},
                                "next_obs": {"shape": (s_dim, 1)},
                                "done": {}})

    x = sim.getState()
    for _ in range(length):
        u = np.random.rand(1, a_dim, 1)
        x_next = sim.step(u)

        rb.add(obs=x, act=u, rew=0, next_obs=x_next, done=False)
        x = x_next


    model = NNModel(dt=0.1, state_dim=s_dim, action_dim=a_dim, name="nn_model")

    stamp = datetime.now().strftime("%Y.%m.%d-%H:%M:%S")
    logdir = "../graphs/test_training/{}".format(stamp)

    writer = tf.summary.create_file_writer(logdir)
    log = True

    epochs = 1000
    for e in range(epochs):
        sample = rb.sample(batch_size)
        gt = sample['next_obs']
        x = sample['obs']
        u = sample['act']
        model.train_step(gt, x, u, e, writer, log)
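Assuming the buffer is cpprb's ReplayBuffer, as the constructor and dict-returning sample() suggest, each sampled batch is a dict of NumPy arrays shaped (batch_size, *shape):

sample = rb.sample(4)
print(sample["obs"].shape)  # (4, 4, 1) given the env_dict above
print(sample["rew"].shape)  # (4, 1); keys declared as {} default to scalars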
Example #8
File: gym_task.py Project: zhusj/ZOOpt
    def new_nnmodel(self, layers):
        # initialize NN model as policy
        self.__policy_model = NNModel()
        self.__policy_model.construct_nnmodel(layers)

        return
Example #9
def main():
    if len(sys.argv) > 1 and any(["--help" in argv for argv in sys.argv[1:]]):
        help_and_exit()

    EPOCHS_NUM, HIDDEN_SIZE, HIDDEN_TYPE, VOCAB_FILE_FILENAME, PLOT_EPOCHS = initialize()

    data, alphabet = load_data(VOCAB_FILE_FILENAME)
    training_data = [(entry[:-1], entry[1:]) for entry in data]

    model = NNModel(alphabet, HIDDEN_SIZE, activation=HIDDEN_TYPE)

    # region train the model
    for epoch_num, epoch_loss in model.train(training_data, EPOCHS_NUM):
        print('\t'.join(
            [f"epoch_num: {epoch_num}", f"epoch_loss: {epoch_loss}"]))

        # region plot loss
        if PLOT_EPOCHS:
            plt.plot([epoch_num], [epoch_loss], 'rx')
            plt.draw()
            plt.pause(0.01)
        # endregion
    # endregion

    hidden_unit_activation_for_char = [{} for _ in range(HIDDEN_SIZE)]
    output_unit_activation_for_char = [{} for _ in range(len(alphabet))]

    for char in alphabet:
        predicted_chars, units_activations, weights = model.sample_with_logging(
            char, 1)
        predicted_char = predicted_chars[0]
        hidden_activations = units_activations["hidden_layer"]
        output_activations = units_activations["output_layer"]

        hidden_units_activations = [
            h_u_act[0] for h_u_act in hidden_activations[0]
        ]
        output_units_activations = [
            o_u_act[0] for o_u_act in output_activations[0]
        ]

        for unit_idx, unit_activation in enumerate(hidden_units_activations):
            hidden_unit_activation_for_char[unit_idx][char] = unit_activation

        for unit_idx, unit_activation in enumerate(output_units_activations):
            output_unit_activation_for_char[unit_idx][char] = unit_activation

    # region log model state
    for unit_idx, unit_activations in enumerate(hidden_unit_activation_for_char):
        for char in alphabet:
            print(f"activation of HIDDEN unit {unit_idx} for char {char}" +
                  '\t' + str(unit_activations[char]))

    for unit_idx, unit_activations in enumerate(output_unit_activation_for_char):
        for char in alphabet:
            output_char = model.ix_to_char[unit_idx]
            print(
                f"activation of OUTPUT unit {unit_idx} (represents char {output_char}) for char {char}"
                + '\t' + str(unit_activations[char]))

    for hidden_idx, from_input_to_idxth_hidden in enumerate(model.W_ih):
        for char_idx, weight in enumerate(from_input_to_idxth_hidden):
            input_char = model.ix_to_char[char_idx]
            print(
                f"weight INPUT unit {char_idx} (represents char {input_char}) to HIDDEN unit {hidden_idx}"
                + '\t' + str(weight))

    for hidden_tgt_idx, from_hidden_to_idxth_hidden in enumerate(model.W_hh):
        for hidden_src_idx, weight in enumerate(from_hidden_to_idxth_hidden):
            print(
                f"weight HIDDEN unit {hidden_src_idx} to HIDDEN unit {hidden_tgt_idx}"
                + '\t' + str(weight))

    for output_idx, from_hidden_to_idxth_output in enumerate(model.W_ho):
        for hidden_idx, weight in enumerate(from_hidden_to_idxth_output):
            output_char = model.ix_to_char[output_idx]
            print(
                f"weight HIDDEN unit {hidden_idx} to OUTPUT unit {output_idx} (represents char {output_char})"
                + '\t' + str(weight))
Example #10
def load_model():
    # restores the saved Keras .h5 model (cf. the save_model call in Example #14)
    model = NNModel()
    model.load_model("./model_51_file_training.h5")
    print(model.model.summary())
    return model
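The .h5 file and the .summary() call indicate a Keras model wrapped in the model attribute; a hedged follow-up using input_shape, a standard Keras Model property:

loaded = load_model()
print(loaded.model.input_shape)  # the sequence length new inputs must be padded to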
Example #11
    def __init__(self, name, acc, trans, noits, meats, u0, v0_prob, wk_prob,
                 vk_prob, total_frame, param):
        super().__init__(name, acc, trans, noits, meats, u0, v0_prob, wk_prob,
                         vk_prob, total_frame)
        self.model = NNModel(param)
Example #12
    train_df["target"] = np.log1p(train_df.price)
    train_df = preprocess(train_df)
    train_df, val_df = train_test_split(train_df,
                                        random_state=123,
                                        train_size=0.99)

    wbm = WordBatchModel()
    wbm.train(train_df)
    predsFM_val = wbm.predict(val_df)

    nnp = NNPreprocessor()
    train_df, WC = nnp.fit_transform(train_df)
    val_df = nnp.transform(val_df)

    nnm = NNModel(train_df=train_df,
                  word_count=WC,
                  batch_size=batch_size,
                  epochs=epochs)
    X_train = nnm.get_nn_data(train_df)
    Y_train = train_df.target.values.reshape(-1, 1)

    X_val = nnm.get_nn_data(val_df)
    Y_val = val_df.target.values.ravel()

    rnn_model = nnm.new_rnn_model(X_train)
    rnn_model.fit(X_train,
                  Y_train,
                  epochs=epochs,
                  batch_size=batch_size,
                  validation_data=(X_val, Y_val),
                  verbose=1)
    Y_val_preds_rnn = rnn_model.predict(X_val, batch_size=batch_size).ravel()
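Because the target column was built with np.log1p(price), predictions on this scale have to be mapped back through the inverse transform, np.expm1:

val_price_preds = np.expm1(Y_val_preds_rnn)  # undo the log1p target transform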
Example #13
def eval_experiments(path, iterations=20):

    import os
    from nn_model import NNModel
    from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                                 recall_score)

    experiments = {}
    for dr in os.listdir(path):
        full_path = os.path.join(path, dr)  # robust whether or not `path` ends with a separator
        dr_parts = dr.split('_')

        if len(dr_parts) != 8:
            continue
        [name, dataset, padding, classes, angles, fold, performance,
         date] = dr_parts

        experiment_id = '_'.join([name, classes, padding, angles])

        print(dr)
        for i in range(iterations):
            model = None
            try:
                model = NNModel(None,
                                None,
                                None,
                                mode='test',
                                model_state_path=full_path)
            except Exception:
                # directories without a loadable model state are skipped
                continue

            scores = model.get_raw_eval()

            if experiment_id not in experiments:
                experiments[experiment_id] = {
                    'actual': [],
                    'original': [],
                    'predicted': [],
                    'actual_labels': scores['actual_labels'],
                    'original_labels': scores['original_labels'],
                    'class_map': scores['class_map'],
                    'accuracy': [],
                    'f1': [],
                    'precision': [],
                    'recall': [],
                }

            if scores['original'] is not None:
                experiments[experiment_id]['original'].extend(
                    scores['original'])

            experiments[experiment_id]['actual'].extend(scores['actual'])
            experiments[experiment_id]['predicted'].extend(scores['predicted'])
            experiments[experiment_id]['accuracy'].append(
                accuracy_score(scores['actual'], scores['predicted']))
            experiments[experiment_id]['f1'].append(
                f1_score(scores['actual'],
                         scores['predicted'],
                         average='macro'))
            experiments[experiment_id]['precision'].append(
                precision_score(scores['actual'],
                                scores['predicted'],
                                average='macro'))
            experiments[experiment_id]['recall'].append(
                recall_score(scores['actual'],
                             scores['predicted'],
                             average='macro'))

    print("\n\n\n=============== Final report ===============")
    for exp_id in experiments:
        print_experiment(exp_id, experiments[exp_id])
    print("\n\n\n======================================")

    return experiments
Example #14
# Using n_files/2 as the min_freq is a rule of thumb I determined empirically to keep the training time reasonable
txtdata = data_interp.simplify_text_data(txtdata, min_freq=n_files / 2)
# Set the number of words to keep based on how many words appear more often than min_freq
vocab = data_interp.set_num_words(txtdata, min_freq=n_files / 2)
vocab_size = len(vocab) + 1
# Convert the data to sequences of integers with some maximum length
max_length, sequences = data_interp.training_data_to_padded_sequences(
    txtdata, max_len=15, shuffle_data=True)
# Break up the sequences into input (sequence of n words) and output (single word to test against)
input_data, output = sequences[:, :-1], sequences[:, -1]
output = to_categorical(output, num_classes=vocab_size)

# Save the tokenizer for later use, in case we randomized the training data
# If the training data was randomized we will need to know the words and word_index later for testing
tokenizer_json = data_interp.tokenizer.to_json()
with open("./tokenizer_%s_file_training.json" % n_files, "w",
          encoding="utf-8") as jsonf:
    jsonf.write(dumps(tokenizer_json, ensure_ascii=False))

# Prepare the model
model = NNModel()
# Input layer should have max_length - 1 neurons, output layer should have one neuron per word token
# Hidden layer size determined by the 2/3*(input layer + output layer) rule of thumb
model.prepare_model(max_length - 1,
                    vocab_size,
                    hidden_layer_size=int(
                        (vocab_size + max_length - 1) * 2 / 3))
# Fit on training data
model.fit_model(input_data, output)
# Save model, can be loaded later for testing without re-training
model.save_model("./model_%s_file_training.h5" % str(n_files))
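The matching load step appears in Example #10; mirroring the save path above, the trained model can be restored later without re-running the fit:

model = NNModel()
model.load_model("./model_%s_file_training.h5" % str(n_files))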