def forward(self, input_seq, train=False):
        loss = 0
        reconst_seq = []

        # encoding phase
        for t in range(input_seq.shape[1]):
            hE = self.encoder(input_seq[:, t])

        # Set the initial state of the decoder's LSTM to the final state of the encoder's LSTM.
        self.decoder._z = hE
        self.decoder._state = self.encoder._state

        # reconstruction phase
        # Note: reconstruction is done in reverse order
        reconst = self.linear(hE)
        reconst_seq.append(reconst.as_ndarray())
        loss += rm.mse(reconst, input_seq[:, -1])

        for t in range(1, input_seq.shape[1]):
            # Teacher forcing: feed the ground-truth frame while training,
            # otherwise feed back the previous reconstruction.
            hD = self.decoder(input_seq[:, -t]) if train else self.decoder(reconst)
            reconst = self.linear(hD)
            reconst_seq.append(reconst.as_ndarray())
            loss += rm.mse(reconst, input_seq[:, -(t + 1)])

        reconst_seq = reconst_seq[::-1]
        # (time_index, batch, value) => (batch, time_index, value)
        reconst_seq = np.transpose(reconst_seq, (1, 0, 2))
        return loss, reconst_seq
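This forward() belongs to a class that is not shown in the excerpt. A minimal sketch of what that container could look like, assuming encoder and decoder are rm.Lstm layers (whose _z and _state attributes the code above touches) and linear is an rm.Dense output layer; the attribute names are kept, the sizes are guesses:

import renom as rm

class LstmAutoEncoder(rm.Model):
    # Hypothetical layer definitions for the forward() above; sizes are assumptions.
    def __init__(self, input_size=1, hidden_size=32):
        super(LstmAutoEncoder, self).__init__()
        self.encoder = rm.Lstm(hidden_size)   # reads the input sequence step by step
        self.decoder = rm.Lstm(hidden_size)   # reconstructs it in reverse order
        self.linear = rm.Dense(input_size)    # maps a hidden state back to input space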
Example #2
 def forward(self, x):
     el_out = rm.sigmoid(self._encodelayer(x))
     l = rm.sigmoid(self._encodedlayer(el_out))
     dl_out = rm.sigmoid(self._decodelayer(l))
     g = self._decodedlayer(dl_out)
     loss = rm.mse(g, x)
     return loss
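As in the previous example, only the forward() is shown. A plausible skeleton for the layers it references, assuming plain rm.Dense units; the widths are placeholders, not taken from the source:

import renom as rm

class AutoEncoder(rm.Model):
    # Hypothetical layer widths; only the attribute names come from forward() above.
    def __init__(self, input_size, hidden_size=10, latent_size=2):
        super(AutoEncoder, self).__init__()
        self._encodelayer = rm.Dense(hidden_size)
        self._encodedlayer = rm.Dense(latent_size)   # bottleneck representation
        self._decodelayer = rm.Dense(hidden_size)
        self._decodedlayer = rm.Dense(input_size)    # reconstructs the input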
Example #3
def calc_importances(X_valid, y_valid, best_loss, model, modeldef, session):
    # Permutation importance: shuffle one feature at a time and measure how much
    # the validation MSE rises above the best (unpermuted) loss.
    NUM_PERM = 3
    importances = []
    for i in range(X_valid.shape[1]):
        tl = 0
        for k in range(NUM_PERM):
            p = np.random.permutation(X_valid.shape[0])
            X_randomized = np.copy(X_valid.T)
            X_randomized[i] = X_randomized[i, p]
            X_randomized = X_randomized.T
            pred = model(X_randomized.reshape(-1, 1, X_randomized.shape[1], 1))
            tl += float(rm.mse(pred, y_valid))
        # Importance is the mean loss increase over the permutations; negatives are clipped to zero.
        los = tl / NUM_PERM - best_loss
        importances.append(max(float(los), 0.0))

    total = np.sum(np.array(importances))
    if total != 0:
        importances = np.array(importances) / total
    else:
        # Every permutation left the loss unchanged: fall back to uniform importances.
        ev_len = len(importances)
        importances = [1.0 / ev_len] * ev_len
    modeldef.importances = pickle.dumps(np.round(importances, 3).tolist())
    session.add(modeldef)
    session.commit()
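calc_importances computes permutation feature importance: each column of X_valid is shuffled in turn and the increase in validation MSE over best_loss becomes that feature's score. A self-contained numpy illustration of the same idea, with made-up data and a stand-in predictor:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = 2.0 * X[:, :1] + 0.1 * rng.normal(size=(200, 1))    # only column 0 matters
predict = lambda A: 2.0 * A[:, :1]                       # stand-in for a trained model

base_loss = np.mean((predict(X) - y) ** 2)
for col in range(X.shape[1]):
    Xp = X.copy()
    Xp[:, col] = rng.permutation(Xp[:, col])             # break the column's link to y
    print(col, np.mean((predict(Xp) - y) ** 2) - base_loss)
# Column 0 shows a large loss increase; columns 1 and 2 stay near zero.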
Example #4
 def forward(self, x):
     el1_out = rm.sigmoid(self._encodelayer1(x))
     el2_out = rm.sigmoid(self._encodelayer2(el1_out))
     el3_out = rm.sigmoid(self._encodelayer3(el2_out))
     l = rm.sigmoid(self._encodedlayer(el3_out))
     dl1_out = rm.sigmoid(self._decodelayer1(l))
     dl2_out = rm.sigmoid(self._decodelayer2(dl1_out))
     dl3_out = rm.sigmoid(self._decodelayer3(dl2_out))
     g = self._decodedlayer(dl3_out)
     loss = rm.mse(g, x)
     return loss
Example #5
def main():
    df = pd.read_csv("crx.data", header=None, index_col=None)
    df = df.applymap(lambda d: np.nan if d == "?" else d)
    df = df.dropna(axis=0)
    sr_labels = df.iloc[:, -1]
    labels = sr_labels.replace({"+": "1", "-": "0"}).values.astype(float)
    data = df.iloc[:, :-1].values.astype(str)

    pattern_continuous = re.compile(r"^\d+\.?\d*\Z")
    continuous_idx = {}
    # Continuous columns are kept as numeric values; categorical columns are
    # one-hot encoded with pd.get_dummies, then everything is stacked into X.
    for i in range(data.shape[1]):
        is_continuous = bool(pattern_continuous.match(data[0][i]))
        if is_continuous:
            col = data[:, i].reshape(-1, 1).astype(float)
        else:
            col = pd.get_dummies(data[:, i]).values.astype(float)
        X = col if i == 0 else np.concatenate((X, col), axis=1)
    print("X:{X.shape}, y:{labels.shape}".format(**locals()))

    y = labels.reshape(-1, 1)

    model = AutoEncoder()
    batch_size = 128
    epoch = 50
    N = len(X)
    optimizer = Adam()
    for i in range(epoch):
        perm = np.random.permutation(N)
        loss = 0
        for j in range(0, N // batch_size):
            train_batch = X[perm[j * batch_size:(j + 1) * batch_size]]
            with model.train():
                l = rm.mse(model(train_batch), train_batch)
            grad = l.grad()
            grad.update(optimizer)
            loss += l.as_ndarray()
        train_loss = loss / (N // batch_size)
        print("epoch:{:03d}, train_loss:{:.4f}".format(i, float(train_loss)))
    model.visualize(X)
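The feature-building loop above relies on pd.get_dummies to expand each categorical column into one indicator column per category. A tiny illustration with made-up values:

import pandas as pd

print(pd.get_dummies(["a", "b", "a", "c"]).values.astype(float))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [1. 0. 0.]
#  [0. 0. 1.]]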
Example #6
def random_forest(session, modeldef, n_estimators, max_depth, X_train, y_train,
                  X_valid, y_valid):
    if modeldef.algorithm == RANDOM_FOREST:
        regr = RandomForestRegressor(n_estimators=n_estimators,
                                     max_depth=max_depth)
        filename = 'rf_' + str(modeldef.id) + '.pickle'
    elif modeldef.algorithm == XGBOOST:
        regr = xgb.XGBRegressor(n_estimators=n_estimators, max_depth=max_depth)
        filename = 'xgb_' + str(modeldef.id) + '.pickle'
    else:
        raise ValueError("{} is not a supported algorithm id.".format(modeldef.algorithm))

    # scikit-learn and xgboost expect a 1-D target for single-output regression.
    if y_train.shape[1] == 1:
        model = regr.fit(X_train, y_train.ravel())
    else:
        model = regr.fit(X_train, y_train)

    train_predicted = model.predict(X_train)
    train_predicted = train_predicted.reshape(-1, y_train.shape[1])
    predicted = model.predict(X_valid)
    predicted = predicted.reshape(-1, y_valid.shape[1])

    modeldef = train_task.prediction_sample_graph(modeldef, predicted, y_valid,
                                                  train_predicted, y_train)
    if not os.path.isdir(DB_DIR_ML_MODELS):
        os.makedirs(DB_DIR_ML_MODELS)
    filepath = os.path.join(DB_DIR_ML_MODELS, filename)
    with open(filepath, mode='wb') as f:
        pickle.dump(model, f)

    valid_loss = rm.mse(predicted, y_valid)

    feature_importances = model.feature_importances_.astype(float)
    modeldef.importances = pickle.dumps(
        np.round(feature_importances, 3).tolist())

    train_task.update_model(session, modeldef, predicted, y_valid, None,
                            valid_loss, None, filename)
Example #7
def _train(session, taskstate, model_id):
    modeldef = session.query(db.Model).get(model_id)

    total_batch = 0
    best_loss = None
    train_loss_list = []
    valid_loss_list = []

    with open(os.path.join(DATASRC_DIR, 'data.pickle'), mode='rb') as f:
        data = pickle.load(f)

    explanatory_column_ids = pickle.loads(modeldef.dataset.explanatory_column_ids)
    X = split_target(np.array(data), explanatory_column_ids)
    y = split_target(np.array(data), pickle.loads(modeldef.dataset.target_column_ids))

    selected_scaling = modeldef.dataset.selected_scaling
    if selected_scaling != 1:
        filename_y = modeldef.dataset.filename_y
        filename_X = modeldef.dataset.filename_X
        y = scaling_again(y, filename_y)
        X = scaling_again(X, filename_X)

    X_train = X[pickle.loads(modeldef.dataset.train_index)]
    X_valid = X[pickle.loads(modeldef.dataset.valid_index)]
    y_train = y[pickle.loads(modeldef.dataset.train_index)]
    y_valid = y[pickle.loads(modeldef.dataset.valid_index)]
    valid_true = y_valid

    taskstate.algorithm = modeldef.algorithm
    algorithm_params = pickle.loads(modeldef.algorithm_params)
    algorithm_params["num_target"] = y_train.shape[1]

    if modeldef.algorithm == USER_DEFINED:
        num_neighbors = int(algorithm_params["num_neighbors"])
        feature_graph = get_corr_graph(X_train, num_neighbors, explanatory_column_ids)
        algorithm_params["feature_graph"] = feature_graph.tolist()
        model = _load_usermodel(algorithm_params)

    elif modeldef.algorithm in [RANDOM_FOREST, XGBOOST]:
        n_estimators = int(algorithm_params["n_estimators"])
        if algorithm_params["max_depth"] == "":
            algorithm_params["max_depth"] = "None"
        if algorithm_params["max_depth"] == "None":
            max_depth = None
        else:
            max_depth = int(algorithm_params["max_depth"])
        modeldef.algorithm_params = pickle.dumps(algorithm_params)
        taskstate.signal()
        ml_task.random_forest(session, modeldef, n_estimators, max_depth,
                              X_train, y_train, X_valid, y_valid)
        taskstate.state = RunningState.FINISHED
        taskstate.signal()
        return

    else:
        num_neighbors = int(algorithm_params["num_neighbors"])
        if modeldef.algorithm == C_GCNN:
            feature_graph = get_corr_graph(X_train, num_neighbors, explanatory_column_ids)
        elif modeldef.algorithm == Kernel_GCNN:
            feature_graph = get_kernel_graph(X_train, num_neighbors, 0.01)
        elif modeldef.algorithm == DBSCAN_GCNN:
            feature_graph = get_dbscan_graph(X_train, num_neighbors)
        else:
            raise ValueError("{} is not a supported algorithm id.".format(modeldef.algorithm))

        model = GCNet(feature_graph, num_target=y_train.shape[1],
                      fc_unit=[int(u) for u in algorithm_params["fc_unit"]],
                      neighbors=num_neighbors,
                      channels=[int(u) for u in algorithm_params["channels"]])

        # update network params for prediction
        algorithm_params["feature_graph"] = feature_graph.tolist()

    modeldef.algorithm_params = pickle.dumps(algorithm_params)

    filename = '{}.h5'.format(int(time.time()))
    optimizer = Adam()

    taskstate.total_epoch = modeldef.epoch
    for e in range(modeldef.epoch):
        taskstate.nth_epoch = e
        N = X_train.shape[0]
        perm = np.random.permutation(N)
        loss = 0
        train_true_list = None
        train_predicted_list = None

        total_batch = N // modeldef.batch_size
        taskstate.total_batch = total_batch
        for j in range(total_batch):
            if taskstate.canceled:
                calc_importances(X_valid, y_valid, best_loss, model, modeldef, session)
                return

            taskstate.nth_batch = j
            taskstate.signal()

            index = perm[j * modeldef.batch_size:(j + 1) * modeldef.batch_size]
            train_batch_x = X_train[index].reshape(-1, 1, X_train.shape[1], 1)
            train_batch_y = y_train[index]

            # Loss function
            model.set_models(inference=False)
            with model.train():
                train_predicted = model(train_batch_x)
                batch_loss = rm.mse(train_predicted, train_batch_y)
                taskstate.train_loss = batch_loss.tolist()

                if rm.is_cuda_active():
                    train_predicted = train_predicted.as_ndarray()

                if train_predicted_list is None:
                    train_predicted_list = train_predicted
                else:
                    train_predicted_list = np.concatenate([train_predicted_list, train_predicted])

                if train_true_list is None:
                    train_true_list = train_batch_y
                else:
                    train_true_list = np.concatenate([train_true_list, train_batch_y])

            # Back propagation
            grad = batch_loss.grad()

            # Update
            grad.update(optimizer)
            loss += batch_loss.as_ndarray()

            taskstate.signal()

        train_loss = loss / total_batch
        train_loss_list.append(float(train_loss))

        # validation
        model.set_models(inference=True)
        N = X_valid.shape[0]
        perm = np.random.permutation(N)
        total_batch = N // modeldef.batch_size
        loss = 0
        valid_predicted = None
        valid_true = None
        for j in range(total_batch):
            if taskstate.canceled:
                calc_importances(X_valid, y_valid, best_loss, model, modeldef, session)
                return

            index = perm[j * modeldef.batch_size:(j + 1) * modeldef.batch_size]
            batch_x = X_valid[index]
            batch_y = y_valid[index]

            predicted = model(batch_x.reshape(-1, 1, batch_x.shape[1], 1))
            loss += rm.mse(predicted, batch_y)

            if rm.is_cuda_active():
                predicted = predicted.as_ndarray()

            if valid_predicted is None:
                valid_predicted = predicted
                valid_true = batch_y
            else:
                valid_predicted = np.concatenate([valid_predicted, predicted], axis=0)
                valid_true = np.concatenate([valid_true, batch_y], axis=0)

        valid_loss = loss / total_batch
        valid_loss_list.append(float(valid_loss))

        # update model info
        modeldef.train_loss_list = pickle.dumps(train_loss_list)
        modeldef.valid_loss_list = pickle.dumps(valid_loss_list)

        modeldef = prediction_sample_graph(modeldef, valid_predicted, valid_true,
                                           train_predicted_list, train_true_list)

        session.add(modeldef)
        session.commit()

        if e % 10 == 0:
            print("epoch: {}, valid_loss: {}".format(e, valid_loss))

        # update best loss model info
        if best_loss is None or best_loss > valid_loss:
            model.save(os.path.join(DB_DIR_TRAINED_WEIGHT, filename))
            update_model(session, modeldef, valid_predicted, valid_true, e,
                         valid_loss, filename, None)
            best_loss = valid_loss

    # calc importances
    taskstate.nth_epoch = taskstate.total_epoch
    taskstate.nth_batch = taskstate.total_batch
    taskstate.signal()
    calc_importances(X_valid, y_valid, best_loss, model, modeldef, session)

    taskstate.state = RunningState.FINISHED
    taskstate.signal()
Example #8
epoch = 0
loss_prev = np.inf
learning_curve, test_curve = [], []
while epoch < max_epoch:
    epoch += 1
    perm = np.random.permutation(train_size)
    train_loss = 0
    for i in range(train_size // batch_size):
        batch_x = X_train[perm[i * batch_size:(i + 1) * batch_size]]
        batch_y = y_train[perm[i * batch_size:(i + 1) * batch_size]]
        l = 0
        z = 0
        with model.train():
            # Step through the look_back window; only the loss at the final step
            # (the prediction of the next value) is backpropagated below.
            for t in range(look_back):
                z = model(batch_x[:, t])
                l = rm.mse(z, batch_y)
            model.truncate()
        l.grad().update(optimizer)
        train_loss += l.as_ndarray()
    train_loss /= (train_size // batch_size)
    learning_curve.append(train_loss)

    # test
    l = 0
    z = 0
    for t in range(look_back):
        z = model(X_test[:, t])
        l = rm.mse(z, y_test)
    model.truncate()
    test_loss = l.as_ndarray()
    test_curve.append(test_loss)
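The loop above assumes X_train and X_test hold overlapping windows of length look_back and that y_train/y_test contain the value following each window. A minimal sketch of how such windows could be built from a 1-D series (an assumption; this step is not shown in the source):

import numpy as np

def make_windows(series, look_back):
    # Slice a 1-D series into (window, next value) pairs for the LSTM loops above.
    X, y = [], []
    for i in range(len(series) - look_back):
        X.append(series[i:i + look_back])   # look_back consecutive values
        y.append(series[i + look_back])     # the value to predict
    X = np.asarray(X, dtype=np.float32).reshape(-1, look_back, 1)
    y = np.asarray(y, dtype=np.float32).reshape(-1, 1)
    return X, y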
Example #9
# train loop
while i < max_epoch:
    i += 1
    perm = np.random.permutation(train_size)
    train_loss = 0

    for j in range(train_size // batch_size):
        batch_x = X_train[perm[j * batch_size:(j + 1) * batch_size]]
        batch_y = y_train[perm[j * batch_size:(j + 1) * batch_size]]

        # Forward propagation
        l = 0
        z = 0
        with model.train():
            for t in range(look_back):
                z = model(batch_x[:, t].reshape(batch_size, -1))
                l = rm.mse(z, batch_y)
            model.truncate()

        # Back propagation and update
        l.grad().update(optimizer)
        train_loss += l.as_ndarray()
    train_loss /= (train_size // batch_size)
    learning_curve.append(train_loss)

    # test
    l = 0
    z = 0
    for t in range(look_back):
        z = model(X_test[:, t].reshape(test_size, -1))
        l = rm.mse(z, y_test)
    model.truncate()
    test_loss = l.as_ndarray()
    test_curve.append(test_loss)

    if i % period == 0:
        print('epoch: {}, train loss: {}, test loss: {}'.format(i, train_loss, test_loss))
        draw_pred_curve(i)
        # Simple early stopping: quit once the test loss stops improving.
        if test_loss > loss_prev * 0.99:
            print('Stop learning')
            break
        else:
            loss_prev = deepcopy(test_loss)
# predicted curve
plt.xlabel('x')
Example #10
batch = 1
N = len(X)

optimizer = Sgd(lr=0.05, momentum=0.0)

network = Mnist()

learning_curve = []
for i in range(epoch):
    perm = np.random.permutation(N)
    loss = 0
    for j in range(0, N // batch):
        train_batch = X[perm[j * batch:(j + 1) * batch]]
        response_batch = y[perm[j * batch:(j + 1) * batch]]
        with network.train():
            result = network(train_batch)
            #l = rm.sigmoid_cross_entropy(result, response_batch)
            l = rm.mse(result, response_batch)
        grad = l.grad()
        grad.update(optimizer)
        loss += l.as_ndarray()
    train_loss = loss / (N // batch)
    print("train_loss:{}".format(train_loss))
    learning_curve.append(train_loss)
print(network(X))
plt.plot(learning_curve, linewidth=3, label="train")
plt.ylabel("error")
plt.xlabel("epoch")
plt.legend()
plt.show()
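The Mnist class used in this loop is defined elsewhere. A minimal sketch of what it might look like in ReNom, with hypothetical layer sizes:

import renom as rm

class Mnist(rm.Model):
    # Hypothetical two-layer network; the sizes are assumptions, not from the source.
    def __init__(self):
        super(Mnist, self).__init__()
        self._layer1 = rm.Dense(100)
        self._layer2 = rm.Dense(10)

    def forward(self, x):
        return self._layer2(rm.relu(self._layer1(x)))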