Example #1
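    # Decoder tail of the network (the encoder layers are omitted in this
    # snippet): two upsampling steps restore the input temporal resolution.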
    x = UpSampling1D(2)(x)
    x = Conv1D(16, kernel1, activation="relu", dilation_rate=dil1, padding='same')(x)
    x = UpSampling1D(2)(x)

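    # 6-class per-sample softmax head, appended together with the auxiliary
    # output y_out defined earlier in the omitted part of the function.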
    out = Conv1D(6, kernel1, activation='softmax', padding='same')(x)
    outputs.append(out)
    outputs.append(y_out)

    autoencoder = Model(inputs, outputs)
    autoencoder.compile(optimizer='adam', loss='categorical_crossentropy')
    return autoencoder


import numpy as np


def autocorr(x):
    """Return the one-sided autocorrelation of a 1-D signal x."""
    result = np.correlate(x, x, mode='full')
    return result[len(result) // 2:]
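# Example usage (hypothetical signal), assuming numpy is imported above:
#   sig = np.sin(np.linspace(0, 8 * np.pi, 4096))
#   ac = autocorr(sig)  # ac[0] is the zero-lag maximum; later peaks mark the period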


if __name__ == "__main__":
    MODEL_PATH = "triple_detection"

    model = build_triple_detection_network((4096, 1))
    plot_model(model)  # visualize the freshly built architecture
    model = load_model("models\\" + MODEL_PATH + "_ma.h5")  # then swap in the trained weights
    model.summary()
    X, _ = load_split()

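    # only_eval=True: run evaluation with the artefact generator, skipping training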
    model = train_eval(model, X, only_eval=True, save_path=MODEL_PATH, generator=artefact_for_detection_3_in_2_out,
                       size=4096, epochs=150)
Example #2
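    # Middle and tail of an encoder-decoder network (earlier layers omitted):
    # convolution and pooling feed a recurrent bottleneck before the decoder.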
    x = Conv1D(32, 20, activation="relu", padding='same')(x)
    x = MaxPool1D(2)(x)

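    # bidirectional LSTM bottleneck: temporal context from both directions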
    x = Bidirectional(LSTM(32, return_sequences=True))(x)

    x = Conv1D(32, 20, activation="relu", padding='same')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(64, 20, activation="relu", padding='same')(x)
    x = UpSampling1D(2)(x)
    decoded = Conv1D(6, 20, activation='softmax', padding='same')(x)

    autoencoder = Model(inputs, decoded)
    autoencoder.compile(optimizer='adam', loss='categorical_crossentropy')
    return autoencoder


if __name__ == "__main__":
    MODEL_PATH = "recurrent_detection"

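    # (None, 1): variable-length, single-channel input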
    model = build_recurrent_network((None, 1))
    # model = load_model("models\\" + MODEL_PATH + "_ma.h5")
    model.summary()

    X = load_good_holter()
    train_eval(model,
               X,
               only_eval=True,
               save_path=MODEL_PATH,
               size=2048,
               epochs=150)
Example #3
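        # pick the synthetic-artefact generator that matches the model variant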
        if name == "dual_detection":
            generator = artefact_for_detection_dual
        elif name == "triple_detection":
            generator = artefact_for_detection_3_in_2_out
        else:
            generator = artefact_for_detection

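        # load the data and assemble three candidate evaluation sets:
        # labelled noise-free records, clean Holter recordings, and an MIT split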
        res, _ = load_split()
        path = "C:\\Users\\donte_000\\Downloads\\Telegram Desktop\\noise_lbl.csv"
        idxs = pd.read_csv(path, encoding='utf-8', sep=";")['i']
        X = load_dataset()["x"]
        x = X[np.asarray(idxs == 0), :, 0]  # keep records whose noise label is 0
        x = np.expand_dims(x, 2)
        res1 = train_test_split(x, test_size=0.25, random_state=42)

        res2 = load_good_holter()

        X = load_mit()
        res3 = train_test_split(X, test_size=0.1, random_state=32)

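        # evaluate on the labelled split; noise_type='em' presumably injects
        # electrode-motion artefacts via the generator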
        train_eval(model, (res1[0], res1[1]),
                   only_eval=True,
                   save_path=name + "_mit_test2",
                   generator=generator,
                   size=4096,
                   epochs=100,
                   noise_prob=[1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5, 0],  # five equally likely artefact classes, the sixth disabled
                   noise_type='em')
Example #4
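    # Training loop with early stopping on validation Recall@10; the best test
    # metrics and embedding snapshot are kept and persisted at the end.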
    def run(self):
        # self.model.result_embed = torch.load('./results/top40_best_movie/result_emb.pt')
        # pdb.set_trace()
        max_precision = 0.0
        max_recall = 0.0
        max_NDCG = 0.0
        num_decreases = 0
        max_val = 0
        os.makedirs('./results/' + self.varient, exist_ok=True)
        result_log_path = './results/' + self.varient + f'/train_log_({args.l_r:.6f}_{args.weight_decay:.6f}_{args.sampling:.6f}_{args.dataset:.9s}_{self.num_traces}_{self.back_pro}_{self.seed}).txt'
        result_path = './results/' + self.varient + '/result_{0}_{1}_{2}.txt'.format(
            args.dataset, self.num_traces, self.back_pro)
        result_best_path = './results/' + self.varient + '/result_best_{0}_{1}_{2}.txt'.format(
            args.dataset, self.num_traces, self.back_pro)
        user_cons = 0
        print(args.l_r)
        print(args.weight_decay)
        print(args.dataset)

        with open(result_log_path, "a") as f:
            f.write(result_log_path)
            f.write("\n")
        for epoch in range(self.num_epoch):

            self.model.train()
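            # resample the top-K user graph each epoch (consumed by DualGNN below)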
            user_graph, user_weight_matrix = self.topk_sample(self.K)
            print('Now, training starts ...')
            pbar = tqdm(total=len(self.train_dataset))
            sum_loss = 0.0
            sum_reg_loss = 0.0

            for data in self.train_dataloader:
                self.optimizer.zero_grad()
                if self.model_name == 'DualGNN':
                    self.loss, reg_loss = self.model.loss(data,
                                                          user_graph,
                                                          user_weight_matrix,
                                                          user_cons=user_cons)
                else:
                    self.loss, reg_loss = self.model.loss(data)
                self.loss.backward()
                self.optimizer.step()
                pbar.update(self.batch_size)
                sum_loss += self.loss.detach()  # detach so the running totals do not retain the autograd graph
                sum_reg_loss += reg_loss.detach()
            print('avg_loss:', sum_loss / self.batch_size)
            print('avg_reg_loss:', sum_reg_loss / self.batch_size)
            pbar.close()
            if torch.isnan(sum_loss / self.batch_size):
                with open(result_path, 'a') as save_file:
                    save_file.write(
                        'lr: {0} \t Weight_decay:{1} \t sampling:{2} \t SEED:{3} is NaN'
                        .format(args.l_r, args.weight_decay, args.sampling,
                                self.seed))
                break

            ranklist_tra, ranklist_vt, ranklist_tt = self.model.gene_ranklist(
                self.val_dataset, self.test_dataset)

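            # evaluate on train/val/test; validation Recall@10 drives early stopping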
            train_eval(epoch, self.model, 'Train', ranklist_tra, args,
                       result_log_path)
            _, val_recall_10, _ = test_eval(epoch, self.model,
                                            self.val_dataset, 'Val',
                                            ranklist_vt, args, result_log_path,
                                            0)
            test_precision_10, test_recall_10, test_ndcg_score_10 = test_eval(
                epoch, self.model, self.test_dataset, 'Test', ranklist_tt,
                args, result_log_path, 0)

            if self.model_name == 'DualGNN':
                if self.construction == 'weighted_sum':
                    attn_u = F.softmax(self.model.weight_u, dim=1)
                    attn_u = torch.squeeze(attn_u)

                    attn_u_max = torch.max(attn_u, 0)
                    attn_u_max_num = torch.max(attn_u, 0).indices[0]
                    attn_u_min = torch.min(attn_u, 0)
                    with open(result_log_path, "a") as f:
                        f.write(
                            '---------------------------------attn_u_max: {0}-th epoch {1}-th user 0 visual:{2:.4f} acoustic:{3:.4f} text:{4:.4f}---------------------------------'
                            .format(epoch, 10, float(attn_u_max[0][0]),
                                    float(attn_u_max[0][1]),
                                    float(attn_u_max[0][2])))  # write the string to the log file
                        f.write("\n")
                    with open(result_log_path, "a") as f:
                        f.write(
                            '---------------------------------attn_u_num: {0}-th epoch {1}-th user 0 visual:{2:.4f} acoustic:{3:.4f} text:{4:.4f}---------------------------------'
                            .format(
                                epoch, 10, float(attn_u[attn_u_max_num][0]),
                                float(attn_u[attn_u_max_num][1]),
                                float(attn_u[attn_u_max_num][2])))  # write the string to the log file
                        f.write("\n")
                    with open(result_log_path, "a") as f:
                        f.write(
                            '---------------------------------attn_u_min: {0}-th epoch {1}-th user 0 visual:{2:.4f} acoustic:{3:.4f} text:{4:.4f}---------------------------------'
                            .format(epoch, 10, float(attn_u_min[0][0]),
                                    float(attn_u_min[0][1]),
                                    float(attn_u_min[0][2])))  # write the string to the log file
                        f.write("\n")
            self.test_recall.append(test_recall_10)
            self.test_ndcg.append(test_ndcg_score_10)
            # pdb.set_trace()
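            # new best validation recall: snapshot the test metrics and embeddings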
            if val_recall_10 > max_val:
                max_precision = test_precision_10
                max_recall = test_recall_10
                max_NDCG = test_ndcg_score_10
                max_val = val_recall_10
                num_decreases = 0
                best_embed = self.model.result_embed
            else:
                if num_decreases > 20 and self.model_name != 'Stargcn':
                    with open(result_path, 'a') as save_file:
                        save_file.write(
                            'lr: {0} \t Weight_decay:{1} \t sampling:{2}=====> Precision:{3} \t Recall:{4} \t NDCG:{5} \t SEED:{6}\r\n'
                            .format(args.l_r, args.weight_decay, args.sampling,
                                    max_precision, max_recall, max_NDCG,
                                    self.seed))
                    # torch.save(best_attn,'./results/'+self.varient+'/u_prefer_weight.pt')
                    torch.save(best_embed,
                               './results/' + self.varient + '/result_emb.pt')
                    if not os.path.exists(result_best_path):
                        with open(result_best_path, 'a') as save_file:
                            save_file.write(
                                'Recall:{0}\r\n'.format(max_recall))
                    # pdb.set_trace()
                    # read back the stored best recall for reference
                    with open(result_best_path) as file:
                        maxs = file.readline()
                    maxvalue = float(maxs.strip('[Recall:\n]'))
                    break
                else:
                    num_decreases += 1
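            # at the end of a full run, persist results if this run beats the stored best recall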
            if epoch > 990:
                with open(result_path, 'a') as save_file:
                    save_file.write(
                        'lr: {0} \t Weight_decay:{1} \t sampling:{2}=====> Precision:{3} \t Recall:{4} \t NDCG:{5} \t SEED:{6}\r\n'
                        .format(args.l_r, args.weight_decay, args.sampling,
                                max_precision, max_recall, max_NDCG,
                                self.seed))
                with open(result_best_path) as file:
                    maxs = file.readline()
                maxvalue = float(maxs.strip('[Recall:\n]'))
                if max_recall >= maxvalue:
                    np.save('./results/' + self.varient + '/recall.npy',
                            self.test_recall)
                    np.save('./results/' + self.varient + '/ndcg.npy',
                            self.test_ndcg)
                    torch.save(self.model.result_embed,
                               './results/' + self.varient + '/result_emb.pt')
                break
        return max_recall, max_precision, max_NDCG
Example #5
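    # Final decoder stage (the rest of the model is omitted in this snippet):
    # two upsampling steps, then a per-sample 6-class softmax output.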
    x = UpSampling1D(2)(x)
    x = Conv1D(16,
               kernel1,
               activation="relu",
               dilation_rate=dil1,
               padding='same')(x)
    x = UpSampling1D(2)(x)

    x = Conv1D(6, kernel1, activation='softmax', padding='same')(x)

    autoencoder = Model(inputs, x)
    autoencoder.compile(optimizer='adam', loss='categorical_crossentropy')
    return autoencoder


if __name__ == "__main__":
    MODEL_PATH = "dual_input_detection"

    model = build_dual_input_network((None, 1))
    # model = load_model("models\\" + MODEL_PATH + "_ma.h5")
    model.summary()

    X = load_good_holter()
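    # full training run (only_eval=False) with the dual-input artefact generator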
    train_eval(model,
               X,
               only_eval=False,
               save_path=MODEL_PATH,
               generator=artefact_for_detection_dual,
               size=4096,
               epochs=100)