Code example #1
def main():

    ########### Load the config file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()


    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    val_batch_size = int(ch.config["model"]["val_batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])
    log_interval = int(ch.config["log"]["log_interval"])


    ########### Build the data loaders ##########
    data_loader = dataset.MyDataset(train_batch_size, val_batch_size)

    data_loader_train = data_loader.load_train_data()
    data_loader_test = data_loader.load_test_data()


    ########### Train and evaluate ##########
    train.train_and_test(num_epochs, learning_rate, class_size,
                         data_loader_train, data_loader_test,
                         log_interval).train_epoch()
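
Examples #1, #2, and #8 drive all of their hyperparameters from a config.ini read through config.ConfigHandler, but the file itself never appears in the snippets. The sketch below reconstructs a minimal file from the keys the code reads; the section and key names come from the code above, while every value is an illustrative placeholder:

import configparser

# Illustrative config.ini contents; key names match the snippets above,
# the values are placeholders, not taken from the original projects.
EXAMPLE_INI = """
[model]
train_batch_size = 64
val_batch_size = 64
valid_batch_size = 64
test_batch_size = 64
num_epochs = 10
learning_rate = 0.001
class_size = 10

[log]
log_interval = 100
version_name = v1

[data]
train_file = train.csv
valid_file = valid.csv
test_file = test.csv

[save]
pred_file = pred.csv
"""

cfg = configparser.ConfigParser()
cfg.read_string(EXAMPLE_INI)
assert int(cfg["model"]["train_batch_size"]) == 64
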
Code example #2
def main():
    ########### Load the config file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()

    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    test_batch_size = int(ch.config["model"]["test_batch_size"])

    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])

    ########### Read log settings and data paths ##########
    log_interval = int(ch.config["log"]["log_interval"])
    version_name = ch.config["log"]["version_name"]

    train_file = ch.config["data"]["train_file"]
    test_file = ch.config["data"]["test_file"]

    ########### Build the training data loader ##########
    data_train = Dataset.ImageDataset(train_file, train=True)
    data_loader_train = torch.utils.data.DataLoader(
        dataset=data_train, batch_size=train_batch_size, shuffle=True)

    ########### Build the test data loader ##########
    data_test = Dataset.ImageDataset(test_file, train=False)
    data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                                   batch_size=test_batch_size,
                                                   shuffle=False)

    ########### Train and evaluate ##########
    train.train_and_test(num_epochs, learning_rate, class_size,
                         data_loader_train, data_loader_test, log_interval,
                         version_name).train_epoch()
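
Dataset.ImageDataset is project-specific and not defined in these snippets. For torch.utils.data.DataLoader it only needs to be a map-style dataset, i.e. implement __len__ and __getitem__. A minimal sketch under that assumption follows; the one-line-per-sample index format and the placeholder image tensor are invented for illustration:

import torch
from torch.utils.data import Dataset

class ImageDataset(Dataset):
    """Sketch of a map-style dataset; the real ImageDataset differs."""

    def __init__(self, file_path, train=True):
        self.train = train  # real code would toggle augmentation here
        # Hypothetical index format: one "path,label" pair per line.
        with open(file_path) as f:
            self.samples = [line.strip().split(",") for line in f if line.strip()]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, label = self.samples[idx]
        # Real code would load and transform the image at `path`;
        # a zero tensor keeps the sketch self-contained.
        image = torch.zeros(3, 224, 224)
        return image, int(label)
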
Code example #3
File: BP.py  Project: LSSxyz/AI-assignment
def Input_lr_and_epoch(self, mes1, selection):
    dlg = wx.TextEntryDialog(None, u"Please enter the learning rate and number of iterations (space-separated):",
                             u"Neural network BP algorithm", u"Example: 0.1 100")
    mes2 = ""
    if dlg.ShowModal() == wx.ID_OK:
        mes2 = dlg.GetValue()  # get the value entered in the text box
        train_and_test(mes1, mes2, selection, self.FileName.GetValue())
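
The dialog collects the learning rate and iteration count as one space-separated string, so train_and_test presumably splits mes2 itself. A hypothetical parsing step (not shown in the project):

# Hypothetical: how train_and_test might unpack the dialog value.
lr_str, epochs_str = mes2.split()
lr, epochs = float(lr_str), int(epochs_str)
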
Code example #4
def try_CNN():
    cfg = CNNConfig()
    train.train_and_test(load_existing_dataset=False,
                         load_existing_model=False,
                         train_model=True,
                         check_accuracy=True,
                         modelType=CNNModel3D,
                         dataProviderType=FFT3DDataProvider,
                         labelProviderType=SimpleDNNLabelProvider,
                         config=cfg)
Code example #5
def try_SRF():
    cfg = SimpleRandomForest()
    train.train_and_test(load_existing_dataset=False,
                         load_existing_model=False,
                         train_model=True,
                         check_accuracy=True,
                         modelType=SimpleRFModel,
                         dataProviderType=FFTDataProvider,
                         labelProviderType=SRFLabelProvider,
                         config=cfg)
Code example #6
def try_LSTM():
    cfg = LSTMConfig()
    train.train_and_test(load_existing_dataset=True,
                         load_existing_model=True,
                         train_model=True,
                         check_accuracy=False,
                         modelType=LSTMModel,
                         dataProviderType=FFT2DDataProvider,
                         labelProviderType=SimpleDNNLabelProvider,
                         config=cfg)
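
In examples #4 through #6, train.train_and_test is a keyword-driven pipeline: the boolean flags select which stages run, and the *Type arguments inject the model, data-provider, and label-provider classes. Its body is not shown anywhere in this listing; the skeleton below is purely illustrative of the call sites, and every method name in it is hypothetical, not the project's API:

def train_and_test(load_existing_dataset, load_existing_model, train_model,
                   check_accuracy, modelType, dataProviderType,
                   labelProviderType, config):
    data_provider = dataProviderType(config)
    label_provider = labelProviderType(config)
    # load() / build(), load_weights(), fit(), evaluate() are hypothetical names
    dataset = (data_provider.load() if load_existing_dataset
               else data_provider.build())
    model = modelType(config)
    if load_existing_model:
        model.load_weights()
    if train_model:
        model.fit(dataset, label_provider)
    if check_accuracy:
        print("accuracy:", model.evaluate(dataset))
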
Code example #7
File: main.py  Project: zhangtongzan/attention_unet
def train():
    model = AttentionUNet()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
    criterion = FocalLoss()

    trained_model = train_and_test(model, dataloaders, optimizer, criterion, num_epochs=epochs)

    return trained_model
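
FocalLoss in example #7 is a project class whose definition is not shown. The loss itself is standard (Lin et al., 2017): cross-entropy scaled by (1 - p_t)**gamma so that easy, well-classified pixels contribute less, which suits imbalanced segmentation masks. A minimal binary sketch in PyTorch; the project's version may differ (multi-class form, per-class alpha, reduction):

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    """Binary focal loss: FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t).
    Simplification: alpha is applied uniformly; the paper weights
    positives by alpha and negatives by (1 - alpha)."""

    def __init__(self, alpha=0.25, gamma=2.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, logits, targets):
        # Per-element BCE equals -log(p_t), so exp(-bce) recovers p_t.
        bce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
        p_t = torch.exp(-bce)
        return (self.alpha * (1.0 - p_t) ** self.gamma * bce).mean()
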
Code example #8
File: main.py  Project: ShaoQiBNU/pytorch_cnn
def main():
    ########### Load the config file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()

    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    valid_batch_size = int(ch.config["model"]["valid_batch_size"])
    test_batch_size = int(ch.config["model"]["test_batch_size"])

    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])

    ########### Read log settings and data paths ##########
    log_interval = int(ch.config["log"]["log_interval"])
    version_name = ch.config["log"]["version_name"]

    train_file = ch.config["data"]["train_file"]
    valid_file = ch.config["data"]["valid_file"]
    test_file = ch.config["data"]["test_file"]

    ########### Prediction output file ##########
    pred_file = ch.config["save"]["pred_file"]

    ########### Build the training data loader ##########
    data_train = Dataset.ImageDataset(train_file, train=True)
    data_loader_train = torch.utils.data.DataLoader(
        dataset=data_train, batch_size=train_batch_size, shuffle=True)

    ########### Build the validation data loader ##########
    data_valid = Dataset.ImageDataset(valid_file, train=False)
    data_loader_valid = torch.utils.data.DataLoader(
        dataset=data_valid, batch_size=valid_batch_size, shuffle=True)

    ########### Build the test data loader ##########
    data_test = Dataset.ImageDataset(test_file, train=False)
    data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                                   batch_size=test_batch_size,
                                                   shuffle=False)

    ########### Train and evaluate ##########
    trainer = train.train_and_test(num_epochs, learning_rate, class_size,
                                   data_loader_train, data_loader_valid,
                                   data_loader_test, log_interval,
                                   version_name, pred_file)

    ########## start train ###########
    print("start train")
    begin_time = time()
    trainer.train_epoch()
    end_time = time()
    run_time = end_time - begin_time
    print('elapsed time:', run_time)

    ########## start test ###########
    print("start test")
    trainer.test()
Code example #9
def run_experiment(x_train, y_train, x_valid, y_valid, embeddings, _layers):
    # Model parameters
    model_name = "mlp"
    layers = _layers

    # Training parameters
    learning_rate = 1e-3  # learning rate
    batch_size = 64  # batch size
    num_epochs = args.epochs  # no. of training epochs

    # Regularization parameters
    dropout_keep_prob = 0.5  # dropout keep probability
    l2_reg_lambda = 0.0  # L2 regularization lambda

    # Training
    # ==================================================

    with tf.Graph().as_default():
        tf.set_random_seed(42)  # set random seed for consistent initialization(s)

        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)
        sess = tf.Session(config=session_conf)

        with sess.as_default():
            # Init model
            mlp = MLP(vocab_size=len(train.vocab),
                      num_classes=len(train.class_names),
                      layers=layers,
                      l2_reg_lambda=l2_reg_lambda)

            # Convert sparse matrices to arrays
            x_train = x_train.toarray()
            x_valid = x_valid.toarray()

            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", args.dataset, model_name,
                                                   timestamp))

            # Train and test model
            max_accuracy = train_and_test(sess, mlp, x_train, y_train, x_valid, y_valid, learning_rate,
                                          batch_size, num_epochs, dropout_keep_prob, out_dir)

            return timestamp, max_accuracy
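
Examples #9 through #11 are written against the TensorFlow 1.x graph/session API (tf.Graph, tf.ConfigProto, tf.Session, tf.set_random_seed). Under TensorFlow 2 those names live in the compat layer, so the snippets run unchanged after switching the import:

# TensorFlow 2.x: run these 1.x-style snippets via the compat layer.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # restore graph/session semantics

with tf.Graph().as_default():
    tf.set_random_seed(42)
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
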
Code example #10
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=allow_soft_placement,
                                  log_device_placement=log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        mlp = MLP(vocab_size=len(train.vocab),
                  num_classes=len(train.class_names),
                  layers=layers,
                  l2_reg_lambda=l2_reg_lambda)

        # Convert sparse matrices to arrays
        x_train = x_train.toarray()
        x_test = x_test.toarray()

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(
            os.path.join(os.path.curdir, "runs", args.dataset, model_name,
                         timestamp))

        # Train and test model
        max_accuracy = train_and_test(sess, mlp, x_train, y_train, x_test,
                                      y_test, learning_rate, batch_size,
                                      num_epochs, dropout_keep_prob, out_dir)

        # Output for results.csv
        hyperparams = "{{layers: {}}}".format(layers)
        utils.print_result(args.dataset, model_name, max_accuracy, data_str,
                           timestamp, hyperparams, args, args.notes)
Code example #11
def run_experiment(x_train, y_train, x_valid, y_valid, embeddings, _num_edges,
                   _filter_size, _num_features):
    # Feature graph parameters
    num_edges = _num_edges
    coarsening_levels = 0

    # Model parameters
    filter_name = "chebyshev"  # name of graph conv filter
    model_name = "gcnn_chebyshev"  # append filter name to model name
    filter_sizes = [_filter_size]  # filter sizes
    num_features = [_num_features]  # number of features per GCL
    pooling_sizes = [1]  # pooling sizes (1 (no pooling) or power of 2)
    fc_layers = []  # fully-connected layers

    # Training parameters
    learning_rate = 1e-3  # learning rate
    batch_size = 64  # batch size
    num_epochs = args.epochs  # no. of training epochs

    # Regularization parameters
    dropout_keep_prob = 0.5  # dropout keep probability
    l2_reg_lambda = 0.0  # L2 regularization lambda

    # Feature Graph
    # ==================================================

    # Construct graph
    dist, idx = graph.distance_sklearn_metrics(embeddings,
                                               k=num_edges,
                                               metric="cosine")
    A = graph.adjacency(dist, idx)
    A = graph.replace_random_edges(A, 0)

    # Compute coarsened graphs
    graphs, perm = coarsening.coarsen(A,
                                      levels=coarsening_levels,
                                      self_connections=False)
    laplacians = [graph.laplacian(A, normalized=True) for A in graphs]

    # Override filter sizes for non-param Fourier filter
    if filter_name == "fourier":
        filter_sizes = [l.shape[0] for l in laplacians]

    del dist, idx, A, graphs  # don't need these anymore

    # Reindex nodes to satisfy a binary tree structure
    x_train = scipy.sparse.csr_matrix(
        coarsening.perm_data(x_train.toarray(), perm))
    x_valid = scipy.sparse.csr_matrix(
        coarsening.perm_data(x_valid.toarray(), perm))

    # Training
    # ==================================================

    with tf.Graph().as_default():
        tf.set_random_seed(42)  # set random seed for consistent initialization(s)

        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)
        sess = tf.Session(config=session_conf)

        with sess.as_default():
            # Init model
            gcnn = GraphCNN(filter_name=filter_name,
                            L=laplacians,
                            K=filter_sizes,
                            F=num_features,
                            P=pooling_sizes,
                            FC=fc_layers,
                            batch_size=batch_size,
                            num_vertices=x_train.shape[1],
                            num_classes=len(train.class_names),
                            l2_reg_lambda=l2_reg_lambda)

            # Convert sparse matrices to arrays
            x_train = np.squeeze([x_i.toarray() for x_i in x_train])
            x_valid = np.squeeze([x_i.toarray() for x_i in x_valid])

            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(
                os.path.join(os.path.curdir, "runs", args.dataset, model_name,
                             timestamp))

            # Train and test model
            max_accuracy = train_and_test(sess, gcnn, x_train, y_train,
                                          x_valid, y_valid, learning_rate,
                                          batch_size, num_epochs,
                                          dropout_keep_prob, out_dir)

            return timestamp, max_accuracy
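
filter_name = "chebyshev" in example #11 refers to the Chebyshev polynomial approximation of spectral graph convolutions (Defferrard et al., 2016): filter responses are built from the recurrence T_k(L)x = 2·L·T_{k-1}(L)x - T_{k-2}(L)x, evaluated on a Laplacian rescaled so its eigenvalues lie in [-1, 1]. A small NumPy sketch of that basis computation, assuming the rescaling has already been applied to L:

import numpy as np

def chebyshev_basis(L, x, K):
    """Return [T_0(L)x, ..., T_{K-1}(L)x] for rescaled Laplacian L, signal x."""
    xs = [x]
    if K > 1:
        xs.append(L @ x)
    for _ in range(2, K):
        xs.append(2 * (L @ xs[-1]) - xs[-2])  # T_k = 2 L T_{k-1} - T_{k-2}
    return np.stack(xs)

A learned filter of order K is then a weighted sum of these terms, sum_k theta_k * T_k(L)x, which is what filter_sizes = [_filter_size] controls above.
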