Code Example #1
File: train.py Project: gredx/FCN_CRF
def train():
    # load the training images and the label data
    print("loading data.......")
    imgs_train = get_data("F:\liuyang\MyNet\\U-net\\train_data.npy",(512,512))
    imgs_label = get_data("F:\liuyang\MyNet\\U-net\\train_label.npy",(512,512))
    # get Unet model
    print("getting model...")
    model = Unet(512,512,optimizer=Adam(1e-4),loss=dice_coef_loss,metrics=dice_coef)
    # save the best weights with a model checkpoint
    checkPoint = ModelCheckpoint('weighs.h5',monitor='val_loss',save_best_only=True)
    # start training
    print("start training.......")
    model.fit(imgs_train,
              imgs_label,
              batch_size=1,
              epochs=10,
              verbose=1,
              shuffle=True,
              callbacks=[checkPoint],
              validation_split=0.1)
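The `get_data` helper used here is project-local and its body is not shown. A minimal sketch of a loader matching these call sites (the `.npy` layout and the normalization to [0, 1] are assumptions, not part of the original project):

import numpy as np

def get_data(path, target_size):
    # Hypothetical loader: read a pre-built .npy volume of shape (N, H, W, 1),
    # check its spatial size and scale the pixel values to [0, 1].
    imgs = np.load(path).astype(np.float32)
    assert imgs.shape[1:3] == target_size, "unexpected image size"
    return imgs / 255.0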
Code Example #2
    def __init__(self) -> None:
        """ Get data from helper -> the source data """
        list_of_dataframes = get_data()
        self.df_confirmed = list_of_dataframes['confirmed']
        self.df_deaths = list_of_dataframes['deaths']
        self.df_recovered = list_of_dataframes['recovered']

        list_of_time_series = get_data(time_series=True)
        self.df_time_series_confirmed = list_of_time_series['confirmed']
        self.df_time_series_deaths = list_of_time_series['deaths']
        self.df_time_series_recovered = list_of_time_series['recovered']

        self.datetime_raw = self.df_confirmed['datetime'].unique().tolist()[0]
        self.timestamp = datetime.strptime(self.datetime_raw, '%m/%d/%y').timestamp()
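In this example `get_data` apparently returns a dict of pandas DataFrames keyed by case type, with a time-series variant selected by a keyword argument. A hedged sketch of such a helper (the CSV file names below are invented for illustration):

import pandas as pd

def get_data(time_series=False):
    # Hypothetical helper: one CSV per case type; the file naming scheme is an
    # assumption, not the project's actual layout.
    prefix = 'time_series_' if time_series else ''
    return {key: pd.read_csv(f'{prefix}{key}.csv')
            for key in ('confirmed', 'deaths', 'recovered')}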
Code Example #3
def test_model_from_keras(
        subject,
        batch_size=32,
        folder='/scratch/sem19h24/EEGNet_reformat/EEGnet/initialModels/'):
    """
    Import the keras model and test it

    Parameters:
        subject:    Number between 1 and 9
        batch_size: Batch Size to test the model
        folder:     Folder where the model files are found.

    returns: loss, accuracy
    """

    # Just use the default values and hope all dimensions are fine...
    model = EEGNet(activation='elu', constrain_w=False, permuted_flatten=True)
    model.load_model_params_from_keras(f"{folder}model{subject-1}_torch.npz")
    if t.cuda.is_available():
        model = model.cuda()

    print_summary(model, None, None, None)

    # get dataloader
    test_samples, test_labels = get_data(subject, training=False)
    test_loader = as_data_loader(test_samples,
                                 test_labels,
                                 batch_size=batch_size)

    # prepare loss function, no optimizer necessary
    loss_function = t.nn.CrossEntropyLoss()
    return _test_net(model, test_loader, loss_function, train=False)
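A possible call site for this function (the subject number is arbitrary):

# Evaluate the imported Keras weights for subject 1 with the default settings.
loss, accuracy = test_model_from_keras(subject=1)
print(f"subject 1: loss = {loss:.4f}, accuracy = {accuracy:.4f}")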
Code Example #4
def get_all_book(url_book: list, rows: list):
    """[From a list of book urls, it finds all the information and adds them to rows]

    Args:
        url_book (list): [A list of book urls]
        rows (list) : [A list of data:
        product_page_url,
        upc,
        title,
        price_including_tax,
        price_excluding_tax,
        number_available,
        product_description,
        category,
        reviews_rating,
        image_url,
        filename]
    """
    # loop over the book urls
    for url in url_book:
        soup = get_data(url)
        # instantiate the BookFetcher class
        book_info = BookFetcher(url, soup)
        book = book_info.get_book_info
        # write each result to rows
        rows.append(book)
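In this and the other scraping examples, `get_data(url)` evidently fetches a page and returns a parsed BeautifulSoup object, since its result is queried with `find`/`find_all`. A plausible sketch of that helper, assuming `requests` and `bs4`:

import requests
from bs4 import BeautifulSoup

def get_data(url: str) -> BeautifulSoup:
    # Hypothetical fetch-and-parse helper; error handling is kept minimal.
    response = requests.get(url)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'html.parser')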
Code Example #5
def srap_books(url: str, rows: list):
    """[From a book category url, call url_book (), get_all_book () and get_next_page ()]

    Args:
        url (str): [url of a book category]
        rows (list) : [A list of data:
        product_page_url,
        upc,
        title,
        price_including_tax,
        price_excluding_tax,
        number_available,
        product_description,
        category,
        reviews_rating,
        image_url,
        filename]
    """
    url_book = get_all_page(url)
    get_all_book(url_book, rows)
    # loop to get the next pages
    while True:
        soup = get_data(url)
        url = get_next_page(soup, url)
        if not url:
            break
        url_book = get_all_page(url)
        get_all_book(url_book, rows)
    del url_book[:]
Code Example #6
def predict():
    # get model
    model = Unet(512,
                 512,
                 optimizer=Adam(1e-4),
                 loss=dice_coef_loss,
                 metrics=dice_coef)
    # get test images
    print("getting test images...")
    testImagesPath = r'E:\ljs\u-net\train_data.npy'
    imgs_test = get_data(testImagesPath, (512, 512))
    # load the weights
    print("loading weights...")
    model.load_weights('weighs.h5')
    # predict
    print("predicting ...")
    imgs_predict = model.predict(imgs_test, batch_size=1, verbose=1)

    # save the output images into a folder
    print('saving output images...')
    out_dir = get_randstr()
    os.mkdir(out_dir)

    for idx, image in enumerate(imgs_predict):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        # plt.imsave would store the result as a color image by default
        # plt.imsave(os.path.join(out_dir, str(idx) + '.png'), image)
        # skimage's imsave writes the single-channel image as grayscale
        imsave(os.path.join(out_dir, str(idx) + '.png'), image)
Code Example #7
def get_url_category(url: str) -> list:
    """[Find all urls for each category of books]

    Args:
        url (str): [url of the main books.toscrape.com page]

    Returns:
        list: [a list with the urls of each category]
    """
    category_url = []
    # request get
    soup = get_data(url)
    lis = soup.find('ul', class_='nav-list').find_all('li')
    # loop over results
    for li_ in lis:
        category_link = 'http://books.toscrape.com/' + li_.find('a')['href']
        category_url.append(category_link)
    return category_url
Code Example #8
def get_url_book(url: str) -> list:
    """[Find all book urls on a page]

    Args:
        url (str): [url of a book category page]

    Returns:
        list: [Returns a list with all the urls of the books on a page]
    """
    book_url = []
    # request get
    soup = get_data(url)
    articles = soup.find_all('article', class_='product_pod')
    for article in articles:
        link = article.find('a')['href']
        # strip the leading '../' segments (str.strip removes any mix of
        # '.' and '/' characters from both ends of the string)
        link = 'http://books.toscrape.com/catalogue/' + link.strip('../../..')
        book_url.append(link)
    return book_url
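One way the scraping helpers from Code Examples #4, #5, #7 and #8 could be wired together; this driver is illustrative, and writing the collected rows to a CSV file is left out:

rows = []
for category_url in get_url_category('http://books.toscrape.com/'):
    # srap_books() walks every page of the category and appends one entry
    # per book to rows.
    srap_books(category_url, rows)
print(f"collected {len(rows)} books")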
Code Example #9
def train_subject_specific_quant(subject,
                                 epochs=500,
                                 batch_size=32,
                                 lr=0.001,
                                 silent=False,
                                 plot=True,
                                 **kwargs):
    """
    Trains a subject specific model for the given subject

    Parameters:
     - subject:    Integer in the Range 1 <= subject <= 9
     - epochs:     Number of epochs to train
     - batch_size: Batch Size
     - lr:         Learning Rate
     - silent:     bool, if True, hide all output including the progress bar
     - plot:       bool, if True, generate plots
     - kwargs:     Remaining arguments passed to the EEGnet model

    Returns: (model, metrics, history)
     - model:   t.nn.Module, trained model
     - metrics: t.tensor, size=[1, 4], accuracy, precision, recall, f1
     - history: training history as returned by _train_net
    """
    # load the data
    train_samples, train_labels = get_data(subject, training=True)
    test_samples, test_labels = get_data(subject, training=False)
    train_loader = as_data_loader(train_samples,
                                  train_labels,
                                  batch_size=batch_size)
    # test_loader = as_data_loader(test_samples, test_labels, batch_size=test_labels.shape[0])
    test_loader = as_data_loader(test_samples,
                                 test_labels,
                                 batch_size=batch_size)

    # prepare quantization configuration
    qconfig = tq.QConfig(
        activation=tq.MinMaxObserver.with_args(dtype=t.quint8),
        weight=tq.MinMaxObserver.with_args(dtype=t.qint8))

    # prepare the model
    model = EEGNetQuant(T=train_samples.shape[2], qconfig=qconfig, **kwargs)
    model.initialize_params()
    if t.cuda.is_available():
        model = model.cuda()

    # prepare the quantization
    tq.prepare_qat(model, inplace=True)

    # prepare loss function and optimizer
    loss_function = t.nn.CrossEntropyLoss()
    optimizer = t.optim.Adam(model.parameters(), lr=lr, eps=1e-7)
    scheduler = None

    # print the training setup
    print_summary(model, optimizer, loss_function, scheduler)

    # prepare progress bar
    with tqdm(desc=f"Subject {subject}",
              total=epochs,
              leave=False,
              disable=silent,
              unit='epoch',
              ascii=True) as pbar:

        # Early stopping is not allowed in this mode, because the testing data cannot be used for
        # training!
        model, metrics, _, history = _train_net(subject,
                                                model,
                                                train_loader,
                                                test_loader,
                                                loss_function,
                                                optimizer,
                                                scheduler=scheduler,
                                                epochs=epochs,
                                                early_stopping=False,
                                                plot=plot,
                                                pbar=pbar)

    # convert the model into a quantized model
    model = model.cpu()
    tq.convert(model, inplace=True)

    metrics = get_metrics_from_model(model, test_loader)

    if not silent:
        print(f"Subject {subject}: accuracy = {metrics[0, 0]}")
    return model, metrics, history
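A minimal usage sketch for this quantization-aware training routine (the subject number and epoch count are arbitrary):

# Train a quantization-aware EEGNet for subject 1 and report its accuracy.
model, metrics, history = train_subject_specific_quant(subject=1, epochs=100)
print(f"subject 1: accuracy = {metrics[0, 0]:.4f}")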
Code Example #10
def train_subject_specific_cv(subject,
                              n_splits=4,
                              epochs=500,
                              batch_size=32,
                              lr=0.001,
                              early_stopping=True,
                              silent=False,
                              plot=True,
                              **kwargs):
    """
    Trains a subject specific model for the given subject, using K-Fold Cross Validation

    Parameters:
     - subject:        Integer in the Range 1 <= subject <= 9
     - n_splits:       Number of splits for K-Fold CV
     - epochs:         Number of epochs to train
     - batch_size:     Batch Size
     - lr:             Learning Rate
     - early_stopping: bool, approximate the number of epochs to train the network for the subject.
     - silent:         bool, if True, generate no output, including progress bar
     - plot:           bool, if True, generates plots
     - kwargs:         remaining parameters are passed to the EEGnet model

    Returns: (models, metrics, epoch)
     - models:  List of t.nn.Module, size = [n_splits]
     - metrics: t.tensor, size=[1, 4], accuracy, precision, recall, f1, averaged over all splits
     - epoch:   integer, number of epochs determined by early_stopping. If early_stopping is not
                used, then this value will always be equal to the parameter epochs

    Notes:
     - Early Stopping: Uses early stopping to determine the best epoch to stop training for the
       given subject by averaging over the stopping epoch of all splits.
    """

    # load the raw data
    samples, labels = get_data(subject, training=True)
    samples, labels = as_tensor(samples, labels)

    # prepare the models
    models = [EEGNet(T=samples.shape[2], **kwargs) for _ in range(n_splits)]
    metrics = t.zeros((n_splits, 4))
    best_epoch = np.zeros(n_splits)

    # prepare KFold
    kfcv = KFoldCV(n_splits)
    split = 0

    # open the progress bar
    with tqdm(desc=f"Subject {subject} CV, split 1",
              total=n_splits * epochs,
              leave=False,
              unit='epoch',
              ascii=True,
              disable=silent) as pbar:
        # loop over all splits
        for split, indices in enumerate(kfcv.split(samples, labels)):
            # set the progress bar title
            pbar.set_description(f"Subject {subject} CV, split {split + 1}")

            # generate dataset
            train_idx, val_idx = indices
            train_ds = t.utils.data.TensorDataset(samples[train_idx],
                                                  labels[train_idx])
            val_ds = t.utils.data.TensorDataset(samples[val_idx],
                                                labels[val_idx])
            train_loader = t.utils.data.DataLoader(train_ds,
                                                   batch_size=batch_size,
                                                   shuffle=True)
            val_loader = t.utils.data.DataLoader(val_ds,
                                                 batch_size=batch_size,
                                                 shuffle=True)

            # prepare the model
            model = models[split]
            model.initialize_params()
            if t.cuda.is_available():
                model = model.cuda()

            # prepare loss function and optimizer
            loss_function = t.nn.CrossEntropyLoss()
            optimizer = t.optim.Adam(model.parameters(), lr=lr)

            model, split_metrics, split_epoch, _ = _train_net(
                subject,
                model,
                train_loader,
                val_loader,
                loss_function,
                optimizer,
                epochs=epochs,
                early_stopping=early_stopping,
                plot=plot,
                pbar=pbar)

            metrics[split, :] = split_metrics[0, :]
            best_epoch[split] = split_epoch

    # average all metrics
    metrics = metrics.mean(axis=0).reshape(1, 4)

    # print the result
    if not silent:
        print(
            f"Subject {subject} CV: accuracy = {metrics[0, 0]}, at epoch {best_epoch.mean()} "
            + f"+- {best_epoch.std()}")
    return models, metrics, int(best_epoch.mean().round())
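A minimal usage sketch of the cross-validation routine (subject number and split count are arbitrary):

# Estimate a good number of training epochs for subject 3 via 4-fold CV.
models, metrics, best_epoch = train_subject_specific_cv(subject=3, n_splits=4)
print(f"CV accuracy = {metrics[0, 0]:.4f}, suggested epochs = {best_epoch}")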
Code Example #11
File: train.py Project: Hossamsaad99/Transformer
def train_model(model, epoches=50, batch=32):
    seq_len = 128
    (df, X_train, y_train, X_val, y_val, X_test, y_test,
     train_data_len, val_data_len, scaler) = get_data()

    CP = ModelCheckpoint(filepath='Transformer+TimeEmbedding.hdf5',
                         monitor='val_loss',
                         save_best_only=True,
                         verbose=1)

    ES = EarlyStopping(monitor='val_loss', mode='min', patience=10)

    history = model.fit(X_train,
                        y_train,
                        batch_size=batch,
                        epochs=epoches,
                        callbacks=[CP, ES],
                        verbose=1,
                        validation_data=(X_val, y_val))

    model = load_model('Transformer+TimeEmbedding.hdf5',
                       custom_objects={
                           'Time2Vector': Time2Vector,
                           'SingleAttention': SingleAttention,
                           'MultiAttention': MultiAttention,
                           'TransformerEncoder': TransformerEncoder
                       })

    #Print evaluation metrics for all datasets
    train_eval = model.evaluate(X_train, y_train, verbose=0)
    val_eval = model.evaluate(X_val, y_val, verbose=0)
    test_eval = model.evaluate(X_test, y_test, verbose=0)

    print('\nEvaluation metrics')
    print('Training Data - Loss: {:.4f}, MAE: {:.4f}'.format(
        train_eval[0], train_eval[1]))
    print('Validation Data - Loss: {:.4f}, MAE: {:.4f}'.format(
        val_eval[0], val_eval[1]))
    print('Test Data - Loss: {:.4f}, MAE: {:.4f}'.format(
        test_eval[0], test_eval[1]))
    # Visualize loss vs. epochs
    loss = history.history["loss"]
    val_loss = history.history["val_loss"]
    epochs = range(len(loss))
    plt.figure()
    plt.plot(epochs, loss, "b", label="Training loss")
    plt.plot(epochs, val_loss, "r", label="Validation loss")
    plt.title("Training and Validation Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
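A hedged usage sketch: the model construction is not shown in this example, so `build_model()` below is a hypothetical stand-in for whatever builds the Transformer with the `Time2Vector` and attention layers registered above.

# build_model() is hypothetical; any compiled Keras model with an MAE metric
# and input shapes matching the data returned by get_data() would fit here.
model = build_model()
train_model(model, epoches=50, batch=32)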