def main(args):
    dataset = args.dataset
    neighborhood_size = args.neighborhood_size
    recommended_list_size = args.recommended_list_size

    data_loader = DataLoader(dataset)
    data_loader.load_data()
    user_number, item_number = data_loader.get_dataset_info()
    train, test = data_loader.train_test_split()
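    # predict ratings from the k most similar neighbours (with the bias term
    # removed, per the method name); k is the neighbourhood size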
    recommender = RecommenderSystem()
    rating_predictions = recommender.predict_topk_nobias(train,
                                                         k=neighborhood_size)

    evaluator = RecommenderEvaluator()
    print("RMSE={}".format(evaluator.rmse(rating_predictions, test)))
    print("MAE={}".format(evaluator.mae(rating_predictions, test)))
    mean_test = np.true_divide(test.sum(1), (test != 0).sum(1))
    precisions, recalls = evaluator.precision_recall_at_k(
        rating_predictions, test, mean_test, user_number,
        recommended_list_size)
    precision = sum(prec for prec in precisions.values()) / len(precisions)
    recall = sum(rec for rec in recalls.values()) / len(recalls)
    f1 = evaluator.f1(precision, recall)
    print("Precision({})={}".format(recommended_list_size, precision))
    print("Recall({})={}".format(recommended_list_size, recall))
    print("F1({})={}".format(recommended_list_size, f1))
Example #2
    def prepare_real_samples(self):
        """
        prepare_real_samples function load the data provider and set
        training and testing dataset

        :return: X
        """
        # loading real data
        (x_train, _), (_, _) = DataLoader.load_data()
        # adding channels to expand to 3d
        X = expand_dims(x_train, axis=-1)
        # convert from int to float and [0,255] to [-1,1] scaling
        X = X.astype('float32')
        X = (X - 127.5) / 127.5
        return X
Example #3
def prepare_real_samples():
    """
    prepare_real_samples function load the data provider and set
    training and testing dataset

    :return: X_train
    """
    # loading real data
    (X_train, y_train), (X_test, y_test) = DataLoader.load_data()
    # convert from int to float and [0,255] to [-1,1] scaling
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    X_train = X_train[:, :, :, None]
    X_test = X_test[:, :, :, None]
    # X_train = X_train.reshape((X_train.shape, 1) + X_train.shape[1:])
    return X_train
Example #4
def main(args):
    dataset = args.dataset
    model_type = args.model
    layers = []
    if model_type != 'latent-factor-model':
        # the hidden-layer sizes arrive as a string such as "[64, 32, 16]"
        layers = eval(args.layers)
    n_epoch = args.epochs
    max_checkout_without_progress = args.max_checkout_without_progress
    batch_size = args.batch_size
    dimension = args.dimension
    learning_rate = args.learning_rate
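    # resolve the optimizer name to a tf.train optimizer class; plain
    # gradient descent is the fallback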
    if args.optimizer == 'Adam':
        optimizer = tf.train.AdamOptimizer
    elif args.optimizer == 'RMSProp':
        optimizer = tf.train.RMSPropOptimizer
    else:
        optimizer = tf.train.GradientDescentOptimizer
    dropout_rate = args.dropout_rate
    regularization_factor = args.regularization_factor

    data_loader = DataLoader(dataset)
    data_loader.load_data()
    user_number, item_number = data_loader.get_dataset_info()
    rating_data_train, rating_data_test = data_loader.train_test_split(0.8)

    # resolve the rating-file column names first so that both the training
    # iterator and the test data use the schema of the chosen dataset
    if dataset == 'ml-100k' or dataset == 'ml-1m':
        userid = "userid"
        itemid = "itemid"
    else:
        userid = "userId"
        itemid = "movieId"

    iter_train = ShuffleIterator(
        [
            rating_data_train[userid], rating_data_train[itemid],
            rating_data_train["rating"]
        ],
        batch_size=batch_size)

    user_ids_test, item_ids_test, ratings_test = data_loader.get_test_data([
        rating_data_test[userid], rating_data_test[itemid],
        rating_data_test["rating"]
    ])
    model_name = model_type + '-' + dataset
    if model_type == 'latent-factor-model':
        model = LatentFactorModel(batch_size,
                                  dimension,
                                  learning_rate,
                                  user_number,
                                  item_number,
                                  iter_train,
                                  dropout_rate,
                                  optimizer_class=optimizer,
                                  reg_factor=regularization_factor)
    elif model_type == 'deep-neural-network-model':
        model = DeepNeuralNetworkModel(batch_size,
                                       dimension,
                                       learning_rate,
                                       user_number,
                                       item_number,
                                       iter_train,
                                       dropout_rate,
                                       layers=layers,
                                       optimizer_class=optimizer,
                                       reg_factor=regularization_factor)
    elif model_type == 'ensemble-no-transfer-learning':
        model = EnsembleModel(batch_size,
                              dimension,
                              learning_rate,
                              user_number,
                              item_number,
                              iter_train,
                              dropout_rate,
                              layers=layers[:-1],
                              optimizer_class=optimizer,
                              reg_factor=regularization_factor)
    else:
        model = EnsembleModel(batch_size,
                              dimension,
                              learning_rate,
                              user_number,
                              item_number,
                              iter_train,
                              dropout_rate,
                              layers=layers[:-1],
                              optimizer_class=optimizer,
                              reg_factor=regularization_factor,
                              transfer_learning=True)

    model.fit(user_ids_test,
              item_ids_test,
              ratings_test,
              rating_data_train,
              model_name,
              dataset,
              n_epoch=n_epoch,
              max_checkout_without_progress=max_checkout_without_progress)

    predicted_ratings = model.get_test_data_prediction()
    evaluator = RecommenderEvaluator(rating_data_test, predicted_ratings,
                                     dataset)
    print("\nRMSE={}".format(evaluator.rmse()))
    print("MAE={}".format(evaluator.mae()))
    k = 20
    precisions, recalls = evaluator.precision_recall_at_k(k)
    precision = sum(prec for prec in precisions.values()) / len(precisions)
    recall = sum(rec for rec in recalls.values()) / len(recalls)
    f1 = evaluator.f1(precision, recall)
    print("Precision({})={}".format(k, precision))
    print("Recall({})={}".format(k, recall))
    print("F1({})={}".format(k, f1))
Example #5
def train_network(combined_data_file_name):
    dataloader = DataLoader(combined_data_file_name)
    data = dataloader.load_data()
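    # the positional arguments are presumably (model_dir, data, batch_size,
    # learning_rate)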
    onet_trainer('./model_store', data, 32, 0.001)
Example #6
    args = parse_args()
    DATA_DIR = Path("data")
    data_path = DATA_DIR / "_".join([args.dataset, "processed"])
    model_name = "_".join(
        ["pt", args.model,
         str(datetime.now()).replace(" ", "_")])

    log_dir = Path(args.log_dir)
    model_weights = log_dir / "weights"
    if not os.path.exists(model_weights):
        os.makedirs(model_weights)

    data_loader = DataLoader(data_path)
    n_items = data_loader.n_items
    train_data = data_loader.load_data("train")
    valid_data_tr, valid_data_te = data_loader.load_data("validation")
    test_data_tr, test_data_te = data_loader.load_data("test")

    training_steps = len(range(0, train_data.shape[0], args.batch_size))
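    # total_anneal_steps is sized so that a linear KL anneal reaches
    # anneal_cap after roughly the first 85% of training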
    try:
        total_anneal_steps = (
            training_steps *
            (args.n_epochs - int(args.n_epochs * 0.15))) / args.anneal_cap
    except ZeroDivisionError:
        assert (
            args.constant_anneal
        ), "if 'anneal_cap' is set to 0.0, 'constant_anneal' must be set to True"

    p_dims = eval(args.p_dims)
    q_dims = eval(args.q_dims)
    # process_args returns the final encoder/decoder dimensions and dropout
    # rates, overriding the raw CLI strings parsed above
    p_dims, q_dims, dropout_enc, dropout_dec = process_args(n_items)
    model = MultiVAE(
        p_dims=p_dims,
        q_dims=q_dims,
        dropout_enc=dropout_enc,
        dropout_dec=dropout_dec,
    )

    new_colnames = ["user_id", "skill"]
    filename = "generated_data.json"
    data = pd.read_json(os.path.join(DATA_DIR, filename), lines=True)
    data = data.drop('count', axis=1)  # drop() returns a copy, so reassign

    model.to(device)
    model.load_state_dict(
        torch.load(os.path.join(model_weights, model_name + ".pt")))
    loader = DataLoader(data_path)
    data_tr = loader.load_data("train")
    res_df = make_prediction(model, data_tr, data_path)
    # res.to_csv(os.path.join(out_path, f"prediction_{args.n_epochs}.csv"), sep=';')
    key = 4.9
    recoms = process_results(res_df, key, data)

    with open(os.path.join(out_path, f'prediction_{args.n_epochs}.json'),
              'w') as json_file:
        for row in recoms:
            json_str = json.dumps(row)
            json_file.write(json_str + '\n')
Example #8
def main(unused_argv):
    assert FLAGS.output_dir, "--output_dir is required"
    # Create training directory.
    output_dir = FLAGS.output_dir
    if not tf.gfile.IsDirectory(output_dir):
        tf.gfile.MakeDirs(output_dir)

    dl = DataLoader(FLAGS.data_dir)
    dl.load_data()
    dl.split()

    x_dim = dl.get_X_dim()
    y_dim = dl.get_Y_dim()

    # Build the model.
    model = DMFD(x_dim,
                 y_dim,
                 dl.min_val,
                 dl.max_val,
                 cfgs,
                 log_dir=FLAGS.log_dir)

    if FLAGS.pretrained_fname:
        try:
            model.restore(FLAGS.pretrained_fname)
            print('Resume from %s' % (FLAGS.pretrained_fname))
        except Exception:
            # fall back to training from scratch if the checkpoint cannot load
            pass

    lr = cfgs.initial_lr
    epoch_counter = 0
    ite = 0
    lambda_ = cfgs.base_lambda_
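    # training loop: one mini-batch per iteration; `flag` is set on the batch
    # that completes an epoch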
    while True:
        start = time.time()
        x, y, R, mask, flag = dl.next_batch(cfgs.batch_size_x,
                                            cfgs.batch_size_y, 'train')
        load_data_time = time.time() - start
        if flag:
            epoch_counter += 1

        # some boolean variables
        do_log = (ite % FLAGS.log_every_n_steps == 0)
        do_snapshot = flag and epoch_counter > 0 and epoch_counter % FLAGS.save_every_n_epochs == 0
        val_loss = -1

        # train one step
        get_summary = do_log and cfgs.write_summary
        start = time.time()
        loss, _, summary, ite = model.partial_fit(x, y, R, mask, lr, lambda_,
                                                  get_summary)
        one_iter_time = time.time() - start

        # writing outs
        if do_log:
            print('Iteration %d, (lr=%f, lambda_=%f) training loss  : %f' %
                  (ite, lr, lambda_, loss))
            if FLAGS.log_time:
                print(
                    'Iteration %d, data loading: %f(s) ; one iteration: %f(s)'
                    % (ite, load_data_time, one_iter_time))
            if cfgs.write_summary:
                model.log(summary)

        if do_snapshot:
            print('Snapshotting')
            model.save(FLAGS.output_dir)

        if flag:
            lambda_ = get_lambda_(lambda_, cfgs.anneal_rate, epoch_counter - 1,
                                  cfgs.sigmoid_schedule)
            print('Finished epoch %d' % epoch_counter)
            print('--------------------------------------')
            if epoch_counter == FLAGS.n_epochs:
                if not do_snapshot:
                    print('Final snapshotting')
                    model.save(FLAGS.output_dir)
                break
            if epoch_counter % cfgs.num_epochs_per_decay == 0:
                lr = lr * cfgs.lr_decay_factor
                print('Decay learning rate to %f' % lr)
Example #9
positives_directory = "./prepare_data/Data/RNet/Positives"
negative_directory = "./prepare_data/Data/RNet/Negatives"
partial_directory = "./prepare_data/Data/RNet/Partial"

if not os.path.exists(positives_directory):
    os.mkdir(positives_directory)
if not os.path.exists(negative_directory):
    os.mkdir(negative_directory)
if not os.path.exists(partial_directory):
    os.mkdir(partial_directory)

p_network, _, _ = network_loader(p_model_path=p_model_path)
mtcnn_detector = MTCNNDetector(p_network=p_network, min_face_size=12)
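# the P-Net stage proposes candidate face boxes; its detections seed the
# RNet sample generation below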

dataloader = DataLoader('../Datasets/WIDER_train/wider_origin_anno.txt',
                        mode='Test')
data = dataloader.load_data()
test_data = TestLoader(data, 1, False)

batch_index = 0
final_boxes = []


def generate_sample_data_rnet(annotation_file, pnet_file):
    image_size = 24
    image_path_list, bbox_list = [], []

    with open(annotation_file, 'r') as f:
        annotations = f.readlines()

    training_images = len(annotations)
Example #10
class Analyzer:
    consecutive_data_folder = r"..\..\processed_data\consecutive_data"
    epoched_data_folder = r"..\..\processed_data\epoched_data"
    temporal_SMR_different_bands_folder = r"..\results\temporal_SMR_different_bands"
    TFR_folder = r"..\results\TFR"
    report_folder = r"..\results\report"

    def __init__(self,
                 exp_counter,
                 low_freq=0.1,
                 hi_freq=3,
                 pick_channels=['Cz'],
                 signal_tmin=-3,
                 signal_tmax=5,
                 noise_tmin=3,
                 noise_tmax=11,
                 generate_report=False):
        self.exp_counter = exp_counter
        self.pick_channels = pick_channels
        self.data_loader = DataLoader(exp_counter=self.exp_counter)
        self.data_loader.init_task_dependent_variables()
        self.data_loader.load_data()
        self.exp_name = self.data_loader.exp_name
        self.channel_dict = self.data_loader.channel_dict
        self.fs = self.data_loader.fs
        self.low_freq = low_freq
        self.hi_freq = hi_freq
        self.signal_tmin = signal_tmin
        self.signal_tmax = signal_tmax
        self.noise_tmin = noise_tmin
        self.noise_tmax = noise_tmax
        self.report = mne.Report(verbose=True)
        self.generate_report = generate_report

    def load_preprocessed_data(self, special_name, duration):
        file_path = r'{}\{}_processed_BPF_{}Hz_{}Hz_{}.fif'.format(
            Analyzer.consecutive_data_folder, self.exp_name, self.low_freq,
            self.hi_freq, special_name)

        self.preprocessed_data = mne.io.read_raw_fif(file_path, preload=True)
        # fig_all = self.preprocessed_data.plot(events=self.task_event_array,
        #                                       duration=self.preprocessed_data.n_times / self.fs)

        fig_all = self.preprocessed_data.plot(duration=duration)
        fig_all.subplots_adjust(top=0.8)
        fig_all.suptitle('{}_processed_BPF_{}Hz_{}Hz'.format(
            self.exp_name, self.low_freq, self.hi_freq))

        #         channel_picked_data = self.preprocessed_data.copy().pick_channels(self.pick_channels)
        #         # fig_picked = channel_picked_data.plot(events=self.task_event_array,
        #         #                                       duration=channel_picked_data.n_times / self.fs)
        #         fig_picked = channel_picked_data.plot(duration=duration)
        #         fig_picked.subplots_adjust(top=0.9)
        #         fig_picked.suptitle('{}_processed_BPF_{}Hz_{}Hz'.format(self.exp_name, self.low_freq, self.hi_freq))
        plt.show()

        if self.generate_report:
            self.report.add_figs_to_section(
                fig_all,
                captions='{}_processed_BPF_{}Hz_{}Hz'.format(
                    self.exp_name, self.low_freq, self.hi_freq),
                section='consecutive EEG')

        # fig_picked is produced by the commented-out picked-channel block
        # above, so this report section is disabled along with it
        # if self.generate_report:
        #     self.report.add_figs_to_section(
        #         fig_picked,
        #         captions='picked channel {}_processed_BPF_{}Hz_{}Hz'.format(
        #             self.exp_name, self.low_freq, self.hi_freq),
        #         section='consecutive EEG')

    def load_preprocessed_data_pipeline(self, special_name, duration):
        file_path = r'{}\{}_processed_BPF_{}Hz_{}Hz_{}.fif'.format(
            Analyzer.consecutive_data_folder, self.exp_name, self.low_freq,
            self.hi_freq, special_name)

        self.preprocessed_data = mne.io.read_raw_fif(file_path, preload=True)
        # fig_all = self.preprocessed_data.plot(events=self.task_event_array,
        #                                       duration=self.preprocessed_data.n_times / self.fs)

#     def create_event(self):
#         raw_eeg_path = self.data_loader.base_folder + "//raw_eeg.csv"
#         df = pd.read_csv(raw_eeg_path, header=None)
#         self.raw_data = df.values
#         event_path = self.data_loader.base_folder + "//event.csv"
#         event_df = pd.read_csv(event_path, header=None)
#         self.events = event_df.values
#         # self.events = self.events.astype(int)
#         self.origin_time = self.raw_data[0, 0]
#         self.onsets = self.events[:, 1] - self.origin_time
#         self.durations = np.zeros_like(self.onsets)
#         self.event_array = np.column_stack(((self.onsets * self.data_loader.fs).astype(int), np.zeros_like(self.onsets,
#                                                                                                            dtype=int),
#                                             self.events[:, 0].astype(int)))
#         self.task_events = self.events[np.where(self.events == 6)[0], :]

#         self.onsets = self.task_events[:, 1] - self.origin_time
#         self.durations = np.zeros_like(self.onsets)
#         self.task_event_array = np.column_stack(((self.onsets * self.data_loader.fs).astype(int),
#                                                  np.zeros_like(self.onsets, dtype=int),
#                                                  self.task_events[:, 0].astype(int)))

    def apply_referencing(self):
        preprocessed_avg_ref = self.preprocessed_data.set_eeg_reference(
            ref_channels='average', projection=True)
        for title, proj in zip(['Original', 'Average'], [False, True]):
            fig = preprocessed_avg_ref.plot(
                proj=proj, duration=preprocessed_avg_ref.n_times / self.fs)
            # make room for title
            fig.subplots_adjust(top=0.9)
            fig.suptitle('{} reference'.format(title),
                         size='xx-large',
                         weight='bold')
        plt.show()
        if self.generate_report:
            self.report.add_figs_to_section(
                fig,
                captions='{} reference'.format(title),
                section='consecutive EEG')

    def load_epoches(self, caption):
        signal_epoch_file_path = r'{}\{}_signal_epochs_{}s_{}s_{}Hz_{}Hz_{}.fif'.format(
            Analyzer.epoched_data_folder, self.data_loader.exp_name,
            self.signal_tmin, self.signal_tmax, self.low_freq, self.hi_freq,
            caption)
        epochs = mne.read_epochs(signal_epoch_file_path)
        return epochs

#         self.signal_epochs_cued = self.signal_epochs[cue_type].copy()
#         fig_signal = self.signal_epochs_cued.plot()
#         fig_signal.subplots_adjust(top=0.9)
#         fig_signal.suptitle('{} signal epochs'.format(self.exp_name), size='xx-large', weight='bold')

    def epoch_data(self, tmin, tmax, baseline, cue_type, caption):
        event_dict = {v: k for k, v in self.data_loader.mapping.items()}
        events, event_id = mne.events_from_annotations(self.preprocessed_data,
                                                       event_id=event_dict)
        # pdb.set_trace()
        self.epochs = mne.Epochs(self.preprocessed_data,
                                 events,
                                 tmin=tmin,
                                 tmax=tmax,
                                 event_id=event_id,
                                 preload=True,
                                 baseline=baseline,
                                 reject_by_annotation=True,
                                 event_repeated='drop')
        epochs_cued = self.epochs[cue_type]
        ch_counter = 0
        for ch in self.pick_channels:
            fig_image_map = epochs_cued.plot_image(picks=ch, show=False)
            ch_counter += 3
            if self.generate_report:
                self.report.add_figs_to_section(fig_image_map,
                                                captions='{} {} epochs'.format(
                                                    self.exp_name, caption),
                                                section='epochs')
        fig = epochs_cued.plot(show=False)
        fig.subplots_adjust(top=0.9)
        fig.suptitle('{} epochs'.format(caption),
                     size='xx-large',
                     weight='bold')

        return epochs_cued

    def epoch_data_self(self, tmin, tmax, baseline, cue_type, caption):
        event_dict = {v: k for k, v in self.data_loader.mapping.items()}
        events, event_id = mne.events_from_annotations(self.preprocessed_data,
                                                       event_id=event_dict)
        # pdb.set_trace()
        self.epochs = mne.Epochs(self.preprocessed_data,
                                 events,
                                 tmin=tmin,
                                 tmax=tmax,
                                 event_id=event_id,
                                 preload=True,
                                 baseline=baseline,
                                 reject_by_annotation=True,
                                 event_repeated='drop')
        ch_counter = 0
        for ch in self.pick_channels:
            fig_image_map = self.epochs[cue_type].plot_image(picks=ch,
                                                             show=False)
            ch_counter += 3
            if self.generate_report:
                self.report.add_figs_to_section(fig_image_map,
                                                captions='{} {} epochs'.format(
                                                    self.exp_name, caption),
                                                section='epochs')
        fig = self.epochs[cue_type].plot(show=False)
        fig.subplots_adjust(top=0.9)
        fig.suptitle('{} epochs'.format(caption),
                     size='xx-large',
                     weight='bold')

        return self.epochs[cue_type]

    # def choose_epochs(self, epoch, cue_type, caption):
    #     epochs_cued = epoch[cue_type].copy()
    #     fig = epochs_cued.plot(show=False)
    #     fig.subplots_adjust(top=0.9)
    #     fig.suptitle('{} epochs'.format(caption), size='xx-large', weight='bold')
    #
    # def plot_epochs_image(self, epoch, cue_type, caption):
    #     epochs_cued = epoch[cue_type].copy()
    #     ch_counter = 0
    #     for ch in self.pick_channels:
    #         fig_image_map = epochs_cued.plot_image(picks=ch, show=False)
    #         ch_counter += 3
    #         if self.generate_report:
    #             self.report.add_figs_to_section(fig_image_map, captions='{} {} epochs'.format(self.exp_name, caption),
    #                                             section='epochs')

    def save_epoched_data(self, epoch, caption):
        epoch.save(r'{}\{}_signal_epochs_{}s_{}s_{}Hz_{}Hz_{}.fif'.format(
            Analyzer.epoched_data_folder, self.data_loader.exp_name,
            self.signal_tmin, self.signal_tmax, self.low_freq, self.hi_freq,
            caption),
                   overwrite=True)

    def create_evoked_data(self, epoch_data, tmin, tmax, caption, line_color,
                           vline):
        evoked_data = epoch_data.average()

        times = np.linspace(tmin, tmax, tmax - tmin + 1)
        fig = evoked_data.plot_joint(times=times, show=False)
        fig.subplots_adjust(top=0.9)
        fig.suptitle('{} {}'.format(self.exp_name, caption),
                     size='xx-large',
                     weight='bold')

        fig_topo = evoked_data.plot_topo(color=line_color,
                                         ylim=dict(eeg=[-10, 10]),
                                         show=False,
                                         title="{} {}".format(
                                             self.exp_name, caption),
                                         vline=vline)
        fig_topo.set_size_inches(10, 10)

        if self.generate_report:
            # self.report.add_figs_to_section(fig, captions='{} {}'.format(self.exp_name, caption), section='evoked')
            self.report.add_figs_to_section(fig_topo,
                                            captions='{} {} topo'.format(
                                                self.exp_name, caption),
                                            section='evoked')

        return evoked_data

    def lap(self, data_pre_lap):
        # small surface-Laplacian spatial filter: weight the centre channel
        # by 1 and each of the four neighbours by -1/4, i.e. subtract the
        # neighbourhood mean from the centre channel
        lap_filter = [-1 / 4] * 4
        lap_filter.insert(0, 1)  # center channel is channel 0
        lap_filter = np.asarray(lap_filter)
        temp = np.reshape(lap_filter, (1, 5))
        data_lap_filtered = np.dot(temp, data_pre_lap)
        return data_lap_filtered

    def lap_Cz(self, data_pre_lap):
        # same filter with eight neighbours (-1/8 each): a large Laplacian
        # centred on Cz
        lap_filter = [-1 / 8] * 8
        lap_filter.insert(0, 1)  # center channel is channel 0
        lap_filter = np.asarray(lap_filter)
        temp = np.reshape(lap_filter, (1, 9))
        data_lap_filtered = np.dot(temp, data_pre_lap)
        return data_lap_filtered

    def apply_lap(self,
                  evoked_data,
                  caption,
                  lap_type='large',
                  tmin=-3,
                  fs=500):
        channel_names = evoked_data.ch_names
        if lap_type == 'large':
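            # each channel list is centre first, then the four Laplacian
            # neighbours, matching the weight ordering in lap()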
            large_lap_C3_chs = [
                channel_names.index('C3'),
                channel_names.index('T7'),
                channel_names.index('Cz'),
                channel_names.index('F3'),
                channel_names.index('P3')
            ]
            large_lap_Cz_chs = [
                channel_names.index('Cz'),
                channel_names.index('C3'),
                channel_names.index('C4'),
                channel_names.index('Fz'),
                channel_names.index('Pz')
            ]
            large_lap_C4_chs = [
                channel_names.index('C4'),
                channel_names.index('Cz'),
                channel_names.index('T8'),
                channel_names.index('F4'),
                channel_names.index('P4')
            ]
            large_lap_FC1_chs = [
                channel_names.index('FC1'),
                channel_names.index('F3'),
                channel_names.index('Fz'),
                channel_names.index('C3'),
                channel_names.index('Cz')
            ]
            large_lap_FC2_chs = [
                channel_names.index('FC2'),
                channel_names.index('Cz'),
                channel_names.index('Fz'),
                channel_names.index('F4'),
                channel_names.index('C4')
            ]

            C3_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_C3_chs, :])
            Cz_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_Cz_chs, :])
            C4_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_C4_chs, :])
            FC1_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_FC1_chs, :])
            FC2_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_FC2_chs, :])
            large_lap_evoked = np.r_[C3_large_lap_evoked, Cz_large_lap_evoked,
                                     C4_large_lap_evoked, FC1_large_lap_evoked,
                                     FC2_large_lap_evoked]
            info = mne.create_info(
                ch_names=['C3', 'Cz', 'C4', 'FC1', 'FC2'],
                sfreq=fs,
                ch_types=['eeg', 'eeg', 'eeg', 'eeg', 'eeg'])
            info.set_montage('standard_1020')
            self.large_lap_evoked = mne.EvokedArray(large_lap_evoked,
                                                    info=info,
                                                    tmin=tmin,
                                                    nave=evoked_data.nave)
        elif lap_type == 'mixed':
            large_lap_C3_chs = [
                channel_names.index('C3'),
                channel_names.index('T7'),
                channel_names.index('Cz'),
                channel_names.index('F3'),
                channel_names.index('P3')
            ]
            large_lap_C1_chs = [
                channel_names.index('C1'),
                channel_names.index('C3'),
                channel_names.index('Cz'),
                channel_names.index('FC1'),
                channel_names.index('CP1')
            ]
            large_lap_Cz_chs = [
                channel_names.index('Cz'),
                channel_names.index('C3'),
                channel_names.index('C4'),
                channel_names.index('Fz'),
                channel_names.index('Pz')
            ]
            large_lap_C2_chs = [
                channel_names.index('C2'),
                channel_names.index('C4'),
                channel_names.index('Cz'),
                channel_names.index('FC2'),
                channel_names.index('CP2')
            ]
            large_lap_C4_chs = [
                channel_names.index('C4'),
                channel_names.index('Cz'),
                channel_names.index('T8'),
                channel_names.index('F4'),
                channel_names.index('P4')
            ]
            large_lap_FC1_chs = [
                channel_names.index('FC1'),
                channel_names.index('F3'),
                channel_names.index('Fz'),
                channel_names.index('C3'),
                channel_names.index('Cz')
            ]
            large_lap_FC2_chs = [
                channel_names.index('FC2'),
                channel_names.index('Cz'),
                channel_names.index('Fz'),
                channel_names.index('F4'),
                channel_names.index('C4')
            ]

            C3_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_C3_chs, :])
            C1_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_C1_chs, :])
            Cz_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_Cz_chs, :])
            C2_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_C2_chs, :])
            C4_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_C4_chs, :])
            FC1_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_FC1_chs, :])
            FC2_large_lap_evoked = self.lap(
                evoked_data.copy().data[large_lap_FC2_chs, :])
            large_lap_evoked = np.r_[C3_large_lap_evoked, C1_large_lap_evoked,
                                     Cz_large_lap_evoked, C2_large_lap_evoked,
                                     C4_large_lap_evoked, FC1_large_lap_evoked,
                                     FC2_large_lap_evoked]
            info = mne.create_info(
                ch_names=['C3', 'C1', 'Cz', 'C2', 'C4', 'FC1', 'FC2'],
                sfreq=fs,
                ch_types=['eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg', 'eeg'])
            info.set_montage('standard_1020')
            self.large_lap_evoked = mne.EvokedArray(large_lap_evoked,
                                                    info=info,
                                                    tmin=tmin,
                                                    nave=evoked_data.nave)
        elif lap_type == 'large_Cz':
            channels = ['Cz', 'F3', 'F4', 'Fz', 'C3', 'C4', 'P3', 'P4', 'Pz']
            ch_idx = []
            for ch in channels:
                ch_idx.append(channel_names.index(ch))
            Cz_large_lap_evked = self.lap_Cz(
                evoked_data.copy().data[ch_idx, :])
            info = mne.create_info(ch_names=['Cz'], sfreq=fs, ch_types=['eeg'])
            self.large_lap_evoked = mne.EvokedArray(Cz_large_lap_evked,
                                                    info=info,
                                                    tmin=tmin,
                                                    nave=evoked_data.nave)

        return self.large_lap_evoked
#             fig = self.large_lap_evoked.plot_topo(show=False, title="C3 Cz C4 LAP {}".format(caption),
#                                                   ylim=dict(eeg=[-10, 10]))

#             if self.generate_report:
#                 self.report.add_figs_to_section(fig, captions='{} {} C3, Cz, C4 signal lap'.format(self.exp_name, caption),
#                                                 section='evoked')

    def plot_power_tfr(self,
                       epoch,
                       low_freq,
                       high_freq,
                       toi_min,
                       toi_max,
                       num_freq,
                       task_name,
                       mode,
                       baseline=(0, 2)):
        # define frequencies of interest (log-spaced)
        # freqs = np.logspace(*np.log10([low_freq, high_freq]), num=num_freq)

        freqs = np.linspace(low_freq, high_freq, num=num_freq)
        n_cycles = [5] * len(freqs)  # different number of cycle per frequency
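        # tfr_morlet below computes a Morlet-wavelet time-frequency
        # decomposition; decim=3 decimates the time axis of the output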

        power, itc = tfr_morlet(epoch,
                                freqs=freqs,
                                n_cycles=n_cycles,
                                use_fft=True,
                                return_itc=True,
                                decim=3,
                                n_jobs=1)
        power.plot_topo(baseline=baseline,
                        mode=mode,
                        title="{} ERD".format(task_name),
                        show=False,
                        vmin=-0.9,
                        vmax=0.9)

        fig, axis = plt.subplots(1, 5, figsize=(1, 4))
        fig.subplots_adjust(top=0.9)
        fig.suptitle('{}'.format(self.exp_name),
                     size='xx-large',
                     weight='bold')
        # delta band
        power.plot_topomap(tmin=toi_min,
                           tmax=toi_max,
                           fmin=0,
                           fmax=4,
                           baseline=baseline,
                           mode=mode,
                           axes=axis[0],
                           title='delta',
                           vmin=-0.5,
                           vmax=0.5,
                           colorbar=False,
                           show=False)

        # theta band
        power.plot_topomap(tmin=toi_min,
                           tmax=toi_max,
                           fmin=4,
                           fmax=8,
                           baseline=baseline,
                           mode=mode,
                           axes=axis[1],
                           title='theta',
                           vmin=-0.5,
                           vmax=0.5,
                           colorbar=False,
                           show=False)

        # alpha band
        power.plot_topomap(tmin=toi_min,
                           tmax=toi_max,
                           fmin=8,
                           fmax=12,
                           baseline=baseline,
                           mode=mode,
                           axes=axis[2],
                           title='alpha',
                           vmin=-0.5,
                           vmax=0.5,
                           colorbar=False,
                           show=False)

        # beta band
        power.plot_topomap(tmin=toi_min,
                           tmax=toi_max,
                           fmin=12,
                           fmax=30,
                           baseline=baseline,
                           mode=mode,
                           axes=axis[3],
                           title='beta',
                           vmin=-0.5,
                           vmax=0.5,
                           colorbar=False,
                           show=False)
        # gamma band
        power.plot_topomap(tmin=toi_min,
                           tmax=toi_max,
                           fmin=30,
                           fmax=45,
                           baseline=baseline,
                           mode=mode,
                           axes=axis[4],
                           title='gamma',
                           vmin=-0.5,
                           vmax=0.5,
                           colorbar=False,
                           show=False)
        # plt.show()
        if self.generate_report:
            self.report.add_figs_to_section(
                fig, captions="{} ERD".format(task_name), section="bands ERD")
        return power

    def plot_power_band_temporal_ERD(self,
                                     epoch,
                                     low_freq,
                                     high_freq,
                                     toi_min,
                                     toi_max,
                                     num_freq,
                                     caption,
                                     task_name,
                                     mode,
                                     baseline=(0, 2)):
        # define frequencies of interest (log-spaced)
        freqs = np.linspace(low_freq, high_freq, num=num_freq)
        n_cycles = [5] * len(freqs)  # different number of cycle per frequency

        power, itc = tfr_morlet(epoch,
                                freqs=freqs,
                                n_cycles=n_cycles,
                                use_fft=True,
                                return_itc=True,
                                decim=3,
                                n_jobs=1)
        fig_tfr = power.plot_topo(baseline=baseline,
                                  mode=mode,
                                  title="{} {} ERD".format(task_name, caption),
                                  show=False,
                                  fig_facecolor='w',
                                  font_color='k',
                                  vmin=-0.5,
                                  vmax=0.5)
        fig_tfr.set_size_inches(10, 6)
        fig_tfr.savefig("{}\{}_tfr.png".format(Analyzer.TFR_folder,
                                               self.exp_name))

        counter_delta = 0
        counter_theta = 0
        counter_alpha = 0
        counter_beta = 0
        counter_gamma = 0

        fig_delta, axis_delta = plt.subplots(1, 6, figsize=(10, 6))
        fig_delta.suptitle('Delta {}'.format(self.exp_name),
                           size='xx-large',
                           weight='bold')
        fig_theta, axis_theta = plt.subplots(1, 6, figsize=(10, 6))
        fig_theta.suptitle('Theta {}'.format(self.exp_name),
                           size='xx-large',
                           weight='bold')
        fig_alpha, axis_alpha = plt.subplots(1, 6, figsize=(10, 6))
        fig_alpha.suptitle('Alpha {}'.format(self.exp_name),
                           size='xx-large',
                           weight='bold')
        fig_beta, axis_beta = plt.subplots(1, 6, figsize=(10, 6))
        fig_beta.suptitle('Beta {}'.format(self.exp_name),
                          size='xx-large',
                          weight='bold')
        fig_gamma, axis_gamma = plt.subplots(1, 6, figsize=(10, 6))
        fig_gamma.suptitle('Gamma {}'.format(self.exp_name),
                           size='xx-large',
                           weight='bold')
        while toi_max < 5:
            # delta band

            power.plot_topomap(tmin=toi_min,
                               tmax=toi_max,
                               fmin=0,
                               fmax=4,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(toi_min, toi_max),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=False,
                               show=False,
                               axes=axis_delta[counter_delta])
            counter_delta += 1

            # theta band

            power.plot_topomap(tmin=toi_min,
                               tmax=toi_max,
                               fmin=4,
                               fmax=8,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(toi_min, toi_max),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=False,
                               show=False,
                               axes=axis_theta[counter_theta])
            counter_theta += 1
            # alpha band

            power.plot_topomap(tmin=toi_min,
                               tmax=toi_max,
                               fmin=8,
                               fmax=12,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(toi_min, toi_max),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=False,
                               show=False,
                               axes=axis_alpha[counter_alpha])
            counter_alpha += 1

            # beta band

            power.plot_topomap(tmin=toi_min,
                               tmax=toi_max,
                               fmin=12,
                               fmax=30,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(toi_min, toi_max),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=False,
                               show=False,
                               axes=axis_beta[counter_beta])
            counter_beta += 1

            # gamma band

            power.plot_topomap(tmin=toi_min,
                               tmax=toi_max,
                               fmin=30,
                               fmax=45,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(toi_min, toi_max),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=False,
                               show=False,
                               axes=axis_gamma[counter_gamma])
            counter_gamma += 1

            toi_min += 1
            toi_max += 1
        # plt.show()

        if self.generate_report:
            self.report.add_figs_to_section(
                fig_tfr,
                captions="{} ERD".format(task_name),
                section='bands ERD')
            self.report.add_figs_to_section(fig_delta,
                                            captions='Delta {}'.format(
                                                self.exp_name),
                                            section="bands ERD")
            self.report.add_figs_to_section(fig_theta,
                                            captions='Theta {}'.format(
                                                self.exp_name),
                                            section="bands ERD")
            self.report.add_figs_to_section(fig_alpha,
                                            captions='Alpha {}'.format(
                                                self.exp_name),
                                            section="bands ERD")
            self.report.add_figs_to_section(fig_beta,
                                            captions='Beta {}'.format(
                                                self.exp_name),
                                            section="bands ERD")
            self.report.add_figs_to_section(fig_gamma,
                                            captions='Gamma {}'.format(
                                                self.exp_name),
                                            section="bands ERD")
        fig_delta.savefig("{}\{}_delta.png".format(
            Analyzer.temporal_SMR_different_bands_folder, self.exp_name))
        fig_theta.savefig("{}\{}_theta.png".format(
            Analyzer.temporal_SMR_different_bands_folder, self.exp_name))
        fig_alpha.savefig("{}\{}_alpha.png".format(
            Analyzer.temporal_SMR_different_bands_folder, self.exp_name))
        fig_beta.savefig("{}\{}_beta.png".format(
            Analyzer.temporal_SMR_different_bands_folder, self.exp_name))
        fig_gamma.savefig("{}\{}_gamma.png".format(
            Analyzer.temporal_SMR_different_bands_folder, self.exp_name))

    def plot_power_topomap(self,
                           power,
                           task_name,
                           mode,
                           baseline,
                           toi_min=-3,
                           toi_max=5,
                           colorbar=False):
        counter_alpha = 0
        counter_beta = 0

        fig_alpha, axis_alpha = plt.subplots(1,
                                             int(toi_max - toi_min),
                                             figsize=(10, 6))
        fig_alpha.suptitle('Alpha {}'.format(task_name),
                           size='xx-large',
                           weight='bold')
        fig_beta, axis_beta = plt.subplots(1,
                                           int(toi_max - toi_min),
                                           figsize=(10, 6))
        fig_beta.suptitle('Beta {}'.format(task_name),
                          size='xx-large',
                          weight='bold')
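        # slide a 1 s window across [toi_min, toi_max], drawing one topomap
        # per band and window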
        t_down = toi_min
        t_up = t_down + 1
        while t_up <= toi_max:
            # alpha band

            power.plot_topomap(tmin=t_down,
                               tmax=t_up,
                               fmin=8,
                               fmax=12,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(t_down, t_up),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=colorbar,
                               show=False,
                               axes=axis_alpha[counter_alpha])
            counter_alpha += 1

            # beta band

            power.plot_topomap(tmin=t_down,
                               tmax=t_up,
                               fmin=12,
                               fmax=30,
                               baseline=baseline,
                               mode=mode,
                               title='{}s~{}s'.format(t_down, t_up),
                               vmin=-0.5,
                               vmax=0.5,
                               colorbar=colorbar,
                               show=False,
                               axes=axis_beta[counter_beta])
            counter_beta += 1

            t_down += 1
            t_up += 1

    def plot_psd_topomap(self, epochs, vmin, vmax, toi_min=-3, toi_max=5):
        t_down = toi_min
        t_up = t_down + 1
        while t_up <= toi_max:
            epochs.plot_psd_topomap(bands=[(8, 12, 'Alpha'), (12, 30, 'Beta')],
                                    ch_type='eeg',
                                    normalize=True,
                                    tmin=t_down,
                                    tmax=t_up,
                                    vmin=vmin,
                                    vmax=vmax)
            t_down += 1
            t_up += 1

    def save_report(self, mode, Ref):
        if mode == 'MRCP':
            if Ref is not None:
                self.report.save(r'{}\{}_MRCP_{}_{}_{}.html'.format(
                    Analyzer.report_folder, self.exp_name, self.low_freq,
                    self.hi_freq, Ref))
            else:
                self.report.save(r'{}\{}_MRCP_{}_{}.html'.format(
                    Analyzer.report_folder, self.exp_name, self.low_freq,
                    self.hi_freq))
        elif mode == 'SMR':
            if Ref is not None:
                self.report.save(r'{}\{}_SMR_{}_{}_{}.html'.format(
                    Analyzer.report_folder, self.exp_name, Ref, self.low_freq,
                    self.hi_freq))
            else:
                self.report.save(r'{}\{}_SMR_{}_{}.html'.format(
                    Analyzer.report_folder, self.exp_name, self.low_freq,
                    self.hi_freq))
Example #11
def main():
    args = parser.parse_args()
    log = logger(args)
    log.write('V' * 50 + " configs " + 'V' * 50 + '\n')
    log.write(args)
    log.write('')
    log.write('Λ' * 50 + " configs " + 'Λ' * 50 + '\n')

    # load data
    input_size = (224, 224)
    dataset = DataLoader(args, input_size)
    train_data, val_data = dataset.load_data()

    num_classes = dataset.num_classes
    classes = dataset.classes
    log.write('\n\n')
    log.write('V' * 50 + " data " + 'V' * 50 + '\n')
    log.info('success load data.')
    log.info('num classes: %s' % num_classes)
    log.info('classes: ' + str(classes) + '\n')
    log.write('Λ' * 50 + " data " + 'Λ' * 50 + '\n')

    # Random seed
    if args.manual_seed is None:
        args.manual_seed = random.randint(1, 10000)
    random.seed(args.manual_seed)
    torch.manual_seed(args.manual_seed)
    np.random.seed(args.manual_seed)
    log.write('random seed is %s' % args.manual_seed)

    # pretrained or not
    log.write('\n\n')
    log.write('V' * 50 + " model " + 'V' * 50 + '\n')
    if args.pretrained:
        log.info("using pre-trained model")
    else:
        log.info("creating model from initial")

    # model
    log.info('using model: %s' % args.arch)
    log.write('')
    log.write('Λ' * 50 + " model " + 'Λ' * 50 + '\n')

    # resume model
    if args.resume:
        log.info('using resume model: %s' % args.resume)
        states = torch.load(args.resume)
        model = states['model']
        model.load_state_dict(states['state_dict'])
    else:
        log.info('not using resume model')
        if args.arch.startswith('dla'):
            model = eval(args.arch)(args.pretrained, num_classes)

        elif args.arch.startswith('efficientnet'):
            if args.pretrained:
                model = EfficientNet.from_pretrained(args.arch,
                                                     num_classes=num_classes)
            else:
                model = EfficientNet.from_name(args.arch,
                                               num_classes=num_classes)
        else:
            model = make_model(model_name=args.arch,
                               num_classes=num_classes,
                               pretrained=args.pretrained,
                               pool=nn.AdaptiveAvgPool2d(output_size=1),
                               classifier_factory=None,
                               input_size=input_size,
                               original_model_state_dict=None,
                               catch_output_size_exception=True)

    # cuda
    have_cuda = torch.cuda.is_available()
    use_cuda = args.use_gpu and have_cuda
    log.info('using cuda: %s' % use_cuda)
    if have_cuda and not use_cuda:
        log.info(
            '\nWARNING: found a GPU but not using it; switch it on with -ug or --use-gpu\n'
        )

    multi_gpus = False
    if use_cuda:
        torch.backends.cudnn.benchmark = True
        if args.multi_gpus:
            gpus = torch.cuda.device_count()
            multi_gpus = gpus > 1

    if multi_gpus:
        log.info('using multi gpus, found %d gpus.' % gpus)
        model = torch.nn.DataParallel(model).cuda()
    elif use_cuda:
        model = model.cuda()

    # criterian
    log.write('\n\n')
    log.write('V' * 50 + " criterion " + 'V' * 50 + '\n')
    if args.label_smoothing > 0 and args.mixup == 1:
        criterion = CrossEntropyWithLabelSmoothing()
        log.info('using label smoothing criterion')

    elif args.label_smoothing > 0 and args.mixup < 1:
        criterion = CrossEntropyWithMixup()
        log.info('using label smoothing and mixup criterion')

    elif args.mixup < 1 and args.label_smoothing == 0:
        criterion = CrossEntropyWithMixup()
        log.info('using mixup criterion')

    else:
        criterion = nn.CrossEntropyLoss()
        log.info('using normal cross entropy criterion')

    if use_cuda:
        criterion = criterion.cuda()

    log.write('using criterion: %s' % str(criterion))
    log.write('')
    log.write('Λ' * 50 + " criterion " + 'Λ' * 50 + '\n')
    # optimizer
    log.write('\n\n')
    log.write('V' * 50 + " optimizer " + 'V' * 50 + '\n')
    if args.linear_scaling:
        args.lr = 0.1 * args.train_batch / 256
    log.write('initial lr: %4f\n' % args.lr)
    if args.no_bias_decay:
        log.info('using no bias weight decay')
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
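        # two parameter groups: weight decay applies to all parameters except
        # biases and LayerNorm weights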
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay':
            args.weight_decay
        }, {
            'params':
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        }]
        optimizer = optim.SGD(optimizer_grouped_parameters,
                              lr=args.lr,
                              momentum=args.momentum)

    else:
        log.info('using bias weight decay')
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)

    if args.resume:
        optimizer.load_state_dict(states['optimizer'])
    log.write('using optimizer: %s' % str(optimizer))
    log.write('')
    log.write('Λ' * 50 + " optimizer " + 'Λ' * 50 + '\n')
    # low precision
    use_low_precision_training = args.low_precision_training
    if use_low_precision_training:
        from apex import amp
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')

    # lr scheduler
    iters_per_epoch = int(np.ceil(len(train_data) / args.train_batch))
    total_iters = iters_per_epoch * args.epochs
    log.write('\n\n')
    log.write('V' * 50 + " lr_scheduler " + 'V' * 50 + '\n')
    if args.warmup:
        log.info('using warmup scheduler, warmup epochs: %d' %
                 args.warmup_epochs)
        scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, iters_per_epoch * args.warmup_epochs, eta_min=1e-6)
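        # T_0 is one warmup period in iterations, so the cosine schedule
        # restarts every warmup_epochs epochs rather than warming up linearly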
    elif args.cosine:
        log.info('using cosine lr scheduler')
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                         T_max=total_iters)

    else:
        log.info('using normal lr decay scheduler')
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         factor=0.5,
                                                         patience=10,
                                                         min_lr=1e-6,
                                                         mode='min')

    log.write('using lr scheduler: %s' % str(scheduler))
    log.write('')
    log.write('Λ' * 50 + " lr_scheduler " + 'Λ' * 50 + '\n')

    log.write('\n\n')
    log.write('V' * 50 + " training start " + 'V' * 50 + '\n')
    best_acc = 0
    start = time.time()
    log.info('\nstart training ...')
    for epoch in range(1, args.epochs + 1):
        lr = optimizer.param_groups[-1]['lr']
        train_loss, train_acc = train_one_epoch(
            log, scheduler, train_data, model, criterion, optimizer, use_cuda,
            use_low_precision_training, args.label_smoothing, args.mixup)
        test_loss, test_acc = val_one_epoch(log, val_data, model, criterion,
                                            use_cuda)
        end = time.time()
        log.info(
            'epoch: [%d / %d], time spent(s): %.2f, mean time: %.2f, lr: %.4f, train loss: %.4f, train acc: %.4f, '
            'test loss: %.4f, test acc: %.4f' %
            (epoch, args.epochs, end - start, (end - start) / epoch, lr,
             train_loss, train_acc, test_loss, test_acc))
        states = dict()
        states['arch'] = args.arch
        if multi_gpus:
            states['model'] = model.module
            states['state_dict'] = model.module.state_dict()
        else:
            states['model'] = model
            states['state_dict'] = model.state_dict()
        states['optimizer'] = optimizer.state_dict()
        states['test_acc'] = test_acc
        states['train_acc'] = train_acc
        states['epoch'] = epoch
        states['classes'] = classes
        is_best = test_acc > best_acc
        if is_best:
            best_acc = test_acc
        log.save_checkpoint(states, is_best)

    log.write('\ntraining finished.')
    log.write('Λ' * 50 + " training finished " + 'Λ' * 50 + '\n')
    log.log_file.close()
    log.writer.close()