def exp(args, fold_idx, train_set, valid_set, test_set):
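    """Run one cross-validation fold: train on train_set, select checkpoints
    on valid_set, and return the test-set results for fold_idx as a DataFrame."""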

    path = args.save_root + args.result_dir
    os.makedirs(path, exist_ok=True)
    os.makedirs(path + '/models', exist_ok=True)
    os.makedirs(path + '/logs', exist_ok=True)

    logger = eegdg_logger(path + f'/logs/{fold_idx}')

    with open(path + '/args.txt', 'w') as f:
        f.write(str(args))

    import torch.cuda
    # Use the GPU when one is available, otherwise fall back to the CPU.
    cuda = torch.cuda.is_available()
    device = 'cuda' if cuda else 'cpu'

    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

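    # Seed every RNG (Python, PyTorch, CUDA, NumPy) for reproducibility.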
    seed = args.seed
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

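    # Training and validation loaders reshuffle each epoch; the test loader does not.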
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_set,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    model = models.get_model(args)
    # model = FcClfNet(embedding_net)
    # model  = torch.nn.DataParallel(model)

    mb_params = utils.param_size(model)
    print(f"Model size = {mb_params:.4f} MB")
    if cuda:
        model.to(device)
    print(model)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=1e-4,
                                momentum=0.9)
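    # Cosine-anneal the learning rate over the course of training.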
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           T_max=args.epochs - 1)

    results_columns = [
        'valid_loss', 'test_loss', 'valid_accuracy', 'test_accuracy'
    ]
    df = pd.DataFrame(columns=results_columns)

    valid_acc = 0
    valid_min_loss = float('inf')
    best_acc = 0
    best_acc_loss = 0
    max_acc = 0
    best_epoch = 0
    best_loss_epoch = 0
    max_epoch = 0

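    # Early stopping: abort if the validation loss has not improved
    # for n_epochs_stop consecutive epochs.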
    n_epochs_stop = 200
    epochs_no_improve = 0
    early_stop = False

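    # Train for args.epochs - 1 epochs, evaluating on all three splits after each one.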
    for epochidx in range(1, args.epochs):
        print(epochidx)
        start = time.time()
        train(10, model, device, train_loader, optimizer, scheduler, cuda,
              args.gpuidx)
        print(f'total time: {time.time()-start}')
        utils.blockPrint()
        train_loss, train_score = eval(model, device, train_loader)
        valid_loss, valid_score = eval(model, device, valid_loader)
        test_loss, test_score = eval(model, device, test_loader)
        utils.enablePrint()

        scheduler.step()
        lr = scheduler.get_last_lr()[0]

        print(f'LR : {lr}')
        logger.log_training(train_loss, train_score, test_loss, test_score, lr,
                            epochidx)

        results = {
            'valid_loss': valid_loss,
            'test_loss': test_loss,
            'valid_accuracy': valid_score,
            'test_accuracy': test_score
        }
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

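        # Checkpointing: keep the models with the best validation accuracy,
        # the lowest validation loss, and the highest test accuracy.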
        if valid_score > valid_acc:
            valid_acc = valid_score
            best_acc = test_score
            torch.save(
                model.state_dict(),
                os.path.join(path, 'models', f"model_fold{fold_idx}_best.pt"))
            best_epoch = epochidx

        if valid_loss < valid_min_loss:  # the validation loss improved
            valid_min_loss = valid_loss
            best_acc_loss = test_score
            torch.save(
                model.state_dict(),
                os.path.join(path, 'models',
                             f"model_fold{fold_idx}_best(loss).pt"))
            best_loss_epoch = epochidx
            epochs_no_improve = 0
        else:
            epochs_no_improve += 1

        if test_score > max_acc:
            max_acc = test_score
            torch.save(
                model.state_dict(),
                os.path.join(path, 'models', f"model_fold{fold_idx}_max.pt"))
            max_epoch = epochidx

        print(f'current best acc : {best_acc:.4f} at epoch {best_epoch}')
        print(
            f'current best(loss) acc : {best_acc_loss:.4f} at epoch {best_loss_epoch}'
        )
        print(f'current max acc : {max_acc:.4f} at epoch {max_epoch}')

        if epochidx > 5 and epochs_no_improve >= n_epochs_stop:
            print('Early stopping!')
            early_stop = True
            break

    if early_stop:
        print("Stopped")

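    # Reload the checkpoint with the best validation accuracy and report its test-set performance.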
    best_model = models.get_model(args)
    best_model.load_state_dict(
        torch.load(os.path.join(path, 'models',
                                f"model_fold{fold_idx}_best.pt"),
                   map_location=device))
    if cuda:
        best_model.to(device)

    print("best accuracy")
    _, _ = eval(best_model, device, test_loader)

    df = utils.get_testset_accuracy(best_model, device, test_set, args)

    return df


def exp(args, fold_idx, train_set, test_set):
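    """Variant of exp() without a validation split: trains on train_set,
    checkpoints by test accuracy and by last epoch, and returns the
    test-set results as a DataFrame."""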

    path = args.save_root + args.result_dir

    os.makedirs(path, exist_ok=True)
    os.makedirs(path + '/models', exist_ok=True)
    os.makedirs(path + '/logs', exist_ok=True)

    logger = eegdg_logger(path + f'/logs/{fold_idx}')

    with open(path + '/args.txt', 'w') as f:
        f.write(str(args))

    import torch.cuda
    # Use the GPU when one is available, otherwise fall back to the CPU.
    cuda = torch.cuda.is_available()
    device = 'cuda' if cuda else 'cpu'

    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    seed = args.seed
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    model = models.get_model(args)
    # model = FcClfNet(embedding_net)
    # model  = torch.nn.DataParallel(model)

    mb_params = utils.param_size(model)
    print(f"Model size = {mb_params:.4f} MB")
    if cuda:
        model.to(device)
    print(model)
    optimizer = torch.optim.RMSprop(model.parameters(),
                                    lr=args.lr,
                                    weight_decay=0.01)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           T_max=args.epochs - 1)

    results_columns = [
        'valid_loss', 'test_loss', 'valid_accuracy', 'test_accuracy'
    ]
    df = pd.DataFrame(columns=results_columns)

    valid_acc = 0
    best_acc = 0
    max_acc = 0

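    # Training loop without a validation split; evaluation uses the test set only.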
    for epochidx in range(1, args.epochs):
        print(epochidx)
        start = time.time()
        train(10, model, device, train_loader, optimizer, scheduler, cuda,
              args.gpuidx)
        print(f'total time: {time.time()-start}')
        # utils.blockPrint()
        train_loss, train_score = eval(model, device, train_loader)
        test_loss, test_score = eval(model, device, test_loader)
        # utils.enablePrint()
        scheduler.step()
        lr = scheduler.get_last_lr()[0]

        print(f'LR : {lr}')
        logger.log_training(train_loss, train_score, test_loss, test_score, lr,
                            epochidx)

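        # Keep both the best-so-far (by test accuracy) and the most recent checkpoints.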
        if test_score >= max_acc:
            max_acc = test_score
            torch.save(
                model.state_dict(),
                os.path.join(path, 'models', f"model_fold{fold_idx}_max.pt"))
            max_epoch = epochidx

        torch.save(
            model.state_dict(),
            os.path.join(path, 'models', f"model_fold{fold_idx}_last.pt"))
        print(f'current max acc : {max_acc:.4f} at epoch {max_epoch}')

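    # Reload the last-epoch checkpoint and report its test-set performance.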
    best_model = models.get_model(args)
    best_model.load_state_dict(
        torch.load(os.path.join(path, 'models',
                                f"model_fold{fold_idx}_last.pt"),
                   map_location=device))
    if cuda:
        best_model.to(device)

    print("last accuracy")
    _, _ = eval(best_model, device, test_loader)

    df = utils.get_testset_accuracy(best_model, device, test_set, args)
    logger.close()
    return df