"0,1", data_device_id="cuda:0"
)  #0, 1, 2, 3, IMPORTANT: data_device_id is set to free gpu for storing the model, e.g."cuda:1"
multi_gpu = [0, 1]  # train on two GPUs

# SEED is expected to be defined in an earlier cell (e.g. SEED = 1234) — TODO confirm
debug = False  # True -> load only ~100 samples for a quick smoke run
IMG_SIZE = (256, 1600)
BATCH_SIZE = 32
NUM_WORKERS = 24

# FIX: the original path used '%s' on the IMG_SIZE tuple, producing a directory
# name containing "(256, 1600)" (parens and a space), and lacked the '../'
# prefix used by every other checkpoint path in this notebook. Format as WxH
# for consistency with checkpoint_path / LOG_PATH below.
warm_start, last_checkpoint_path = False, '../checkpoint/%s_%dx%d_v1_seed%s/best.pth.tar' % (
    MODEL, IMG_SIZE[0], IMG_SIZE[1], SEED)
checkpoint_path = '../checkpoint/nonzero_classifier_%s_%dx%d_v4_seed%s' % (
    MODEL, IMG_SIZE[0], IMG_SIZE[1], SEED)
LOG_PATH = '../logging/nonzero_classifier_%s_%dx%d_v4_seed%s.log' % (
    MODEL, IMG_SIZE[0], IMG_SIZE[1], SEED)
# torch.cuda.manual_seed_all(SEED)  # enable for full CUDA determinism

NUM_EPOCHS = 100
early_stopping_round = 10  # stop if no validation improvement for this many epochs
LearningRate = 0.02

######### Load data #########
train_dl, val_dl = prepare_trainset(BATCH_SIZE, NUM_WORKERS, SEED, IMG_SIZE,
                                    debug)

######### Run the training process #########
run_check_net(train_dl, val_dl, multi_gpu=multi_gpu)

print('------------------------\nComplete SEED=%d\n------------------------' %
      SEED)
Example #2
0
BATCH_SIZE = 16
NUM_WORKERS = 24

# Warm-start is disabled; the checkpoint path below is kept only for reference.
warm_start = False
last_checkpoint_path = '../checkpoint/nonzero_classifier_efficientnet-b5_512x768_v4_seed2024/best.pth.tar'

# Build checkpoint / log locations from a single run identifier.
run_tag = 'nonzero_classifier_%s_%dx%d_v4_seed%s' % (
    MODEL, IMG_SIZE[0], IMG_SIZE[1], SEED)
checkpoint_path = '../checkpoint/' + run_tag
LOG_PATH = '../logging/' + run_tag + '.log'
# torch.cuda.manual_seed_all(SEED)

NUM_EPOCHS = 30
LearningRate = 0.01
MIN_LR = 0.001
FREEZE = False  # True would train only the final fc layer for the first few epochs

early_stopping_round = 500

######### Load data #########
train_dl, val_dl = prepare_trainset(BATCH_SIZE,
                                    NUM_WORKERS,
                                    SEED,
                                    IMG_SIZE,
                                    debug,
                                    nonempty_only=False,
                                    crop=False)

######### Run the training process #########
run_check_net(train_dl, val_dl, multi_gpu=multi_gpu)

print('------------------------\nComplete SEED=%d\n------------------------' % SEED)
Example #3
0
BATCH_SIZE = 16
NUM_WORKERS = 24

# Warm-start checkpoint points at the earlier deeplabv3plus (v2) run;
# warm_start is False, so it is not actually loaded here.
warm_start = False
last_checkpoint_path = ('../checkpoint/deeplabv3plus_%s_%dx%d_v2_seed%s/last.pth.tar'
                        % (MODEL, IMG_SIZE[0], IMG_SIZE[1], SEED))

# Build checkpoint / log locations from a single run identifier.
run_tag = 'CSAILVision_%s_%dx%d_v3_seed%s' % (
    MODEL, IMG_SIZE[0], IMG_SIZE[1], SEED)
checkpoint_path = '../checkpoint/' + run_tag
LOG_PATH = '../logging/' + run_tag + '.log'
# torch.cuda.manual_seed_all(SEED)

NUM_EPOCHS = 50
early_stopping_round = 10  # stop if no validation improvement for this many epochs
LearningRate = 0.02

######### Load data #########
train_dl, val_dl = prepare_trainset(
    BATCH_SIZE,
    NUM_WORKERS,
    SEED,
    IMG_SIZE,
    debug,
    nonempty_only=False,  # True would keep only nonempty-mask samples
    crop=False,
    output_shape=None)

######### Run the training process #########
run_check_net(train_dl, val_dl, multi_gpu=multi_gpu, nonempty_only_loss=False)

print('------------------------\nComplete SEED=%d\n------------------------' % SEED)