Example #1
def mtrain(t_x, t_y, ep=250, cycles=25, bs=15, coshot=True):
    # Fit the module-level `model` with either a cosine hot-restart callback
    # or the SGDR warm-restart scheduler.
    if coshot:
        CosHot = CosHotRestart(nb_epochs=ep,
                               nb_cycles=cycles,
                               gain=1.1,
                               verbose=0)
        model.fit(t_x,
                  t_y,
                  epochs=ep,
                  verbose=2,
                  batch_size=bs,
                  callbacks=[CosHot])
    else:
        SGDR = SGDRScheduler(min_lr=1e-6,
                             max_lr=1e-2,
                             steps_per_epoch=np.ceil(len(t_x) / bs),  # use bs, not a hard-coded 15
                             lr_decay=0.9,
                             cycle_length=5,
                             mult_factor=1.2)
        model.fit(t_x,
                  t_y,
                  epochs=ep,
                  verbose=2,
                  batch_size=bs,
                  callbacks=[SGDR])
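
Both callbacks above implement variants of the schedule from "SGDR: Stochastic Gradient Descent with Warm Restarts" (Loshchilov & Hutter, 2017): within each cycle the learning rate is cosine-annealed from max_lr down to min_lr, then abruptly reset. A minimal sketch of the per-step rate (the function name is illustrative, not part of either callback):

import numpy as np

def sgdr_lr(step, cycle_length, min_lr=1e-6, max_lr=1e-2):
    # Cosine-anneal from max_lr to min_lr over one cycle, restarting at each
    # cycle boundary; mult_factor/lr_decay variants stretch and shrink cycles.
    t = (step % cycle_length) / cycle_length  # fraction of the cycle elapsed
    return min_lr + 0.5 * (max_lr - min_lr) * (1 + np.cos(np.pi * t))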
Example #2
def main():
    args = parser.parse_args()

    step = 0
    exp_name = f'{args.name}_{hp.max_lr}_{hp.cycle_length}'

    dataset = LJSpeechDataset(path=args.data,
                              text_transforms=text_to_sequence,
                              audio_transforms=wav_to_spectrogram,
                              cache=False)

    model = MelSpectrogramNet()
    if args.checkpoint:
        weights = torch.load(args.checkpoint)
        model.load_state_dict(weights)
        step = int(args.checkpoint.split('/')[-1].split('_')[-1].split('.')[0])
        exp_name = "_".join(args.checkpoint.split('/')[-1].split('_')[:-1])
    model.cuda(device=0)
    optimizer = Adam(model.parameters(),
                     lr=hp.max_lr,
                     weight_decay=hp.weight_decay,
                     betas=(0.9, 0.999),
                     eps=1e-6)
    if args.find_lr:
        scheduler = LRFinderScheduler(optimizer)
    else:
        scheduler = SGDRScheduler(optimizer,
                                  min_lr=hp.min_lr,
                                  max_lr=hp.max_lr,
                                  cycle_length=hp.cycle_length,
                                  current_step=step)

    train(model,
          optimizer,
          scheduler,
          dataset,
          args.epochs,
          args.batch_size,
          save_interval=50,
          exp_name=exp_name,
          device=0,
          step=step)
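
The checkpoint branch recovers the global step and experiment name by parsing the filename, so checkpoints are evidently saved as '<exp_name>_<step>.<ext>'. For a hypothetical path, the parse works out like this:

path = 'checkpoints/melnet_0.001_1000_4200.pt'      # hypothetical checkpoint
fname = path.split('/')[-1]                         # 'melnet_0.001_1000_4200.pt'
step = int(fname.split('_')[-1].split('.')[0])      # 4200
exp_name = '_'.join(fname.split('_')[:-1])          # 'melnet_0.001_1000'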
Example #3
def main():
    args = parser.parse_args()
    step = 0
    exp_name = f'{args.name}_{hp.max_lr}_{hp.cycle_length}'

    transforms = segtrans.JointCompose([segtrans.Resize(400),
                                        segtrans.RandomRotate(0, 90),
                                        segtrans.RandomCrop(256, 256),
                                        segtrans.ToTensor(),
                                        segtrans.Normalize(mean=hp.mean,
                                                           std=hp.std)])

    val_transforms = segtrans.JointCompose([segtrans.PadToFactor(),
                                            segtrans.ToTensor(),
                                            segtrans.Normalize(mean=hp.mean,
                                                               std=hp.std)])

    train_dataset = DSBDataset(f'{args.data}/train', transforms=transforms)
    val_dataset = DSBDataset(f'{args.data}/val', transforms=val_transforms)

    model = Unet()

    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint)
        model.load_state_dict(checkpoint['state'])
        step = checkpoint['step']
        exp_name = checkpoint['exp_name']

    optimizer = Adam(model.parameters(), lr=hp.max_lr)

    if args.find_lr:
        scheduler = LRFinderScheduler(optimizer)
    else:
        scheduler = SGDRScheduler(optimizer,
                                  min_lr=hp.min_lr,
                                  max_lr=hp.max_lr,
                                  cycle_length=hp.cycle_length,
                                  current_step=step)

    model.cuda(device=args.device)
    train(model, optimizer, scheduler, train_dataset, val_dataset,
          n_epochs=args.epochs, batch_size=args.batch_size,
          exp_name=exp_name, device=args.device, step=step)
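
The find_lr branch in these scripts runs an LR range test (Smith, "Cyclical Learning Rates for Training Neural Networks"): the rate is swept geometrically upward over a short run while the loss is recorded, and max_lr is picked just below the point where the loss diverges. A sketch of such a sweep, assuming the project's LRFinderScheduler works along these lines:

def range_test_lr(step, total_steps, start_lr=1e-7, end_lr=1.0):
    # Geometric interpolation from start_lr to end_lr over the whole sweep.
    t = step / max(1, total_steps - 1)
    return start_lr * (end_lr / start_lr) ** t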
Example #4
def main():
    ROOT = '/home/austin/data/dsb/train'

    transforms = segtrans.JointCompose([segtrans.Resize(300),
                                        segtrans.RandomCrop(256, 256),
                                        segtrans.ToTensor()],
                                       instance_masks=True)

    anchor_helper = AnchorHelper(areas=(16, 32, 64, 128, 256),
                                 positive_overlap=0.5,
                                 negative_overlap=0.4)

    dataset = DSBDataset(ROOT,
                         transforms,
                         merge_masks=False,
                         anchor_helper=anchor_helper)

    model = RetinaNet(num_classes=2)
    model.cuda()
    optimizer = Adam(model.parameters(), lr=1e-5)
    focal_loss = FocalLoss(gamma=2, alpha=1e3, ignore_index=-1)
    scheduler = SGDRScheduler(optimizer,
                              min_lr=1e-7,
                              max_lr=1e-6,
                              cycle_length=400,
                              current_step=0)
    train(model,
          optimizer,
          scheduler,
          focal_loss,
          dataset,
          n_epochs=20,
          batch_size=12,
          exp_name='retinacat')
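
FocalLoss here is the RetinaNet objective (Lin et al., 2017): it down-weights well-classified examples so the dense anchor classification is not swamped by easy background anchors, and ignore_index=-1 skips anchors that fall between the positive and negative overlap thresholds. A minimal sketch of the core term, without the ignore-index handling (the project's gamma=2, alpha=1e3 values above differ from the paper's defaults used here):

import torch
import torch.nn.functional as F

def focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    # FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t)
    ce = F.cross_entropy(logits, targets, reduction='none')
    p_t = torch.exp(-ce)  # model's probability for the true class
    return (alpha * (1.0 - p_t) ** gamma * ce).mean()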
Example #5
def main():
    for run in range(1):

        batch_size = 64
        input_dims = 3
        sequence_length = 10
        cell_size = 2
        encoded_cell_size = 1

        # Learning Rates
        max_rate = 0.18
        min_rate = 0.04
        steps_per_cycle = 20000
        warmup = 100

        # init Tensorboard
        writer = SummaryWriterWithGlobal(comment="DSARNN run " + str(run))

        # grab data
        train, valid = SetGenerator(sequence_length, time_steps=8000, target_index=0)\
            .train_valid(percent_as_float=0.05, batch_size=batch_size)

        # setup model
        model = DualAttentionSeq2Seq(input_dims, sequence_length, cell_size,
                                     encoded_cell_size)
        model.registerHooks(writer)

        criterion = torch.nn.MSELoss()
        optimiser = torch.optim.SGD(model.parameters(), lr=max_rate)
        scheduler = SGDRScheduler(optimiser, min_rate, max_rate,
                                  steps_per_cycle, warmup, 0)

        # around 20-40k epochs for full training; range(1) keeps this run short
        for epoch in tqdm(range(1)):

            for minibatch in train:
                input = Variable(minibatch[0])
                target = Variable(minibatch[1])
                optimiser.zero_grad()
                output = model(input)
                loss = criterion(output, target)
                loss.backward()
                optimiser.step()
                scheduler.step()
                writer.step()
                writer.add_scalar('loss/training loss', loss,
                                  writer.global_step)
                writer.add_scalar('loss/learning rate',
                                  monitors.get_learning_rate(optimiser),
                                  writer.global_step)

            for minibatch in valid:
                input = Variable(minibatch[0])
                target = Variable(minibatch[1])
                output = model(input)
                loss = criterion(output, target)
                writer.step()
                writer.add_scalar('loss/test loss', loss, writer.global_step)

            if epoch % 200 == 0:
                plotResult(model, valid, writer)
Example #6
from torch.optim import Adam

import hyperparams as hp
from datasets import load_question_dataset
from models import Encoder, Decoder, Seq2Seq
from sgdr import SGDRScheduler
from utils import train

train_iter, val_iter, test_iter, inp_lang, opt_lang = load_question_dataset(
    batch_size=hp.batch_size, dataset=hp.dataset, device=hp.device)

encoder = Encoder(source_vocab_size=len(inp_lang.vocab),
                  embed_dim=hp.embed_dim, hidden_dim=hp.hidden_dim,
                  n_layers=hp.n_layers, dropout=hp.dropout)
decoder = Decoder(target_vocab_size=len(opt_lang.vocab),
                  embed_dim=hp.embed_dim, hidden_dim=hp.hidden_dim,
                  n_layers=hp.n_layers, dropout=hp.dropout)
seq2seq = Seq2Seq(encoder, decoder)

seq2seq.to(hp.device)
optimizer = Adam(seq2seq.parameters(), lr=hp.max_lr)
scheduler = SGDRScheduler(optimizer, max_lr=hp.max_lr, cycle_length=hp.cycle_length)

train(seq2seq, optimizer, scheduler, train_iter, val_iter, num_epochs=hp.num_epochs)
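
Recent PyTorch ships this schedule as torch.optim.lr_scheduler.CosineAnnealingWarmRestarts, so a custom SGDRScheduler is no longer strictly necessary. A rough equivalent of the line above (assuming hp also defines min_lr, as in the other examples):

from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

scheduler = CosineAnnealingWarmRestarts(optimizer, T_0=hp.cycle_length,
                                        T_mult=1, eta_min=hp.min_lr)
# scheduler.step() is then called once per batch (optionally with a fractional epoch)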
Example #7
    @staticmethod
    def build_resnet_34(input_shape, num_outputs, weights=None):
        return ResnetBuilder.build(input_shape, num_outputs, basic_block,
                                   [3, 4, 6, 3], pretrained_weights=weights)

    @staticmethod
    def build_resnet_50(input_shape, num_outputs, weights=None):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck,
                                   [3, 4, 6, 3], pretrained_weights=weights)

    @staticmethod
    def build_resnet_101(input_shape, num_outputs, weights=None):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck,
                                   [3, 4, 23, 3], pretrained_weights=weights)

    @staticmethod
    def build_resnet_152(input_shape, num_outputs, weights=None):
        return ResnetBuilder.build(input_shape, num_outputs, bottleneck,
                                   [3, 8, 36, 3], pretrained_weights=weights)
#datagen = ImageDataGenerator(
#    featurewise_center=True,
#    featurewise_std_normalization=True,
#    rotation_range=20,
#    width_shift_range=0.2,
#    height_shift_range=0.2,
#    horizontal_flip=True)
#datagen.fit(x_train)
model = ResnetBuilder.build_resnet_18(input_shape=(256, 256, 3), num_outputs=1,
                                      weights="resnet18/fulltrain.56-0.01-0.0308.hdf5")
# note: this schedule is created but never added to the callbacks list below
schedule = SGDRScheduler(min_lr=1e-6, max_lr=1e-2, steps_per_epoch=np.ceil(100 / 4),
                         lr_decay=0.9, cycle_length=5, mult_factor=1.5)
#early_stopping = EarlyStopping(monitor='val_loss',patience=30, verbose=1)
#tensor=TensorBoard(log_dir='logs', histogram_freq=1, batch_size=4, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.3, patience=10,
                              min_lr=1e-7, verbose=1)
model_checkpoint = ModelCheckpoint('resnet18/fulltrain.{epoch:02d}-{loss:.2f}-{val_loss:.4f}.hdf5',
                                   monitor='val_loss', verbose=1, save_best_only=True)
history = model.fit(x_train, y_train, batch_size=4, epochs=100, verbose=1,
                    callbacks=[model_checkpoint, reduce_lr],
                    validation_data=(x_val, y_val))
#model.fit_generator(x_train, y_train, batch_size=4,validation_data=(x_val,y_val),steps_per_epoch=len(x_train) / 4,validation_steps=len(x_val)/4, epochs=100,callbacks=[model_checkpoint,reduce_lr],verbose=1)        
Example #8
def main():
    if not os.path.exists("checkpoints/"):
        os.makedirs("checkpoints/")
        print("Created a 'checkpoints' folder to save/load the model")

    args = parser.parse_args()
    MAKE_DATA = False
    step = 0
    exp_name = f'{args.name}_{hp.max_lr}_{hp.cycle_length}'

    if MAKE_DATA:
        print("Starting data generation")
        data_gen = make_data()
        data_gen.make_text_data()
        print("Generated text data")
        data_gen.make_audio_data()
        print("Generated audio data")

    dataset = VCTKSets()
    model = MelSpectrogramNet()

    if args.checkpoint:
        weights = torch.load(args.checkpoint)
        model.load_state_dict(weights)
        step = int(args.checkpoint.split('/')[-1].split('_')[-1].split('.')[0])
        exp_name = "_".join(args.checkpoint.split('/')[-1].split('_')[:-1])

    model.cuda(device=0)
    optimizer = Adam(model.parameters(),
                     lr=hp.max_lr,
                     weight_decay=hp.weight_decay,
                     betas=(0.9, 0.999),
                     eps=1e-6)

    if args.find_lr:
        scheduler = LRFinderScheduler(optimizer)
    else:
        scheduler = SGDRScheduler(optimizer,
                                  min_lr=hp.min_lr,
                                  max_lr=hp.max_lr,
                                  cycle_length=hp.cycle_length,
                                  current_step=step)

    if args.mode == 'gen':
        generate(model,
                 dataset,
                 batch_size=args.batch_size,
                 save_interval=50,
                 exp_name=exp_name,
                 device=0,
                 step=step)
    else:
        train(model,
              optimizer,
              scheduler,
              dataset,
              args.epochs,
              args.batch_size,
              save_interval=50,
              exp_name=exp_name,
              device=0,
              step=step)
Example #9
# callback section

from datetime import datetime

import keras

file_running = "4"  # run tag used to name the log directory

now = datetime.now()
# logdir = "tf_logs/.../" + now.strftime("%Y%m%d-%H%M%S") + "/"
logdir = "./logs/scalars_" + file_running + "/"
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir, histogram_freq=0, write_graph=True)
patience = 20
min_delta = 0.0001
early_stopping = keras.callbacks.EarlyStopping(monitor='val_mean_squared_error', min_delta=min_delta, patience=patience, verbose=1, mode='min', baseline=None, restore_best_weights=True)

warm_restart_lr = SGDRScheduler(min_lr=1e-5,
                                max_lr=1e-4,
                                steps_per_epoch=640,
                                lr_decay=0.9,
                                cycle_length=5,
                                mult_factor=1.5)

patience = 7
min_delta = 0.0001
factor = 0.8
reduce_lr_on_plateau = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=factor, patience=patience, verbose=1, mode='auto', min_delta=min_delta, cooldown=0, min_lr=5e-6)
lr_decay = 1e-2
def schedule_lr(epochs_index, current_lr):
    # identity schedule: the rate is returned unchanged, so lr_decay is unused
    return current_lr

schedule_lr_callback = keras.callbacks.LearningRateScheduler(schedule_lr, verbose=1)

############################
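
If the intent was for lr_decay to take effect, a per-epoch exponential decay passed to LearningRateScheduler would look roughly like this (an assumption; the original schedule_lr leaves the rate constant):

def schedule_lr_decaying(epochs_index, current_lr):
    # shrink the LR by a factor of (1 - lr_decay) every epoch
    return current_lr * (1.0 - lr_decay)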
Example #10
def main(batch_size: int = 24,
         epochs: int = 384,
         train_path: str = 'train',
         val_path: str = 'val',
         multi_gpu_weights=None,
         weights=None,
         workers: int = 8,
         find_lr: bool = False):

    keras_model = MobileDetectNetModel.complete_model()
    keras_model.summary()

    if weights is not None:
        keras_model.load_weights(weights, by_name=True)

    train_seq = MobileDetectNetSequence(train_path,
                                        stage="train",
                                        batch_size=batch_size)
    val_seq = MobileDetectNetSequence(val_path,
                                      stage="val",
                                      batch_size=batch_size)

    keras_model = keras.utils.multi_gpu_model(keras_model,
                                              gpus=[0, 1],
                                              cpu_merge=True,
                                              cpu_relocation=False)
    if multi_gpu_weights is not None:
        keras_model.load_weights(multi_gpu_weights, by_name=True)

    callbacks = []

    def region_loss(classes):
        def loss_fn(y_true, y_pred):
            # Don't penalize bounding box errors when there is no object present
            return 10 * classes * K.abs(y_pred - y_true)

        return loss_fn

    keras_model.compile(optimizer=SGD(),
                        loss=[
                            'mean_absolute_error',
                            region_loss(
                                keras_model.get_layer('classes').output),
                            'binary_crossentropy'
                        ])

    if find_lr:
        from lr_finder import LRFinder
        lr_finder = LRFinder(keras_model)
        lr_finder.find_generator(train_seq,
                                 start_lr=0.000001,
                                 end_lr=1,
                                 epochs=5)
        lr_finder.plot_loss()
        return

    filepath = "weights-{epoch:02d}-{val_loss:.4f}-multi-gpu.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')
    callbacks.append(checkpoint)

    sgdr_sched = SGDRScheduler(min_lr=0.00001,
                               max_lr=0.01,
                               # len() of a keras Sequence is already the number
                               # of batches, so don't divide by batch_size again
                               steps_per_epoch=len(train_seq),
                               mult_factor=1.5)
    callbacks.append(sgdr_sched)

    keras_model.fit_generator(
        train_seq,
        validation_data=val_seq,
        epochs=epochs,
        steps_per_epoch=len(train_seq),    # Sequence length == batches per epoch
        validation_steps=len(val_seq),
        callbacks=callbacks,
        use_multiprocessing=True,
        workers=workers,
        shuffle=True)
Example #11
    model = UNet(H, W)
    metrics = [dice_coef, iou, MeanIoU(num_classes=2), Recall(), Precision()]
    model.compile(loss=dice_loss, optimizer=Adam(lr), metrics=metrics)
    model.summary()

    callbacks = [
        ModelCheckpoint(model_path, verbose=1, save_best_only=True),
        # ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, min_lr=1e-7, verbose=1),
        CSVLogger(csv_path),
        TensorBoard(),
        EarlyStopping(monitor='val_loss',
                      patience=50,
                      restore_best_weights=False),
        SGDRScheduler(min_lr=1e-6,
                      max_lr=1e-3,
                      # steps per epoch = batches per epoch, not epochs/batch_size
                      steps_per_epoch=np.ceil(len(train_x) / batch_size),
                      lr_decay=0.9,
                      cycle_length=5,
                      mult_factor=1.5)
    ]

    train_steps = (len(train_x) // batch_size)
    valid_steps = (len(valid_x) // batch_size)

    if len(train_x) % batch_size != 0:
        train_steps += 1

    if len(valid_x) % batch_size != 0:
        valid_steps += 1

    model.fit(train_dataset,
              epochs=epochs,
              validation_data=valid_dataset,  # assumed to mirror train_dataset
              steps_per_epoch=train_steps,
              validation_steps=valid_steps,
              callbacks=callbacks)


print('\nProcessing validation data...')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(28, 28),
    batch_size=batch_size,
    class_mode='categorical',
    color_mode='grayscale')

# callbacks
epochs = 30000
samples = 45000

schedule = SGDRScheduler(min_lr=1e-4,
                         max_lr=1e-3,
                         steps_per_epoch=np.ceil(samples / batch_size),
                         lr_decay=0.9,
                         cycle_length=20,
                         mult_factor=1.5)

tensorboard = TensorBoard(log_dir='./graph',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)

# best checkpoint
filepath = "models/checkpoints/best_weights.hdf5"
best_checkpoint = ModelCheckpoint(filepath,
                                  monitor='val_acc',
                                  verbose=1,
                                  save_best_only=True,
                                  mode='max')
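
The snippet ends before these callbacks are attached to training; wiring them up would look roughly like the following, assuming a compiled model and a train_generator built with flow_from_directory like the validation_generator above:

history = model.fit_generator(train_generator,
                              steps_per_epoch=np.ceil(samples / batch_size),
                              epochs=epochs,
                              validation_data=validation_generator,
                              callbacks=[schedule, tensorboard, best_checkpoint])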