# --- Training hyperparameters ---
batch_size = 10
base_lr = 4e-5 # 2e-5
momentum = 0.9
weight_decay = 5e-4
lr_policy = "step"
gamma = 0.333
stepsize = 136106 #68053 // after each stepsize iterations update learning rate: lr=lr*gamma
max_iter = 600000
# Per-layer learning-rate multipliers.
# NOTE(review): the meaning of each slot is not visible in this chunk --
# presumably consumed by the "setup lr multipliers" code further down. TODO confirm.
lr_mult_distro = [1.0, 1.0, 4.0, 1]

# Pull one batch from the validation HDF5 file and print the array shapes,
# as a quick sanity check of the data pipeline.
# NOTE(review): DataIterator is a project-local class; the data layout
# (channels-first 3x368x368 input, 57x46x46 labels split at channel 38)
# is assumed from the shape arguments here -- verify against its definition.
di = DataIterator("../dataset/val_dataset.h5", data_shape=(3, 368, 368),
                  label_shape=(57, 46, 46),
                  split_point=38, batch_size=batch_size, shuffle=True)

x, y1, y2 = di.next()
print("x : ", x.shape)
print("y1 : ", y1.shape)
print("y2 : ", y2.shape)

# as suggested in: https://github.com/fchollet/keras/issues/5920
# Collect the weight variables of the layer(s) named 'prediction' so they can
# be treated specially (presumably given a different learning rate -- see the
# linked issue). TODO confirm against the optimizer setup.
last_layer_variables = list()
for layer in model.layers:
    #print(layer.weights)
    if layer.name in ['prediction']:
# NOTE(review): the body of the `if` above is missing -- this chunk is
# truncated here; the original source is already syntactically incomplete.
# NOTE(review): this chunk begins mid-branch -- the `if <use data-server>:`
# header that owns the next statements lies outside the visible source, so the
# fragment is syntactically incomplete exactly as found.
    # Network path: training/validation batches are streamed from
    # DataGeneratorClient processes (project-local class).
    train_di = train_client.gen()
    train_samples = 52597  # hard-coded count; presumably the training-set size -- confirm
    # Second client on the next port serves the validation stream.
    val_client = DataGeneratorClient(port=pargs.port + 1, host="localhost",
                                     hwm=160, batch_size=batch_size,
                                     pstages=paramNumStages)
    val_client.start()
    val_di = val_client.gen()
    val_samples = 2645  # hard-coded count; presumably the validation-set size -- confirm
else:
    # Fallback path: read the HDF5 datasets directly from disk via the
    # project-local DataIterator; sample counts come from the iterator itself.
    train_di = DataIterator("../dataset/train_dataset.h5", data_shape=(3, 368, 368),
                            mask_shape=(1, 46, 46),
                            label_shape=(57, 46, 46), vec_num=38, heat_num=19,
                            batch_size=batch_size, shuffle=True)
    train_samples = train_di.N
    val_di = DataIterator("../dataset/val_dataset.h5", data_shape=(3, 368, 368),
                          mask_shape=(1, 46, 46),
                          label_shape=(57, 46, 46), vec_num=38, heat_num=19,
                          batch_size=batch_size, shuffle=True)
    val_samples = val_di.N

# setup lr multipliers for conv layers