if __name__ == '__main__':
    # Script entry point: configure TensorFlow, load the bird dataset, and
    # build the model, loss function and learning-rate schedule for training.
    tf.compat.v1.enable_eager_execution()

    gpus = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpus))
    # BUG FIX: the original checked only len(gpu) > 0 but then touched both
    # gpu[0] and gpu[1], raising IndexError on single-GPU machines. Enable
    # memory growth on every detected GPU instead.
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)

    # Read dataset.
    # NOTE(review): hard-coded local volume path; path_root is computed but
    # unused — presumably DataSet(path_root) is the portable form; confirm.
    path_root = os.path.abspath(os.path.dirname(__file__))
    bird_data = DataSet("/Volumes/Watermelon")  # DataSet(path_root)
    phi_train = bird_data.get_phi(set=0)
    w = bird_data.get_w(alpha=1)  # (50*150)
    train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    train_ds, test_ds = bird_data.load_gpu(batch_size=BATCH_SIZE)

    modelaki = FinalModel()

    # Define loss and learning-rate schedule.
    loss_fun = Loss().final_loss
    step = tf.Variable(0, trainable=False)
    # Piecewise-constant LR decay: 0.05, then 0.005 after 187*5 steps, then
    # 0.0005 after 187*10 steps (187 is presumably steps-per-epoch — confirm).
    boundaries = [187 * 5, 187 * 10]
    values = [0.05, 0.005, 0.0005]
    learning_rate_fn = PiecewiseConstantDecay(boundaries, values)
    # Later, whenever we perform an optimization step, we pass in the step.
    learning_rate = learning_rate_fn(step)
import sys
sys.path.append("../src")  # make the project's src/ directory importable
from jointmodel import JFL

# Model / training hyper-parameters.
CHANNELS = 512
N_CLASSES = 200
SEMANTIC_SIZE = 28
BATCH_SIZE = 5
IMG_SIZE = 448
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)

# read dataset
# NOTE(review): hard-coded local volume path — presumably a dev mount; confirm.
database = DataSet("/Volumes/Watermelon")
PHI = database.get_phi()
DS, DS_test = database.load_gpu(batch_size=BATCH_SIZE)

tf.compat.v1.enable_eager_execution()

# Data-parallel training across all visible devices.
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

BUFFER_SIZE = 5
BATCH_SIZE_PER_REPLICA = 32
# Effective global batch size = per-replica batch * number of replicas.
GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 30

train_dataset = DS
# Shard/distribute the dataset across the strategy's replicas.
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)


class FinalModel(Model):