Example #1
    #print("Current TestLoss: {}".format(loss))
    return classification


if __name__ == '__main__':
    tf.compat.v1.enable_eager_execution()
    gpu = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpu))
    if len(gpu) > 0:
        # enable memory growth on every visible GPU; indexing gpu[1]
        # unconditionally would crash on single-GPU machines
        for g in gpu:
            tf.config.experimental.set_memory_growth(g, True)

    # read dataset
    path_root = os.path.abspath(os.path.dirname(__file__))
    bird_data = DataSet("/Volumes/Watermelon")  # DataSet(path_root)
    phi_train = bird_data.get_phi(set=0)
    w = bird_data.get_w(alpha=1)  # (50*150)
    train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    train_ds, test_ds = bird_data.load_gpu(batch_size=BATCH_SIZE)

    #path_root = os.path.abspath(os.path.dirname(__file__))
    #database = DataSet("/Volumes/Watermelon")  # path_root)
    #PHI = database.get_phi()
    #DS, DS_test = database.load_gpu(batch_size=5)  # image_batch, label_batch
    modelaki = FinalModel()

    # define loss and opt functions
    loss_fun = Loss().final_loss
    step = tf.Variable(0, trainable=False)
    boundaries = [187 * 5, 187 * 10]
    values = [0.05, 0.005, 0.0005]
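    # A hedged sketch of where the (step, boundaries, values) triple above
    # usually goes: a piecewise-constant learning-rate schedule feeding an
    # SGD optimizer. The excerpt ends before the optimizer is built, so the
    # exact pairing is an assumption.
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries, values)  # 0.05 until step 935, 0.005 until 1870, then 0.0005
    opt = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)
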
Example #2
import sys

sys.path.append("../src")
from Losses import Loss
from jointmodel import JFL

import tensorflow as tf  # used below but missing from the original excerpt
# NOTE: DataSet (used below) is assumed to come from the project's dataset
# module; its import is not shown in this excerpt.

CHANNELS = 512
N_CLASSES = 200
SEMANTIC_SIZE = 28
BATCH_SIZE = 5
IMG_SIZE = 448
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)

# read dataset
database = DataSet("/Volumes/Watermelon")
PHI = database.get_phi()
DS, DS_test = database.load_gpu(batch_size=BATCH_SIZE)

tf.compat.v1.enable_eager_execution()
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

BUFFER_SIZE = 5
BATCH_SIZE_PER_REPLICA = 32
GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * strategy.num_replicas_in_sync
EPOCHS = 30

train_dataset = DS
train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
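# A hedged sketch of the distributed train step this setup typically feeds
# into; `model`, `per_example_loss_fn`, and `optimizer` are assumed to be
# created under strategy.scope(), since the excerpt stops here.
@tf.function
def distributed_train_step(dist_inputs):
    def step_fn(inputs):
        images, labels = inputs
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            # scale the per-example loss by the global batch size so the
            # cross-replica sum averages correctly
            loss = tf.nn.compute_average_loss(
                per_example_loss_fn(labels, logits),
                global_batch_size=GLOBAL_BATCH_SIZE)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss

    per_replica_losses = strategy.run(step_fn, args=(dist_inputs,))
    return strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)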

Example #3
        # Sum the global and two local scores; an averaged variant
        # (multiplying the sum by 1/3) is left commented out in the original.
        score = tf.add(tf.add(global_theta, local_scores0), local_scores1)
        # Sum the mapped semantic features the same way. A commented-out
        # variant also computed ||~phi_i - ~Ci|| and ||~phi_i - ~Cj|| on
        # l2-normalized features ('~' denotes normalization).
        phi_mapped = tf.add(tf.add(global_phi, local0_phi), local1_phi)

        y_pred = self.classifier(score)

        return m0, m1, mask0, mask1, score, phi_mapped, y_pred, self.C


if __name__ == '__main__':
    # just for testing
    path_root = os.path.abspath(
        os.path.dirname(__file__))  # '/content/gdrive/My Drive/data'
    bird_data = DataSet("D:/MY2/ADDL/DD2412_project/basemodel")
    PHI = bird_data.get_phi(set=0)
    #w = bird_data.get_w(alpha=1)  # (50*150)
    #train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    # only take 1000 images for local test
    train_ds = bird_data.load(GPU=False, train=True, batch_size=4)
    # test_ds = bird_data.load(GPU=False, train=False, batch_size=32)
    image_batch, label_batch = next(iter(train_ds))
    test_model = FinalModel()
    m0, m1, mask0, mask1, scores, phi, y_pred, C = test_model(image_batch, PHI)
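    # Quick shape sanity check on the forward pass above (the exact shapes
    # depend on the project's FinalModel and are not asserted here):
    print("scores:", scores.shape, "phi:", phi.shape)
    print("y_pred:", y_pred.shape, "centers C:", C.shape)
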
Example #4
    # (the excerpt opens mid-call; an SGD-style optimizer with momentum is
    # assumed, and the learning-rate value below is a placeholder)
    opt = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)

    # MODEL
    #net = FinalModel()
    #new_root = tf.train.Checkpoint(net=net)
    #status = new_root.restore(tf.train.latest_checkpoint('./tf_ckpts/'))
    net = FinalModel()
    ckpt = tf.train.Checkpoint(step=tf.Variable(1, dtype=tf.int32),
                               optimizer=opt,
                               net=net)
    # restores the latest checkpoint if one exists; this is a no-op when
    # './tf_ckpts/' is empty
    ckpt.restore(tf.train.latest_checkpoint('./tf_ckpts/'))

    #DATA
    path_root = os.path.abspath(os.path.dirname(__file__))
    bird_data = DataSet("/Volumes/Watermelon")  # DataSet(path_root)
    phi_train = bird_data.get_phi(set=0)
    w = bird_data.get_w(alpha=1)  # (50*150)
    train_class_list, test_class_list = bird_data.get_class_split(mode="easy")
    train_ds = bird_data.load(GPU=False, train=True, batch_size=32)
    #test_ds = bird_data.load(GPU=False, train=False, batch_size=4) #.load_gpu(batch_size=4)
    PHI = bird_data.get_phi(set=0)
    for im, label in train_ds:
        # forward pass over each training batch (commented-out single-image
        # debugging code removed)
        m0, m1, mask0, mask1, scores, phi, y_pred, C = net(im, PHI)

    nu = 50   # number of unseen classes
    ns = 150  # number of seen classes, matching get_w's (50*150) shape above
    W = tf.ones((nu, ns))  # placeholder unseen-to-seen weight matrix
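    # A hedged sketch of what a (nu, ns) weight matrix like this is for in
    # zero-shot setups: synthesizing unseen-class centers as convex
    # combinations of seen-class centers. The stand-in centers below are
    # illustrative only; the model's real centers (C above) are not used here,
    # and the 312-dim semantic size is taken from the other examples.
    W_norm = W / tf.reduce_sum(W, axis=1, keepdims=True)  # rows sum to 1
    C_seen = tf.random.normal((ns, 312))   # stand-in (ns, d) seen-class centers
    C_unseen = tf.matmul(W_norm, C_seen)   # (nu, d) synthesized centers
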
Example #5
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=score))



if __name__ == '__main__':
    #os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    #import tensorflow as tf
    tf.compat.v1.enable_eager_execution()
    gpu = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpu))
    if gpu:
        tf.config.experimental.set_memory_growth(gpu[0], True)
    path_root = os.path.abspath(os.path.dirname(__file__))  # '/content/gdrive/My Drive/data'
    bird_data = DataSet(path_root)
    # load all imgs
    phi = bird_data.get_phi()
    train_ds, test_ds = bird_data.load_gpu(batch_size=BATCH_SIZE)
    # only take 1000 images for local test
    #train_ds = bird_data.load(GPU=False, train=True, batch_size=32)
    #test_ds = bird_data.load(GPU=False, train=False, batch_size=32)

    #image_batch, label_batch = next(iter(train_ds))

    model = BaseModel(150, 312)
    #opt = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)  # or SGDW with weight decay
    opt = tfa.optimizers.SGDW(  # SGD with decoupled weight decay (tensorflow_addons)
        learning_rate=0.0001, weight_decay=5e-4, momentum=0.9)

    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=model)
    manager = tf.train.CheckpointManager(ckpt, path_root + '/tf_ckpts',
                                         max_to_keep=3)  # keep only the three most recent checkpoints
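    # Typical use of the manager above, mirroring the tf.train.CheckpointManager
    # docs pattern (the excerpt ends before the training loop):
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")
    # inside the training loop, advance the step and save periodically:
    #   ckpt.step.assign_add(1)
    #   if int(ckpt.step) % 100 == 0:
    #       manager.save()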