Example No. 1
import sys

import numpy as np

import utils


def prepare_one_dataset(dataset_name):
  if dataset_name == 'cubes':
    prepare_cubes()

  # Semantic segmentation datasets
  if dataset_name == 'human_seg':
    prepare_seg_from_meshcnn('human_body')

  if dataset_name == 'coseg':
    prepare_seg_from_meshcnn('coseg', 'coseg_aliens')
    prepare_seg_from_meshcnn('coseg', 'coseg_chairs')
    prepare_seg_from_meshcnn('coseg', 'coseg_vases')


if __name__ == '__main__':
  utils.config_gpu(False)
  np.random.seed(1)

  if len(sys.argv) != 2:
    print('Use: python dataset_prepare.py <dataset name>')
    print('For example: python dataset_prepare.py cubes')
    print('Another example: python dataset_prepare.py all')
  else:
    dataset_name = sys.argv[1]
    if dataset_name == 'all':
      for dataset_name in ['cubes', 'human_seg', 'coseg', 'modelnet40']:
        prepare_one_dataset(dataset_name)
    else:
      prepare_one_dataset(dataset_name)
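
For reference, the utils.config_gpu helper called in the __main__ block above is defined elsewhere in that repository. As a rough orientation only, here is a minimal sketch of what such a helper could look like under TensorFlow 2.x; the signature is an assumption, and the examples below call it with varying arguments:

import tensorflow as tf

def config_gpu(use_gpu=True):
    # Hypothetical sketch; the real utils.config_gpu is not shown on this page.
    gpus = tf.config.list_physical_devices('GPU')
    if use_gpu and gpus:
        for gpu in gpus:
            # Allocate GPU memory on demand instead of reserving it all upfront.
            tf.config.experimental.set_memory_growth(gpu, True)
    else:
        # Hide all GPUs so TensorFlow falls back to the CPU.
        tf.config.set_visible_devices([], 'GPU')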

Example No. 2
    parser.add_argument('--checkpoint_path', type=str, default='checkpoint.h5')
    parser.add_argument('--gpu', type=str, default='')
    parser.add_argument('--allow_growth', default=False, action='store_true')
    parser.add_argument("--model", type=str, default="johnson")
    args = parser.parse_args()
    # Arguments parsed

    # Check loss weights
    args.style_weight = std_input_list(args.style_weight, args.nb_classes,
                                       'Style weight')
    args.content_weight = std_input_list(args.content_weight, args.nb_classes,
                                         'Content weight')
    args.tv_weight = std_input_list(args.tv_weight, args.nb_classes,
                                    'TV weight')

    config_gpu(args.gpu, args.allow_growth)

    print('Creating model...', args.model)
    class_targets = K.placeholder(shape=(None, ), dtype=tf.int32)
    # The model will be trained with 256 x 256 images of the coco dataset.
    if args.model == "unet":
        model = unet_model(256,
                           width_factor=args.width_factor,
                           nb_classes=args.nb_classes,
                           targets=class_targets)
    else:
        # model = pastiche_model(256, width_factor=args.width_factor)
        model = pastiche_model(256,
                               width_factor=args.width_factor,
                               nb_classes=args.nb_classes,
                               targets=class_targets)
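
std_input_list above is a helper from the same repository and is not shown on this page. A plausible minimal sketch, inferred purely from the call sites (name, behavior, and error message are assumptions), that broadcasts a single loss weight to nb_classes entries and validates explicit lists:

def std_input_list(value, n, name):
    # Hypothetical sketch; the real std_input_list may differ.
    # Normalize a scalar (or single-element list) to a list of length n.
    values = list(value) if isinstance(value, (list, tuple)) else [value]
    if len(values) == 1:
        values = values * n  # broadcast one weight to every class
    if len(values) != n:
        raise ValueError('%s expects 1 or %d values, got %d'
                         % (name, n, len(values)))
    return [float(v) for v in values]

For example, std_input_list(5.0, 3, 'Style weight') would yield [5.0, 5.0, 5.0].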
Example No. 3
from tensorflow.keras.layers import (
    BatchNormalization,
    AveragePooling2D,
    Flatten,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import (ModelCheckpoint, LearningRateScheduler,
                                        ReduceLROnPlateau)
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import backend as K
from tensorflow.python.framework.ops import disable_eager_execution

from utils import config_gpu

_ = config_gpu()
disable_eager_execution()


def lr_schedule(epoch):
    """Piecewise-constant decay: start at 1e-3 and shrink at epoch milestones."""
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    return lr
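
A schedule like this is typically handed to model.fit through the LearningRateScheduler callback imported above. A brief usage sketch; the model and data variables are placeholders, not part of this snippet:

lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)
lr_reducer = ReduceLROnPlateau(factor=0.5, patience=5, min_lr=0.5e-6)
# model.fit(x_train, y_train, validation_data=(x_test, y_test),
#           epochs=200, callbacks=[lr_scheduler, lr_reducer])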
Example No. 4
def init_parse():
    import argparse
    parser = argparse.ArgumentParser(
        description='C3AE retry')
    parser.add_argument(
        '-s', '--save_path', default="./model/c3ae_model_v2_$message$_{epoch}_{val_age_mae:02f}-{val_gender_acc:.3f}", type=str,
        help='path template for saving the best model')
    parser.add_argument(
        '-l', '--log_dir', default="./logs", type=str,
        help='directory for TensorBoard logs')
    parser.add_argument(
        '-r', '--r_factor', default=2, type=int,
        help='reduction factor (r) of the SE block')

    parser.add_argument(
        '--source', default="wiki", type=str,
        choices=['asia', 'wiki', 'imdb', 'wiki|imdb', "utk", "utk|asia", "afad", "afad|utk|asia"],
        help='"wiki|imdb" or regrex pattern of feather')

    parser.add_argument(
        '--dataset', default="./dataset/data/", type=str,
        help='path of the dataset to load')

    parser.add_argument(
        '-m', "--message", default="", type=str,
        help='message tag substituted into save_path')

    parser.add_argument(
        '-p', '--pretrain_path', dest="pretrain_path", default="", type=str,
        help='path of the pretrained weights')

    parser.add_argument(
        '-b', '--batch_size', default=128, type=int,
        help='batch size (default: 128)')

    parser.add_argument(
        '-a', '--activation', default="relu", type=str,
        help='relu|leakrelu|mish')

    parser.add_argument(
        '-c', '--category', default=10, type=int,
        help='number of categories (default: 10, n+2)')

    parser.add_argument(
        '-gpu', dest="gpu", action='store_true',
        help='config of GPU')

    parser.add_argument(
        '-fz', dest="freeze", action='store_true',
        help='freeze model')

    parser.add_argument(
        '-test', dest="test", action='store_true',
        help='test')

    parser.add_argument(
        '-se', "--se-net", dest="se_net", action='store_true',
        help='use SE-NET')

    parser.add_argument(
        '-white', '--white-norm', dest="white_norm", action='store_true',
        help='use white norm')

    parser.add_argument(
        '-d', '--dropout', default=0.2, type=float,
        help='dropout rate of erasing')

    parser.add_argument(
        '-lr', '--learning-rate', default=0.002, type=float,
        help='learning rate')

    parser.add_argument(
        '-fp16', dest="fp16", action='store_true',
        help='mixed-precision training')

    params = parser.parse_args()
    params.save_path = params.save_path.replace("$message$", params.message)
    print("!!-----", params.save_path)
    if params.gpu:
        config_gpu()
    else:
        config_cpu()
    return params
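
config_cpu, called above when -gpu is not passed, is another repository helper that is not shown on this page. A minimal hypothetical sketch that forces CPU execution:

import os

import tensorflow as tf

def config_cpu():
    # Hypothetical sketch; the real config_cpu may differ.
    # Hide all CUDA devices so TensorFlow falls back to the CPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    tf.config.set_visible_devices([], 'GPU')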
Example No. 5
        '16-04_B',
        '16-04_C',
        'aliens',
        'vases',
        'chairs',
        None,
        None,
        None,
    ][6:]

    return jobs, job_parts


if __name__ == '__main__':
    np.random.seed(0)
    utils.config_gpu()

    if len(sys.argv) <= 1:
        print('Use: python train_val.py <job> <part>')
        print(
            '<job> can be one of the following: shrec11 / coseg / human_seg / cubes / modelnet40'
        )
        print('<job> can also be "all" to run all of the above.')
        print('<part> should be used in case of shrec11 or coseg datasets.')
        print(
            'For shrec11 it should be one of the following: 10-10_A / 10-10_B / 10-10_C / 16-04_A / 16-04_B / 16-04_C'
        )
        print(
            'For coseg it should be one of the following: aliens / vases / chairs'
        )
        print('For example: python train_val.py shrec11 10-10_A')
Example No. 6
def init_parse():
    import argparse
    parser = argparse.ArgumentParser(description='C3AE retry')
    parser.add_argument('-s',
                        '--save_path',
                        default="./model/c3ae_model_v2.h5",
                        type=str,
                        help='path for saving the best model')
    parser.add_argument('-l',
                        '--log_dir',
                        default="./logs",
                        type=str,
                        help='directory for TensorBoard logs')
    parser.add_argument('-r',
                        '--r_factor',
                        default=2,
                        type=int,
                        help='reduction factor (r) of the SE block')

    parser.add_argument('--source',
                        default="wiki",
                        type=str,
                        choices=['wiki', 'imdb', 'wiki|imdb'],
                        help='"wiki|imdb" or regrex pattern of feather')

    parser.add_argument('--dataset',
                        default="./dataset/data/",
                        type=str,
                        help='path of the dataset to load')

    parser.add_argument('-p',
                        '--pretrain_path',
                        dest="pretrain_path",
                        default="",
                        type=str,
                        help='path of the pretrained weights')

    parser.add_argument('-b',
                        '--batch_size',
                        default=50,
                        type=int,
                        help='batch size (default: 50)')

    parser.add_argument('-w',
                        '--weight_factor',
                        default=10,
                        type=int,
                        help='age feature weight (default: 10)')

    parser.add_argument('-c',
                        '--category',
                        default=10,
                        type=int,
                        help='number of categories (default: 10, n+2)')

    parser.add_argument('-gpu',
                        dest="gpu",
                        action='store_true',
                        help='config of GPU')

    parser.add_argument('-se',
                        "--se-net",
                        dest="se_net",
                        action='store_true',
                        help='use SE-NET')

    parser.add_argument('-white',
                        '--white-norm',
                        dest="white_norm",
                        action='store_true',
                        help='use white norm')

    parser.add_argument('-d',
                        '--dropout',
                        default="0.2",
                        type=float,
                        help='dropout rate of erasing')

    parser.add_argument('-lr',
                        '--learning-rate',
                        default="0.002",
                        type=float,
                        help='learning rate')

    params = parser.parse_args()
    if params.gpu:
        config_gpu()
    return params
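
A typical entry point consuming init_parse could look like the sketch below; train is a hypothetical placeholder, since the actual training code is not part of this snippet:

if __name__ == '__main__':
    params = init_parse()
    # Invoked e.g. as: python train.py -gpu --source wiki -b 64
    train(params)  # hypothetical entry point; the training loop is not shown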
Example No. 7
#         epsilon = K.epsilon()
#         y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
#
#         # Calculate Cross Entropy
#         cross_entropy = -y_true * K.log(y_pred)
#
#         # Calculate Focal Loss
#         loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
#
#         # Sum the losses in mini_batch
#         return K.sum(loss, axis=1)
#
#     return categorical_focal_loss_func

if __name__ == '__main__':
    config_gpu()

    LOAD_MODEL_WEIGHTS = True
    train_config = [CityScapesFineConfig]

    train_images_dir = [ds.train_images for ds in train_config]
    train_labels_dir = [ds.train_labels for ds in train_config]

    val_images_dir = [ds.val_images for ds in train_config]
    val_labels_dir = [ds.val_labels for ds in train_config]

    batch_size = 5
    lr_max = .001
    image_size = (320, 640)
    # image_size = (1664, 832)
    loss = tf.losses.categorical_crossentropy

Example No. 8
                (-1, predictions.shape[-1]))
            for w_step in range(all_seq.size):
                models[name]['pred'][
                    all_seq[w_step]] += predictions4vertex[w_step]
                models[name]['pred_count'][all_seq[w_step]] += 1

    postprocess_vertex_predictions(models)
    e_acc_after_postproc, v_acc_after_postproc, f_acc_after_postproc = calc_final_accuracy(
        models)

    return [e_acc_after_postproc, e_acc_after_postproc], dnn_model


if __name__ == '__main__':
    from train_val import get_params
    utils.config_gpu(1)
    np.random.seed(0)
    tf.random.set_seed(0)

    if len(sys.argv) != 4:
        print('<>'.join(sys.argv))
        print(
            'Use: python evaluate_segmentation.py <job> <part> <trained model directory>'
        )
        print(
            'For example: python evaluate_segmentation.py coseg chairs pretrained/0009-14.11.2020..07.08__coseg_chairs'
        )
    else:
        logdir = sys.argv[3]
        job = sys.argv[1]
        job_part = sys.argv[2]

Example No. 9
        for i, name in enumerate(labels):
            this_type = confusion[i]
            n_this_type = this_type.sum()
            if n_this_type:
                # Guard against empty classes before dividing.
                acc_per_class.append(this_type[i] / n_this_type)
            this_type_ = this_type.copy()
            this_type_[i] = -1
    mean_acc_per_class = np.mean(acc_per_class)

    return [mean_accuracy_all_faces, mean_acc_per_class], dnn_model


if __name__ == '__main__':
    from train_val import get_params
    utils.config_gpu(True)
    np.random.seed(0)
    tf.random.set_seed(0)

    if len(sys.argv) != 4:
        print(
            'Use: python evaluate_classification.py <job> <part> <trained model directory>'
        )
        print(
            'For example: python evaluate_classification.py shrec11 10-10_A pretrained/0001-09.11.2020..19.57__shrec11_10-10_A'
        )
    else:
        logdir = sys.argv[3]
        job = sys.argv[1]
        job_part = sys.argv[2]
        params = get_params(job, job_part)