def main():

    sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))

    model = sequence_model(sess=sess,
                           batch_size=2,
                           learning_rate=args.learning_rate,
                           opt_algo=args.opt_algo,
                           epochs=args.epochs,
                           regular_rate=args.regular_rate,
                           dropout_rate=args.dropout_rate,
                           skip_step=args.skip_step,
                           hidden_size_list=[64, 32, 16],
                           sequence_embedding_size=32)

    model.get_data(data_path=args.data_path,
                   train_data_name=args.train_data_name,
                   test_data_name='',
                   pred_data_name='',
                   sep=args.csv_sep,
                   low_freq_threshold=args.low_freq_threshold)

    model.bulid()

    model.train(save=utils.str2bool(args.save),
                restore=utils.str2bool(args.restore))
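Every example on this page funnels CLI strings such as args.save and args.restore through a project-level utils.str2bool helper before using them as booleans. The helper itself is not part of these excerpts; the following is only a minimal sketch of what such a function typically looks like (the accepted spellings and the error behaviour are assumptions, not the repo's actual code):

import argparse

def str2bool(value):
    # Assumed behaviour: map common truthy/falsy spellings to bool.
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    # Reject anything ambiguous so bad CLI input fails loudly.
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)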
Example #2
def main():

    sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
        allow_growth=True)))

    if args.model == 'deep_wide':

        #---------deep_wide start-------------------------------------------------------------

        model = deep_wide_model(sess=sess,
                                batch_size=args.batch_size,
                                learning_rate=args.learning_rate,
                                opt_algo=args.opt_algo,
                                epochs=args.epochs,
                                regular_rate=args.regular_rate,
                                dropout_rate=args.dropout_rate,
                                skip_step=args.skip_step,
                                layer_size_list=list(
                                    map(int, args.layer_size_list.split(","))))

        model.get_data(data_path=args.data_path,
                       train_data_name=args.train_data_name,
                       test_data_name=args.test_data_name,
                       pred_data_name=args.pred_data_name,
                       sep=args.csv_sep,
                       low_freq_threshold=args.low_freq_threshold)

        model.bulid()

        model.train(save=utils.str2bool(args.save),
                    restore=utils.str2bool(args.restore))

        if len(args.pred_data_name) > 0:
            model.predict(data_path=args.data_path)

        #---------deep_wide end-------------------------------------------------------------

    elif args.model == 'word2vec':

        #---------word2vec start-------------------------------------------------------------

        model = word2vec_model(sess=sess,
                               batch_size=args.batch_size,
                               learning_rate=args.learning_rate,
                               opt_algo=args.opt_algo,
                               epochs=args.epochs,
                               regular_rate=args.regular_rate,
                               dropout_rate=args.dropout_rate,
                               skip_step=args.skip_step,
                               num_sampled=args.num_sampled,
                               embed_size=args.vocab_embed_size)

        model.get_data(data_path=args.data_path,
                       train_data_name=args.train_data_name,
                       test_data_name='',
                       pred_data_name='',
                       sep=' ',
                       low_freq_threshold=args.low_freq_threshold)

        model.bulid()

        model.train(save=utils.str2bool(args.save),
                    restore=utils.str2bool(args.restore))

        model.write_embedding_matrix(sess=sess,
                                     data_path=args.data_path,
                                     file_name='vocab_embedding_matrix')

        #---------word2vec end-------------------------------------------------------------

    else:
        pass
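The args object consumed by main() comes from an argparse parser that is outside this excerpt. Below is a hedged sketch of how a few of the flags referenced above could be registered; the argument names are taken from the code, but every default value is an assumption. Note that --save and --restore stay plain strings and are converted with utils.str2bool only at the call sites shown above.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', default='deep_wide')            # 'deep_wide' or 'word2vec'
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--learning_rate', default=1e-3, type=float)
parser.add_argument('--epochs', default=10, type=int)
parser.add_argument('--layer_size_list', default='128,64,32')  # parsed into ints later
parser.add_argument('--save', default='True')                  # converted via utils.str2bool
parser.add_argument('--restore', default='False')              # converted via utils.str2bool
args = parser.parse_args()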
Example #3
def get_configurations(parser=None):
    # set configurations here
    # experiment_name = 'feathers_waterfall_deconv3_1'  # write here the name of the experiment
    # experiments_dir_name = os.path.join('experiments', experiment_name)
    experiments_dir_name = ''
    block_path0 = ''
    block_path1 = ''
    block_path2 = ''
    main_style_image_name = 'feathers'
    tuning_blocks_style_image_name = 'feathers'
    tuning_blocks_lower_style_image_name = 'feathers'
    tuning_blocks_higher_style_image_name = 'feathers'
    main_epochs = 2  # 2
    tuning_blocks_epochs = 2  # 2
    batch_size = 4
    learning_rate_main = 1e-3
    learning_rate_blocks = 1e-4
    main_content_wight = 1
    main_style_wight = 1e5

    network_version = 'normal'

    blocks_content_wight = 1  # set for network_version = 'normal'
    blocks_style_wight = 1e6  # set for network_version = 'normal'
    blocks_lower_content_wight = 1  # set for network_version = 'dual'
    blocks_lower_style_wight = 1e5  # set for network_version = 'dual'
    blocks_higher_content_wight = 1  # set for network_version = 'dual'
    blocks_higher_style_wight = 1e5  # set for network_version = 'dual'

    image_size = 256
    vgg_output = True
    main_style_size = None
    blocks_style_size = None
    style_wight0 = 100
    style_wight1 = 1
    style_wight2 = 1
    style_wight3 = 1
    style_wight4 = 1

    training_scheme = 'only_tuning_blocks'  # all, only_main, only_tuning_blocks, only_tuning_blocks_lower, only_tuning_blocks_higher

    checkpoint_iter = 5000
    eval_iter = 1000
    intermediate_images_iter = 500
    current_batch_eval_iter = 100

    # train_data_path = '/home/alon-ran/Alon/data_sets/COCO2014'
    train_data_path = 'train2014'
    # val_data_path = '/home/alon-ran/Alon/data_sets/COCO2014_val'
    val_data_path = 'val2014'

    model_top_params = 'main_%d_blocks_%d' % (main_style_wight,
                                              blocks_style_wight)
    checkpoint_dir = os.path.join(experiments_dir_name, 'checkpoints')
    model_save_dir = os.path.join(experiments_dir_name, 'model_dir')
    images_save_dir = os.path.join(experiments_dir_name, 'images')
    main_style_image_path = os.path.join('images', 'style_images',
                                         main_style_image_name + '.jpg')
    tuning_blocks_lower_style_image_path = os.path.join(
        'images', 'style_images',
        tuning_blocks_lower_style_image_name + '.jpg')
    tuning_blocks_higher_style_image_path = os.path.join(
        'images', 'style_images',
        tuning_blocks_higher_style_image_name + '.jpg')
    tuning_blocks_style_image_path = os.path.join(
        'images', 'style_images', tuning_blocks_style_image_name + '.jpg')
    evaluation_images_path = os.path.join('images', 'evaluation_images')
    pre_trained_main_model = 'C:\\Users\\Drorpezo\\Desktop\\dynamic_net-master\\dynamic_style_transfer\\trained_nets\\feathers_waterfall\\model_dir\\orginal_main_net.pth'
    pre_trained_tuning_blocks_lower = os.path.join(model_save_dir,
                                                   'tuning_blocks_lower.pth')
    pre_trained_tuning_blocks_higher = os.path.join(
        model_save_dir, 'tuning_blocks_higher.pth')

    # set parser
    if parser is None:
        parser = argparse.ArgumentParser()
    parser.add_argument('--main_style_image_name',
                        default=main_style_image_name)
    parser.add_argument('--main_epochs', default=main_epochs, type=int)
    parser.add_argument('--tuning_blocks_epochs',
                        default=tuning_blocks_epochs,
                        type=int)
    parser.add_argument('--batch_size', default=batch_size, type=int)
    parser.add_argument('--image_size', default=image_size, type=int)
    parser.add_argument('--style_size', default=main_style_size, type=int)
    parser.add_argument('--blocks_style_size',
                        default=blocks_style_size,
                        type=int)
    parser.add_argument('--learning_rate_main',
                        default=learning_rate_main,
                        type=float)
    parser.add_argument('--learning_rate_blocks',
                        default=learning_rate_blocks,
                        type=float)
    parser.add_argument('--main_content_wight',
                        default=main_content_wight,
                        type=float)
    parser.add_argument('--main_style_wight',
                        default=main_style_wight,
                        type=float)
    parser.add_argument('--checkpoint_iter', default=checkpoint_iter, type=int)
    parser.add_argument('--eval_iter', default=eval_iter, type=int)
    parser.add_argument('--intermediate_images_iter',
                        default=intermediate_images_iter,
                        type=int)
    parser.add_argument('--current_batch_eval_iter',
                        default=current_batch_eval_iter,
                        type=int)
    parser.add_argument('--train_data_path', default=train_data_path)
    parser.add_argument('--val_data_path', default=val_data_path)
    parser.add_argument('--model_name', default=model_top_params)
    parser.add_argument('--experiments_dir_name', default=experiments_dir_name)
    parser.add_argument('--checkpoint_dir', default=checkpoint_dir)
    parser.add_argument('--model_save_dir', default=model_save_dir)
    parser.add_argument('--images_save_dir', default=images_save_dir)
    parser.add_argument('--pre_trained_main_model',
                        default=pre_trained_main_model)
    parser.add_argument('--main_style_image_path',
                        default=main_style_image_path)
    parser.add_argument('--evaluation_images_path',
                        default=evaluation_images_path)
    parser.add_argument('--vgg_output',
                        default=vgg_output,
                        type=lambda x: bool(utils.str2bool(x)))
    parser.add_argument('--style_wight0', default=style_wight0, type=float)
    parser.add_argument('--style_wight1', default=style_wight1, type=float)
    parser.add_argument('--style_wight2', default=style_wight2, type=float)
    parser.add_argument('--style_wight3', default=style_wight3, type=float)
    parser.add_argument('--style_wight4', default=style_wight4, type=float)
    parser.add_argument('--training_scheme', default=training_scheme)
    parser.add_argument('--network_version', default=network_version)
    parser.add_argument('--block_path0', default=block_path0)
    parser.add_argument('--block_path1', default=block_path1)
    parser.add_argument('--block_path2', default=block_path2)
    parser.add_argument('--layer_num', default=-1)
    if network_version == 'dual':
        parser.add_argument('--blocks_lower_content_wight',
                            default=blocks_lower_content_wight,
                            type=float)
        parser.add_argument('--blocks_lower_style_wight',
                            default=blocks_lower_style_wight,
                            type=float)
        parser.add_argument('--blocks_higher_content_wight',
                            default=blocks_higher_content_wight,
                            type=float)
        parser.add_argument('--blocks_higher_style_wight',
                            default=blocks_higher_style_wight,
                            type=float)
        parser.add_argument('--tuning_blocks_lower_style_image_name',
                            default=tuning_blocks_lower_style_image_name)
        parser.add_argument('--tuning_blocks_higher_style_image_name',
                            default=tuning_blocks_higher_style_image_name)
        parser.add_argument('--tuning_blocks_lower_style_image_path',
                            default=tuning_blocks_lower_style_image_path)
        parser.add_argument('--tuning_blocks_higher_style_image_path',
                            default=tuning_blocks_higher_style_image_path)

        parser.add_argument('--pre_trained_tuning_blocks_lower',
                            default=pre_trained_tuning_blocks_lower)
        parser.add_argument('--pre_trained_tuning_blocks_higher',
                            default=pre_trained_tuning_blocks_higher)
    elif network_version == 'normal':
        parser.add_argument('--blocks_content_wight',
                            default=blocks_content_wight,
                            type=float)
        parser.add_argument('--blocks_style_wight',
                            default=blocks_style_wight,
                            type=float)
        parser.add_argument('--block_style_image_name',
                            default=tuning_blocks_style_image_name)
        parser.add_argument('--tuning_blocks_style_image_path',
                            default=tuning_blocks_style_image_path)

    opt = parser.parse_args()
    opt.experiments_dir_name = os.path.join('experiments',
                                            opt.experiments_dir_name)
    opt.checkpoint_dir1 = os.path.join(opt.experiments_dir_name, 'checkpoints')
    opt.checkpoint_dir2 = os.path.join(opt.experiments_dir_name, 'checkpoints')
    opt.checkpoint_dir3 = os.path.join(opt.experiments_dir_name, 'checkpoints')
    opt.model_save_dir = os.path.join(opt.experiments_dir_name, 'model_dir')
    opt.images_save_dir = os.path.join(opt.experiments_dir_name, 'images')
    opt.block_path0 = os.path.join(opt.checkpoint_dir,
                                   'block0_dynamic_net.pth')
    opt.block_path1 = os.path.join(opt.checkpoint_dir,
                                   'block1_dynamic_net.pth')
    opt.block_path2 = os.path.join(opt.checkpoint_dir,
                                   'block2_dynamic_net.pth')
    opt.pre_trained_tuning_blocks_lower = os.path.join(
        opt.model_save_dir, 'tuning_blocks_lower.pth')
    opt.pre_trained_tuning_blocks_higher = os.path.join(
        opt.model_save_dir, 'tuning_blocks_higher.pth')
    return opt
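The type=lambda x: bool(utils.str2bool(x)) idiom used for --vgg_output exists because argparse's naive type=bool never produces False from the command line: bool('False') is True, since any non-empty string is truthy. A small self-contained illustration of the pitfall and the fix (the str2bool below is a simplified stand-in, not the repo's helper):

import argparse

def str2bool(v):
    # Simplified stand-in for utils.str2bool (assumption).
    return str(v).lower() in ('yes', 'true', 't', 'y', '1')

broken = argparse.ArgumentParser()
broken.add_argument('--vgg_output', default=True, type=bool)
print(broken.parse_args(['--vgg_output', 'False']).vgg_output)  # True -- wrong

fixed = argparse.ArgumentParser()
fixed.add_argument('--vgg_output', default=True,
                   type=lambda x: bool(str2bool(x)))
print(fixed.parse_args(['--vgg_output', 'False']).vgg_output)   # False -- intended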
Example #4

# Load the JSON file into a pandas dataframe.
print("loading data " + args.input_file)
df = json_2_dataframe(args.input_file)
# Attach the check-in list to the dataframe.
df = load_attribute(df, 'data/input/filtered/checkins.json', 'business_id',
                    'checkins')
# List containing all the implemented clustering algorithms.
clustering_algorithms = [
    highest_attribute_value, kmeans, dbscan, agglomerative_clustering
]
# Retrieve the matrix formed by latitude and longitude.
X = df[['latitude', 'longitude']].values
# Should the clusters be plotted on a map?
plot_map = str2bool(args.map)
# Arrays holding the silhouette score and number of clusters for each config.
silhouettes = np.empty(len(configs))
ns_clusters = np.empty(len(configs))
for index, config in enumerate(configs):
    print('config:', config)
    # Clustering
    cluster_labels = np.array(clustering_algorithms[args.clustering_algorithm](
        df, *config)['cluster_id'])
    ns_clusters[index] = len(Counter(cluster_labels).keys())

    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    # Tests if the current config is better than the current best.
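The excerpt ends right after that comment, so the comparison against the running best is not shown. A self-contained sketch of the usual "keep the best-scoring config" pattern (the configs and scores below are purely illustrative, not from the source):

# Illustrative only: track the configuration with the highest silhouette score.
best_silhouette, best_config = -1.0, None
for config, score in [('cfg_a', 0.41), ('cfg_b', 0.57), ('cfg_c', 0.33)]:
    if score > best_silhouette:
        best_silhouette, best_config = score, config
print(best_config, best_silhouette)  # cfg_b 0.57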
Example #5
def get_configurations(parser=None):
    # set configurations here
    experiment_name = 'on_white_II_waterfall'  # write here the name of the experiment
    experiments_dir_name = os.path.join('experiments', experiment_name)
    main_style_image_name = 'on_white_II'
    tuning_blocks_style_image_name = 'on_white_II'
    tuning_blocks_lower_style_image_name = 'udnie'
    tuning_blocks_higher_style_image_name = 'colors'
    main_epochs = 2  # 2
    tuning_blocks_epochs = 2  # 2
    batch_size = 4
    learning_rate_main = 1e-3
    learning_rate_blocks = 1e-4
    main_content_wight = 1
    main_style_wight = 1e5

    network_version = 'normal'

    blocks_content_wight = 1  # set for network_version = 'normal'
    blocks_style_wight = 1e7  # set for network_version = 'normal'
    blocks_lower_content_wight = 1  # set for network_version = 'dual'
    blocks_lower_style_wight = 1e5  # set for network_version = 'dual'
    blocks_higher_content_wight = 1  # set for network_version = 'dual'
    blocks_higher_style_wight = 1e5  # set for network_version = 'dual'

    image_size = 256
    vgg_output = True
    main_style_size = None
    blocks_style_size = None
    style_wight0 = 1
    style_wight1 = 1
    style_wight2 = 1
    style_wight3 = 1
    style_wight4 = 1

    training_scheme = 'all'  # all, only_main, only_tuning_blocks, only_tuning_blocks_lower, only_tuning_blocks_higher

    checkpoint_iter = 5000
    eval_iter = 1000
    intermediate_images_iter = 500
    current_batch_eval_iter = 100

    train_data_path = '/home/alon-ran/Alon/data_sets/COCO2014'
    val_data_path = '/home/alon-ran/Alon/data_sets/COCO2014_val'

    model_top_params = 'main_%d_blocks_%d' % (main_style_wight,
                                              blocks_style_wight)
    checkpoint_dir = os.path.join(experiments_dir_name, 'checkpoints')
    model_save_dir = os.path.join(experiments_dir_name, 'model_dir')
    images_save_dir = os.path.join(experiments_dir_name, 'images')
    main_style_image_path = os.path.join('images', 'style_images',
                                         main_style_image_name + '.jpg')
    tuning_blocks_lower_style_image_path = os.path.join(
        'images', 'style_images',
        tuning_blocks_lower_style_image_name + '.jpg')
    tuning_blocks_higher_style_image_path = os.path.join(
        'images', 'style_images',
        tuning_blocks_higher_style_image_name + '.jpg')
    tuning_blocks_style_image_path = os.path.join(
        'images', 'style_images', tuning_blocks_style_image_name + '.jpg')
    evaluation_images_path = os.path.join('images', 'evaluation_images')
    pre_trained_main_model = os.path.join(model_save_dir,
                                          'orginal_main_latest.pth')
    pre_trained_tuning_blocks_lower = os.path.join(model_save_dir,
                                                   'tuning_blocks_lower.pth')
    pre_trained_tuning_blocks_higher = os.path.join(
        model_save_dir, 'tuning_blocks_higher.pth')

    # set parser
    if parser is None:
        parser = argparse.ArgumentParser()
    parser.add_argument('--main_style_image_name',
                        default=main_style_image_name)
    parser.add_argument('--main_epochs', default=main_epochs, type=int)
    parser.add_argument('--tuning_blocks_epochs',
                        default=tuning_blocks_epochs,
                        type=int)
    parser.add_argument('--batch_size', default=batch_size, type=int)
    parser.add_argument('--image_size', default=image_size, type=int)
    parser.add_argument('--style_size', default=main_style_size, type=int)
    parser.add_argument('--blocks_style_size',
                        default=blocks_style_size,
                        type=int)
    parser.add_argument('--learning_rate_main',
                        default=learning_rate_main,
                        type=float)
    parser.add_argument('--learning_rate_blocks',
                        default=learning_rate_blocks,
                        type=float)
    parser.add_argument('--main_content_wight',
                        default=main_content_wight,
                        type=float)
    parser.add_argument('--main_style_wight',
                        default=main_style_wight,
                        type=float)
    parser.add_argument('--checkpoint_iter', default=checkpoint_iter, type=int)
    parser.add_argument('--eval_iter', default=eval_iter, type=int)
    parser.add_argument('--intermediate_images_iter',
                        default=intermediate_images_iter,
                        type=int)
    parser.add_argument('--current_batch_eval_iter',
                        default=current_batch_eval_iter,
                        type=int)
    parser.add_argument('--train_data_path', default=train_data_path)
    parser.add_argument('--val_data_path', default=val_data_path)
    parser.add_argument('--model_name', default=model_top_params)
    parser.add_argument('--experiments_dir_name', default=experiments_dir_name)
    parser.add_argument('--checkpoint_dir', default=checkpoint_dir)
    parser.add_argument('--model_save_dir', default=model_save_dir)
    parser.add_argument('--images_save_dir', default=images_save_dir)
    parser.add_argument('--pre_trained_main_model',
                        default=pre_trained_main_model)
    parser.add_argument('--main_style_image_path',
                        default=main_style_image_path)
    parser.add_argument('--evaluation_images_path',
                        default=evaluation_images_path)
    parser.add_argument('--vgg_output',
                        default=vgg_output,
                        type=lambda x: bool(utils.str2bool(x)))
    parser.add_argument('--style_wight0', default=style_wight0, type=float)
    parser.add_argument('--style_wight1', default=style_wight1, type=float)
    parser.add_argument('--style_wight2', default=style_wight2, type=float)
    parser.add_argument('--style_wight3', default=style_wight3, type=float)
    parser.add_argument('--style_wight4', default=style_wight4, type=float)
    parser.add_argument('--training_scheme', default=training_scheme)
    parser.add_argument('--network_version', default=network_version)
    if network_version == 'dual':
        parser.add_argument('--blocks_lower_content_wight',
                            default=blocks_lower_content_wight,
                            type=float)
        parser.add_argument('--blocks_lower_style_wight',
                            default=blocks_lower_style_wight,
                            type=float)
        parser.add_argument('--blocks_higher_content_wight',
                            default=blocks_higher_content_wight,
                            type=float)
        parser.add_argument('--blocks_higher_style_wight',
                            default=blocks_higher_style_wight,
                            type=float)
        parser.add_argument('--tuning_blocks_lower_style_image_name',
                            default=tuning_blocks_lower_style_image_name)
        parser.add_argument('--tuning_blocks_higher_style_image_name',
                            default=tuning_blocks_higher_style_image_name)
        parser.add_argument('--tuning_blocks_lower_style_image_path',
                            default=tuning_blocks_lower_style_image_path)
        parser.add_argument('--tuning_blocks_higher_style_image_path',
                            default=tuning_blocks_higher_style_image_path)

        parser.add_argument('--pre_trained_tuning_blocks_lower',
                            default=pre_trained_tuning_blocks_lower)
        parser.add_argument('--pre_trained_tuning_blocks_higher',
                            default=pre_trained_tuning_blocks_higher)
    elif network_version == 'normal':
        parser.add_argument('--blocks_content_wight',
                            default=blocks_content_wight,
                            type=float)
        parser.add_argument('--blocks_style_wight',
                            default=blocks_style_wight,
                            type=float)
        parser.add_argument('--block_style_image_name',
                            default=tuning_blocks_style_image_name)
        parser.add_argument('--tuning_blocks_style_image_path',
                            default=tuning_blocks_style_image_path)

    opt = parser.parse_args()
    return opt
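A side note on the network_version checks above: strings should be compared with ==, which compares values; "is" compares object identity and is unreliable for strings built at runtime (recent CPython versions even emit a SyntaxWarning for "is" against a literal). A quick illustration:

version = ''.join(['nor', 'mal'])  # built at runtime, not a literal
print(version == 'normal')         # True  -- value comparison
print(version is 'normal')         # usually False in CPython -- identity comparison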
Example #6
    def get(self, request):
        response = {"meta":{"sensors": 0, "cities": 0, "utilities": 0},"cities":[], "sensors":[],"utilities" :[]}

        sources = request.query_params.get('sources','').split(',')

        # filter for sensor
        if 'sensors' in sources or not len(sources):
            queryset = app_models.node.objects.all()
            if request.query_params.get('status'):
                # QuerySet.filter returns a new queryset, so reassign the result.
                queryset = queryset.filter(
                    status=str2bool(request.query_params.get('status')))
            for sensor in queryset:
                data = models.data.objects.filter(node = sensor).latest('timestamp')
                response["sensors"].append({
                    "fips_state_id": sensor.fips_state,
                    "fips_county_id": sensor.fips_county,
                    "name": sensor.name,
                    "state": sensor.state,
                    "county": sensor.county,
                    "long": sensor.position.x if sensor.position else '',
                    "lat":sensor.position.y if sensor.position else '',
                    "score": sensor.score,
                    "status": sensor.stauts,
                    "disolved_oxygen": sensor.meta.get('disolved_oxygen',0),
                    "ph": sensor.meta.get('ph',0),
                    "temperature_change": sensor.meta.get('temperature_change',0),
                    "turbidity": sensor.meta.get('turbidity',0),
                })

        # filter for news
        if 'news' in sources or not len(sources):
            queryset = news_models.location.objects.all()
            if request.query_params.get('fips_state'):
                queryset = queryset.filter(
                    fips_state=request.query_params.get('fips_state'))

            if request.query_params.get('fips_county'):
                queryset = queryset.filter(
                    fips_county=request.query_params.get('fips_county'))

            if request.query_params.get('status'):
                queryset = queryset.filter(
                    status=str2bool(request.query_params.get('status')))

            response["meta"]["cities"] = queryset.count()
            for news in queryset:
                response["cities"].append({
                    "fips_state_id": news.fips_state,
                    "fips_county_id": news.fips_county,
                    "zipcode": news.zipcode,
                    "name": news.city,
                    "county": news.county,
                    "status":news.status,
                    "long": news.position.x if news.position else '',
                    "lat": news.position.y if news.position else '',
                })

        # filter for utilities
        if 'utilities' in sources or not len(sources):
            queryset = news_models.utility.objects.all()

            if request.query_params.get('violation'):
                queryset = queryset.filter(
                    violation=str2bool(request.query_params.get('violation')))

            response["meta"]["utilities"] = queryset.count()
            for utility in queryset:

                counties_served = []
                for county in news_models.county_served.objects.filter( utility = utility):
                    counties_served.append({
                        "fips_state_id": county.location.fips_state,
                        "fips_county_id": county.location.fips_county,
                        "name": county.location.name
                    })

                response["utilities"].append({
                    "name": utility.name,
                    "has_contaminats": utility.has_contaminats,
                    "url": utility.link,
                    "long": utility.position.x if utility.position else '',
                    "lat":  utility.position.y if utility.position else '',
                    "violation": utility.violation,
                    "violation_points": utility.voilation_points,
                    "people_served": utility.people_served,
                    "counties_served":counties_served
                })


        return Response(response)
Example #7
def get_configurations(parser=None):
    # set configurations here
    experiment_name = 'Female2Male'  # write here the name of the experiment
    data_set = 'celebA'
    experiments_dir_name = os.path.join('experiments', experiment_name)
    main_epochs = 20
    tuning_blocks_epochs = 20
    batch_size = 128
    z_size = 100
    gen_learning_rate_main = 0.0002
    gen_learning_rate_tuning_blocks = 0.0002
    disc_learning_rate_main = 0.0002
    disc_learning_rate_tuning_blocks = 0.0002
    image_size = 64
    tuning_blocks_disc_same_as_main_disc = False
    crop_type = '108'

    discriminator_main_attr = 'Male'
    discriminator_tuning_blocks_attr = 'Male'
    discriminator_main_attr_is = False
    discriminator_tuning_blocks_attr_is = True

    training_scheme = 'all'  # 'all', 'only_tuning_blocks', 'only_main_net'

    eval_noise_batch_size = 128

    eval_iter = 100
    intermediate_images_iter = 20000
    save_image_iter = 200

    data_set_path = '/home/alon-ran/Alon/data_sets/celebA'
    attr_path = '/home/alon-ran/Alon/data_sets/celebA'

    model_save_dir = os.path.join(experiments_dir_name, 'model_dir')
    images_save_dir = os.path.join(experiments_dir_name, 'images')
    pre_trained_original_main_model = os.path.join(model_save_dir, 'original_main_latest.pth')
    pre_trained_disc_model = 'None'

    # set parser
    if parser is None:
        parser = argparse.ArgumentParser()
    parser.add_argument('--data_set', default=data_set)
    parser.add_argument('--discriminator_main_attr', default=discriminator_main_attr)
    parser.add_argument('--discriminator_tuning_blocks_attr', default=discriminator_tuning_blocks_attr)
    parser.add_argument('--discriminator_main_attr_is', default=discriminator_main_attr_is, type=lambda x:bool(utils.str2bool(x)))
    parser.add_argument('--discriminator_tuning_blocks_attr_is', default=discriminator_tuning_blocks_attr_is, type=lambda x:bool(utils.str2bool(x)))
    parser.add_argument('--crop_type', default=crop_type)
    parser.add_argument('--tuning_blocks_disc_same_as_main_disc', default=tuning_blocks_disc_same_as_main_disc, type=lambda x:bool(utils.str2bool(x)))
    parser.add_argument('--main_epochs', default=main_epochs, type=int)
    parser.add_argument('--tuning_blocks_epochs', default=tuning_blocks_epochs, type=int)
    parser.add_argument('--batch_size', default=batch_size, type=int)
    parser.add_argument('--image_size', default=image_size, type=int)
    parser.add_argument('--eval_noise_batch_size', default=eval_noise_batch_size, type=int)
    parser.add_argument('--z_size', default=z_size, type=int)
    parser.add_argument('--gen_learning_rate_main', default=gen_learning_rate_main, type=float)
    parser.add_argument('--gen_learning_rate_tuning_blocks', default=gen_learning_rate_tuning_blocks, type=float)
    parser.add_argument('--disc_learning_rate_main', default=disc_learning_rate_main, type=float)
    parser.add_argument('--disc_learning_rate_tuning_blocks', default=disc_learning_rate_tuning_blocks, type=float)
    parser.add_argument('--eval_iter', default=eval_iter, type=int)
    parser.add_argument('--intermediate_images_iter', default=intermediate_images_iter, type=int)
    parser.add_argument('--save_image_iter', default=save_image_iter, type=int)
    parser.add_argument('--data_set_path', default=data_set_path)
    parser.add_argument('--attr_path', default=attr_path)
    parser.add_argument('--model_save_dir', default=model_save_dir)
    parser.add_argument('--images_save_dir', default=images_save_dir)
    parser.add_argument('--experiments_dir_name', default=experiments_dir_name)
    parser.add_argument('--pre_trained_original_main_model', default=pre_trained_original_main_model)
    parser.add_argument('--pre_trained_disc_model', default=pre_trained_disc_model)
    parser.add_argument('--training_scheme', default=training_scheme)

    opt = parser.parse_args()
    return opt
Example #8

import os

from utils.utils import str2bool

env = os.environ

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&ek=c#^&qox_ej@wxd$1=lo$tu1)t$c%e5$lzao59d%+%8bu&r'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = str2bool(env.get('DEBUG', 'True'))

ALLOWED_HOSTS = ['*']


CORS_ORIGIN_ALLOW_ALL = True
SESSION_COOKIE_SECURE = False


DATA_UPLOAD_MAX_MEMORY_SIZE = 20 * 1024 * 1024  # 20MB


# Application definition
DEFAULT_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
Example #9
    0, 1
]  # alpha_1 values for normal version (if None alpha_0=alpha_1=alpha_2)
alpha_2s = alpha_1s  # alpha_2 values for normal version (if None alpha_0=alpha_1=alpha_2)
alpha_0s_dual = [-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1
                 ] + alpha_0s  # alpha_0 values for dual version
alpha_1s_dual = [
    -1, 0, 1
]  # alpha_1 values for dual version (if None alpha_0=alpha_1=alpha_2)
alpha_2s_dual = alpha_1s_dual  # alpha_2 values for dual version (if None alpha_0=alpha_1=alpha_2)
# ------------------------ #

parser = argparse.ArgumentParser()
parser.add_argument('--network_name', default=network_name)
parser.add_argument('--use_saved_config',
                    default=use_saved_config,
                    type=lambda x: bool(utils.str2bool(x)))
parser.add_argument('--set_net_version', default=set_net_version)
inference_opt = parser.parse_args()
set_net_version = inference_opt.set_net_version
network_name = inference_opt.network_name
use_saved_config = inference_opt.use_saved_config
if set_net_version == 'None':
    set_net_version = None

networks_path = os.path.join('trained_nets', network_name)
block_path0H = os.path.join('trained_nets', 'feathers_conv3_4', 'checkpoints',
                            'block1_dynamic_net.pth')
block_path0L = os.path.join('trained_nets', 'feathers_conv3_4_0',
                            'checkpoints', 'block1_dynamic_net.pth')
block_path1H = os.path.join('trained_nets', 'feathers_res3_2', 'checkpoints',
                            'block0_dynamic_net.pth')
Example #10
    set_random_seed(_env['seed'])

    project_name = _root.split("/")[-1]
    run_name = (f"{_model['name']}_{_model['size']}-"
                f"lr_{_training['lr']}-bsz_{_training['batch_size']}-"
                f"seed_{_env['seed']}")
    now = datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss')

    tokenizer = get_tokenizer(_model['name'], _model['size'])

    train_dataset = CustomDataset(_root, 'train', tokenizer, _training["max_len"])
    dev_dataset = CustomDataset(_root, 'dev', tokenizer, _training["max_len"])

    Model = get_model_class(_model['name'])
    Opt = get_optim_class(_model['opt'])
    Loss_fn = get_loss_fn_class(_model['loss'])
    model = Model(n_outputs=train_dataset.n_outputs, size=_model['size'],
                  pretrained_model_path=str2bool(_model['pretrained_model_path']))

    metric_dic = {
        "acc": Accuracy(),
        "precision": Precision()
    }
    callbacks = [
        ModelCheckpoint(f"{_save_model_root}/{run_name}.pth", monitor='dev_loss', mode="min")
    ]

    trainer = Trainer(model=model, loss_fn_class=Loss_fn, optimizer_class=Opt, metrics=metric_dic)
    trainer.fit(train_dataset, dev_dataset, lr=_training['lr'], epochs=_training['epochs'],
                batch_size=_training['batch_size'], callbacks=callbacks)
Example #11
# inference configurations #
network_name = 'on_white_II'
use_saved_config = False  # use the configuration saved at training time (if saved)
set_net_version = 'dual'  # None/normal/dual, set to None if you want to use saved config file
alpha_0s = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]  # alpha_0 values for normal version
alpha_1s = [None]  # alpha_1 values for normal version (if None alpha_0=alpha_1=alpha_2)
alpha_2s = [None]  # alpha_2 values for normal version (if None alpha_0=alpha_1=alpha_2)
alpha_0s_dual = [-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1] + alpha_0s  # alpha_0 values for dual version
alpha_1s_dual = [None]  # alpha_1 values for dual version (if None alpha_0=alpha_1=alpha_2)
alpha_2s_dual = [None]  # alpha_2 values for dual version (if None alpha_0=alpha_1=alpha_2)
# ------------------------ #

parser = argparse.ArgumentParser()
parser.add_argument('--network_name', default=network_name)
parser.add_argument('--use_saved_config', default=use_saved_config, type=lambda x:bool(utils.str2bool(x)))
parser.add_argument('--set_net_version', default=set_net_version)
inference_opt = parser.parse_args()
set_net_version = inference_opt.set_net_version
network_name = inference_opt.network_name
use_saved_config = inference_opt.use_saved_config
if set_net_version == 'None':
    set_net_version = None

networks_path = os.path.join('trained_nets', network_name)
model_path = os.path.join(networks_path, 'model_dir', 'dynamic_net.pth')
config_path = os.path.join(networks_path, 'config.txt')
inference_images_path = os.path.join('images', 'inference_images')
save_path = os.path.join('results', 'inference_results', network_name)
if not os.path.exists(save_path):
    utils.make_dirs(save_path)
Example #12
def parse_config(config_fname: str,
                 strict_cast: bool = True,
                 verbose: bool = True,
                 **kwargs) -> munch.Munch:
    """Parse the given configuration file with additional options to overwrite.
    Parameters
    ----------
    config_fname: str
        A configuration file defines the structure of the configuration.
        The file should be serialized by any of [yaml, json, pickle, torch].
    strict_cast: bool, optional, default=True
        If True, the overwritten config values will be cast to the
        original type.
    verbose: bool, optional, default=True
    kwargs: optional
        If specified, overwrite the current configuration by the given keywords.
        For the multi-depth configuration, "__" is used
        for the default delimiter.
        The keys in kwargs should already be defined by config_fname
        (otherwise a KeyError will be raised).
        Note that if `strict_cast` is True, the values in kwargs will be cast
        to the original type defined in the configuration file.
    Returns
    -------
    config: munch.Munch
        A configuration file, which provides attribute-style access.
        See `Munch <https://github.com/Infinidat/munch>` project
        for the details.
    Examples
    --------
    >>> # simple_config.json => {"opt1": {"opt2": 1}, "opt3": 0}
    >>> config = parse_config('simple_config.json')
    >>> print(config.opt1.opt2, config.opt3, type(config.opt1.opt2),
              type(config.opt3))
    2 1 <class 'int'> <class 'int'>
    >>> config = parse_config('simple_config.json', opt1__opt2=2, opt3=1)
    >>> print(config.opt1.opt2, config.opt3, type(config.opt1.opt2),
              type(config.opt3))
    2 1 <class 'int'> <class 'int'>
    >>> parse_config('test.json', **{'opt1__opt2': '2', 'opt3': 1.0})
    >>> print(config.opt1.opt2, config.opt3, type(config.opt1.opt2),
              type(config.opt3))
    2 1 <class 'int'> <class 'int'>
    >>> parse_config('test.json', **{'opt1__opt2': '2', 'opt3': 1.0},
                     strict_cast=False)
    >>> print(config.opt1.opt2, config.opt3, type(config.opt1.opt2),
              type(config.opt3))
    2 1.0 <class 'str'> <class 'float'>
    """
    base_config = _loader('configs/base.yaml', verbose)
    additional_config = _loader(config_fname, verbose)

    config = _update_dict_with_another(base_config, additional_config)

    for arg_key, arg_val in kwargs.items():
        if arg_val == DEFAULT_CONFIG_STR:
            continue
        if strict_cast:
            typecast = type(_recursively_get_value_dict(config, arg_key))
            if typecast is bool and isinstance(arg_val, str):
                arg_val = str2bool(arg_val)
            _recursively_set_value_dict(config, arg_key, typecast(arg_val))
        else:
            _recursively_set_value_dict(config, arg_key, arg_val)

    config = munch.munchify(config)
    _print(config, verbose)
    return config
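parse_config addresses nested options with a "__"-delimited key such as opt1__opt2. The helpers _recursively_get_value_dict and _recursively_set_value_dict are not shown in this excerpt; the functions below are a hypothetical, simplified illustration of how such a delimiter walk can work, not the library's actual implementation:

def get_nested(cfg, key, delimiter='__'):
    # Walk the nested dict following the delimited key path.
    for part in key.split(delimiter):
        cfg = cfg[part]  # an undefined key raises KeyError, as documented above
    return cfg

def set_nested(cfg, key, value, delimiter='__'):
    parts = key.split(delimiter)
    for part in parts[:-1]:
        cfg = cfg[part]
    cfg[parts[-1]] = value

config = {'opt1': {'opt2': 1}, 'opt3': 0}
typecast = type(get_nested(config, 'opt1__opt2'))   # strict_cast: remember the original type
set_nested(config, 'opt1__opt2', typecast('2'))     # '2' -> 2
print(get_nested(config, 'opt1__opt2'), config['opt3'])  # 2 0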
Example #13
import os
from pathlib import Path

from corsheaders.defaults import default_headers, default_methods

from utils.utils import str2bool

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = str2bool(os.environ.get("DEBUG", "True"))

ALLOWED_HOSTS = ["*"]

# Application definition

DJANGO_APPS = (
    "django.contrib.auth",
    "django.contrib.admin",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    # Sitemap
    "django.contrib.sitemaps",
)
Example #14
    def get(self, request):
        response = {
            "meta": {
                "cities": 0,
                "utilities": 0,
                "locations": 0
            },
            "locations": [],
            "utilities": [],
            'cities': []
        }

        sources = request.query_params.get('sources', '').split(',')

        # filter for locations
        if 'locations' in sources or not len(sources):

            queryset = app_models.location.objects.all()

            for location in queryset:

                if app_models.data.objects.filter(location=location,
                                                  score__gt=0).exists():
                    data = app_models.data.objects.filter(
                        location=location, score__gt=0).latest('timestamp')
                    response["locations"].append({
                        "fips_state_id":
                        location.fips_state,
                        "fips_county_id":
                        location.fips_county,
                        "major_city":
                        location.major_city,
                        "state":
                        location.state,
                        "county":
                        location.county,
                        "zipcode":
                        location.zipcode,
                        "population_served":
                        location.population_served,
                        "score":
                        round(float(data.score), 2),
                    })

                response["meta"]["locations"] = len(response["locations"])

        # filter for news
        if 'news' in sources or not len(sources):
            queryset = news_models.location.objects.all()
            if request.query_params.get('fips_state'):
                queryset = queryset.filter(
                    fips_state=request.query_params.get('fips_state'))

            if request.query_params.get('fips_county'):
                queryset = queryset.filter(
                    fips_county=request.query_params.get('fips_county'))

            if request.query_params.get('status'):
                queryset = queryset.filter(
                    status=str2bool(request.query_params.get('status')))

            response["meta"]["cities"] = queryset.count()
            for news in queryset:
                response["cities"].append({
                    "fips_state_id":
                    news.fips_state,
                    "fips_county_id":
                    news.fips_county,
                    "zipcode":
                    news.zipcode,
                    "name":
                    news.city,
                    "county":
                    news.county,
                    "status":
                    news.status,
                    "long":
                    news.position.x if news.position else '',
                    "lat":
                    news.position.y if news.position else '',
                })

        # filter for utilities
        if 'utilities' in sources or not len(sources):

            queryset = Q()
            if request.query_params.get('violation'):
                queryset &= Q(
                    violation=str2bool(request.query_params.get('violation')))

            response["meta"]["utilities"] = news_models.utility.objects.filter(
                queryset).count()
            for utility in news_models.utility.objects.filter(queryset):

                counties_served = []
                for county in news_models.county_served.objects.filter(
                        utility=utility):
                    counties_served.append({
                        "fips_state_id": county.location.fips_state,
                        "fips_county_id": county.location.fips_county,
                        "name": county.location.name
                    })

                response["utilities"].append({
                    "name":
                    utility.name,
                    "has_contaminats":
                    utility.has_contaminats,
                    "url":
                    utility.link,
                    "long":
                    utility.position.x if utility.position else '',
                    "lat":
                    utility.position.y if utility.position else '',
                    "violation":
                    utility.violation,
                    "violation_points":
                    utility.voilation_points,
                    "people_served":
                    utility.people_served,
                    "counties_served":
                    counties_served
                })

        return Response(response)