Example 1
        def loss_from_params_1d(x):
            p = x[0]
            conv_layers = self.conv_layers_sets[int(
                p[param_keys['conv_layers']])]
            max_pool_set = self.max_pool_sets_1d[int(
                p[param_keys['max_pools_1d']])]
            fc_layers = self.fc_layer_sets[int(p[param_keys['fc']])]
            try:
                model = models.build_reference_1d_model_from_args(
                    args,
                    conv_width=int(p[param_keys['conv_width']]),
                    conv_layers=conv_layers,
                    #conv_dropout = float(p[param_keys['conv_dropout']]),
                    #conv_batch_normalize = bool(p[param_keys['conv_batch_normalize']]),
                    #spatial_dropout = bool(p[param_keys['spatial_dropout']]),
                    max_pools=max_pool_set,
                    padding='valid'
                    if bool(p[param_keys['valid_padding']]) else 'same',
                    fc_layers=fc_layers
                    #fc_dropout = float(p[param_keys['fc_dropout']])
                )

                if model.count_params() > args.max_parameters:
                    print('Model too big')
                    return np.random.uniform(
                        100, 10000
                    )  # this is ugly but optimization quits when loss is the same

                model = models.train_model_from_generators(
                    args, model, generate_train, generate_valid,
                    args.output_dir + args.id + '.hd5')
                loss_and_metrics = model.evaluate_generator(
                    generate_test, steps=args.validation_steps)
                stats['count'] += 1
                print('Loss:', loss_and_metrics[0], '\nCount:', stats['count'],
                      'iterations:', args.iterations, 'patience:',
                      args.patience, 'Model size:', model.count_params())
                print(self.str_from_params_and_keys(p, param_keys))
                if args.inspect_model:
                    image_name = args.id + '_hyper_' + str(
                        stats['count']) + '.png'
                    image_path = image_name if args.image_dir is None else args.image_dir + image_name
                    models.inspect_model(args,
                                         model,
                                         generate_train,
                                         generate_valid,
                                         image_path=image_path)

                limit_mem()
                return loss_and_metrics[0]
            except ValueError as e:
                print(str(e) +
                      '\nImpossible architecture perhaps? Returning a random large loss.')
                return np.random.uniform(
                    100, 10000
                )  # this is ugly but optimization quits when loss is the same
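
The `p = x[0]` indexing above suggests an optimizer that hands the objective a 2D array of candidate parameter rows, as GPyOpt does. A minimal driver sketch under that assumption; the domain names and ranges here are illustrative, not taken from the original:

    import GPyOpt

    # Hypothetical search space; each entry's position in the list doubles as
    # the column index that param_keys maps its name to in loss_from_params_1d.
    domain = [
        {'name': 'conv_width',    'type': 'discrete', 'domain': tuple(range(3, 26))},
        {'name': 'conv_layers',   'type': 'discrete', 'domain': (0, 1, 2, 3)},
        {'name': 'max_pools_1d',  'type': 'discrete', 'domain': (0, 1, 2, 3)},
        {'name': 'valid_padding', 'type': 'discrete', 'domain': (0, 1)},
        {'name': 'fc',            'type': 'discrete', 'domain': (0, 1, 2, 3)},
    ]

    optimizer = GPyOpt.methods.BayesianOptimization(
        f=loss_from_params_1d,  # receives a 2D array of suggestions, hence p = x[0]
        domain=domain)
    optimizer.run_optimization(max_iter=args.iterations)
    print('Best parameters:', optimizer.x_opt, 'Best loss:', optimizer.fx_opt)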
Example 2
        def loss_from_params_mlp(x):
            p = x[0]
            layer_set = self.mlp_layer_sets[int(p[param_keys['mlp_fc']])]
            try:
                model = models.annotation_multilayer_perceptron_from_args(
                    args,
                    fc_layers=layer_set,
                    dropout=float(p[param_keys['dropout']]),
                    skip_connection=bool(p[param_keys['annotation_shortcut']]),
                    batch_normalization=bool(
                        p[param_keys['batch_normalization']]),
                    batch_normalize_input=bool(
                        p[param_keys['batch_normalize_input']]))

                if model.count_params() > args.max_parameters:
                    print('Model too big')
                    return np.random.uniform(
                        100, 10000
                    )  # this is ugly but optimization quits when loss is the same

                model = models.train_model_from_generators(
                    args, model, generate_train, generate_valid,
                    args.output_dir + args.id + '.hd5')
                loss_and_metrics = model.evaluate_generator(
                    generate_test, steps=args.validation_steps)
                stats['count'] += 1
                print('Loss:', loss_and_metrics[0], '\nCount:', stats['count'],
                      'iterations:', args.iterations, 'patience:',
                      args.patience, 'Model size:', model.count_params())
                print(self.str_from_params_and_keys(p, param_keys))
                if args.inspect_model:
                    image_name = args.id + '_hyper_' + str(
                        stats['count']) + '.png'
                    image_path = image_name if args.image_dir is None else args.image_dir + image_name
                    models.inspect_model(args,
                                         model,
                                         generate_train,
                                         generate_valid,
                                         image_path=image_path)

                limit_mem()
                return loss_and_metrics[0]
            except ValueError as e:
                print(str(e) +
                      '\nImpossible architecture perhaps? Returning a random large loss.')
                return np.random.uniform(
                    100, 10000
                )  # this is ugly but optimization quits when loss is the same
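
`limit_mem()` is called after each successful evaluation but is not defined in these excerpts. A plausible implementation (an assumption, not the original) clears the Keras backend session so repeated model builds do not exhaust GPU memory:

    import gc

    import keras.backend as K


    def limit_mem():
        # Release the graph and GPU memory held by the previous model so each
        # hyperparameter evaluation starts from a fresh session.
        K.clear_session()
        gc.collect()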
Example 3
        def hp_loss_from_params_2d_anno(x):
            try:
                model = models.read_tensor_2d_annotation_model_from_args(
                    args,
                    conv_width=int(x['conv_width']),
                    conv_height=int(x['conv_height']),
                    conv_layers=x['conv_layers'],
                    max_pools=x['max_pools_2d'],
                    padding='valid' if bool(x['valid_padding']) else 'same',
                    kernel_single_channel=bool(x['kernel_single_channel']),
                    annotation_units=int(x['annotation_units']),
                    annotation_shortcut=bool(x['annotation_shortcut']),
                    fc_layers=x['fc'])

                if model.count_params() > args.max_parameters:
                    print('Model too big')
                    return self.max_loss

                model = models.train_model_from_generators(
                    args, model, generate_train, generate_valid,
                    args.output_dir + args.id + '.hd5')
                loss_and_metrics = model.evaluate_generator(
                    generate_test, steps=args.patience)
                stats['count'] += 1
                print('Current architecture: ', self.string_from_arch_dict(x))
                print('Loss:', loss_and_metrics[0], '\nCount:', stats['count'],
                      'iterations', args.iterations, 'Model size',
                      model.count_params())
                if args.inspect_model:
                    image_name = args.id + '_hyper_' + str(
                        stats['count']) + '.png'
                    image_path = image_name if args.image_dir is None else args.image_dir + image_name
                    models.inspect_model(args,
                                         model,
                                         generate_train,
                                         generate_valid,
                                         image_path=image_path)

                del model

                return loss_and_metrics[0]

            except ValueError as e:
                print(str(e) + '\nImpossible architecture perhaps?')
                return self.max_loss
Example 4
        def loss_from_params_mlp(x):
            # NOTE: 'layer_set' is undefined in this excerpt; it presumably
            # comes from the sampled params, e.g. the 'fc' entry (an assumption).
            layer_set = x['fc']
            try:
                model = models.annotation_multilayer_perceptron_from_args(
                    args,
                    fc_layers=layer_set,
                    #dropout = float(x['dropout']),
                    skip_connection=bool(x['shortcut']),
                    batch_normalization=bool(x['batch_normalization']),
                    batch_normalize_input=bool(x['batch_normalize_input']))

                if model.count_params() > args.max_parameters:
                    print('Model too big')
                    return self.max_loss

                model = models.train_model_from_generators(
                    args, model, generate_train, generate_valid,
                    args.output_dir + args.id + '.hd5')
                loss_and_metrics = model.evaluate_generator(
                    generate_test, steps=args.patience)
                stats['count'] += 1
                print('Current architecture: ', self.string_from_arch_dict(x))
                print('Loss:', loss_and_metrics[0], '\nCount:', stats['count'],
                      'iterations', args.iterations, 'Model size',
                      model.count_params())
                if args.inspect_model:
                    image_name = args.id + '_hyper_' + str(
                        stats['count']) + '.png'
                    image_path = image_name if args.image_dir is None else args.image_dir + image_name
                    models.inspect_model(args,
                                         model,
                                         generate_train,
                                         generate_valid,
                                         image_path=image_path)

                del model
                return loss_and_metrics[0]

            except ValueError as e:
                print(str(e) +
                      '\nImpossible architecture perhaps? Returning max_loss.')
                return self.max_loss

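Examples 3 and 4 read sampled parameters from a dict, which is the calling convention of hyperopt's `fmin`. A minimal driver sketch under that assumption, reusing the same enclosing scope (`self`, `args`) as the excerpts; the search-space keys mirror the lookups in `hp_loss_from_params_2d_anno`, but the ranges are illustrative:

    from hyperopt import Trials, fmin, hp, tpe

    # Illustrative search space; keys match the dict lookups in
    # hp_loss_from_params_2d_anno.
    space = {
        'conv_width': hp.quniform('conv_width', 3, 25, 1),
        'conv_height': hp.quniform('conv_height', 3, 25, 1),
        'conv_layers': hp.choice('conv_layers', self.conv_layers_sets),
        'max_pools_2d': hp.choice('max_pools_2d', self.max_pool_sets_2d),
        'valid_padding': hp.choice('valid_padding', [True, False]),
        'kernel_single_channel': hp.choice('kernel_single_channel', [True, False]),
        'annotation_units': hp.quniform('annotation_units', 16, 256, 16),
        'annotation_shortcut': hp.choice('annotation_shortcut', [True, False]),
        'fc': hp.choice('fc', self.fc_layer_sets),
    }

    trials = Trials()
    best = fmin(fn=hp_loss_from_params_2d_anno,
                space=space,
                algo=tpe.suggest,
                max_evals=args.iterations,
                trials=trials)
    print('Best architecture:', best)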
Example 5
        def loss_from_params_2d_anno(x):
            p = x[0]
            fc_layers = self.fc_layer_sets[int(p[param_keys['fc']])]
            conv_layers = self.conv_layers_sets[int(
                p[param_keys['conv_layers']])]
            max_pool_set = self.max_pool_sets_2d[int(
                p[param_keys['max_pools_2d']])]
            #residual_layers = self.residual_layers_sets[int(p[param_keys['residual_layers']])]

            try:
                print(self.str_from_params_and_keys(p, param_keys))
                model = models.read_tensor_2d_annotation_model_from_args(
                    args,
                    conv_width=int(p[param_keys['conv_width']]),
                    conv_height=int(p[param_keys['conv_height']]),
                    conv_layers=conv_layers,
                    max_pools=max_pool_set,
                    padding='valid'
                    if bool(p[param_keys['valid_padding']]) else 'same',
                    kernel_single_channel=bool(
                        p[param_keys['kernel_single_channel']]),
                    #annotation_units = int(p[param_keys['annotation_units']]),
                    annotation_shortcut=bool(
                        p[param_keys['annotation_shortcut']]),
                    fc_layers=fc_layers,
                )

                if model.count_params() > args.max_parameters:
                    print('Model too big')
                    return np.random.uniform(
                        100, 10000
                    )  # this is ugly but optimization quits when loss is the same

                model = models.train_model_from_generators(
                    args, model, generate_train, generate_valid,
                    args.output_dir + args.id + '.hd5')
                loss_and_metrics = model.evaluate_generator(
                    generate_test, steps=args.validation_steps)
                stats['count'] += 1
                print('Loss:', loss_and_metrics[0], '\nCount:', stats['count'],
                      'iterations:', args.iterations, 'patience:',
                      args.patience, 'Model size:', model.count_params())
                print(self.str_from_params_and_keys(p, param_keys))

                if args.inspect_model:
                    image_name = args.id + '_hyper_' + str(
                        stats['count']) + '.png'
                    image_path = image_name if args.image_dir is None else args.image_dir + image_name
                    models.inspect_model(args,
                                         model,
                                         generate_train,
                                         generate_valid,
                                         image_path=image_path)

                limit_mem()
                return loss_and_metrics[0]

            except ValueError as e:
                print(str(e) + '\nImpossible architecture perhaps?')
                return np.random.uniform(
                    100, 10000
                )  # this is ugly but optimization quits when loss is the same
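
The array-style objectives (Examples 1, 2, and 5) all index into the suggestion row through `param_keys`. A sketch of how the GPyOpt domain and `param_keys` might be built together so the column indices stay in sync; the names and ranges are assumptions:

    def make_domain_and_param_keys():
        # Each entry's position in the domain list is the column index of that
        # parameter in the suggestion array, so param_keys is derived from it.
        names_and_ranges = [
            ('conv_width', tuple(range(3, 26))),
            ('conv_height', tuple(range(3, 26))),
            ('conv_layers', (0, 1, 2, 3)),
            ('max_pools_2d', (0, 1, 2, 3)),
            ('valid_padding', (0, 1)),
            ('kernel_single_channel', (0, 1)),
            ('annotation_shortcut', (0, 1)),
            ('fc', (0, 1, 2, 3)),
        ]
        domain = [{'name': name, 'type': 'discrete', 'domain': choices}
                  for name, choices in names_and_ranges]
        param_keys = {name: i for i, (name, _) in enumerate(names_and_ranges)}
        return domain, param_keys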