Example 1
def test_save_model(model_name: str, fake_model: Model):
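    # Round-trip check: save the model under a new tag, reload it by name,
    # and verify the reloaded layers match the original.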
    fake_model.__asf_model_name = f"{model_name}:some_old_tag"

    save_model(fake_model, "test_save_model")
    model = load_model(f"{model_name}:test_save_model")

    verify_model_layer_equality(model, fake_model)
Example 2
def main(folder: str) -> None:
    full_path = os.path.join("datasets", folder)
    model = load_model("AI_FCN_512")
    remove_64(full_path)
    run_gui(folder, model)
    print("Finishing up...")
    break_up_images(full_path)
Example 3
def train_wrapper(args: Namespace) -> None:
    """ Function for training a network. """
    model_name = args.model
    if args.cont:
        model = load_model(model_name)
        history = model.__asf_model_history
        weights = model.get_weights()
        lr_schedule = ExponentialDecay(9.2e-4,
                                       decay_steps=2000,
                                       decay_rate=0.96,
                                       staircase=True)
        # optimizer = model.optimizer
        model.compile(loss=jaccard_distance_loss,
                      optimizer=Adam(learning_rate=lr_schedule),
                      metrics=['accuracy', MeanIoU(num_classes=2)])
        model.set_weights(weights)
    #     model.compile(
    #         loss='binary_crossentropy', optimizer='adam', metrics=["accuracy"]
    #     )
    else:
        model_path = path_from_model_name(model_name)
        if not args.overwrite and os.path.isfile(model_path):
            print(f"File {model_name} already exists!")
            return

        # model = create_model_masked(model_name)
        model = create_cdl_model_masked(model_name)
        history = {'loss': [], 'accuracy': [], "mean_io_u": []}

    train_model(model, history, args.dataset, args.epochs)
Example 4
def predict(config_filename, test_data, vocabulary, char_set, targets,
            additionals, rev_categories):
    config = Config()
    config.load(config_filename)

    use_cuda = torch.cuda.is_available()
    model, _ = load_model(config.model_filename, config_filename, use_cuda)
    model.eval()

    competition = config.competition
    task_type = config.task_type
    task_key = competition + "-" + task_type
    test_batches = get_batches(test_data.reviews, vocabulary, char_set, 1,
                               config.model_config.word_max_count,
                               config.model_config.char_max_word_length,
                               targets[task_key], additionals[task_key],
                               config.model_config.use_pos)
    new_reviews = []
    for review, batch in zip(test_data.reviews, test_batches):
        predictions = model.predict(batch)

        if config.model_config.is_sequence_predictor:
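            # Sequence model: count the non-padding tokens, then keep only
            # the predictions for real tokens.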
            length = sum(
                [int(elem != 0) for elem in batch.word_indices[0].data])
            if model.config.use_crf:
                review_pred = predictions[0][:length]
            else:
                review_pred = predictions[0, :length].cpu()
        else:
            review_pred = predictions[0].cpu().item()
        new_review = get_new_review(review, review_pred, competition,
                                    task_type, rev_categories,
                                    config.model_config.is_sequence_predictor)
        new_reviews.append(new_review)

    if competition == "imdb":
        csv = "id,sentiment\n"
        for review in new_reviews:
            csv += str(review.rid) + "," + str(review.sentiment) + "\n"
        with open(config.output_filename, "w", encoding='utf-8') as f:
            f.write(csv)

    if competition == "sst2" or competition == "sst1":
        text = ""
        for review in new_reviews:
            text += str(review.sentiment) + " " + review.text + "\n"
        with open(config.output_filename, "w", encoding='utf-8') as f:
            f.write(text)

    if competition == "semeval" or competition == "sentirueval":
        xml = '<?xml version="1.0" ?>\n'
        xml += '<Reviews>\n' if competition == "semeval" else "<reviews>\n"
        for review in new_reviews:
            xml += review.to_xml()
        xml += '</Reviews>\n' if competition == "semeval" else "</reviews>\n"
        with open(config.output_filename, "w", encoding='utf-8') as f:
            f.write(xml)
Example 5
def test_save_model_with_history(model_name: str, fake_model: Model,
                                 new_history: History):
    fake_model.__asf_model_name = f"{model_name}:some_old_tag"

    save_model(fake_model, "test_save_model", history=new_history)
    model = load_model(f"{model_name}:test_save_model")

    verify_model_layer_equality(model, fake_model)
    assert model.__asf_model_history == new_history
Example 6
    def __prepare(self):
        num_classes = len(self.classes)
        self.tf_model, self.input_details, self.output_details = load_model(
            self.model_path, self.batch_size, self.image_size, num_classes)

        self.list_nude_images_paths = get_images_from_folder(
            self.nudes_image_folder, self.image_extention)
        self.list_non_nude_images_paths = get_images_from_folder(
            self.non_nudes_image_folder, self.image_extention)

        self.df = pd.DataFrame(columns=['real_label'] + self.classes)
Example 7
def test_wrapper(args: Namespace) -> None:
    model_name = args.model
    model = load_model(model_name)

    if args.edit:
        predictions, data_iter, metadata = test_model_masked(
            model, args.dataset, args.edit)
        edit_predictions(predictions, data_iter, metadata)
    else:
        predictions, test_iter = test_model_masked(model, args.dataset,
                                                   args.edit)
        plot_predictions(predictions, test_iter)
Example 8
def create_water_mask(model_path: str,
                      vv_path: str,
                      vh_path: str,
                      outfile: str,
                      verbose: int = 0):
    if not os.path.isfile(vv_path):
        raise FileNotFoundError(f"Tiff '{vv_path}' does not exist")

    if not os.path.isfile(vh_path):
        raise FileNotFoundError(f"Tiff '{vh_path}' does not exist")

    def get_tiles(img_path):
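        # Read the raster, pad it to a multiple of the tile size, record the
        # zero-valued (invalid) pixels, and split the padded array into tiles.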
        f = gdal.Open(img_path)
        img_array = f.ReadAsArray()
        original_shape = img_array.shape
        n_rows, n_cols = get_tile_dimensions(*original_shape, tile_size=dems)
        padded_img_array = pad_image(img_array, dems)
        invalid_pixels = np.nonzero(padded_img_array == 0.0)
        img_tiles = stride_tile_image(padded_img_array)
        return (img_tiles, n_rows, n_cols, invalid_pixels,
                f.GetProjection(), f.GetGeoTransform())

    # Get vv tiles
    vv_tiles, vv_rows, vv_cols, vv_pixels, vv_projection, vv_transform = get_tiles(
        vv_path)

    # Get vh tiles
    vh_tiles, vh_rows, vh_cols, vh_pixels, vh_projection, vh_transform = get_tiles(
        vh_path)

    model = load_model(model_path)

    # Predict masks
    masks = model.predict(np.stack((vv_tiles, vh_tiles), axis=3),
                          batch_size=1,
                          verbose=verbose)

    masks.round(decimals=0, out=masks)

    # Stitch masks together
    mask = masks.reshape((vv_rows, vv_cols, dems, dems)) \
        .swapaxes(1, 2) \
        .reshape(vv_rows * dems, vv_cols * dems)  # yapf: disable

    mask[vv_pixels] = 0
    write_mask_to_file(mask, outfile, vv_projection, vv_transform)

    # No explicit cleanup needed here: the GDAL datasets are opened inside
    # get_tiles, so their references are released when it returns.
Example 9
def train_wrapper(args: Namespace) -> None:
    """ Function for training a network. """
    model_name = args.model
    if args.cont:
        model = load_model(model_name)
        history = model.__asf_model_history
    else:
        model_path = path_from_model_name(model_name)
        if not args.overwrite and os.path.isfile(model_path):
            print(f"File {model_name} already exists!")
            return

        model = create_model_masked(model_name)
        history = {"loss": [], "acc": [], "val_loss": [], "val_acc": []}

    train_model(model, history, args.dataset, args.epochs)
Example 10
def main(model_path: str,
         vv_path: str,
         vh_path: str,
         outfile: str,
         verbose: int = 0):

    if not os.path.isfile(vv_path):
        raise FileNotFoundError(f"Tiff '{vv_path}' does not exist")

    if not os.path.isfile(vh_path):
        raise FileNotFoundError(f"Tiff '{vh_path}' does not exist")

    # Get vv tiles
    f = gdal.Open(vv_path)
    vv_array = f.ReadAsArray()
    original_shape = vv_array.shape
    n_rows, n_cols = get_tile_dimensions(*original_shape, tile_size=dems)
    vv_array = pad_image(vv_array, dems)
    invalid_pixels = np.nonzero(vv_array == 0.0)

    vv_tiles = tile_image(vv_array)

    # Get vh tiles
    f = gdal.Open(vh_path)
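    # NOTE: this rebinds `f` to the vh dataset, so the projection and
    # geotransform passed to write_mask_to_file below come from the vh file.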
    vh_array = pad_image(f.ReadAsArray(), dems)

    vh_tiles = tile_image(vh_array)

    model = load_model(model_path)
    # Predict masks
    masks = model.predict(np.stack((vh_tiles, vv_tiles), axis=3),
                          batch_size=1,
                          verbose=verbose)
    masks.round(decimals=0, out=masks)
    # Stitch masks together
    mask = masks.reshape((n_rows, n_cols, dems, dems)) \
                .swapaxes(1, 2) \
                .reshape(n_rows * dems, n_cols * dems)  # yapf: disable

    mask[invalid_pixels] = 0
    write_mask_to_file(mask, outfile, f.GetProjection(), f.GetGeoTransform())

    f = None
Example 11
def view_filters(model_name):
    model = load_model(model_name)
    for layer in model.layers:
        if not isinstance(layer, Conv2D):
            continue

        weights, biases = layer.get_weights()

        filter_width, filter_height, channels, num_filters = weights.shape

        nrows = math.ceil(math.sqrt(num_filters))
        ncols = math.ceil(num_filters / nrows)
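        # Arrange the filters on a near-square subplot grid.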

        plt.figure(layer.name)
        plt.suptitle(f"Network layer: {layer.name}")
        x1w = weights[:, :, 0, :]
        for i in range(num_filters):
            plt.subplot(nrows, ncols, i + 1)
            plt.imshow(x1w[:, :, i], interpolation="nearest", cmap="gray")
    plt.show()
Example 12
def test_wrapper(args: Namespace) -> None:
    model_name = args.model
    model = load_model(model_name)

    if model_type(model) != dataset_type(args.dataset):
        print("ERROR: This dataset is not compatible with your model")
        return
    if dataset_type(args.dataset) == ModelType.MASKED:
        predictions, test_iter = test_model_masked(model, args.dataset)
        plot_masked_predictions(predictions, test_iter, args.dataset)
    else:
        details, confusion_matrix = test_model_binary(model, args.dataset)

        model_dir = os.path.dirname(path_from_model_name(model_name))
        with open(os.path.join(model_dir, 'results.csv'), 'w') as f:
            write_dict_to_csv(details, f)

        plot_confusion_chart(confusion_matrix)
        plot_predictions(details['Percent'], args.dataset)
Example 13
def train_wrapper(args: Namespace) -> None:
    """Function for training a network"""
    data_type = dataset_type(args.dataset)
    model_name = args.model
    if args.cont:
        model = load_model(model_name)
        history = model.__asf_model_history
    else:
        model_path = path_from_model_name(model_name)
        if not args.overwrite and os.path.isfile(model_path):
            print(f"File {model_name} already exists!")
            return

        model = create_model(model_name, data_type)
        history = {"loss": [], "acc": [], "val_loss": [], "val_acc": []}

    if model_type(model) != data_type:
        print("ERROR: This dataset is not compatible with your model")
        return

    train_model(model, history, args.dataset, args.epochs)
Example 14
def mkdata_wrapper(args: Namespace) -> None:

    etl_wm()
    setup_data(args.size)
    dataset_fpath = f"syntheticTriainingData{date.isoformat(date.today())}"
    dataset_dir = os.path.join('datasets', args.directory)
    model_name = args.model
    model = load_model(model_name)

    if args.environment:
        final_dataset_fpath = os.path.join(
            dataset_dir, f'{args.dataset}_{args.environment}')
        dataset = os.path.join(args.directory,
                               f'{args.dataset}_{args.environment}')
    else:
        final_dataset_fpath = os.path.join('datasets', args.dataset)
        dataset = args.dataset

    if not os.path.isdir(dataset_dir):
        os.mkdir(dataset_dir)

    if not os.path.isdir(final_dataset_fpath):
        os.mkdir(final_dataset_fpath)

    for folder in os.listdir(dataset_fpath):
        for img in os.listdir(os.path.join(dataset_fpath, folder)):
            os.rename(os.path.join(dataset_fpath, folder, img),
                      os.path.join(final_dataset_fpath, img))
    shutil.rmtree(dataset_fpath)
    move_imgs(final_dataset_fpath)
    prepare_data(final_dataset_fpath, 0.2)

    predictions, data_iter, metadata = test_model_masked(model,
                                                         dataset,
                                                         edit=True)
    edit_predictions(predictions, data_iter, metadata)
Example 15
    def __init__(self, model_path, image_paths, mode='eval'):
        self.model = load_model(model_path, mode)
        self.image_paths = image_paths
        self.feature = []
        # random.randint is inclusive on both ends, so subtract 1 to keep
        # the value a valid index into image_paths
        self.r = randint(0, len(image_paths) - 1)
Example 16
import time

import cv2
import torch
from PIL import Image
from torch.nn import Softmax

from src.camera import get_camera_capture
from src.display import setup_window, add_label, is_quit_key, is_fullscreen_key, switch_fullscreen_mode
from src.gestures import Gestures
from src.image_processing import get_transform
from src.model import load_model, init_buffer
from src.prediction_smoothing import PredictionSmoothing

if __name__ == "__main__":
    model = load_model()
    model.eval()

    cap = get_camera_capture(0, 320, 240)

    full_screen = False
    window_name = 'Video Gesture Recognition'
    setup_window(window_name)

    transform = get_transform()
    shift_buffer = init_buffer()
    gestures = Gestures()
    prediction_smoothing = PredictionSmoothing(7)
    softmax = Softmax(1)

    while True:
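        # The loop body is truncated in the original example. A typical body,
        # assuming the helpers imported above do what their names suggest,
        # would: read a frame from `cap`, preprocess it with `transform`, run
        # `model` together with `shift_buffer`, smooth the class scores with
        # `softmax` and `prediction_smoothing`, map the result through
        # `gestures`, draw it with `add_label`, and exit when `is_quit_key`
        # matches the pressed key (toggling fullscreen via `is_fullscreen_key`
        # and `switch_fullscreen_mode`).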
Example 17

def server_info(token):
    servers_names, servers_uuid = sa.get_server_list(token)
    # TODO : UI ask specific server name
    print("PLEASE SELECT SERVER NAME")
    print(servers_names)
    index = int(input())
    server = sa.get_server_info(token, servers_uuid[index])
    print(server['server'])
    return servers_uuid[index]
    #return data


def get_resource_info(token, uuid):
    print("get_resource_info")
    res = sa.get_resource_list(token, uuid)
    print(res)
    return sa.get_mesuare_list(token, res)


if __name__ == '__main__':
    token = login('admin', 'devstack')
    instance_uuid = server_info(token)
    cpu, memory, disk = get_resource_info(token, instance_uuid)
    print(cpu, memory, disk)
    model = sm.load_model('model/model')
    print("PLEASE INPUT RATING DATA")
    rating = int(input())
    sm.predict(cpu, memory, disk, rating, model)
Example 18
cors = CORS(
    app,
    resources={
        # flask-cors expects the "origins" option (plural)
        r"/*": {"origins": "*"},
        r"/stackUpdate/*": {"origins": "*"},
        r"/login/*": {"origins": "*"},
        r"/instanceInfo/*": {"origins": "*"},
        r"/setAlarm/*": {"origins": "*"}
    }
)

sess = tf.Session()
graph = tf.get_default_graph()
set_session(sess)
model = om.load_model('model/model')
set_session(sess)

@app.route('/', methods=['POST'])
def hello_world():
    return 'Hello world'

# Login
@app.route("/login", methods=['POST'])
def login():
    """Login Form"""
    print("/login  <- ")
    print(request.get_json())
    data = request.get_json()
    id = data['id']
    password = data['password']
Example 19
def main():
    torch.manual_seed(317)
    torch.backends.cudnn.benchmark = True

    train_logger = Logger(opt, "train")
    val_logger = Logger(opt, "val")

    start_epoch = 0
    print('Creating model...')
    model = get_model(opt.arch, opt.heads).to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    criterion = CtdetLoss(opt)

    print('Loading model...')
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.lr, opt.lr_step)
    model = torch.nn.DataParallel(model)

    # amp
    scaler = GradScaler()

    print('Setting up data...')
    train_dataset = Dataset(opt, 'train')
    val_dataset = Dataset(opt, 'val')

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=16,
        pin_memory=True,
        drop_last=True
    )

    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )
    # estimate remaining training time
    time_stats = TimeMeter(opt.num_epochs, len(train_loader))

    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        print('train...')
        train(model, train_loader, criterion, optimizer,
              train_logger, opt, epoch, scaler, time_stats)

        if epoch % opt.val_intervals == 0:
            print('val...')
            val(model, val_loader, criterion, val_logger, opt, epoch)
            save_model(os.path.join(opt.save_dir, f'model_{epoch}.pth'),
                       epoch, model, optimizer)

        # update learning rate
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

    # without optimizer
    save_model(os.path.join(opt.save_dir, 'model_final.pth'), epoch, model)
Example 20
import logging
import os

from data_representation import DeepMatcherProcessor, QqpProcessor
from logging_customized import setup_logging
from src.data_loader import load_data, DataType
from src.model import load_model
from src.prediction import predict
from torch_initializer import initialize_gpu_seed

setup_logging()

if __name__ == "__main__":
    args = read_arguments_prediction()

    device, n_gpu = initialize_gpu_seed(args.seed)

    model, tokenizer = load_model(
        os.path.join(args.model_output_dir, args.trained_model_for_prediction),
        args.do_lower_case)
    model.to(device)

    if tokenizer:
        logging.info("Loaded pretrained model and tokenizer from {}".format(
            args.trained_model_for_prediction))
    else:
        tokenizer = BertTokenizer.from_pretrained(
            args.model_name_or_path, do_lower_case=args.do_lower_case)
        logging.info(
            "Loaded pretrained model from {} but no fine-tuned tokenizer found, therefore use the standard tokenizer."
            .format(args.trained_model_for_prediction))

    if args.data_processor == "QqpProcessor":
        processor = QqpProcessor()
Example 21
Da_scheduler = GradualWarmupScheduler(Da_optimizer,
                                      multiplier=1,
                                      total_epoch=WARMUP_EPOCHS,
                                      after_scheduler=Da_scheduler_cos)

Db_optimizer = optim.Adam(Db.parameters(), lr=D_MAX_LR, betas=(0.5, 0.999))
Db_scheduler_cos = CosineAnnealingWarmRestarts(Db_optimizer,
                                               T_0=TOTAL_EPOCHS,
                                               T_mult=1,
                                               eta_min=D_MIN_LR)
Db_scheduler = GradualWarmupScheduler(Db_optimizer,
                                      multiplier=1,
                                      total_epoch=WARMUP_EPOCHS,
                                      after_scheduler=Db_scheduler_cos)

if LOAD_MODEL_EPOCH:
    G, GF_optimizer, GF_scheduler = load_model(G, GF_optimizer, GF_scheduler,
                                               MODEL_G_DIR, LOAD_MODEL_EPOCH,
                                               DEVICE)
    F, GF_optimizer, GF_scheduler = load_model(F, GF_optimizer, GF_scheduler,
                                               MODEL_F_DIR, LOAD_MODEL_EPOCH,
                                               DEVICE)
    Da, Da_optimizer, Da_scheduler = load_model(Da, Da_optimizer, Da_scheduler,
                                                MODEL_DA_DIR, LOAD_MODEL_EPOCH,
                                                DEVICE)
    Db, Db_optimizer, Db_scheduler = load_model(Db, Db_optimizer, Db_scheduler,
                                                MODEL_DB_DIR, LOAD_MODEL_EPOCH,
                                                DEVICE)

G.to(DEVICE)
F.to(DEVICE)
Da.to(DEVICE)
Db.to(DEVICE)
Example 22
def test_load_model(model_name: str, fake_model: Model):
    with mock.patch('src.model.kload_model', return_value=fake_model):
        model = load_model(model_name)

    assert model.__asf_model_name == model_name
    assert model.__asf_model_history
Example 23
from typing import Dict, Union

from fastapi import FastAPI
from pydantic import BaseModel

from src.model import load_model, model_predict

model, tokenizer = load_model()
app = FastAPI()


class Ticket(BaseModel):
    message: str


@app.post("/ticket_support_classification")
def classify_ticket(ticket: Ticket) -> Dict[str, Union[str, int]]:
    assert isinstance(ticket.message, str)
    return {
        "ticket_message": ticket.message,
        "ticket_category": int(model_predict(model, tokenizer, [ticket.message])),
    }
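
A quick local smoke test for the endpoint above; the payload text and the use of FastAPI's TestClient are illustrative, not part of the original project:

# Hypothetical smoke test (requires httpx, TestClient's backend).
from fastapi.testclient import TestClient

client = TestClient(app)
resp = client.post("/ticket_support_classification",
                   json={"message": "My invoice shows the wrong amount"})
print(resp.json())  # {'ticket_message': ..., 'ticket_category': ...}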
Example 24
def test_wrapper(args: Namespace) -> None:
    model_name = args.model
    model = load_model(model_name)

    # if args.edit:
    #     predictions, data_iter, metadata = test_model_masked(
    #         model, args.dataset, args.edit
    #     )
    #     edit_predictions(
    #         predictions, data_iter, metadata
    #     )
    # else:
    predictions, test_batch_metadata, input_metadata = test_model_timeseries(
        model, args.dataset, args.edit)

    model_batch_size = len(test_batch_metadata[0])
    current_date_time = str(datetime.utcnow())

    # Version number _ Country _ Region
    model_name_metadata = model_name.split("_")

    metadata = {
        "model_test_info": {
            "name": model_name,
            "model_architecture_version": model_name_metadata[0],
            "dataset": args.dataset,
            "batch_size": model_batch_size,
            "UTC_date_time": current_date_time
        }
    }

    prediction_directory_name = "{0}_{1}_{2}".format(model_name, args.dataset,
                                                     current_date_time)

    os.mkdir("predictions/{0}".format(prediction_directory_name))

    # {
    # model_info: {}
    # batch_x [
    #    [
    #    {
    #    sample_x: {
    #            timesteps: [
    #                [
    #                    test/WA_2018/S1A_ulx_0000_uly_0000.tif
    #                ]
    #            ]
    #        }
    # }
    #    ]
    # ]
    # }
    print(f"length of metadata: {len(test_batch_metadata)}")
    print(f"length of predictions: {len(predictions)}")

    # fixed = False
    # for batches
    non_blank_predictions = 0
    for idx, batch in enumerate(test_batch_metadata):
        metadata["batch_{0}".format(idx)] = []
        samples = []
        # for sample in batch
        for idy, sample in enumerate(batch):
            current_prediction_idx = idx * model_batch_size + idy
            # e.g. WA_2018, AK_2020
            current_subdataset = sample[0][0].split("/")[1]
            prediction_subdataset_name = f"predictions/{prediction_directory_name}/{current_subdataset}"

            if not os.path.isdir(prediction_subdataset_name):
                os.mkdir(prediction_subdataset_name)

            # if len(predictions) > current_prediction_idx:
            image = predictions[current_prediction_idx]
            image = np.array(image[:, :, 0].reshape(NETWORK_DEMS, NETWORK_DEMS,
                                                    1)).astype(dtype=np.uint8)

            # if np.ptp(image) != 0:
            # img_0 = array_to_img(image)
            prediction_frame = "_".join(sample[0][0].split("_")[-4:])
            filename = f"{prediction_subdataset_name}/CDL_{current_subdataset}_prediction_{prediction_frame}"
            dataset_path_to_sample = f"datasets/{args.dataset}/{sample[0][0]}"
            # filename_0 = "predictions/{0}/batch_{1}/batch_{1}_sample_{2}.tif".format(prediction_directory_name, batch_index, idy % model_batch_size)
            save_img(filename, dataset_path_to_sample, image)
            img_0 = array_to_img(image)
            # img_0.save(filename.replace(".tif", ".png"))
            non_blank_predictions += 1

            timeseries_sample = {}
            timeseries_sample["timesteps"] = sample
            # The name of the prediction produced by this sample
            prediction_file_name = f"predictions/{prediction_directory_name}/batch_{idx}/_sample{idy}"
            sample_data = {
                f"sample_{idy}": timeseries_sample,
                "prediction": prediction_file_name
            }
            samples.append(sample_data)

        metadata[f"batch_{idx}"].append(samples)

    with open(
            'predictions/{0}/{1}_{2}_batch_metadata_{3}.json'.format(
                prediction_directory_name, model_name, args.dataset,
                current_date_time), 'w') as fp:
        json.dump(metadata, fp, indent=4)

    print("samples:" + str(len(predictions * model_batch_size)))

    # for idx in range(len(test_batch_metadata)):
    #     os.mkdir("predictions/{0}/batch_{1}".format(prediction_directory_name, idx))
    # set to -1 to account for 0 mod 4 = 0 in batch_indexing
    # print(len(predictions))
    # print(f"Sample Shape: {predictions[0].shape}")
    # batch_index = 0
    # non_blank_predictions = 0

    # for idy, image in enumerate(predictions):
    #     if idy % model_batch_size == 0 and idy != 0:
    #         batch_index += 1

    #     for idz in range(image.shape[-1]):
    #         images = np.array(image[:, :, idz].reshape(NETWORK_DEMS, NETWORK_DEMS, 1))
    #         img = images.reshape(NETWORK_DEMS, NETWORK_DEMS, 1).astype(dtype=np.uint8)

    #         if np.ptp(img) != 0:
    #             img_0 = array_to_img(img)
    #             filename_0 = "predictions/{0}/batch_{1}/batch_{1}_sample_{2}.tif".format(prediction_directory_name, batch_index, idy % model_batch_size)
    #             img_0.save(filename_0)
    #             non_blank_predictions+=1
    print(
        f"Total non-blank predictions saved: {non_blank_predictions} out of {len(predictions)} predictions"
    )
Example 25
def print_summary(model_name):
    model = load_model(model_name)
    model.summary()
Example 26
from src import app
from src.model import load_model

if __name__ == "__main__":
    load_model()
    app.run()