def test_activations_source_image(self):
        important_layers = ['relu1_1', 'pool1', 'pool2', 'pool3', 'pool4']

        synthesis_model = utilities.load_model(PYTORCH_MODEL_PATH)

        # register hooks to extract the important activations
        hook = ActivationsHook()
        for name, layer in synthesis_model.named_children():
            if name in important_layers:
                layer.register_forward_hook(hook)

        # load an image and pass it through the synthesis model
        source_image = utilities.preprocess_image(
            utilities.load_image(SOURCE_IMG_PATH))

        synthesis_model(source_image)

        # check if they are correct
        with h5py.File(REF_VALS_PATH, 'r') as f:
            for i, layer_name in enumerate(important_layers):
                if layer_name == 'relu1_1':
                    layer_name = 'conv1_1'

                actual_activations = hook.activations[i]
                expected_activations = torch.from_numpy(
                    f['source_img_activations_{}'.format(layer_name)][()])

                assert torch.allclose(actual_activations,
                                      expected_activations,
                                      atol=1e-05)

    def test_activations_two_consecutive_runs(self):
        important_layers = ['relu1_1', 'pool1', 'pool2', 'pool3', 'pool4']

        # load an image
        source_image = utilities.preprocess_image(
            utilities.load_image(SOURCE_IMG_PATH))

        activations = []
        for j in range(2):
            # load a model and pass the image through it
            synthesis_model = utilities.load_model(PYTORCH_MODEL_PATH)

            # register hooks to extract the important activations
            hook = ActivationsHook()
            for name, layer in synthesis_model.named_children():
                if name in important_layers:
                    layer.register_forward_hook(hook)

            synthesis_model(source_image)

            # save activations from the last important layer
            activations.append(hook.activations[-1])

        assert torch.equal(activations[0], activations[1]), \
            'mean error: {}'.format(
                (activations[0] - activations[1]).abs().mean()
            )

    def test_gram_matrices_noise_image(self):
        important_layers = ['relu1_1', 'pool1', 'pool2', 'pool3', 'pool4']

        synthesis_model = utilities.load_model(PYTORCH_MODEL_PATH)

        # register hooks to extract the Gram matrices
        hook = GramHook()
        for name, layer in synthesis_model.named_children():
            if name in important_layers:
                layer.register_forward_hook(hook)

        # load the reference noise image and pass it through the synthesis model
        with h5py.File(REF_VALS_PATH, 'r') as f:
            noise_image = torch.from_numpy(f['noise1234'][()])

        synthesis_model(noise_image)

        # check if they are correct
        with h5py.File(REF_VALS_PATH, 'r') as f:
            for i, layer_name in enumerate(important_layers):
                if layer_name == 'relu1_1':
                    layer_name = 'conv1_1'

                gram_matrix = hook.gram_matrices[i]
                actual_gram_matrix = gram_matrix * gram_matrix.shape[0]

                expected_gram_matrix = torch.from_numpy(
                    f['noise1234_gram_{}'.format(layer_name)][()]).to(
                        torch.float32)

                assert torch.allclose(actual_gram_matrix,
                                      expected_gram_matrix,
                                      atol=1e-07)
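The ActivationsHook and GramHook classes used above are not shown on this page; what follows is a minimal sketch of what such callable forward hooks could look like. The Gram normalization here is an assumption inferred from the gram_matrix.shape[0] rescaling in the test above.

class ActivationsHook:
    """Callable forward hook that records each hooked layer's output."""
    def __init__(self):
        self.activations = []

    def __call__(self, module, inputs, output):
        self.activations.append(output.detach())


class GramHook:
    """Callable forward hook that records a normalized Gram matrix per layer."""
    def __init__(self):
        self.gram_matrices = []

    def __call__(self, module, inputs, output):
        b, c, h, w = output.shape
        features = output.view(b * c, h * w)
        gram = features @ features.t()                    # channel-by-channel correlations
        self.gram_matrices.append(gram / gram.shape[0])   # normalization is an assumption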
Example 4
    def test_load_model(self):

        config = os.path.realpath('./configuration.ini')
        params = utils.parse_config(config)
        m = utils.load_model(params)

        # TODO: make sure that m is an instance of TestModel

        print('done')
Example 6

    def test_bias(self):
        net = utilities.load_model(PYTORCH_MODEL_PATH)

        with h5py.File(REF_VALS_PATH, 'r') as f:
            for name, layer in net.named_children():
                if isinstance(layer, torch.nn.Conv2d):
                    actual_weight = layer.bias
                    expected_weight = torch.from_numpy(
                        f['{}.bias'.format(name)][()])

                    assert torch.equal(actual_weight, expected_weight)
Example 7
def predict(event, context):
    """Makes inference on the passed data."""
    df = utilities.load_dataframe_from_sqs_event(event)
    X = utilities.preprocess(df)

    model = utilities.load_model(MODEL_URI)
    y = model.predict(X)
    results = utilities.postprocess(X, y)

    msg = utilities.SQSMessage()
    msg.dataframe = results
    msg.send(queue=WRITER_QUEUE)
    return {
        "status": "success",
    }
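A hypothetical local invocation of the handler is sketched below. The Records/body layout is the standard SQS event shape, but the exact payload format expected by utilities.load_dataframe_from_sqs_event is an assumption.

# Hypothetical local test of the handler (payload format is an assumption).
fake_event = {
    "Records": [
        {"body": '{"feature_a": [1.0, 2.0], "feature_b": [3.0, 4.0]}'}
    ]
}
print(predict(fake_event, context=None))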
Example 8
def predict(image_path, model_path, top_k=5, category_names=None):
    """
    Returns a set prediction of what flower the given input image is, 
    ordered descending on predicted probability.
    
    INPUTS
    image_path: path to image file
    model_path: path to saved prediction model. Assumes tensorflow model
    top_k: top K predictions to return
    category_names: Path to json mapping class numbers to flower names. If not specified, 
    the returned output only includes class numbers. 
    
    RETURNS
    classes: Classes of predicted flowers in descending order based on probabilities.
    Will be of integers if `category_names`is unspecified. Else, includes names of 
    predicted flowers. 
    probs: Proabilities of predicted classes in descending order. 
    """
    #process image from image_path
    image = process_image(image_path)

    #load the model from model_path
    model = load_model(model_path)

    #make predictions on the image with the model
    ps = model.predict(image)

    #sort indices from predictions descending based on probabilities
    psi = np.argsort(-ps)

    #get top K probabilities from classes
    classes = psi[0][:top_k]

    #use classes as indices to find the top K predictions
    probs = np.take(ps, classes)

    #adding 1 to index to get correct class values
    classes += 1

    #check if category names are specified and translate classes to class_names.
    if category_names:
        class_names = open_json(category_names)
        class_str = [str(x) for x in classes]
        classes = [class_names.get(x) for x in class_str]

    return classes, probs
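A hypothetical call of predict follows; the image, model, and label-map paths are placeholders, not files from the original project.

# Hypothetical usage; the paths below are placeholders.
classes, probs = predict('test_images/rose.jpg', 'model.h5',
                         top_k=3, category_names='label_map.json')
for cls, p in zip(classes, probs):
    print(f'{cls}: {p:.3f}')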
Example 9
    def __init__(
        self, path: str, device: torch.device, target_image: torch.Tensor,
        layer_weights: List[float] = [1e09, 1e09, 1e09, 1e09, 1e09],
        important_layers: List[str] = [
            'relu1_1', 'pool1', 'pool2', 'pool3', 'pool4'
        ]
    ):
        self.net = utilities.load_model(path).to(device).eval()
        self.device = device
        self.target_image = target_image.to(device)
        self.layer_weights = layer_weights
        self.important_layers = important_layers

        # extract Gram matrices of the target image
        gram_hook = GramHook()
        gram_hook_handles = []
        for name, layer in self.net.named_children():
            if name in self.important_layers:
                handle = layer.register_forward_hook(gram_hook)
                gram_hook_handles.append(handle)
        self.net(self.target_image)

        # register Gram loss hook
        self.gram_loss_hook = GramLossHook(
            gram_hook.gram_matrices, layer_weights, important_layers
        )
        for handle in gram_hook_handles:    # Gram hook is not needed anymore
            handle.remove()

        for name, layer in self.net.named_children():
            if name in self.important_layers:
                handle = layer.register_forward_hook(self.gram_loss_hook)

        # remove unnecessary layers
        i = 0
        for name, layer in self.net.named_children():
            if name == important_layers[-1]:
                break
            i += 1
        self.net = self.net[:(i + 1)]
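How this wrapper is driven is not shown on this page. Below is a rough sketch of a synthesis loop under the assumption that the class is called TextureSynthesizer and that gram_loss_hook exposes a losses list; both names are assumptions, not confirmed by the code above.

# Hypothetical synthesis loop (class name and the losses attribute are assumptions).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
target_image = utilities.preprocess_image(utilities.load_image(SOURCE_IMG_PATH))

synth = TextureSynthesizer(PYTORCH_MODEL_PATH, device, target_image)
noise = torch.rand_like(target_image, device=device, requires_grad=True)
optimizer = torch.optim.LBFGS([noise])

def closure():
    optimizer.zero_grad()
    synth.gram_loss_hook.losses.clear()      # assumed attribute holding per-layer losses
    synth.net(noise)                         # the forward pass triggers the loss hooks
    loss = sum(synth.gram_loss_hook.losses)
    loss.backward()
    return loss

for _ in range(20):
    optimizer.step(closure)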
Example 10
def main():
    in_arg = get_input_args()

    # initiate the variables passed through command line
    image_path = in_arg.image_path
    checkpoint_path = in_arg.checkpoint_path
    top_k = in_arg.top_k
    category_names = in_arg.category_names
    gpu = in_arg.gpu

    # Correct the variables if necessary to avoid incorrect calculations.
    # Collect messages describing which variables were changed and to what values.
    error_messages = []

    if (top_k <= 0):
        top_k = 1
        error_messages.append("top_k was corrected to 1")
    elif (top_k > 5):
        top_k = 5
        error_messages.append("top_k was corrected to 5")

    if path.isfile(image_path) and path.isfile(
            checkpoint_path) and path.isfile(
                category_names):  # check if all files are existing

        # load the category names file, which maps category indices to category names
        with open(category_names, 'r') as f:
            cat_to_name = json.load(f)

        # use the folder of the specified file as category index
        title_idx = image_path.split("/")[-2]

        # find the name by matching the category index with the indices in the category names file
        img_label = [v for k, v in cat_to_name.items() if k == title_idx]
        img_label = img_label[0]
        print(f"Image label: {img_label}")

        # use GPU power if available for prediction
        if gpu:
            device = torch.device(
                "cuda" if torch.cuda.is_available() else "cpu")
        else:
            device = "cpu"

        # load the model from the specified classifier path
        model = load_model(checkpoint_path)
        model = model.to(device)

        # freeze all model parameters to speed up inference
        for param in model.parameters():
            param.requires_grad = False

        # switch to evaluation mode
        model.eval()

        # prepare the specified image so that it can be used by the model
        img = process_image(image_path)
        img = img[None, :, :, :]
        img = img.float()
        img = img.to(device)

        # deactivate gradient tracking to further speed things up and compute the log-probability outputs
        with torch.no_grad():
            logps = model.forward(img)

        # convert to probability values
        ps = torch.exp(logps)

        # save the top probabilities and their category indices
        top_p, top_class = ps.topk(top_k, dim=1)
        top_p = np.array(top_p).reshape(top_k)
        top_class = np.array(top_class)

        # find the category indices for the predicted top indices
        top_classes = [
            k for k, v in model.class_to_idx.items() if v in top_class
        ]
        # match the top category indices with their category names
        names = [cat_to_name[v] for v in top_classes if v in cat_to_name]

        for i, name in enumerate(names):
            print(f"{name}: ..... {format(top_p[i],'.2f')}")

    else:
        print("Incorrect paths to files - please check!")

    # print out error messages if any
    if error_messages:
        for v in error_messages:
            print(v)
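The get_input_args helper is not shown on this page. A hypothetical parser covering the attributes main() reads (image_path, checkpoint_path, top_k, category_names, gpu) could look like the sketch below; the defaults are illustrative, not from the original project.

import argparse

def get_input_args():
    # Hypothetical parser; defaults are illustrative, not from the original project.
    parser = argparse.ArgumentParser(description='Predict a flower name from an image')
    parser.add_argument('image_path', type=str, help='path to the input image')
    parser.add_argument('checkpoint_path', type=str, help='path to the model checkpoint')
    parser.add_argument('--top_k', type=int, default=5)
    parser.add_argument('--category_names', type=str, default='cat_to_name.json')
    parser.add_argument('--gpu', action='store_true')
    return parser.parse_args()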
Example 11
def run_model(dataloader, args):
    """
    Run a model in either training mode or testing mode. If the model exists, then the weights are loaded from this trained model.

    Arguments:
        dataloader (torch.utils.data.DataLoader) : contains the data used for training/testing.
        args (argparse.ArgumentParser) : command line arguments
        
    Returns:
        None
    """
    # Load the device on which we're running the model
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    ### Initialize network
    network = VAE()
    network.apply(network.initialize_weight)
    network.to(device)

    if args.load:
        load_model(network)
    else:
        if not os.path.exists("TrainedModel"):
            os.makedirs("TrainedModel")

    # Output directory might not exist yet
    if not os.path.exists("Output"):
        os.makedirs("Output")

    if args.mode == 'train':
        ### Create optimizer and scheduler

        # optimizer = torch.optim.SGD(
        #                     network.parameters(),
        #                     lr=args.lr,
        #                     momentum=0.9,
        #                     weight_decay=1e-4
        #                 )

        optimizer = torch.optim.Adam(network.parameters(),
                                     lr=args.lr,
                                     weight_decay=1e-4)

        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[int(args.epochs / 3),
                        int(2 * args.epochs / 3)],
            gamma=0.1)

        epoch_history = []

        pytorch_total_params = sum(p.numel() for p in network.parameters()
                                   if p.requires_grad)
        print(f"Total of trainable parameters: {pytorch_total_params}")

        ### Run epochs
        for epoch in range(args.epochs):
            print(f"Starting Epoch [{epoch+1} / {args.epochs}]")

            epoch_loss, epoch_acc = run_epoch(network,
                                              optimizer,
                                              dataloader,
                                              mode=args.mode)

            ### Print statistics
            print("-" * 30)
            print(
                f"Epoch [{epoch + 1: >4}/{args.epochs}] Loss: {epoch_loss:.2e} Acc: {epoch_acc:.2e}"
            )
            print("-" * 30)

            ### Update the learning rate based on scheduler
            lr_scheduler.step()

            epoch_history.append(epoch_loss)

            ### Store model as backup
            if epoch % args.checkpoint_epochs == 0 and epoch > 0:
                torch.save(network.state_dict(),
                           "TrainedModel/modelBackup.pth")

                ### Create and Store Plots
                plt.figure(figsize=(12, 9))
                plt.plot(epoch_history, label='Loss History')

                plt.xlabel('Epoch')
                plt.ylabel('Loss')
                plt.xlim(0, epoch)
                plt.legend()
                plt.grid(True)
                plt.savefig("TrainedModel/loss_plot.png", bbox_inches='tight')

        ### Save final model
        torch.save(network.state_dict(), "TrainedModel/finalModel.pth")

    elif args.mode == 'test':
        epoch_loss, epoch_acc = run_epoch(network,
                                          None,
                                          dataloader,
                                          mode=args.mode)

        ### Print statistics
        print("-" * 30)
        print(f"Loss: {epoch_loss:.2e} Acc: {epoch_acc:.2e}")
        print("-" * 30)

    elif args.mode == 'inference':
        _, _ = run_epoch(network, None, dataloader, mode=args.mode)

    return
Example 12
import argparse

from utilities import load_model, load_label_map, show_inference, parse_output_dict
from custom_np_encoder import NumpyArrayEncoder

DEFAULT_PORT = 5000
DEFAULT_HOST = '0.0.0.0'

# model_path = "models/ssd_mobilenet_v2_coco_2018_03_29/saved_model"
model_path = "models/ssd_mobilenet_v1_coco_2018_01_28/saved_model"
# model_path = "models/ssd_mobilenet_v1_ppn_shared_box_predictor_300x300_coco14_sync_2018_07_03/saved_model"
labels_path = "data/mscoco_label_map.pbtxt"
print(model_path)

vis_threshold = 0.5
max_boxes = 20

detection_model = load_model(model_path)
category_index = load_label_map(labels_path)


def parse_args():

    parser = argparse.ArgumentParser(
        description='Tensorflow object detection API')

    parser.add_argument('--debug',
                        dest='debug',
                        help='Run in debug mode.',
                        required=False,
                        action='store_true',
                        default=False)
Example 13
def train(data, params, pretrained=False, boolShuffle=True):
    boolNgram = False
    boolDependency = False
    boolKnowledge = False

    if params["MODALITY"] in set(["N", "ND", "NK", "NDK"]):
        boolNgram = True
    if params["MODALITY"] in set(["D", "ND", "DK", "NDK"]):
        boolDependency = True
    if params["MODALITY"] in set(["NK", "DK", "NDK"]):
        boolKnowledge = True

    if pretrained:
        params["WV_MATRIX"] = np.zeros(
            (params["VOCAB_SIZE"] + 2, params["WORD_DIM"]))
        params["WVF_MATRIX"] = np.zeros(
            (params["VOCAB_SIZE"] + 2, params["WORD_DIM"]))
        params["CONCEPT_MATRIX"] = np.zeros(
            (params["CONCEPT_SIZE"] + 1, params["CONCEPT_DIM"]))
        model = utilities.load_model(MGNC_CNN(**params), params)
        model.train()

    else:
        # load word2vec
        if boolNgram:
            word_vectors = KeyedVectors.load_word2vec_format(
                "n-gram.model.bin", binary=True)
            wv_matrix = []

        if boolDependency:
            word_dep_vocab, word_dep_vectors = npy_load("dependency")
            wvf_matrix = []

        for i in range(len(data["vocab"])):
            word = data["idx_to_word"][i]
            if boolNgram:
                if word in word_vectors.vocab:
                    wv_matrix.append(word_vectors.word_vec(word))
                else:
                    wv_matrix.append(
                        np.random.uniform(
                            -0.01, 0.01, params["WORD_DIM"]).astype("float16"))

            if boolDependency:
                if word in word_dep_vocab:
                    wvf_matrix.append(word_dep_vectors[word_dep_vocab[word]])
                else:
                    wvf_matrix.append(
                        np.random.uniform(
                            -0.01, 0.01, params["WORD_DIM"]).astype("float16"))

        if boolNgram:
            # one for unknown and one for zero padding
            wv_matrix.append(
                np.random.uniform(-0.01, 0.01,
                                  params["WORD_DIM"]).astype("float16"))
            wv_matrix.append(np.zeros(params["WORD_DIM"]).astype("float16"))
            wv_matrix = np.array(wv_matrix)
            params["WV_MATRIX"] = wv_matrix

        if boolDependency:
            wvf_matrix.append(
                np.random.uniform(-0.01, 0.01,
                                  params["WORD_DIM"]).astype("float16"))
            wvf_matrix.append(np.zeros(params["WORD_DIM"]).astype("float16"))
            wvf_matrix = np.array(wvf_matrix)
            params["WVF_MATRIX"] = wvf_matrix

        concept_matrix = []
        if boolKnowledge:
            concept_vocab, concept_vectors = npy_load(
                "knowledge_triplet_" +
                params["DATASET"].replace("context", ""))

            for concept in data["concept"]:
                if concept in concept_vocab:
                    concept_matrix.append(
                        concept_vectors[concept_vocab[concept]])
                else:
                    concept_matrix.append(
                        np.zeros(params["CONCEPT_DIM"]).astype("float16"))

            # one for zero padding
            concept_matrix.append(
                np.zeros(params["CONCEPT_DIM"]).astype("float16"))
            concept_matrix = np.array(concept_matrix)
            params["CONCEPT_MATRIX"] = concept_matrix

        model = MGNC_CNN(**params)

    model = model.to('cuda')
    model = nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    parameters = filter(lambda p: p.requires_grad, model.parameters())

    if params["OPTIMIZATION"] == "adadelta":
        optimizer = optim.Adadelta(parameters, params["LEARNING_RATE"])
    elif params["OPTIMIZATION"] == "adam":
        optimizer = optim.Adam(parameters, params["LEARNING_RATE"])
    elif params["OPTIMIZATION"] == "adamax":
        optimizer = optim.Adamax(parameters, params["LEARNING_RATE"])
    elif params["OPTIMIZATION"] == "sgd":
        optimizer = optim.SGD(parameters,
                              params["LEARNING_RATE"],
                              momentum=0.9)
    elif params["OPTIMIZATION"] == "adagrad":
        optimizer = optim.Adagrad(parameters, params["LEARNING_RATE"])
    elif params["OPTIMIZATION"] == "sparseadam":
        optimizer = optim.SparseAdam(parameters, params["LEARNING_RATE"])
    elif params["OPTIMIZATION"] == "RMSprop":
        optimizer = optim.RMSprop(parameters, params["LEARNING_RATE"])

    criterion = nn.CrossEntropyLoss()

    pre_dev_fsc = 0
    max_dev_fsc = 0

    listLog = []
    int_drop_sample_size = len(data["train_sen"]) % params["BATCH_SIZE"]
    for e in range(params["EPOCH"]):
        data2 = {}
        if boolShuffle:
            if boolKnowledge:
                data["train_sen"], data["train_class"], data[
                    "train_concept"] = shuffle(data["train_sen"],
                                               data["train_class"],
                                               data["train_concept"])
                data2["train_sen"], data2["train_class"], data2[
                    "train_concept"] = data["train_sen"][
                        int_drop_sample_size:], data["train_class"][
                            int_drop_sample_size:], data["train_concept"][
                                int_drop_sample_size:]

            else:
                data["train_sen"], data["train_class"] = shuffle(
                    data["train_sen"], data["train_class"])
                data2["train_sen"], data2["train_class"] = data["train_sen"][
                    int_drop_sample_size:], data["train_class"][
                        int_drop_sample_size:]

        for i in range(0, len(data2["train_sen"]), params["BATCH_SIZE"]):
            batch_range = min(params["BATCH_SIZE"],
                              len(data2["train_sen"]) - i)

            batch_sen = [[
                data["word_to_idx"][w]
                if w in data["vocab"] else params["VOCAB_SIZE"] for w in sent
            ] + [params["VOCAB_SIZE"] + 1] *
                         (params["MAX_SENT_LEN"] - len(sent))
                         for sent in data2["train_sen"][i:i + batch_range]]

            batch_class = [
                data["classes"].index(c)
                for c in data2["train_class"][i:i + batch_range]
            ]
            batch_sen = Variable(torch.LongTensor(batch_sen)).cuda(
                params["GPU"])
            batch_class = Variable(torch.LongTensor(batch_class)).cuda(
                params["GPU"])

            optimizer.zero_grad()
            model.train()
            if boolKnowledge:
                batch_concept = [[
                    data["concept_to_idx"][concept]
                    if concept in data["concept"] else params["CONCEPT_SIZE"]
                    for concept in seq
                ] + [params["CONCEPT_SIZE"]] *
                                 (params["MAX_CONCEPT_LEN"] - len(seq))
                                 for seq in data2["train_concept"][i:i +
                                                                   batch_range]
                                 ]
                batch_concept = Variable(torch.LongTensor(batch_concept)).cuda(
                    params["GPU"])
                pred = model([batch_sen, batch_concept])
            else:
                pred = model([batch_sen])
            loss = criterion(pred, batch_class)
            loss.backward()

            torch.nn.utils.clip_grad_norm_(parameters,
                                           max_norm=params["NORM_LIMIT"])
            optimizer.step()

        tup_dev_fsc_micro, tup_dev_fsc_each = test(data,
                                                   model,
                                                   params,
                                                   boolKnowledge,
                                                   mode="dev")
        dev_fsc = tup_dev_fsc_micro[2]

        if params["EARLY_STOPPING"] and dev_fsc <= pre_dev_fsc:
            break
        else:
            pre_dev_fsc = dev_fsc

        if dev_fsc > max_dev_fsc:
            max_dev_fsc = dev_fsc
            tup_max_dev_fsc = tup_dev_fsc_micro
            best_model = copy.deepcopy(model)

    return best_model, tup_max_dev_fsc[2], tup_dev_fsc_each, listLog
Example 14
# Load corpus into input sequences
corpus = nlp_helpers.load_corpus(config["corpus_path"])
tokenizer = Tokenizer(num_words=config["num_words"], oov_token=None)
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
input_sequences = nlp_helpers.create_input_sequences(corpus,
                                                     tokenizer,
                                                     random_sampling=220000)

# create predictors and label
predictors, label = input_sequences[:, :-1], input_sequences[:, -1]
label = ku.to_categorical(label, num_classes=total_words)

if config["start_from_pretrained_model"]:
    model = utilities.load_model(config["pretrained_model_name"],
                                 config["pretrained_model_dir"])
    previous_history = utilities.load_history(config["pretrained_model_name"],
                                              config["pretrained_model_dir"])
else:
    model = models.word_predictor_model(config["batch_size"], total_words,
                                        len(input_sequences[0]))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train model
history = model.fit(predictors,
                    label,
                    batch_size=config["batch_size"],
                    epochs=config["epochs"],
                    verbose=1)
Example 15
        print('[%d, %5d] validation loss: %.3f' %
              (epoch + 1, i + 1, running_valid_loss))
        valid_loss.append(running_valid_loss)
    print('Finished Training')

    fig, (ax1, ax2) = plt.subplots(2)
    ax1.plot(range(0, N), train_loss, label="training loss")
    ax1.plot(range(0, N), valid_loss, label="validation loss")
    ax1.set(ylabel="loss")
    ax1.legend()
    ax1.set_title("training / validation loss over epochs")
    #axs[0].savefig(PATH + "train_valid_loss" + ".jpg")

    ax2.plot(range(0, N), valid_in_ssim, label="LDPT/NDPT ssim")
    ax2.plot(range(0, N), valid_res_ssim, label="result/NDPT ssim")
    ax2.set(ylabel="ssim")
    ax2.legend()
    ax2.set_title("validation ssim over epochs")
    fig.savefig(PATH + "_valid__loss_ssim_" + ".jpg")
    # Save the whole model to the specified path
    torch.save(net, PATH)

if t == 0:
    loss_path = 'f1.txt'
    load_model(PATH, trainloader_1, loss_path)

# 1. normalize all data: low dose with low dose, high dose with high dose, and multiply the low dose by the DRF
# 2. try on 1/5 dose
# 3. display diff