Example #1
def Pred():
    global result, label

    print("[INFO] loading and preprocessing image...")
    image = image_utils.load_img(filepath, target_size=(224, 224))
    image = image_utils.img_to_array(image)
    image = np.expand_dims(image, axis=0)

    start = timeit.default_timer()
    # load the network
    print("[INFO] loading network...")
    model, tags_from_model = net.load("model")
    net.compile(model)

    # classify the image
    print("[INFO] classifying image...")
    preds = model.predict(image)
    y_classes = probas_to_classes(preds)
    if y_classes == 1:
        print("it is cancerous")
        result = "malignant"
    else:
        print("it is benign")
        result = "benign"
    label.config(text=result)
    stop = timeit.default_timer()
    exec_time = stop - start
    print "Predicting Execution Time is %f s" % exec_time
    return
Example #2
def get_weights(model_path):
    model, tags = net.load(model_path)
    net.compile(model)
    weights_map = {}
    for layer in model.layers:
        weights = layer.get_weights()
        weights_map[layer.name] = weights
    return weights_map
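
A minimal usage sketch for the helper above, assuming a model saved under the hypothetical prefix "model"; each layer maps to a list of NumPy arrays, so the weight shapes can be printed directly:

weights_map = get_weights("model")  # "model" is a placeholder prefix
for name, weights in weights_map.items():
    # each entry is a list of arrays, e.g. kernel and bias
    print(name, [w.shape for w in weights])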
Example #3
def get_layers(model_path):
    model, tags = net.load(model_path)
    net.compile(model)

    print(len(model.layers))
    for count, layer in enumerate(model.layers):
        print("%d,%s" % (count, layer.get_config()["name"]))
Example #4
def predict(model_path, dataset_dir):
    model, tags = net.load(model_path)
    data_X, data_y, tags = dataset.dataset(dataset_dir, 299, False)

    net.compile(model)
    predictions = []
    for d in data_X:
        X = np.expand_dims(d, axis=0)
        prediction = model.predict(X)
        predictions.append(prediction[0])
    return predictions
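
The vectors returned by predict() are raw class probabilities. A hedged sketch of mapping them to tag names, assuming the tags list returned by net.load() is ordered to match the model's output units (the paths are placeholders):

import numpy as np

model, tags = net.load("model")            # hypothetical model prefix
predictions = predict("model", "images")   # hypothetical dataset directory
# argmax over each probability vector recovers the predicted tag
labels = [tags[int(np.argmax(p))] for p in predictions]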
Example #5
def predict_with_dataset(model_path, dataset):
    model, tags = net.load(model_path)

    data_X = dataset.X
    data_y = dataset.y

    net.compile(model)
    predictions = []
    for d in data_X:
        X = np.expand_dims(d, axis=0)
        prediction = model.predict(X)
        predictions.append(prediction[0])
    return predictions
Example #6
def train(save_path='', load_path=False):
    #    db_path = '/home/yash/Project/dataset/GraphSimilarity/reddit_multi_5K.graph'
    #    db = Data(db_path)
    class_count = db.classes
    val_size = db.val_size
    inp_size = db.size

    if load_path:
        net = load(load_path)
    else:
        net = [g_cnn(prv_count=val_size, filter_count=4), g_pool(),
               g_cnn(prv_count=4, filter_count=8), g_pool(flat=True),
               fc_nn(prv=((inp_size // 2) // 2) * 8, nodes=1024),
               fc_nn(prv=1024, nodes=256, dropout=True),
               fc_nn(prv=256, nodes=class_count, fn="Softmax")]

    epoch = 25
    checkpoint = 10
    batch_size = 1
    train_error = np.zeros(epoch * 5000 // batch_size, np.float64)
    valid_error = np.zeros(epoch)
    ctr = 0
    for i in range(epoch):
        while db.has_more:

            data_batch = db.next_batch()
            for d in data_batch:
                e = train_step(net, d)
                #print(e)
                train_error[ctr] += np.sum(np.abs(e))

            #train_error[ctr] /= batch_size
            update(net, batch_size)

            ctr += 1
            if ctr % checkpoint == 0:
                # ctr has already been advanced, so report the error
                # accumulated for the batch that was just processed
                print("Batch [%d]: Training Error: [%f]" %
                      (ctr, train_error[ctr - 1]))

        db.has_more = True
        data_batch = db.get_test()
        for d in data_batch:
            pred = fwd_pass(net, d[0])
            valid_error[i] += -np.sum(calc_error(pred, d[1]))
        valid_error[i] /= len(data_batch)

        save(net, save_path)
        print("Epoch [%d]> Validation Error: [%f]" % (i, valid_error[i]))
Example #8
def __init__(
        self,
        model_path,
        memory_location='../mainstream-analysis/output/mainstream/predictions'):
    super(Model, self).__init__()
    self.model, self.tags = net.load(model_path)
    net.compile(self.model)
    self.dim = 224 if 'mobilenets' in model_path else 299
    print('Tags', self.tags)
    self.memory = {}
    if not os.path.isdir(memory_location):
        os.mkdir(memory_location)
    assert os.path.isdir(memory_location)
    self.memory_location = os.path.join(memory_location,
                                        os.path.basename(model_path))
    if not os.path.isdir(self.memory_location):
        os.mkdir(self.memory_location)
Example #9
def predict(model_path, dataset_dir):
    model, tags = net.load(model_path)
    print(tags)
    # Inception
    # data_X, data_y, tags = dataset.dataset(dataset_dir, 299, False)
    # MobileNets
    data_X, data_y, tags = dataset.dataset(dataset_dir, 224, False)

    net.compile(model)
    predictions = []
    # print 'shape', data_X.shape
    # prediction = model.predict(data_X)
    # print prediction.shape
    # prediction = prediction[:, 0]
    for d in data_X:
        X = np.expand_dims(d, axis=0)
        prediction = model.predict(X)
        predictions.append(prediction[0])
    return predictions
Example #10
def predict_by_tag(model_path, dataset_dir, tag):
    model, tags = net.load(model_path)
    tag_index = [i for i, t in enumerate(tags) if t == tag][0]
    # Inception
    # data_X = dataset.dataset_with_root_dir(dataset_dir, 299)
    # MobileNets
    data_X = dataset.dataset_with_root_dir(dataset_dir, 224)
    print(tags)

    net.compile(model)
    predictions = []
    for d in data_X:
        X = np.expand_dims(d, axis=0)
        prediction = (model.predict(X)).tolist()[0]
        argmax = prediction.index(max(prediction))
        if argmax == tag_index:
            predictions.append(1)
        else:
            predictions.append(0)
    return predictions
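
Since predict_by_tag() appends 1 when the argmax matches the requested tag and 0 otherwise, the mean of the returned list is the hit rate for that tag. A small sketch with placeholder arguments:

hits = predict_by_tag("model", "images/cats", "cat")  # hypothetical paths and tag
hit_rate = sum(hits) / float(len(hits)) if hits else 0.0
print("hit rate: %.3f" % hit_rate)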
Example #12
                                   subwords="phones",
                                   min_occ_count=config.dev_min_occ_count,
                                   min_seg_dur=config.dev_min_seg_dur,
                                   stack_frames=config.stack_frames,
                                   batch_size=config.dev_batch_size,
                                   subwords_to_ids=subwords_to_ids)
        datasets.append(this_set)

    net = net.MultiViewRNN(config=config,
                           feat_dim=datasets[0].feat_dim,
                           num_subwords=len(subwords_to_ids.keys()),
                           loss_fun=None,
                           use_gpu=True)
    # load net
    net.set_savepath(config.ckpt_dir, "net")
    net.load(tag='ft')
    net.eval()

    # get language scores and embeddings

    for lang_id in range(n_lang):

        this_lang = config.main_dev_language_list[lang_id]

        embs1, ids1 = [], []
        embs2, ids2 = [], []

        with torch.no_grad():

            for batch in datasets[lang_id].loader:
                ids = batch.pop("ids")
Example #13
import os
import sys

import numpy as np

# It's very important to put this import before keras, as explained here:
# "Loading tensorflow before scipy.misc seems to cause imread to fail #1541"
# https://github.com/tensorflow/tensorflow/issues/1541
import scipy.misc

import cv2

import net
import dataset


n = 224

model_prefix, = sys.argv[1:]

print "loading neural network"
model, tags = net.load(model_prefix)
net.compile(model)
print "done"

print "compiling predictor function" # to avoid the delay during video capture.
_ = model.predict(np.zeros((1, 3, n, n), dtype=np.float32), batch_size=1)
print "done"

cascade_filename = "haarcascade_frontalface_default.xml"
assert os.path.isfile(cascade_filename), "face detector model haarcascade_frontalface_default.xml must be in the current directory"
faceCascade = cv2.CascadeClassifier(cascade_filename)
font = cv2.FONT_HERSHEY_SIMPLEX

video_capture = cv2.VideoCapture(0)

while True:
Example #14
import numpy as np
import matplotlib.pyplot as plt

import net


def x_y(net):
    # build index grids over both axes of the loaded matrix
    x = []
    y = []
    for i in range(net.shape[0]):
        x.append(i)
    for i in range(net.shape[1]):
        y.append(i)
    X, Y = np.meshgrid(np.array(x), np.array(y))
    return X, Y


def th_D(net, x, y):
    z = []
    ax = plt.axes(projection='3d')
    for i in range(net.shape[1]):
        z_ = []
        for j in range(net.shape[0]):
            if net[j][i] == -1:
                z_.append(0)
            else:
                z_.append(pro())
        z.append(z_)
    z = np.array(z)
    ax.plot_surface(x, y, z, cmap='rainbow')
    plt.show()


if __name__ == '__main__':
    path = 'D:/python/complex network/2010120120101231'
    nvdi = net.load(path)
    x, y = x_y(nvdi)
    th_D(nvdi, x, y)
Example #15
    if vis_filename is not None:
        bucket_size = 10
        image_size = n // 4 # right now that's 56
        vis_image_size = nb_classes * image_size * bucket_size
        vis_image = 255 * np.ones((vis_image_size, vis_image_size, 3), dtype='uint8')
        example_counts = defaultdict(int)
        for (predicted_tag, actual_tag, normalized_image) in zip(y_pred, y_test, X_test):
            example_count = example_counts[(predicted_tag, actual_tag)]
            if example_count >= bucket_size**2:
                continue
            image = dataset.reverse_preprocess_input(normalized_image)
            image = image.transpose((1, 2, 0))
            image = scipy.misc.imresize(image, (image_size, image_size)).astype(np.uint8)
            tilepos_x = bucket_size * predicted_tag
            tilepos_y = bucket_size * actual_tag
            tilepos_x += example_count % bucket_size
            tilepos_y += example_count // bucket_size
            pos_x, pos_y = tilepos_x * image_size, tilepos_y * image_size
            vis_image[pos_y:pos_y+image_size, pos_x:pos_x+image_size, :] = image
            example_counts[(predicted_tag, actual_tag)] += 1
        vis_image[::image_size * bucket_size, :] = 0
        vis_image[:, ::image_size * bucket_size] = 0
        scipy.misc.imsave(vis_filename, vis_image)


model, tags_from_model = net.load("model")
assert tags == tags_from_model
net.compile(model)

evaluate(model, "classifier.png")
Example #16
import os
import sys

import numpy as np

# It's very important to put this import before keras, as explained here:
# "Loading tensorflow before scipy.misc seems to cause imread to fail #1541"
# https://github.com/tensorflow/tensorflow/issues/1541
import scipy.misc

import cv2

import net
import dataset

n = 224

model_prefix, = sys.argv[1:]

print "loading neural network"
model, tags = net.load("model")
net.compile(model)
print "done"

print "compiling predictor function"  # to avoid the delay during video capture.
_ = model.predict(np.zeros((1, 3, n, n), dtype=np.float32), batch_size=1)
print "done"

cascade_filename = "haarcascade_frontalface_default.xml"
assert os.path.isfile(
    cascade_filename
), "face detector model haarcascade_frontalface_default.xml must be in the current directory"
faceCascade = cv2.CascadeClassifier(cascade_filename)
font = cv2.FONT_HERSHEY_SIMPLEX

video_capture = cv2.VideoCapture(0)
Example #17
                    ),
                    row,
                    range(len(row)),
                )),
            PIXELS,
            range(len(PIXELS)),
        ))


# Network ----------------------------------------------------------------------
# The neural network for the output on the right.

srcPath = os.path.dirname(os.path.realpath(__file__))
netPath = os.path.join(srcPath, '..', 'data', 'cache', 'final-net.pkl')

NET = nn.load(netPath)
KEY_DIGIT = '-DIGIT-'


def calcNetOutput():
    return nn.calc(NET, np.array(PIXELS).flatten())


def getIndexOfHighest(values: list[float]):
    """Returns the index of the highest value in a list."""
    if len(values) <= 0:
        return -1

    result = 0
    highest = values[0]
    for i in range(len(values)):
Example #18
# show how many epochs there were in the end
TRAIN_FINAL.describe()

# %% [markdown]
#  ## Analyzing the Final Neural Network

# %%
FINAL_NET = TRAIN_FINAL.iloc[-1].net

# %%
# store final network to cache
nn.save(FINAL_NET, os.path.join(DATA_PATH, 'final-net.pkl'))

# %%
# load final network from cache
FINAL_NET = nn.load(os.path.join(DATA_PATH, 'final-net.pkl'))

# %%
# show the average error per digit set

net = FINAL_NET
result = {
    'kind': [],
    'error': [],
}

for kind in dg.ALL_KINDS:
    digits = dg.getDigits(kinds={kind})
    inOutputs = dg.extractInputAndOutput(digits)
    error = nn.calcBatchError(net, inOutputs['input'], inOutputs['output'])