def network(companyname):
	"""Build the prediction graph for one company and train/run it.

	NOTE(review): relies on module-level configuration (sequence_length,
	prediction_length, dropout, batch_size, variables_device,
	processing_device) and on tf — confirm they are defined at module
	scope before this is called.
	"""
	inputs, targets, keep_prob = define_placeholders(sequence_length,
													 prediction_length)
	company_data = load_company_data(companyname)
	weights, biases = load_variables(variables_device, companyname)
	predictions = model(inputs, weights, biases, keep_prob, companyname)
	cost = model_compilation(predictions, targets)
	init = tf.global_variables_initializer()
	run_model(init, predictions, companyname, company_data,
			  inputs, targets, keep_prob, dropout, cost,
			  batch_size, prediction_length, sequence_length,
			  processing_device)
# Beispiel #2 (Example #2) — score: 0 (scrape-artifact separator, commented out)
# Load a hand-drawn digit image, run it through the trained model, and
# print the predicted class index.
# Fix: `np` and `torch` were used below but never imported in this example.
import numpy as np
import torch

from PIL import Image
import test

# Convert to single-channel grayscale ('L') and flatten to match the
# model's expected flat input vector.
im = Image.open('drawing-1.png').convert('L')
im_array = np.asarray(im).ravel()
im_array = torch.from_numpy(im_array).type(torch.FloatTensor)
test.showImage(im_array)

# Add a batch dimension before the forward pass.
im_array = im_array.unsqueeze(0)
out = test.model(im_array)
_, pred = torch.max(out.data, 1)
print(int(pred))
# Beispiel #3 (Example #3) — score: 0 (scrape-artifact separator, commented out)
def predict_mask(input, threshold):
    """Run the model on `input` and binarize its sigmoid output.

    Returns a boolean numpy array: True where the predicted probability
    exceeds `threshold`.

    NOTE(review): relies on module-level `model` and `device` — confirm
    they exist in the enclosing module.
    """
    logits = model(input.to(device))
    probs = torch.sigmoid(logits).detach().cpu().numpy()
    return probs > threshold
# Beispiel #4 (Example #4) — score: 0 (scrape-artifact separator, commented out)
def main():
    """Restore the latest checkpoint and export the graph as a TF SavedModel.

    Writes the export to './<model_name>/<model_version>' with two
    signatures: a standard REGRESS signature ('regression_value') and a
    PREDICT signature ('predict') using plain 'x'/'y' tensor aliases.

    NOTE(review): depends on a module-level `model` graph builder and on
    TensorFlow 1.x APIs (tf.placeholder, tf.Session, SavedModelBuilder).
    """
    # model config
    dtype = tf.float32
    model_name = 'model'
    model_version = 1

    len_X = 1032  # input feature width
    len_Y = 2     # output width
    # (removed unused len_XY = len_X + len_Y)

    # X/Y placeholders and model graph
    X = tf.placeholder(dtype=dtype, shape=(None, len_X))
    Y = tf.placeholder(dtype=dtype, shape=(None, len_Y))
    net, loss = model(x=X, y=Y, in_size=len_X, out_size=len_Y)

    # restore the most recent checkpoint into a fresh session
    sess = tf.Session()
    saver = tf.train.Saver()
    saver_path = './model_ckpt/'
    last_ckpt_path = tf.train.latest_checkpoint(checkpoint_dir=saver_path)
    saver.restore(sess=sess, save_path=last_ckpt_path)

    # export builder; target directory is '<name>/<version>'
    builder = tf.saved_model.builder.SavedModelBuilder(
        '{:s}/{:d}'.format(model_name, model_version))

    # wrap graph tensors as TensorInfo protos for the signatures
    regression_input = tf.saved_model.utils.build_tensor_info(X)
    regression_output = tf.saved_model.utils.build_tensor_info(net)

    # regression signature using the standard REGRESS method/keys
    regression_signatures = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={
            tf.saved_model.signature_constants.REGRESS_INPUTS:
                regression_input,
        },
        outputs={
            tf.saved_model.signature_constants.REGRESS_OUTPUTS:
                regression_output,
        },
        method_name=tf.saved_model.signature_constants.REGRESS_METHOD_NAME,
    )

    # prediction signature with simple 'x'/'y' aliases
    tensor_info_x = tf.saved_model.utils.build_tensor_info(X)
    tensor_info_y = tf.saved_model.utils.build_tensor_info(net)

    prediction_signatures = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={'x': tensor_info_x},
        outputs={'y': tensor_info_y},
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME,
    )

    # legacy init op so tables are initialized when the model is loaded
    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

    builder.add_meta_graph_and_variables(
        sess=sess, tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            'predict': prediction_signatures,
            'regression_value': regression_signatures,
        },
        legacy_init_op=legacy_init_op,
        strip_default_attrs=True,
    )

    builder.save()

    # fin
    return
# Beispiel #5 (Example #5) — score: 0 (scrape-artifact separator, commented out)
# Thicken detected edges, then collect contour bounding boxes sorted
# left-to-right by their x coordinate.
im = cv2.dilate(edges,
                cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)),
                iterations=2)

# Work on a copy: findContours may modify its input image.
new_img = copy.deepcopy(im)
contours, hierarchy = cv2.findContours(new_img, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

arr = []
i = 0
for cnt in contours:
    # boundingRect returns an (x, y, w, h) tuple
    arr.append(cv2.boundingRect(cnt))
model = test.model()
arr.sort(key=lambda box: box[0])

arrnew = []
for i in range(len(arr)):
    k = 0
    arrcpy = copy.deepcopy(arr)
    del arrcpy[i]

    for j in range(len(arrcpy)):

        if arr[i][0] > arrcpy[j][0] and arr[i][1] > arrcpy[j][1] and (
                arr[i][2] + arr[i][0]) < (arrcpy[j][2] + arrcpy[j][0]) and (
                    arr[i][3] + arr[i][0]) < (arrcpy[j][3] + arr[j][0]):
            k += 1
    if k == 0:
# Beispiel #6 (Example #6) — score: 0 (scrape-artifact separator, commented out)
import nyt
import test


def merge(techs, arr):
    """Sum per-technology scores across result lists, keyed by name.

    Args:
        techs: iterable of dicts each carrying "technology_id" and
            "technology_name".
        arr: iterable of lists of (technology_id, score) pairs.

    Returns:
        dict mapping technology_name -> total score across all lists.
        A technology whose id never appears in `arr` maps to 0 instead
        of raising KeyError (fix: the original indexed `output1`
        directly and crashed on missing ids).
    """
    # accumulate scores per stringified technology id
    totals = {}
    for lst in arr:
        for tech_id, score in lst:
            key = str(tech_id)
            totals[key] = totals.get(key, 0) + score

    # re-key by display name; default to 0 for unseen technologies
    return {
        tech["technology_name"]: totals.get(str(tech["technology_id"]), 0)
        for tech in techs
    }


# Score the technology list with both models and print the merged totals.
# NOTE(review): `technologies` is not defined in this excerpt — presumably
# loaded earlier in the full script; confirm.
out1 = nyt.model(technologies)
out2 = test.model(technologies)

# Python 2 print statement (this example is Python 2 code).
print merge(technologies, [out1, out2])
# Beispiel #7 (Example #7) — score: 0 (scrape-artifact separator, commented out)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import pandas as pd
import keras
import matplotlib.pyplot as plt

from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import test as testing

# Load the EMNIST balanced test split: no header row; column 0 is the
# label, the remaining columns are pixel values.
test = pd.read_csv("emnist-balanced-test.csv", header=None)

# Features: everything after the label column.
# NOTE(review): `np` is used here but not imported in this excerpt —
# confirm `import numpy as np` exists earlier in the full script.
x_test = test.iloc[:, 1:]
x_test = np.asarray(x_test)
model = testing.model()
model.load_weights("output/Weights.h5")

# Labels, then accumulators for the evaluation loop that follows.
# (Python 2 print statement — this example is Python 2 code.)
y_test = test.iloc[:, 0]
print np.asarray(y_test)
arr = []
correct = 0
for i in range(len(y_test)):
    x = x_test[i]
    x = x.reshape(1, 28, 28, 1).astype('float32')
    x /= 255
    model.predict(x)
    out = model.predict(x)
    arr.append(np.argmax(out))
    print i
    if arr[i] == y_test[i]: