Example no. 1
def main():
    model = load_model('model.h5')
    model.load_weights("weights.h5")

    while True:
        with ai_integration.get_next_input(
                inputs_schema={"text": {
                    "type": "text"
                }}) as inputs_dict:
            # If an exception happens in this 'with' block, it will be sent back to the ai_integration library
            X_raw = [inputs_dict["text"]]

            X, word_index = tokenize_data(X_raw)

            predictions = model.predict(x=X, batch_size=1)

            is_positive = predictions[0][1] >= 0.5
            status_txt = "Positive" if is_positive else "Negative"
            # This model was originally written to handle a batch of inputs, but here it receives only one: what are the implications?
            # What is being done to X, and why?
            result_data = {
                "content-type": 'text/plain',
                "data": status_txt,
                "success": True
            }
            ai_integration.send_result(result_data)
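
# tokenize_data is not defined in this example. Below is a minimal sketch, assuming a
# Keras Tokenizer fitted on the incoming text; the helper name matches the call above,
# but the implementation and padding length are assumptions, not the original code.
# Note that fitting the tokenizer on a single inference input yields word indices that
# generally will not match the vocabulary the model was trained with, which is the
# caveat the questions above point at.
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

def tokenize_data(texts):
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(texts)                 # fits only on the texts passed in
    sequences = tokenizer.texts_to_sequences(texts)
    X = pad_sequences(sequences, maxlen=100)      # assumed maximum sequence length
    return X, tokenizer.word_index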
Example no. 2
def main():
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # str(random.randint(0, 15))

    model = load_model('model.h5')
    model.load_weights("weights.h5")

    while True:
        with ai_integration.get_next_input(inputs_schema={"text": {"type": "text"}}) as inputs_dict:
            # If an exception happens in this 'with' block, it will be sent back to the ai_integration library

            X_raw = inputs_dict["text"]

            X, word_index = tokenize_data(X_raw)

            predictions = model.predict(x=X, batch_size=1)

            is_positive = predictions[0][1] >= 0.5
            status_txt = "Positive" if is_positive else "Negative"

            result_data = {
                "content-type": 'text/plain',
                "data": status_txt,
                "success": True
            }
            ai_integration.send_result(result_data)
def initialize_model():
    with tf.Graph().as_default():
        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        model_input_path = tf.placeholder(tf.string, [])
        model_output_path = tf.placeholder(tf.string, [])
        data = tf.placeholder(tf.string, shape=[])  # defined but never used below
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.12
        config.log_device_placement = False
        config.allow_soft_placement = True
        # Use the ConfigProto built above; constructing a fresh one here would
        # silently drop the GPU memory settings.
        sess = tf.Session(config=config)
        sess.run(init)
        print('Initialized model')
        while True:
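            # Note: every TensorFlow op below (decode_image, import_graph_def,
            # encode_png, write_file) is created inside this loop, so the graph grows
            # with every request; the variant further down builds the graph once,
            # outside the loop, and only calls sess.run() per request.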
            with ai_integration.get_next_input(inputs_schema={
                "image": {
                "type": "image"
                }
            }) as inputs_dict:
                print("post sess declare")
                image = inputs_dict["image"]
                image = [tf.io.decode_image(image, dtype=tf.uint8, channels=3)]
                image = tf.cast(image, tf.float32)
                with tf.gfile.GFile("test/4pp_eusr_pirm.pb", 'rb') as f:
                    model_graph_def = tf.GraphDef()
                    model_graph_def.ParseFromString(f.read())

                model_output = tf.import_graph_def(model_graph_def, name='model', input_map={'sr_input:0': image}, return_elements=['sr_output:0'])[0]
                model_output = model_output[0, :, :, :]
                model_output = tf.round(model_output)
                model_output = tf.clip_by_value(model_output, 0, 255)
                model_output = tf.cast(model_output, tf.uint8)
                image = tf.image.encode_png(model_output)
                write_op = tf.write_file(model_output_path, image)
                result_data = {"content-type": 'text/plain',
                               "data": None,
                               "success": False,
                               "error": None}
                out = 'dummy.png'
                output_path = os.path.join('SR', out)
                input_path = os.path.join('LR', 'bleh.png')
                print('- %s -> %s' % (input_path, output_path))
                sess.run([write_op], feed_dict={model_input_path:input_path, model_output_path:output_path})
                img_file = Image.open(output_path)
                imgbytes = save_image_in_memory(img_file)
                output_img_bytes = imgbytes
                print('Done')
                result_data["data"] = output_img_bytes
                result_data["content-type"] = 'image/jpeg'
                result_data["success"] = True
                result_data["error"] = None
                os.remove(output_path)
                print('Finished inference')
                ai_integration.send_result(result_data)
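
# save_image_in_memory is not defined in these examples. A minimal sketch for the usage
# above, assuming it simply serializes a PIL image to in-memory JPEG bytes (format and
# signature are assumptions; the style-transfer example further down passes a raw array
# and a data_format argument, so its real helper clearly does more):
import io

def save_image_in_memory(pil_image):
    buffer = io.BytesIO()
    pil_image.convert('RGB').save(buffer, format='JPEG')  # JPEG cannot carry alpha
    return buffer.getvalue()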
def initialize_model():
    with tf.Graph().as_default():
        init = tf.global_variables_initializer()
        config = tf.ConfigProto()
        with tf.gfile.GFile("test/4pp_eusr_pirm.pb", 'rb') as f:
            model_graph_def = tf.GraphDef()
            model_graph_def.ParseFromString(f.read())

        config.gpu_options.per_process_gpu_memory_fraction = 0.8
        sess = tf.Session(config=config)
        sess.run(init)

        input_image_bytes_placeholder = tf.placeholder(tf.string, name='input_image_bytes_placeholder')
        input_image = [tf.image.decode_image(input_image_bytes_placeholder, dtype=tf.uint8, channels=3)]
        input_image = tf.cast(input_image, tf.float32)

        model_output = tf.import_graph_def(model_graph_def, name='model', input_map={'sr_input:0': input_image},
                                           return_elements=['sr_output:0'])[0]
        model_output = model_output[0, :, :, :]
        model_output = tf.round(model_output)
        model_output = tf.clip_by_value(model_output, 0, 255)
        model_output = tf.cast(model_output, tf.uint8)
        output_image_op = tf.image.encode_jpeg(model_output, chroma_downsampling=False)

        print('Initialized model')
        while True:
            with ai_integration.get_next_input(inputs_schema={
                "image": {
                    "type": "image"
                }
            }) as inputs_dict:
                input_image_bytes = inputs_dict["image"]

                result_data = {"content-type": 'text/plain',
                               "data": None,
                               "success": False,
                               "error": None}

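                # Feed the raw encoded image bytes into the named placeholder and fetch
                # the JPEG-encoded result in a single sess.run call.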
                run_output = sess.run([output_image_op],
                                      feed_dict={'input_image_bytes_placeholder:0': input_image_bytes})
                output_img_bytes = run_output[0]  # JPEG bytes from tf.image.encode_jpeg
                print('Done')
                result_data["data"] = output_img_bytes
                result_data["content-type"] = 'image/jpeg'
                result_data["success"] = True
                result_data["error"] = None
                print('Finished inference')
                ai_integration.send_result(result_data)
Example no. 5
        text = inputs_dict['text']
        if isinstance(text, bytes):
            text = text.decode('utf-8')
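        # network.predict returns a generator (tf.estimator-style predict); take the
        # first prediction and decode its token ids back to text with what is
        # presumably the GPT-2 BPE encoder, enc.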
        predictions = network.predict(
            input_fn=partial(gpt2_pred_input, text=text))

        p = next(predictions)  # return just the first one
        p = p["tokens"]
        result_text = enc.decode(p)

        result_data = {
            "content-type": 'text/plain',
            "data": result_text,
            "success": True
        }
        ai_integration.send_result(result_data)

# Train eval loop
# input_fn = inputs[params["input"]]

# while True:
#     start = time.time()
#
#     network.train(
#         input_fn=partial(input_fn, eval=False),
#         steps=params["train_steps"])
#
#     end = time.time()
#     logger.info("\nTrain loop took {:.2f}s\n".format(end - start))
#
#     eval_result = network.evaluate(
def initialize_model():
    global vgg
    global encoder
    global decoder
    global target
    global weighted_target
    global image
    global content
    global style
    global persistent_session
    global data_format
    alpha = 1.0

    graph = tf.Graph()
    # build the detection model graph from the saved model protobuf
    with graph.as_default():
        image = tf.placeholder(shape=(None, 3, None, None), dtype=tf.float32)
        content = tf.placeholder(shape=(1, 512, None, None), dtype=tf.float32)
        style = tf.placeholder(shape=(1, 512, None, None), dtype=tf.float32)

        target = adain(content, style, data_format=data_format)
        weighted_target = target * alpha + (1 - alpha) * content

        with open_weights('models/vgg19_weights_normalized.h5') as w:
            vgg = build_vgg(image, w, data_format=data_format)
            encoder = vgg['conv4_1']

        with open_weights('models/decoder_weights.h5') as w:
            decoder = build_decoder(weighted_target, w, trainable=False, data_format=data_format)

        # the default session behavior is to consume the entire GPU RAM during inference!
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.12

        # keep a session that persists across calls from external code
        persistent_session = tf.Session(graph=graph, config=config)

        persistent_session.run(tf.global_variables_initializer())

    print('Initialized model')

    while True:
        with ai_integration.get_next_input(inputs_schema={
            "style": {
                "type": "image"
            },
            "content": {
                "type": "image"
            },
        }) as inputs_dict:

            # start with failure defaults; the success fields are only filled in once inference completes
            result_data = {"content-type": 'text/plain',
                           "data": None,
                           "success": False,
                           "error": None}

            print('Starting inference')
            start = time.time()

            content_size = 512
            style_size = 512
            crop = False
            preserve_color = False

            content_image = load_image(io.BytesIO(inputs_dict['content']), content_size, crop)
            style_image = load_image(io.BytesIO(inputs_dict['style']), style_size, crop)

            if preserve_color:
                style_image = coral(style_image, content_image)
            style_image = prepare_image(style_image)
            content_image = prepare_image(content_image)
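            # Four sess.run calls: encode the style and content images with the shared
            # VGG encoder, blend their feature statistics with AdaIN (the 'target' op),
            # then decode the blended features back into an image.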
            style_feature = persistent_session.run(encoder, feed_dict={
                image: style_image[np.newaxis, :]
            })
            content_feature = persistent_session.run(encoder, feed_dict={
                image: content_image[np.newaxis, :]
            })
            target_feature = persistent_session.run(target, feed_dict={
                content: content_feature,
                style: style_feature
            })

            output = persistent_session.run(decoder, feed_dict={
                content: content_feature,
                target: target_feature
            })

            output_img_bytes = save_image_in_memory(output[0], data_format=data_format)

            result_data["content-type"] = 'image/jpeg'
            result_data["data"] = output_img_bytes
            result_data["success"] = True
            result_data["error"] = None

            print('Finished inference and it took ' + str(time.time() - start))
            ai_integration.send_result(result_data)
def main():
    if a.seed is None:
        a.seed = random.randint(0, 2**31 - 1)

    tf.set_random_seed(a.seed)
    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test or export mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    for k, v in a._get_kwargs():
        print(k, "=", v)

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    examples = load_examples()
    print("examples count = %d" % examples.count)

    # inputs and targets are [batch_size, height, width, channels]
    model = create_model(examples.inputs, examples.targets)

    # undo colorization splitting on images that we use for display/output
    if a.lab_colorization:
        if a.which_direction == "AtoB":
            # inputs is brightness, this will be handled fine as a grayscale image
            # need to augment targets and outputs with brightness
            targets = augment(examples.targets, examples.inputs)
            outputs = augment(model.outputs, examples.inputs)
            # inputs can be deprocessed normally and handled as if they are single channel
            # grayscale images
            inputs = deprocess(examples.inputs)
        elif a.which_direction == "BtoA":
            # inputs will be color channels only, get brightness from targets
            inputs = augment(examples.inputs, examples.targets)
            targets = deprocess(examples.targets)
            outputs = deprocess(model.outputs)
        else:
            raise Exception("invalid direction")
    else:
        inputs = deprocess(examples.inputs)
        targets = deprocess(examples.targets)
        outputs = deprocess(model.outputs)

    def convert(image):
        if a.aspect_ratio != 1.0:
            # upscale to correct aspect ratio
            size = [CROP_SIZE, int(round(CROP_SIZE * a.aspect_ratio))]
            image = tf.image.resize_images(
                image, size=size, method=tf.image.ResizeMethod.BICUBIC)

        return tf.image.convert_image_dtype(image,
                                            dtype=tf.uint8,
                                            saturate=True)

    # reverse any processing on images so they can be written to disk or displayed to user
    with tf.name_scope("convert_inputs"):
        converted_inputs = convert(inputs)

    with tf.name_scope("convert_targets"):
        converted_targets = convert(targets)

    with tf.name_scope("convert_outputs"):
        converted_outputs = convert(outputs)

    with tf.name_scope("encode_images"):
        display_fetches = {
            "paths":
            examples.paths,
            "inputs":
            tf.map_fn(tf.image.encode_png,
                      converted_inputs,
                      dtype=tf.string,
                      name="input_pngs"),
            "targets":
            tf.map_fn(tf.image.encode_png,
                      converted_targets,
                      dtype=tf.string,
                      name="target_pngs"),
            "outputs":
            tf.map_fn(tf.image.encode_png,
                      converted_outputs,
                      dtype=tf.string,
                      name="output_pngs"),
        }
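    # tf.map_fn applies tf.image.encode_png to each image in the batch, yielding string
    # tensors of PNG bytes for the inputs, targets and outputs so they can be written
    # to disk by save_images().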

    # summaries
    with tf.name_scope("inputs_summary"):
        tf.summary.image("inputs", converted_inputs)

    with tf.name_scope("targets_summary"):
        tf.summary.image("targets", converted_targets)

    with tf.name_scope("outputs_summary"):
        tf.summary.image("outputs", converted_outputs)

    with tf.name_scope("predict_real_summary"):
        tf.summary.image(
            "predict_real",
            tf.image.convert_image_dtype(model.predict_real, dtype=tf.uint8))

    with tf.name_scope("predict_fake_summary"):
        tf.summary.image(
            "predict_fake",
            tf.image.convert_image_dtype(model.predict_fake, dtype=tf.uint8))

    tf.summary.scalar("discriminator_loss", model.discrim_loss)
    tf.summary.scalar("generator_loss_GAN", model.gen_loss_GAN)
    tf.summary.scalar("generator_loss_L1", model.gen_loss_L1)

    for var in tf.trainable_variables():
        tf.summary.histogram(var.op.name + "/values", var)

    for grad, var in model.discrim_grads_and_vars + model.gen_grads_and_vars:
        tf.summary.histogram(var.op.name + "/gradients", grad)

    with tf.name_scope("parameter_count"):
        parameter_count = tf.reduce_sum(
            [tf.reduce_prod(tf.shape(v)) for v in tf.trainable_variables()])

    saver = tf.train.Saver(max_to_keep=1)

    logdir = a.output_dir if (a.trace_freq > 0 or a.summary_freq > 0) else None
    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=0, saver=None)
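    # The Supervisor only manages session creation and initialization here; automatic
    # summary writing and checkpointing are disabled (save_summaries_secs=0, saver=None).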
    with sv.managed_session() as sess:
        print("parameter_count =", sess.run(parameter_count))

        if a.checkpoint is not None:
            print("loading model from checkpoint")
            checkpoint = tf.train.latest_checkpoint(a.checkpoint)
            saver.restore(sess, checkpoint)

        max_steps = 2**32
        if a.max_epochs is not None:
            max_steps = examples.steps_per_epoch * a.max_epochs
        if a.max_steps is not None:
            max_steps = a.max_steps


        if a.mode == "test":
            # testing
            # at most, process the test data once
            start = time.time()
            max_steps = min(examples.steps_per_epoch, max_steps)
            for step in range(max_steps):
                results = sess.run(display_fetches)
                filesets = save_images(results)
                for i, f in enumerate(filesets):
                    print("evaluated image", f["name"])
                index_path = append_index(filesets)
            print("wrote index at", index_path)
            print("rate", (time.time() - start) / max_steps)

        while True:
            with ai_integration.get_next_input(
                    inputs_schema={"image": {
                        "type": "image"
                    }}) as inputs_dict:
                # If an exception happens in this 'with' block, it will be sent back to the ai_integration library
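                # This serving loop is a stub: it acknowledges each request with a
                # hard-coded placeholder instead of running display_fetches on the
                # supplied image.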

                result_data = {
                    "content-type": 'text/plain',
                    "data": "Fake output",
                    "success": True
                }
                ai_integration.send_result(result_data)
Example no. 8
                                           PIL.Image.open(guide_image),
                                           end=endparam)

            frame = deepdream_guided(
                net,
                frame,
                iter_n=iterations,
                step_size=stepsize,
                octave_n=octaves,
                octave_scale=octave_scale,
                jitter=jitter,
                end=endparam,
                objective_fn=objective_guide,
                guide_features=guide_features,
            )

        imgByteArr = io.BytesIO()
        PIL.Image.fromarray(np.uint8(frame)).save(imgByteArr,
                                                  format='JPEG',
                                                  subsampling=0,
                                                  quality=98)
        imgByteArr = imgByteArr.getvalue()

        result = {
            'content-type': 'image/jpeg',
            'data': imgByteArr,
            'success': True,
            'error': None
        }
        ai_integration.send_result(result)