Example #1
def predict():

    data = {'success': False}

    if request.files.get('image'):

        now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))

        image = request.files['image'].read()
        image = Image.open(io.BytesIO(image))
        image = image_transform(InputSize)(image).numpy()
        # Store the array in C (row-major) order
        image = image.copy(order="C")
        # Generate an ID for this image
        k = str(uuid.uuid4())
        d = {"id": k, "image": base64_encode_image(image)}
        # print(d)
        db.rpush(ImageQueue, json.dumps(d))
        # Poll until the model server writes a result for this ID
        while True:
            # Fetch the output for this image ID
            output = db.get(k)
            # print(output)
            if output is not None:
                output = output.decode("utf-8")
                data["predictions"] = json.loads(output)
                db.delete(k)
                break
            time.sleep(ClientSleep)
        data["success"] = True
    return jsonify(data)
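Example #1 only shows the web side of this Redis queue pattern; for context, a minimal sketch of the matching worker loop is given below. The queue key "image_queue", the base64_decode_image helper, the batch size, and the Keras-style model object are all assumptions here, not part of the original snippet.

# Sketch of the model-server worker that drains the queue filled by predict().
# Every name introduced here (queue key, decode helper, model) is an assumption.
import base64
import json
import time

import numpy as np
import redis

db = redis.StrictRedis(host="localhost", port=6379, db=0)

def base64_decode_image(a, shape, dtype=np.float32):
    # Assumed inverse of base64_encode_image: base64 of raw C-order bytes -> array.
    return np.frombuffer(base64.b64decode(a), dtype=dtype).reshape(shape)

def serve_queue(model, input_shape, batch_size=32, sleep=0.25):
    while True:
        # Take up to batch_size pending requests from the front of the queue.
        queue = db.lrange("image_queue", 0, batch_size - 1)
        ids, batch = [], []
        for item in queue:
            item = json.loads(item.decode("utf-8"))
            ids.append(item["id"])
            batch.append(base64_decode_image(item["image"], input_shape))
        if ids:
            preds = model.predict(np.stack(batch))
            for k, p in zip(ids, preds):
                # Store the prediction under the request ID so predict() can fetch it.
                db.set(k, json.dumps(p.tolist()))
            # Drop the processed entries from the queue.
            db.ltrim("image_queue", len(ids), -1)
        time.sleep(sleep)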
Example #2
def baidu_image2str_url(uuid_url_dict={}, types="characters"):
    from aip import AipOcr
    client = AipOcr(Baidu_APP_ID, Baidu_API_KEY,
                    Baidu_SECRET_KEY)  # create a connection
    options = {}
    options["probability"] = "true"
    uuid_text_dict = {}
    for uuid, url in uuid_url_dict.items():
        ret = ""
        resp = client.basicGeneralUrl(url, options)  # url
        # print(resp)
        if "error_msg" in resp:
            print("url recognition failed! Using local model.  url: " + url)
            # print(resp)
            if resp["error_msg"] == "url response invalid" or resp[
                    "error_msg"] == "image size error":
                #request for the image of url, convert to valid format
                image_path = image_transform(url_img_download(url))
                print(image_path)
                uuid_text_dict[uuid] = baidu_image2str_local(image_path)
            else:
                uuid_text_dict[uuid] = ""
        else:
            for tex in resp["words_result"]:
                if tex["probability"]["average"] > 0.85:
                    ret = ret + tex["words"]
            # print(ret)
            uuid_text_dict[uuid] = ret
    return uuid_text_dict
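A minimal usage sketch for the function above; the UUID keys and image URLs are placeholders, not values from the original project.

# Hypothetical call; keys and URLs are placeholders.
urls = {
    "uuid-0001": "https://example.com/scan_001.png",
    "uuid-0002": "https://example.com/scan_002.png",
}
for k, text in baidu_image2str_url(urls).items():
    # An empty string means the OCR failed or no word cleared the 0.85 probability threshold.
    print(k, "->", text)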
Example #3
def set_info(self, data):
    self.uuid_label.setText(data['uuid'])
    if os.path.exists(data['local_path']):
        img_path = data['local_path']
    else:
        img_path = utils.image_transform(
            utils.url_img_download(data['src']))
    self.set_image(img_path)
    if data['resolved']:
        self.text_box.setText(data['plain_text'])
    else:
        self.text_box.setText('')
    self.text_box.setFocus()
Example #4
def get_next_batch_data(self):
    if not self.getting:
        self.getting = True
        data = self.driver.load_unchecked_img(10, self.size - self.checked)
        count = 0
        for d in data:
            count = count + 1
            img_path = utils.image_transform(
                utils.url_img_download(d['src']))
            d['local_path'] = img_path
            if 'plain_text' not in d:
                d['plain_text'] = ''
            self.data.append(d)
        self.size = self.size + count
        self.getting = False
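Examples #2, #3 and #4 all feed utils.url_img_download into utils.image_transform, but the download helper itself is not shown in this listing. A plausible sketch follows, assuming a requests-based download into a local directory; the signature, file naming, and error handling are guesses.

import os
import uuid

import requests

def url_img_download(url, save_dir="downloads", timeout=10):
    # Download the image at `url` and return the local file path.
    # Sketch only: the real helper's naming scheme and format handling are unknown.
    os.makedirs(save_dir, exist_ok=True)
    resp = requests.get(url, timeout=timeout)
    resp.raise_for_status()
    path = os.path.join(save_dir, str(uuid.uuid4()) + ".jpg")
    with open(path, "wb") as f:
        f.write(resp.content)
    return path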
Example #5
    e = utils.create_experiment("experiments/dcgan4_celeba")

    # Hyperparameters
    e.params["shuffle"]     = True # Shuffle the contents in the dataset
    e.params["num_workers"] = 4    # Number of worker threads for dataloader
    e.params["batch_size"]  = 128  # Size of one batch during training
    e.params["nc"]          = 3    # Number of channels in the training images (color RGB uses 3 channels)
    e.params["nz"]          = 100  # Size of z latent vector (i.e. size of generator input)
    e.params["im_size"]     = 64   # Size of the images discriminated and generated.
    e.params["num_epochs"]  = e.input_int("number of epochs", 5) # Number of epochs 
    e.params["lr"]          = 0.0002       # Learning rate for optimizer
    e.params["betas"]       = (0.5, 0.999) # Betas hyperparameter for Adam optimizers
    e.params["patience"]    = 7 # Number of epochs to wait before early stopping

    # Set up the CelebA dataset
    transform = utils.image_transform(e.params["im_size"])
    data_dir = "data/celeba/"

    # WARNING DOWNLOAD IS 1.4 GB in size!!! 
    # train_dataset = datasets.CelebA(data_dir, split="train", download=False, transform=transform)

    train_dataset = datasets.ImageFolder(data_dir, transform=transform)
    e.setup_dataloader((train_dataset, None, None))
    
    # Plot a subset of the training dataset
    utils.plot_data_subset(e.fname("dataset_image.png"), train_dataset, show_labels=False)

    # Setup the two models
    e.generator = models.dcgan4_generator(e)
    e.discriminator = models.dcgan4_discriminator(e)
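utils.image_transform(im_size) here, image_transform(InputSize) in Example #1, and the no-argument calls in Example #6 below all point at a torchvision preprocessing pipeline parameterized by image size. A sketch of what such a helper typically looks like; the default size and normalization constants are assumptions, not the repository's actual values.

import torchvision.transforms as transforms

def image_transform(im_size=64):
    # Resize and center-crop to im_size, convert to a tensor, and normalize
    # to roughly [-1, 1], as is common for DCGAN-style training.
    # The default im_size and the (0.5, 0.5, 0.5) statistics are assumptions.
    return transforms.Compose([
        transforms.Resize(im_size),
        transforms.CenterCrop(im_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])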
Example #6
    fig, axes = plt.subplots(2, len(original_images))

    for i in range(len(original_images)):
        axes[0, i].imshow(original_images[i])
        axes[1, i].imshow(generated_images[i])

    plt.show()


if __name__ == '__main__':

    EPOCHS = 2
    BATCH_SIZE = 4
    ROOT = os.getcwd()
    SHOW_EVERY = 500
    TRAIN_IMAGES_FOLDER = os.path.join(ROOT, 'train2014')
    STYLE_IMAGE_FOLDER = os.path.join(ROOT, 'StyleImages')
    content_dataset = MSCOCODataset(TRAIN_IMAGES_FOLDER, image_transform())
    style_dataset = StyleImageDataset(STYLE_IMAGE_FOLDER, image_transform())
    style_transfer_net = StyleTransferNet()
    optimizer = Adam(style_transfer_net.parameters(), lr=1e-3)
    C = 100000.0
    S = 700000.0
    COLOR = 20000.0
    DEVICE = 'cuda'
    PKL_NAME = 'vangogh_YIQ'

    train(style_transfer_net, EPOCHS, BATCH_SIZE, content_dataset,
          style_dataset, optimizer, C, S, COLOR, DEVICE, PKL_NAME)