Ejemplo n.º 1
0
    def __init__(self):
        """Initialize hyper-parameters, mean image, vocabulary and data reader."""
        self.parameter_dict = {}
        train_path = APP_ROOT + "/Data/"

        # Data file locations ("target" reuses the caption index file).
        self.parameter_dict["id2image"] = train_path + "index2img_exclude.txt"
        self.parameter_dict["id2caption"] = train_path + "index2caption.txt"
        self.parameter_dict["target"] = train_path + "index2caption.txt"
        # Model / training hyper-parameters.
        self.parameter_dict["vocab"] = 5000
        self.parameter_dict["embed"] = 300
        self.parameter_dict["hidden"] = 500
        self.parameter_dict["epoch"] = 20
        self.parameter_dict["minibatch"] = 64
        self.parameter_dict["generation_limit"] = 256
        self.parameter_dict["use_gpu"] = False
        self.parameter_dict["gpu_id"] = -1
        self.parameter_dict["choose_model"] = "Alex_Model"

        # Input size depends on the CNN variant; choose_model is hard-coded
        # above, so self.insize is always assigned here.
        if self.parameter_dict["choose_model"] == "Alex_Model":
            self.insize = 224
        elif self.parameter_dict["choose_model"] == "AlexBn_Model":
            self.insize = 227

        # FIX: use a context manager so the file handle is closed after loading.
        with open("mean.npy", 'rb') as mean_file:
            mean_image = pickle.load(mean_file)

        # Center-crop the 256x256 mean image to the model input size.
        cropwidth = 256 - self.insize
        self.start = cropwidth // 2
        self.stop = self.start + self.insize
        self.mean_image = mean_image[:, self.start:self.stop,
                                     self.start:self.stop].copy()

        # Pre-allocated minibatch buffers: image tensors and first-word ids.
        self.x_batch = np.ndarray(
            (self.parameter_dict["minibatch"], 3, self.insize, self.insize),
            dtype=np.float32)
        self.y_batch = np.ndarray((self.parameter_dict["minibatch"]),
                                  dtype=np.int32)

        # Build the target vocabulary and load the image/caption indices.
        self.trg_vocab = Vocabulary.new(
            gens.word_list(self.parameter_dict["target"]),
            self.parameter_dict["vocab"])
        self.read_data = Read_Data(self.parameter_dict["id2image"],
                                   "Data/val2014_resize",
                                   self.parameter_dict["id2caption"])
        self.read_data.load_image_list()
        self.read_data.load_caption_list()
Ejemplo n.º 2
0
 def test_load_image_list(self):
     """Smoke test: parse CLI args, then load the image and caption lists."""
     args = parse_args([
         "-i", APP_ROOT + "/../Data/index2img.txt", "-r", APP_ROOT, "-c",
         APP_ROOT + "/../Data/index2caption.txt"
     ])
     read_data = Read_Data(args.image_id_file, args.root, args.caption_file)
     read_data.load_image_list()
     # Passing criterion: no exception is raised while loading either list.
     read_data.load_caption_list()
    def __init__(self, use_gpu, gpu_id):
        """Initialize hyper-parameters, mean image, vocabulary and data reader.

        :param use_gpu: whether training should run on the GPU
        :param gpu_id: device id to use (negative means CPU)
        """
        self.parameter_dict = {}
        train_path = APP_ROOT + "/../../Chainer_Image_Caption_Neural_Network/Code/Data/"
        self.resize_image_path = APP_ROOT + "/../../Chainer_Image_Caption_Neural_Network/Code/"

        # Data file locations ("target" reuses the caption index file).
        self.parameter_dict["id2image"] = train_path + "index2img_exclude.txt"
        self.parameter_dict["id2caption"] = train_path + "index2caption.txt"
        self.parameter_dict["target"] = train_path + "index2caption.txt"
        # Model / training hyper-parameters.
        self.parameter_dict["vocab"] = 5000
        self.parameter_dict["embed"] = 300
        self.parameter_dict["hidden"] = 200
        self.parameter_dict["epoch"] = 20
        self.parameter_dict["minibatch"] = 110
        self.parameter_dict["generation_limit"] = 256
        self.parameter_dict["use_gpu"] = use_gpu
        self.parameter_dict["gpu_id"] = gpu_id
        self.parameter_dict["choose_model"] = "Alex_Model"

        # Input size depends on the CNN variant; choose_model is hard-coded
        # above, so self.insize is always assigned here.
        if self.parameter_dict["choose_model"] == "Alex_Model":
            self.insize = 224
        elif self.parameter_dict["choose_model"] == "AlexBn_Model":
            self.insize = 227

        # FIX: use a context manager so the pickle file handle is closed.
        with open("mean.npy", 'rb') as mean_file:
            mean_image = pickle.load(mean_file)

        # Center-crop the 256x256 mean image to the model input size.
        cropwidth = 256 - self.insize
        self.start = cropwidth // 2
        self.stop = self.start + self.insize
        self.mean_image = mean_image[:, self.start:self.stop, self.start:self.stop].copy()

        # Pre-allocated minibatch buffers: image tensors and first-word ids.
        self.x_batch = np.ndarray((self.parameter_dict["minibatch"], 3,
                                   self.insize, self.insize), dtype=np.float32)
        self.y_batch = np.ndarray((self.parameter_dict["minibatch"]),
                                  dtype=np.int32)

        # Build the target vocabulary and load the image/caption indices.
        self.trg_vocab = Vocabulary.new(gens.word_list(self.parameter_dict["target"]), self.parameter_dict["vocab"])
        self.read_data = Read_Data(self.parameter_dict["id2image"],
                                   "Data/val2014_resize",
                                   self.parameter_dict["id2caption"])
        self.read_data.load_image_list()
        self.read_data.load_caption_list()
class TrainCaptionAttention():
    """Train the attention-based image-caption model on image minibatches."""

    def __init__(self):
        """Initialize hyper-parameters, mean image, vocabulary and data reader."""
        self.parameter_dict = {}
        train_path = APP_ROOT + "/Data/"

        # Data file locations ("target" reuses the caption index file).
        self.parameter_dict["id2image"] = train_path + "index2img_exclude.txt"
        self.parameter_dict["id2caption"] = train_path + "index2caption.txt"
        self.parameter_dict["target"] = train_path + "index2caption.txt"
        # Model / training hyper-parameters.
        self.parameter_dict["vocab"] = 5000
        self.parameter_dict["embed"] = 300
        self.parameter_dict["hidden"] = 500
        self.parameter_dict["epoch"] = 20
        self.parameter_dict["minibatch"] = 64
        self.parameter_dict["generation_limit"] = 256
        self.parameter_dict["use_gpu"] = True
        self.parameter_dict["gpu_id"] = 0
        self.parameter_dict["choose_model"] = "Alex_Model"

        # Input size depends on the CNN variant; choose_model is hard-coded
        # above, so self.insize is always assigned here.
        if self.parameter_dict["choose_model"] == "Alex_Model":
            self.insize = 224
        elif self.parameter_dict["choose_model"] == "AlexBn_Model":
            self.insize = 227

        # FIX: use a context manager so the pickle file handle is closed.
        with open("mean.npy", 'rb') as mean_file:
            mean_image = pickle.load(mean_file)

        # Center-crop the 256x256 mean image to the model input size.
        cropwidth = 256 - self.insize
        self.start = cropwidth // 2
        self.stop = self.start + self.insize
        self.mean_image = mean_image[:, self.start:self.stop, self.start:self.stop].copy()

        # Pre-allocated minibatch buffers: image tensors and first-word ids.
        self.x_batch = np.ndarray((self.parameter_dict["minibatch"], 3,
                                   self.insize, self.insize), dtype=np.float32)
        self.y_batch = np.ndarray((self.parameter_dict["minibatch"]),
                                  dtype=np.int32)

        # Build the target vocabulary and load the image/caption indices.
        self.trg_vocab = Vocabulary.new(gens.word_list(self.parameter_dict["target"]), self.parameter_dict["vocab"])
        self.read_data = Read_Data(self.parameter_dict["id2image"],
                                   "Data/val2014_resize",
                                   self.parameter_dict["id2caption"])
        self.read_data.load_image_list()
        self.read_data.load_caption_list()

    def train(self, use_gpu, gpu_id):
        """Train the attention model one full minibatch at a time.

        :param use_gpu: whether to run on the GPU (CuPy arrays)
        :param gpu_id: device id; negative means CPU
        """
        if use_gpu:
            cuda.check_cuda_available()
        # Pick the array module: CuPy only when a GPU is requested and selected.
        xp = cuda.cupy if gpu_id >= 0 and use_gpu else np
        batch_count = 0
        for k, v in self.read_data.total_words_ids.items():
            if k in self.read_data.images_ids:
                # Load the image, move channels first, reverse channel order.
                image = np.asarray(Image.open(APP_ROOT + "/" + self.read_data.images_ids[k])).transpose(2, 0, 1)[::-1]
                # Center-crop and subtract the mean image.
                image = image[:, self.start:self.stop, self.start:self.stop].astype(np.float32)
                image -= self.mean_image

                self.x_batch[batch_count] = image
                # Target is the vocabulary id of the caption's first word.
                self.y_batch[batch_count] = self.trg_vocab.stoi(self.read_data.total_words_ids[k].split()[0])

                # FIX: train once per *full* minibatch. The original condition
                # (batch_count < minibatch) was true on every iteration, so a
                # mostly-stale buffer was trained after every single image.
                # This mirrors the `== minibatch - 1` check used by the
                # non-attention trainer in this file (which also restarts the
                # next batch at slot 1 after a reset).
                if batch_count == self.parameter_dict["minibatch"] - 1:
                    x_data = xp.asarray(self.x_batch)
                    y_data = xp.asarray(self.y_batch)

                    # NOTE(review): volatile=True disables graph construction
                    # in old-style Chainer Variables — confirm this is
                    # intended during training.
                    x = chainer.Variable(x_data, volatile=True)
                    t = chainer.Variable(y_data, volatile=True)
                    self.parameter_dict["x"] = x
                    self.parameter_dict["first_word"] = t
                    encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
                    encoderDecoderModel.train()
                    batch_count = 0
                batch_count = batch_count + 1
# encoding:utf-8

import unittest
import ddt
import pytest
import allure

from utils.father_request import Father_Request
from utils.read_data import Read_Data
from config.config import Get_Config
from test_case.test_data import data
from common.my_log import My_Log

# Load the Excel-driven test cases once at import time (consumed by @ddt.data below).
excel_test_data = Read_Data().get_excel_data(excel_path='../test_case/excel_data.xlsx', sheet_name='Sheet1')


@ddt.ddt
class Test_Excel_Class(unittest.TestCase):
    """Data-driven API tests fed from the Excel sheet loaded at module import."""

    @ddt.data(*excel_test_data)
    def test_02(self, item):
        # Send the request described by one Excel row (method, url, params).
        # NOTE(review): eval() on spreadsheet data — ensure the sheet is trusted.
        res = Father_Request().send_request(item['method'], item['url'], eval(str(item['params'])))
        # Log input (original comment: 日志输入)
        My_Log().info('================开始测试【{}】用例======【{}】==============='.format(item['case_id'], item['case_name']))
        My_Log().info('【请求url】:{}'.format(item['url']))
        My_Log().info('【请求方式】:{}'.format(item['method']))
        My_Log().info('【请求headers】:{}'.format(item['headers']))
        My_Log().info('【请求参数】:{}'.format(item['params']))
        My_Log().info('【响应参数】:{}'.format(res.text))
        My_Log().info('【Response code】:{}'.format(res.status_code))
        My_Log().info('【checkpoint】:{}'.format(item['checkpoint']))
Ejemplo n.º 6
0
import unittest
import ddt
import pytest
import allure

from utils.father_request import Father_Request
from utils.read_data import Read_Data
from config.config import Get_Config
from common.my_log import My_Log

# Load the YAML-driven test cases once at import; the 'result' key holds the case list.
yaml_test_data = Read_Data().get_yaml_data(
    yaml_path="../test_case/yaml_data.yaml")['result']

# print(yaml_test_data)


@ddt.ddt
class Test_Yaml_Class(unittest.TestCase):
    """Data-driven API tests fed from the YAML file loaded at module import."""

    def setUp(self):
        # No per-test setup required.
        pass

    def tearDown(self):
        # No per-test teardown required.
        pass

    @ddt.data(*yaml_test_data)
    def test_01(self, item):
        # Base URL comes from the [URL] section's 'server' entry of the config.
        url = Get_Config().get_config('URL', 'server')
        method = 'get'
        # NOTE(review): eval() on file-sourced params — ensure the YAML is trusted.
        res = Father_Request().send_request(method, url,
                                            eval(str(item['params'])))
        # Log input (original comment: 日志输入; method appears truncated in this view)
Ejemplo n.º 7
0
# encoding:utf-8

import unittest
import ddt
import pytest
import allure

from utils.father_request import Father_Request
from utils.read_data import Read_Data
from config.config import Get_Config
from test_case.test_data import data
from common.my_log import My_Log

# Load the YAML-driven test data; the 'result' key holds the case list.
yaml_test_data = Read_Data().get_yaml_data(
    yaml_path="../test_case/yaml_data.yaml")['result']
print(yaml_test_data)
# print('123')
# Load the Excel-driven test data from Sheet1.
excel_test_data = Read_Data().get_excel_data(
    excel_path='../test_case/excel_data.xlsx', sheet_name='Sheet1')
# print(excel_test_data)
'''
def setup_module(self):
    print("setup_module:整个.py模块只执行一次")
    print("比如:所有用例开始前只打开一次浏览器")


def teardown_module(self):
    print("teardown_module:整个.py模块只执行一次")
    print("比如:所有用例结束只最后关闭浏览器")

def setup_function(self):
 def test_load_image_list(self):
     """Smoke test: parse CLI args, then load the image and caption lists."""
     args = parse_args(["-i", APP_ROOT + "/../Data/index2img.txt", "-r", APP_ROOT,
                        "-c", APP_ROOT + "/../Data/index2caption.txt"])
     read_data = Read_Data(args.image_id_file, args.root, args.caption_file)
     read_data.load_image_list()
     # Passing criterion: no exception is raised while loading either list.
     read_data.load_caption_list()
class TrainCaption():

    def __init__(self, use_gpu, gpu_id):
        """Initialize hyper-parameters, mean image, vocabulary and data reader.

        :param use_gpu: whether training should run on the GPU
        :param gpu_id: device id to use (negative means CPU)
        """
        self.parameter_dict = {}
        train_path = APP_ROOT + "/../../Chainer_Image_Caption_Neural_Network/Code/Data/"
        self.resize_image_path = APP_ROOT + "/../../Chainer_Image_Caption_Neural_Network/Code/"

        # Data file locations ("target" reuses the caption index file).
        self.parameter_dict["id2image"] = train_path + "index2img_exclude.txt"
        self.parameter_dict["id2caption"] = train_path + "index2caption.txt"
        self.parameter_dict["target"] = train_path + "index2caption.txt"
        # Model / training hyper-parameters.
        self.parameter_dict["vocab"] = 5000
        self.parameter_dict["embed"] = 300
        self.parameter_dict["hidden"] = 200
        self.parameter_dict["epoch"] = 20
        self.parameter_dict["minibatch"] = 110
        self.parameter_dict["generation_limit"] = 256
        self.parameter_dict["use_gpu"] = use_gpu
        self.parameter_dict["gpu_id"] = gpu_id
        self.parameter_dict["choose_model"] = "Alex_Model"

        # Input size depends on the CNN variant; choose_model is hard-coded
        # above, so self.insize is always assigned here.
        if self.parameter_dict["choose_model"] == "Alex_Model":
            self.insize = 224
        elif self.parameter_dict["choose_model"] == "AlexBn_Model":
            self.insize = 227

        # FIX: use a context manager so the pickle file handle is closed.
        with open("mean.npy", 'rb') as mean_file:
            mean_image = pickle.load(mean_file)

        # Center-crop the 256x256 mean image to the model input size.
        cropwidth = 256 - self.insize
        self.start = cropwidth // 2
        self.stop = self.start + self.insize
        self.mean_image = mean_image[:, self.start:self.stop, self.start:self.stop].copy()

        # Pre-allocated minibatch buffers: image tensors and first-word ids.
        self.x_batch = np.ndarray((self.parameter_dict["minibatch"], 3,
                                   self.insize, self.insize), dtype=np.float32)
        self.y_batch = np.ndarray((self.parameter_dict["minibatch"]),
                                  dtype=np.int32)

        # Build the target vocabulary and load the image/caption indices.
        self.trg_vocab = Vocabulary.new(gens.word_list(self.parameter_dict["target"]), self.parameter_dict["vocab"])
        self.read_data = Read_Data(self.parameter_dict["id2image"],
                                   "Data/val2014_resize",
                                   self.parameter_dict["id2caption"])
        self.read_data.load_image_list()
        self.read_data.load_caption_list()

    def train(self):
        """Run minibatch training over all captioned images for every epoch.

        Images whose ids are missing from the loaded image list are skipped;
        entries that raise ValueError while loading are reported and skipped.
        The model is saved once after all epochs complete.
        """
        batch_count = 0
        # Placeholders; the actual batch tensors are set on the model per batch
        # in __call_miniatch_train.
        self.parameter_dict["x"] = []
        self.parameter_dict["first_word"] = []
        encoderDecoderModel = EncoderDecoderModel(self.parameter_dict)
        for epoch in range(self.parameter_dict["epoch"]):
            for k, v in self.read_data.total_words_ids.items():
                if k in self.read_data.images_ids:
                    try:
                        self.__get_data(k, batch_count)
                        # Train once the batch buffer is full, then restart it.
                        if batch_count == self.parameter_dict["minibatch"] - 1:
                            self.__call_miniatch_train(encoderDecoderModel, epoch)
                            batch_count = 0
                    except ValueError as e:
                        # Bad image/caption entry: report and move on.
                        # NOTE(review): `continue` skips the increment below,
                        # so the failed slot gets refilled next iteration.
                        print(str(e))
                        continue
                # NOTE(review): batch_count also advances when k has no image,
                # leaving that buffer slot stale — confirm this is intended.
                batch_count = batch_count + 1
        encoderDecoderModel.save_model()

    def __get_data(self, k, batch_count):
        """Load one image/caption pair into the minibatch buffers.

        :param k: image data index
        :param batch_count: slot in the batch buffers to fill
        """
        # Read the image, move channels to the front, and reverse the channel
        # order ([::-1] on the channel axis — RGB -> BGR, presumably).
        img_path = self.resize_image_path + "/" + self.read_data.images_ids[k]
        chw = np.asarray(Image.open(img_path)).transpose(2, 0, 1)[::-1]
        # Center-crop to the model input size, then subtract the mean image.
        cropped = chw[:, self.start:self.stop, self.start:self.stop]
        self.x_batch[batch_count] = cropped.astype(np.float32) - self.mean_image
        # The training target is the vocabulary id of the caption's first word.
        first_word = self.read_data.total_words_ids[k].split()[0]
        self.y_batch[batch_count] = self.trg_vocab.stoi(first_word)

    def __call_miniatch_train(self, encoderDecoderModel, epoch):
        """Push the current minibatch onto the model and run one train step.

        :param encoderDecoderModel: model whose id2image/first_word inputs are set
        :param epoch: current epoch number, forwarded to the model's train()
        """
        if self.parameter_dict["use_gpu"]:
            cuda.check_cuda_available()
        # FIX idiom: truth-test the flag instead of comparing `== True`.
        # CuPy is used only when a GPU is both requested and selected.
        use_gpu = self.parameter_dict["use_gpu"]
        xp = cuda.cupy if self.parameter_dict["gpu_id"] >= 0 and use_gpu else np
        x_data = xp.asarray(self.x_batch)
        y_data = xp.asarray(self.y_batch)
        # NOTE(review): volatile=True disables graph construction in old-style
        # Chainer Variables — confirm this is intended during training.
        x = chainer.Variable(x_data, volatile=True)
        t = chainer.Variable(y_data, volatile=True)
        encoderDecoderModel.id2image = x
        encoderDecoderModel.first_word = t
        encoderDecoderModel.train(epoch)