Example #1
def upload():
    print('uploading...')
    fin = request.files['file']
    data = fin.read()
    user_id = request.form['openid']
    user_dir = get_user_dir(user_id)
    if not os.path.isdir(user_dir):
        os.mkdir(user_dir)  # TODO: delete zombie cache

    # Decode the uploaded bytes into a BGR image and run the classifier.
    img = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
    predicted = ModelWrapper.predict(img).decode('utf-8')
    with open(ASSETS_DIR + ASSETS_JSON_FILE) as assets_file:
        assets = json.load(assets_file)
    resp_dict = assets[predicted]
    resp_dict.update({'predicted': predicted})
    resp = flask.jsonify(resp_dict)
    print(predicted)
    print(resp)

    # Cache the uploaded image and the prediction for this user.
    with open(user_dir + CACHE_IMAGE_FILE, 'wb') as fout:
        fout.write(data)
    json_data = {
        'predicted': predicted,
        'description': resp_dict['description']
    }
    with open(user_dir + CACHE_JSON_FILE, 'w') as fout:
        json.dump(json_data, fout)
    return resp
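A minimal sketch of calling a handler like the one above, assuming it is registered at POST /upload on a local Flask server (the URL and file name here are assumptions; the field names match request.files['file'] and request.form['openid']):

import requests

with open('test.jpg', 'rb') as f:
    resp = requests.post(
        'http://localhost:5000/upload',
        files={'file': f},            # consumed via request.files['file']
        data={'openid': 'user-123'},  # consumed via request.form['openid']
    )
print(resp.json())  # asset fields plus the 'predicted' label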
Example #2
    def get(self, id):
        # `data` is assumed to be a module-level pandas DataFrame of
        # customers; note the pickled model is reloaded on every request.
        m = ModelWrapper("pickle_model.pkl")

        customer = data[data.cuid == id]

        if len(customer) == 0:
            print("Customer not found")
            return {"conv": 0, "revenue": 0}

        print("Customer found")
        res = m.predict(customer)

        print("res[0]", res[0], "res[1]", res[1])

        mes = metrics(customer)

        return {
            "conv": res[0][0],
            "revenue": str(res[1].values[0]),
            "message": mes
        }
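For context, a resource with a get(self, id) method like the one above is typically mounted with Flask-RESTful, roughly as in this sketch (the class name and route are assumptions):

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class Customer(Resource):
    def get(self, id):
        # Placeholder body; the real lookup and prediction live above.
        return {"conv": 0, "revenue": 0}

api.add_resource(Customer, '/customer/<string:id>')

if __name__ == '__main__':
    app.run(debug=True)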
Example #3

model.prepare_for_prediction()

text_raw = ["这本书不错"]  # sample input ("This book is pretty good")
"""
# Alternatively, read the raw texts from the first column of an Excel sheet:
work_book = xlrd.open_workbook(file_raw)
data_sheet = work_book.sheets()[0]
text_raw = data_sheet.col_values(0)
"""

preds_list = []
logits_list = []

# Predict each text and take the argmax over the first row of logits.
for item in text_raw:
    out = model.predict([item])
    print(out)

    logits = out[0]
    pred = list(logits[0]).index(max(logits[0]))

    logits_list.append(logits[0])
    preds_list.append(pred)

# Pair each prediction with its input text and print the pairs.
list_pair = zip(preds_list, text_raw)
for item in list_pair:
    print(item)
"""
# Disabled: write the results out to an Excel workbook.
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('My Worksheet')
"""
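If the commented xlwt block were completed, writing each prediction next to its input text might look like this sketch (the output file name is an assumption):

import xlwt

workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet('My Worksheet')
for row, (pred, text) in enumerate(zip(preds_list, text_raw)):
    worksheet.write(row, 0, text)  # column 0: input text
    worksheet.write(row, 1, pred)  # column 1: predicted class index
workbook.save('predictions.xls')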
Example #4
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17/11/14 PM2:21
# @Author  : shaoguang.csg
# @File    : main.py

from parse_conf import DataConf, ModelConf
from model_wrapper import ModelWrapper
from utils.logger import logger
from time import time

start = time()

data_conf = DataConf()
model_conf = ModelConf()

model = ModelWrapper(data_conf=data_conf, model_conf=model_conf)
model.train()
logger.info(model.get_weight())
model.evaluate()
result = model.predict()

end = time()

logger.info('time: {}'.format(end - start))

# Timing notes: 2 cores, 1 thread: 116
#               1 core: 228
Example #5
class AbstractAgent:

    def __init__(self, environment: Env,
                 memory: AbstractMemory,
                 policy: AbstractPolicy,
                 model: K.Model,
                 optimizer: K.optimizers.Optimizer,
                 logger: Logger):
        self.environment = environment
        self.memory = memory
        self.policy = policy
        self.model = ModelWrapper(model, optimizer)
        self.logger = logger

        self.history = []

        logger.create_settings_model_file(model)
        logger.create_settings_agent_file(self)

    def _explore_env(self, batch_size: int, number_of_game: int = 10) -> Tuple[float, List[Sample]]:
        """ Return tuple of mean gain from all games and list of samples. """
        data = []
        gains = []
        state = self.environment.reset()
        previous_sample = None

        current_gain = 0
        n_sample = 0
        n_game = 0

        while n_game <= number_of_game or n_sample <= batch_size:
            q_values = self.model.predict(state)
            action = self.policy(q_values)

            next_state, reward, done, _ = self.environment.step(action)

            current_gain += reward

            current_sample = Sample(state, action, next_state, reward)
            if previous_sample is not None:
                # Link consecutive samples so successors can be followed.
                previous_sample.next_sample = current_sample
                data.append(previous_sample)
            previous_sample = current_sample

            if done:
                # Also append the terminal sample of the episode.
                data.append(current_sample)
                gains.append(current_gain)
                current_gain = 0
                previous_sample = None
                state = self.environment.reset()
                n_game += 1
            else:
                state = next_state
            n_sample += 1

        self.environment.close()
        return np.mean(gains), rand.sample(data, batch_size)

    def learn(self, epochs: int,
              batch_size_in_step: int,
              min_n_game_in_exploration: int,
              batch_size_in_exploration: int,
              change_model_delay: int):
        raise NotImplementedError

    def __str__(self):
        raise NotImplementedError
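The agent above assumes a Sample record with state/action/next_state/reward fields, a next_sample link, and an is_done() test; a minimal sketch of such a class (an assumption, not the project's actual definition):

from typing import Optional

class Sample:
    def __init__(self, state, action, next_state, reward):
        self.state = state
        self.action = action
        self.next_state = next_state
        self.reward = reward
        self.next_sample: Optional["Sample"] = None  # linked by the agent

    def is_done(self) -> bool:
        # Terminal samples never get a successor linked.
        return self.next_sample is None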
Example #6
class DQN(AbstractAgent):
    def __init__(self,
                 environment: Env,
                 memory: AbstractMemory,
                 policy: AbstractPolicy,
                 model: K.Model,
                 logger: Logger,
                 gamma: float,
                 optimizer: K.optimizers.Optimizer,
                 n_step: int = 1):

        # self.model is set by AbstractAgent.__init__ below.
        self.current_model = None

        self.gamma = gamma
        self.n_step = n_step

        super(DQN, self).__init__(environment=environment,
                                  memory=memory,
                                  policy=policy,
                                  model=model,
                                  optimizer=optimizer,
                                  logger=logger)

    def _bellman_equation(self, batch: List[Sample]) -> np.ndarray:
        state = np.array([sample.state for sample in batch])
        q_values = self.current_model.predict(state)

        for idx in range(q_values.shape[0]):
            q_values[idx][batch[idx].action] = batch[idx].reward
            if not batch[idx].is_done():
                # Bootstrap with the best next-state value from the
                # periodically frozen copy held in self.model.
                q_next = np.max(self.model.predict(batch[idx].next_state)[0])
                q_values[idx][batch[idx].action] += self.gamma * q_next

        return q_values

    def learn(self, epochs: int, batch_size_in_step: int,
              min_n_game_in_exploration: int, batch_size_in_exploration: int,
              change_model_delay: int):

        self.model.compile()
        self.current_model = self.model.clone()
        self.current_model.compile()

        eval_score, starting_experience = self._explore_env(self.memory.maxlen)
        self.memory.add(starting_experience)

        for epoch in tqdm(range(epochs), desc='Learning in progress: '):

            if epoch % change_model_delay == 0:
                self.model = self.current_model.clone()
                self.model.compile()
                if isinstance(self.memory, PrioritizedExperienceReplay):
                    self.memory.update_model(self.model)
                eval_score, batch = self._explore_env(
                    batch_size_in_exploration, min_n_game_in_exploration)
                self.memory.add(batch)
            batch = self.memory.sample(batch_size_in_step)

            q_values = self._bellman_equation(batch)
            state = np.array([sample.state for sample in batch])
            loss = self.current_model.fit(state, q_values)
            self.policy.update()
            self.logger.add_event({
                'loss_value': loss,
                'mean_gain': eval_score,
                'epoch': epoch
            })

    def __str__(self):
        return ("Agent: " + self.__class__.__name__ + "\n\n" +
                "Discount value: " + str(self.gamma) + "\n" +
                "N-step: " + str(self.n_step) + "\n\n" +
                "Environment:\n" + str(self.environment) + "\n\n" +
                "Memory:\n" + str(self.memory) + "\n" +
                "Policy:\n" + str(self.policy))
Example #7
    # Snippet begins inside a block that (re)builds the cached test set.
    test_set_data = process_data.process_test_set_data(
        img_size, test_set_raw_data_dir, test_set_processed_data_path)
    print('Data processed!')

fig = plt.figure()

for num, data in enumerate(test_set_data[:12]):
    # cat: [1,0]
    # dog: [0,1]

    img_data = data[0]
    img_num = data[1]

    y = fig.add_subplot(3, 4, num + 1)

    model_out = model.predict([img_data.reshape(img_size, img_size, 1)])[0]

    if np.argmax(model_out) == 1:
        str_label = 'Dog'
    else:
        str_label = 'Cat'

    y.imshow(img_data, cmap='gray')
    plt.title(str_label)
    y.axes.get_xaxis().set_visible(False)
    y.axes.get_yaxis().set_visible(False)
plt.show()

with open('submission-file.csv', 'w') as f:
    f.write('id,label\n')
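The snippet is cut off after writing the CSV header; a plausible completion in the same style (a sketch, using the cat=[1,0]/dog=[0,1] convention from the comments above):

with open('submission-file.csv', 'w') as f:
    f.write('id,label\n')
    for data in test_set_data:
        img_data, img_num = data[0], data[1]
        model_out = model.predict([img_data.reshape(img_size, img_size, 1)])[0]
        f.write('{},{}\n'.format(img_num, model_out[1]))  # probability of dog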