Example 1
def test_object():

    foo = Object(bar='bar',
                 baz=(1, 2, 3),
                 foo1=Object(bar='BAR', baz={
                     'one': 1,
                     'two': 2
                 }))

    assert foo.bar == 'bar'
    assert foo.baz == (1, 2, 3)
    assert foo.foo1.bar == 'BAR'
    assert foo.foo1.baz['one'] == 1
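
All of the examples in this listing exercise the same Object helper. As a point of reference, here is a minimal sketch of an attribute-style dict that would satisfy the usage shown here (keyword construction, attribute get/set, item access, update). It is an illustrative assumption, not the project's actual implementation.

class Object(dict):
    """Dict whose keys can also be read and written as attributes."""

    def __getattr__(self, name):
        # Attribute lookup falls back to the underlying dict.
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Attribute assignment writes through to the dict.
        self[name] = value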
Example 2
    def get_predict_v(self, x, score):
        # [N]
        item_pop = tf.cast(self.data.item_pop, tf.float32)
        item_pop_log = tf.log(item_pop + np.e)

        item_deg_self = tf.cast(tf.gather(self.data.item_deg_self_per_phase, x.phase), tf.float32)
        item_pop_self_log = tf.log(item_deg_self + np.e)

        if args.mode_pop == 'log':
            score = score * args.alpha_pop_base + score / item_pop_log
        elif args.mode_pop == 'log_mdeg':
            score = score * item_pop_self_log / item_pop_log
        elif args.mode_pop == 'log_mdeg_only':
            score = score * item_pop_self_log
        elif args.mode_pop == 'linear':
            item_pop = tf.cast(self.data.item_pop, tf.float32) + 1.0
            score = score * args.alpha_pop_base + score / item_pop
        elif args.mode_pop == 'log_md':
            item_pop_self_log = tf.log(item_deg_self + np.e + 10.0)
            score = score * item_pop_self_log / item_pop_log

        if args.mode_rare in {'log', 'linear', 'base'}:
            if args.mode_rare == 'log':
                rare_weight_pop = 1.0 / tf.log(tf.cast(self.data.item_pop, tf.float32) + np.e)
            elif args.mode_rare == 'linear':
                rare_weight_pop = 1.0 / tf.cast(self.data.item_pop, tf.float32)
            elif args.mode_rare == 'base':
                rare_weight_pop = 0.0
            else:
                raise Exception
            # [N]
            rare_weight = float(args.alpha_rare)
            rare_weight = rare_weight + rare_weight_pop
            rare_weight *= float(args.alpha_rare_mul)

            is_rare = self.is_rare(x)
            score = tf.where(is_rare, score * rare_weight + float(args.alpha_rare_base), score)

        score = UTILS.mask_logits(score, x.score_mask)

        tf.summary.histogram('score', score)
        top_items, top_scores = self.topk_idx(score, x)

        if args.dump_all:
            self.tmp_vars.update(all_scores=score, item_seq=x.seq, ts_seq=x.ts, q_ts=x.q_ts)
            ret = Object(user=x.user, phase=x.phase, top_items=top_items, top_scores=top_scores)
            ret.update(**self.tmp_vars)
            return ret

        return Object(user=x.user, phase=x.phase, top_items=top_items, top_scores=top_scores)
Example 3
def test_story_runner_returns_a_result_with_the_original_Fixture():

    mocker = Mocker()

    settings = Settings()
    fixture = Fixture()
    action = some_action()
    fixture.append_story(action.scenario.story)
    runner = StoryRunner()

    context = Object()
    context.browser_driver = mocker.mock()
    context.browser_driver.start_test("http://localhost")
    context.browser_driver.stop_test()
    context.settings = mocker.mock()
    context.settings.on_before_action
    mocker.result(None)
    context.settings.on_action_successful
    mocker.result(None)

    context.language = mocker.mock()
    context.language.get('given')
    mocker.result('Given')
    context.language.get('when')
    mocker.result('When')
    context.language.get('then')
    mocker.result('Then')

    with mocker:
        result = runner.run_stories(settings, fixture, context=context)

        assert result.fixture == fixture
Example 4
def test_should_catch_assertion_error():

    mocker = Mocker()

    def action_failed_method(context, *args, **kwargs):
        assert False, "bla"

    settings = Settings()
    runner = StoryRunner()
    fixture = Fixture()
    action = some_action()
    fixture.append_story(action.scenario.story)
    action.execute_function = action_failed_method

    context = Object()
    context.browser_driver = mocker.mock()
    context.browser_driver.start_test("http://localhost")
    context.browser_driver.stop_test()
    context.settings = mocker.mock()
    context.settings.on_before_action
    mocker.result(None)
    context.settings.on_action_error
    mocker.result(None)
    context.language = mocker.mock()
    context.language.get('given')
    mocker.result('Given')

    with mocker:
        result = runner.run_stories(settings=settings,
                                    fixture=fixture,
                                    context=context)

        assert isinstance(action.error, AssertionError)
        assert action.error.message == "bla"
Example 5
def test_should_handle_action_errors_successfully():

    mocker = Mocker()

    def action_failed_method(context, *args, **kwargs):
        raise ActionFailedError("bla")

    settings = Settings()
    runner = StoryRunner()
    fixture = Fixture()
    action = some_action()
    fixture.append_story(action.scenario.story)
    action.execute_function = action_failed_method

    context = Object()
    context.browser_driver = mocker.mock()
    context.browser_driver.start_test("http://localhost")
    context.browser_driver.stop_test()
    context.settings = mocker.mock()
    context.settings.on_before_action
    mocker.result(None)
    context.settings.on_action_error
    mocker.result(None)
    context.language = mocker.mock()
    context.language.get('given')
    mocker.result('Given')

    with mocker:
        result = runner.run_stories(settings=settings,
                                    fixture=fixture,
                                    context=context)

        assert fixture.get_status() == Status.Failed
Example 6
def test_should_execute_scenarios_successfully():

    mocker = Mocker()

    settings = Settings()
    runner = StoryRunner()
    fixture = Fixture()
    fixture.append_story(some_action().scenario.story)

    context = Object()
    context.browser_driver = mocker.mock()
    context.browser_driver.start_test("http://localhost")
    context.browser_driver.stop_test()
    context.settings = mocker.mock()
    context.settings.on_before_action
    mocker.result(None)
    context.settings.on_action_successful
    mocker.result(None)

    context.language = mocker.mock()
    context.language.get('given')
    mocker.result('Given')
    context.language.get('when')
    mocker.result('When')
    context.language.get('then')
    mocker.result('Then')

    with mocker:
        result = runner.run_stories(settings=settings,
                                    fixture=fixture,
                                    context=context)

        assert fixture.get_status() == Status.Successful
Example 7
    def forward(self, x):
        # user: [BS,], seq: [BS, L], ts: [BS, L]

        # [BS, k]
        seq_rep = self.get_out_rep(x)
        if args.use_bl:
            seq_rep = tf.layers.dense(seq_rep,
                                      args.dim_k,
                                      name='bl',
                                      kernel_regularizer=UTILS.l2_loss('bl'),
                                      use_bias=False)
        dropout = tf.where(self.is_on_train, args.dropout, 1.0)
        seq_rep = tf.nn.dropout(seq_rep, dropout)

        score = self.calculate_score_for_pred(x, seq_rep, self.item_emb_matrix)

        # [BS, M]
        neg = self.sample_neg(x.ans, score)
        if args.mode_loss == 'softmax':
            loss = self.get_loss_softmax(x, seq_rep, neg)
        elif args.mode_loss == 'pairwise':
            loss = self.get_loss_pairwise(x, seq_rep, neg)
        else:
            # A numeric args.mode_loss is interpreted as the softmax/pairwise mixing weight.
            a = float(args.mode_loss)
            loss_1 = self.get_loss_softmax(x, seq_rep, neg)
            loss_2 = self.get_loss_pairwise(x, seq_rep, neg)
            loss = loss_1 * a + loss_2 * (1.0 - a)

        tf.summary.scalar('loss', loss)
        train_op = UTILS.minimizer(loss)
        train_v = Object(loss=loss)

        return train_op, train_v, self.get_predict_v(x, score)
Example 8
def test_execute_action_will_not_execute_itself():

    mocker = Mocker()

    class DoSomethingRecursiveAction(ActionBase):
        regex = r'^(And )?I do "(?P<what>\w+)" stuff$'

        def execute(self, context, getter_mock, *args, **kwargs):
            self.execute_action('And I do "recursive" stuff',
                                context,
                                getter=getter_mock)

    language_getter_mock = mocker.mock()
    language_getter_mock.get(ANY)
    mocker.count(min=1, max=None)
    mocker.result('^$')

    context_mock = Object(settings=mocker.mock())
    context_mock.settings.default_culture
    mocker.result("en-us")

    with mocker:
        dosaction = DoSomethingRecursiveAction()
        args = []
        kwargs = dict(what='nothing')

        dosaction.execute(context_mock,
                          getter_mock=language_getter_mock,
                          *args,
                          **kwargs)
Example 9
def standardize_new(xt, yt, xv, yv, xte, yte, is_regression):
    ss = Object()
    xt, xte, ss.mean_x_train, ss.std_x_train = standardize(xt, xte)
    if xv is not None:
        xv = (xv - ss.mean_x_train[None, :]) / ss.std_x_train[None, :]
    if is_regression:
        yt, yte, ss.mean_y_train, ss.std_y_train = standardize(yt, yte)
        if yv is not None:
            yv = (yv - ss.mean_y_train[None]) / ss.std_y_train[None]
    return xt, yt, xv, yv, xte, yte, ss
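
The snippet above relies on a standardize helper that is not shown. A minimal sketch consistent with how it is called here (fit mean and standard deviation on the training split, apply them to both splits, and return all four values); the eps guard and dtype cast are assumptions, not the original implementation:

import numpy as np

def standardize(train, test, eps=1e-8):
    # Fit statistics on the training split only, then apply them to both splits.
    train = np.asarray(train, dtype=np.float64)
    test = np.asarray(test, dtype=np.float64)
    mean = train.mean(axis=0)
    std = train.std(axis=0) + eps  # eps avoids division by zero for constant features
    return (train - mean) / std, (test - mean) / std, mean, std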
Example 10
    def getInfo(self):
        o = Object()
        for prop, ctype in self.elaCarrier.contents._fields_:
            # should check for pointers
            try:
                o[prop] = getattr(self.elaCarrier.contents, prop)
            except Exception as e:
                print(e)
                o[prop] = ctype
        return o
Example 11
    def __init__(self, data: dataset.Data):
        self.tmp_vars = Object()

        self.data = data

        # self.save_dir = f'{utils.save_dir}/{args.run_name}'
        self.save_dir = f'{utils.save_dir}/{args.msg}'

        with self.data.tf_graph.as_default():
            tf.set_random_seed(args.seed)
            self.compile()
        self.fit_step = 0
Example 12
    def make_io(self):
        self.is_on_train = tf.placeholder(tf.bool, [], 'is_on_train')

        train_data = self.data.train_batch_repeat
        train_data_iter = train_data.make_one_shot_iterator()
        self.train_data_handle = self.sess.run(train_data_iter.string_handle())
        self.data_handle = tf.placeholder(tf.string, [], 'data_handle')
        data_iter = tf.data.Iterator.from_string_handle(
            self.data_handle,
            train_data.output_types,
            train_data.output_shapes,
        )
        self.input_dict = data_iter.get_next()
        self.input_dict = Object(**self.input_dict)
Example 13
def test_story_runner_returns_a_result():

    mocker = Mocker()

    settings = Settings()
    fixture = Fixture()
    runner = StoryRunner()
    context = Object()
    context.browser_driver = mocker.mock()
    context.browser_driver.start_test("http://localhost")
    context.browser_driver.stop_test()

    with mocker:
        result = runner.run_stories(settings, fixture, context=context)
        assert result is not None
Example 14
def make_context_and_fso_mocks(mocker):
    
    hooks_dir = ["/hooks/dir/"]
    pages_dir = ["/pages/dir/"]
    custom_actions_dir = ["/custom/actions/dir/"]
    
    context_mock = Object()
    context_mock.browser_driver = mocker.mock()
    context_mock.settings = mocker.mock()
    context_mock.settings.hooks_dir
    mocker.count(min=1, max=None)
    mocker.result(hooks_dir)
    context_mock.settings.pages_dir
    mocker.count(min=1, max=None)
    mocker.result(pages_dir)
    context_mock.settings.custom_actions_dir
    mocker.count(min=1, max=None)
    mocker.result(custom_actions_dir)
    context_mock.settings.base_url
    mocker.count(min=0, max=None)
    mocker.result("http://localhost")
    context_mock.settings.default_culture
    mocker.count(min=1, max=None)
    mocker.result("en-us")

    files = ["/some/weird/file.py"]
    fso_mock = mocker.mock()
    fso_mock.add_to_import(hooks_dir[0])
    fso_mock.add_to_import(pages_dir[0])
    fso_mock.add_to_import(custom_actions_dir[0])
    fso_mock.locate(hooks_dir[0], '*.py')
    mocker.result(files)
    fso_mock.locate(pages_dir[0], '*.py')
    mocker.result(files)
    fso_mock.locate(custom_actions_dir[0], '*.py')
    mocker.result(files)
    fso_mock.import_file(ANY)
    mocker.count(min=1, max=None)
    fso_mock.remove_from_import(custom_actions_dir[0])
    mocker.count(min=1, max=None)
    fso_mock.remove_from_import(pages_dir[0])
    mocker.count(min=1, max=None)
    fso_mock.remove_from_import(hooks_dir[0])
    mocker.count(min=1, max=None)

    return context_mock, fso_mock
Example 15
def test_action_base_can_resolve_elements_in_a_given_page():

    mocker = Mocker()

    class DoOtherThingAction(ActionBase):
        regex = "^Do other thing$"

        def execute(self, context, *args, **kwargs):
            self.element = self.resolve_element_key(context, "button",
                                                    "Something")

    context_mock = Object(current_page=mocker.mock())
    context_mock.current_page.get_registered_element("Something")
    mocker.result("btnSomething")

    with mocker:
        action = DoOtherThingAction()
        action.execute(context_mock)
        assert action.element == "btnSomething"
Example 16
def test_action_base_can_resolve_elements_using_browser_driver():

    mocker = Mocker()

    class DoOneMoreThingAction(ActionBase):
        regex = "^Do other thing$"

        def execute(self, context, *args, **kwargs):
            self.element = self.resolve_element_key(context, "button",
                                                    "Something")

    context_mock = Object(browser_driver=mocker.mock(), current_page=None)
    context_mock.browser_driver.resolve_element_key(context_mock, "button",
                                                    "Something")
    mocker.result("btnSomething")

    with mocker:
        action = DoOneMoreThingAction()
        action.execute(context_mock)
        assert action.element == "btnSomething"
Example 17
    def get_metric_v(self, x, predict_v):
        # [BS,]
        true_item = x.ans
        true_item_a1 = tf.expand_dims(true_item, -1)
        # [BS, M] vs [BS, 1] -> [BS, M]; 1 where a predicted item equals the true item
        eq = tf.cast(tf.equal(predict_v.top_items, true_item_a1), tf.int32)
        # [BS,] hit indicator: 1 if the true item appears in the top-M list
        m = tf.reduce_max(eq, -1)
        # position of the true item in the ranked list (0 when absent)
        idx = tf.cast(tf.argmax(eq, -1), tf.int32)
        # 0-based rank on a hit, -1 on a miss
        rank = idx + m - 1
        # single-relevant-item NDCG: 1 / log2(idx + 2) on a hit, 0 otherwise
        ndcg = tf.log(2.0) * tf.cast(m, tf.float32) / tf.log(2.0 + tf.cast(idx, tf.float32))
        hit_rate = tf.cast(m, tf.float32)
        ret = Object(
            ndcg=ndcg,
            hit_rate=hit_rate,
            user=x.user,
            true_item=true_item,
            phase=x.phase,
            top_items=predict_v.top_items,
            top_scores=predict_v.top_scores,
            rank=rank,
            q_ts=x.q_ts,
        )
        return ret
Example 18
    def __init__(self, carrier=None):

        self.events = Object()
        self.elaCallbacks = ELACALLBACKS()

        self.events.idle = []
        self.elaCallbacks.idle = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_void_p)(self.__idleCB__)

        self.events.connectionStatus = []
        self.elaCallbacks.connection_status = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ELACONNECTIONSTATUS,
            ctypes.c_void_p)(self.__connectionStatusCB__)

        self.events.ready = []
        self.elaCallbacks.ready = ctypes.CFUNCTYPE(None,
                                                   ctypes.POINTER(ELACARRIER),
                                                   ctypes.c_void_p)(
                                                       self.__readyCB__)

        self.events.selfInfo = []
        self.elaCallbacks.self_info = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.POINTER(ELAUSERINFO),
            ctypes.c_void_p)(self.__selfInfoCB__)

        self.events.friendList = []
        self.elaCallbacks.friend_list = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.POINTER(ELAFRIENDINFO),
            ctypes.c_void_p)(self.__friendListCB__)

        self.events.friendConnection = []
        self.elaCallbacks.friend_connection = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p,
            ELACONNECTIONSTATUS, ctypes.c_void_p)(self.__friendConnectionCB__)

        self.events.friendInfo = []
        self.elaCallbacks.friend_info = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p,
            ctypes.POINTER(ELAFRIENDINFO),
            ctypes.c_void_p)(self.__friendInfoCB__)

        self.events.friendPresence = []
        self.elaCallbacks.friend_presence = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p,
            ELAPRESENCESTATUS, ctypes.c_void_p)(self.__friendPresenceCB__)

        self.events.friendRequest = []
        self.elaCallbacks.friend_request = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p,
            ctypes.POINTER(ELAUSERINFO), ctypes.c_char_p,
            ctypes.c_void_p)(self.__friendRequestCB__)

        self.events.friendAdded = []
        self.elaCallbacks.friend_added = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.POINTER(ELAUSERINFO),
            ctypes.c_void_p)(self.__friendAddedCB__)

        self.events.friendRemoved = []
        self.elaCallbacks.friend_removed = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p,
            ctypes.c_void_p)(self.__friendRemovedCB__)

        self.events.friendMessage = []
        self.elaCallbacks.friend_message = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p, ctypes.c_char_p,
            ctypes.c_size_t, ctypes.c_void_p)(self.__friendMessageCB__)

        self.events.friendInvite = []
        self.elaCallbacks.friend_invite = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p, ctypes.c_char_p,
            ctypes.c_char_p, ctypes.c_size_t,
            ctypes.c_void_p)(self.__friendInviteCB__)

        self.events.groupInvite = []
        self.elaCallbacks.group_invite = ctypes.CFUNCTYPE(
            None, ctypes.POINTER(ELACARRIER), ctypes.c_char_p, ctypes.c_char_p,
            ctypes.c_size_t, ctypes.c_void_p)(self.__groupInviteCB__)

        self.events.friendsIterate = []
        self.friendsIterate = ELAFRIENDSITERATECALLBACK(
            self.__friendsIterateCB__)

        self.events.friendInviteResponse = []
        self.friendInviteResponse = ELAFRIENDINVITERESPONSECALLBACK(
            self.__friendInviteResponseCB__)
Example 19
class Base:
    args = Object()
    need_train = True

    def __init__(self, data: dataset.Data):
        self.tmp_vars = Object()

        self.data = data

        # self.save_dir = f'{utils.save_dir}/{args.run_name}'
        self.save_dir = f'{utils.save_dir}/{args.msg}'

        with self.data.tf_graph.as_default():
            tf.set_random_seed(args.seed)
            self.compile()
        self.fit_step = 0

    def compile(self):
        self.emb_l2_norm_op = None
        self.sess = UTILS.get_session()
        self.make_io()
        self.make_model()
        self.sess.run(tf.global_variables_initializer())
        if self.emb_l2_norm_op is not None:
            self.sess.run(self.emb_l2_norm_op)

    def make_io(self):
        self.is_on_train = tf.placeholder(tf.bool, [], 'is_on_train')

        train_data = self.data.train_batch_repeat
        train_data_iter = train_data.make_one_shot_iterator()
        self.train_data_handle = self.sess.run(train_data_iter.string_handle())
        self.data_handle = tf.placeholder(tf.string, [], 'data_handle')
        data_iter = tf.data.Iterator.from_string_handle(
            self.data_handle,
            train_data.output_types,
            train_data.output_shapes,
        )
        self.input_dict = data_iter.get_next()
        self.input_dict = Object(**self.input_dict)

    def get_metric_v(self, x, predict_v):
        # [BS,]
        true_item = x.ans
        true_item_a1 = tf.expand_dims(true_item, -1)
        # [BS, M], [BS, 1]
        eq = tf.cast(tf.equal(predict_v.top_items, true_item_a1), tf.int32)
        # [BS,]
        m = tf.reduce_max(eq, -1)
        idx = tf.cast(tf.argmax(eq, -1), tf.int32)
        rank = idx + m - 1
        ndcg = tf.log(2.0) * tf.cast(m, tf.float32) / tf.log(2.0 + tf.cast(idx, tf.float32))
        hit_rate = tf.cast(m, tf.float32)
        ret = Object(
            ndcg=ndcg,
            hit_rate=hit_rate,
            user=x.user,
            true_item=true_item,
            phase=x.phase,
            top_items=predict_v.top_items,
            top_scores=predict_v.top_scores,
            rank=rank,
            q_ts=x.q_ts,
        )
        return ret

    def get_predict_v(self, x, score):
        # [N]
        item_pop = tf.cast(self.data.item_pop, tf.float32)
        item_pop_log = tf.log(item_pop + np.e)

        item_deg_self = tf.cast(tf.gather(self.data.item_deg_self_per_phase, x.phase), tf.float32)
        item_pop_self_log = tf.log(item_deg_self + np.e)

        if args.mode_pop == 'log':
            score = score * args.alpha_pop_base + score / item_pop_log
        elif args.mode_pop == 'log_mdeg':
            score = score * item_pop_self_log / item_pop_log
        elif args.mode_pop == 'log_mdeg_only':
            score = score * item_pop_self_log
        elif args.mode_pop == 'linear':
            item_pop = tf.cast(self.data.item_pop, tf.float32) + 1.0
            score = score * args.alpha_pop_base + score / item_pop
        elif args.mode_pop == 'log_md':
            item_pop_self_log = tf.log(item_deg_self + np.e + 10.0)
            score = score * item_pop_self_log / item_pop_log

        if args.mode_rare in {'log', 'linear', 'base'}:
            if args.mode_rare == 'log':
                rare_weight_pop = 1.0 / tf.log(tf.cast(self.data.item_pop, tf.float32) + np.e)
            elif args.mode_rare == 'linear':
                rare_weight_pop = 1.0 / tf.cast(self.data.item_pop, tf.float32)
            elif args.mode_rare == 'base':
                rare_weight_pop = 0.0
            else:
                raise Exception
            # [N]
            rare_weight = float(args.alpha_rare)
            rare_weight = rare_weight + rare_weight_pop
            rare_weight *= float(args.alpha_rare_mul)

            is_rare = self.is_rare(x)
            score = tf.where(is_rare, score * rare_weight + float(args.alpha_rare_base), score)

        score = UTILS.mask_logits(score, x.score_mask)

        tf.summary.histogram('score', score)
        top_items, top_scores = self.topk_idx(score, x)

        if args.dump_all:
            self.tmp_vars.update(all_scores=score, item_seq=x.seq, ts_seq=x.ts, q_ts=x.q_ts)
            ret = Object(user=x.user, phase=x.phase, top_items=top_items, top_scores=top_scores)
            ret.update(**self.tmp_vars)
            return ret

        return Object(user=x.user, phase=x.phase, top_items=top_items, top_scores=top_scores)

    def make_model(self):
        with tf.variable_scope('Network', reuse=tf.AUTO_REUSE, regularizer=UTILS.l2_loss('all')):
            x = self.input_dict
            self.train_op, self.train_v, self.predict_v = self.forward(x)
            self.metric_v = self.get_metric_v(x, self.predict_v)
            self.metric_v.update(loss=self.train_v.loss)

        network_var_list = tf.trainable_variables(scope='^Network/')
        if network_var_list:
            args.log.log('trainable_variables:')
            for v in network_var_list:
                args.log.log(f'network: {v}')
                tf.summary.histogram(v.name, v)
            self.saver = tf.train.Saver(var_list=tf.trainable_variables())
            # self.saver_emb = tf.train.Saver(var_list=tf.trainable_variables(scope='^Network/Emb_'))

    def fit(self):
        data = {
            self.is_on_train: True,
            self.data_handle: self.train_data_handle,
        }
        tb_v = []
        if args.run_tb:
            tb_v = [self.all_summary]
        debug_v = DEBUG.fit_show_list
        all_v = [self.train_op, self.train_v, debug_v, tb_v]
        _, train_v, debug_v, tb_v = self.sess.run(all_v, data)
        if self.emb_l2_norm_op is not None:
            self.sess.run(self.emb_l2_norm_op)
        if tb_v:
            self.tbfw.add_summary(tb_v[0], self.fit_step)
        DEBUG.when_run(debug_v)
        self.fit_step += 1
        return train_v

    def inference(self, data, out_obj):
        with self.data.tf_graph.as_default():
            data_iter = data.make_one_shot_iterator()
            data_handle = self.sess.run(data_iter.string_handle())
            data = {
                self.is_on_train: False,
                self.data_handle: data_handle,
            }
        while True:
            try:
                ret_value, debug_v = self.sess.run([out_obj, DEBUG.inf_show_list], data)
                DEBUG.when_run(debug_v)
                yield ret_value
            except tf.errors.OutOfRangeError:
                break

    def metric(self, data):
        for v in self.inference(data, self.metric_v):
            yield v

    def predict(self, data):
        for v in self.inference(data, self.predict_v):
            yield v

    def save(self, s):
        if not self.need_train:
            return
        name = f'{self.save_dir}/model_{s}.ckpt'
        self.saver.save(self.sess, name)

    def restore(self, s):
        if not self.need_train:
            return
        name = f'{self.save_dir}/model_{s}.ckpt'
        self.saver.restore(self.sess, name)

    def restore_from_other(self, run_name):
        save_dir = f'{utils.save_dir}/{run_name}'
        s = 0
        if not self.need_train:
            return
        import os
        if not os.path.isdir(save_dir):
            args.log.log('download from hdfs')
            sh = f'$HADOOP_HOME/bin/hadoop fs -get save/{utils.project_name}/{run_name} {utils.save_dir}/'
            print(os.system(sh))
        name = f'{save_dir}/model_{s}.ckpt'
        self.saver.restore(self.sess, name)
        # if args.restore_train:
        #     self.saver_emb.restore(self.sess, name)
        # else:
        #     self.saver.restore(self.sess, name)

    def forward(self, x):
        raise NotImplementedError

    def is_rare(self, x):
        is_rare = tf.gather(self.data.is_rare_per_phase, x.phase)
        return is_rare

    def topk_idx(self, prob, x):
        rare_k = args.nb_rare_k
        if rare_k < 0:
            topk = tf.nn.top_k(prob, args.nb_topk)
            return topk.indices, topk.values

        is_rare = self.is_rare(x)
        prob_rare = UTILS.mask_logits(prob, is_rare)
        prob_rich = UTILS.mask_logits(prob, tf.logical_not(is_rare))

        topk_rare = tf.nn.top_k(prob_rare, rare_k).indices
        topk_rich = tf.nn.top_k(prob_rich, args.nb_topk - rare_k).indices
        topk = tf.concat([topk_rich, topk_rare], -1)
        # [BS, N], [BS, L] --> [BS, L]
        top_prob = tf.batch_gather(prob, topk)
        sort_topk = tf.nn.top_k(top_prob, args.nb_topk)
        sort_idx = sort_topk.indices
        top_values = sort_topk.values

        sorted_topk = tf.batch_gather(topk, sort_idx)
        return sorted_topk, top_values

    def before_train(self):
        pass

    def after_train(self):
        pass
Example 20
"""Configurations."""
from utils import Object, color

infinite_generation = True

map_params = Object(
    unit_size=10,
    height=20,
    cell_size=3,
    wall_width=1,
    field_size=15,
    core_size=3,
    center_size=3,
    seed=10,
    colors=Object(
        wall=(1, 0.5, 0.5, 1),
        floor=color("94DEFF"),
        sky=color("FFF9D9")
    )
)

key_map = Object(
    character=Object(
        forward="w",
        backward="s",
        left="a",
        right="d"
    ),
    utility=Object(
        pause="escape"
    )
)
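
Read with the attribute-style Object sketched after Example 1, this config would be consumed roughly as follows (illustrative only; the variable names are hypothetical):

unit = map_params.unit_size              # 10
wall_rgba = map_params.colors.wall       # (1, 0.5, 0.5, 1)
forward_key = key_map.character.forward  # "w"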