Example #1
def run_backtest(self):
    print("Execution Mode: RabbitMQ")
    t = Timer()
    t.start()
    try:
        self._run_backtest()
    except KeyboardInterrupt:
        sys.exit(0)  # exit cleanly on Ctrl+C; requires `import sys` at module level
    t.end()
    print("Total Execution Time:", str(t.get()) + "s")
Example #2
class Game:
    """docstring for Game"""

    def __init__(self):
        self.accepted_moves = {
            "UP": self._up,
            "DOWN": self._down,
            "LEFT": self._left,
            "RIGHT": self._right,
        }
        self._state = {
            "field_size": FIELD_SIZE,
            "player_size": PLAYER_SIZE,
            "players": {},
            "fruits": {},
        }
        # Fires add_fruit once, 3 seconds after start() (threading.Timer-style interface).
        self.fruit_timer = Timer(3, self.add_fruit)
        # self.collision_timer = Timer(3, self.add_fruit)

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        self._state = value

    def start(self):
        self.fruit_timer.start()

    def stop(self):
        self.fruit_timer.cancel()

    def add_player(self, player_id, nick):
        """
        Add player
        :param player_id:
        :param nick:
        :return: str
        """
        try:
            _ = self._state["players"][player_id]
            print("Fruit not added!")
            return f"Player {player_id} already exists!"
        except KeyError:
            x, y = random.choices(range(0, FIELD_SIZE+PLAYER_SIZE, PLAYER_SIZE), k=2)
            self._state["players"][player_id] = {"nick": nick, "x": x, "y": y, "p": 0}
            print("Fruit added!")
            return f"Player {player_id} added!"

    def rm_player(self, player_id):
        """
        Remove player
        :param player_id:
        :return: str
        """
        try:
            del self._state["players"][player_id]
            return f"Player {player_id} removed!"
        except KeyError:
            return f"Player {player_id} does not exist!"

    def field_is_full(self):
        """
        Check if field is full
        :return: bool
        """
        return len(self._state["fruits"]) >= ((FIELD_SIZE+PLAYER_SIZE) / PLAYER_SIZE) ** 2

    def add_fruit(self):
        """
        Add fruit
        :return: str
        """
        if self.field_is_full():
            return "Impossible to add fruit. Field is full!"

        # Fruit keys are stringified positions, so compare against str(x).
        positions = [x for x in POSITIONS if str(x) not in self._state["fruits"]]
        pos = random.choice(positions)
        self._state["fruits"][str(pos)] = True
        return "Fruit added!"

    def rm_fruit(self, pos):
        """
        Remove fruit
        :param pos:
        :return: str
        """
        try:
            del self._state["fruits"][str(pos)]
            return f"Fruit {pos} removed!"
        except KeyError:
            return f"Fruit {pos} does not exist!"

    def move_player(self, player_id, move):
        """
        Move player
        :param player_id:
        :param move:
        :return: str
        """
        try:
            self.accepted_moves[move](player_id)
            self.check_collision(player_id)
            return f"Player {player_id} moved {move}!"
        except KeyError:
            return "Invalid move!"

    def _up(self, player_id):
        try:
            self._state["players"][player_id]["y"] -= PLAYER_SIZE
            if self._state["players"][player_id]["y"] < 0:
                self._state["players"][player_id]["y"] = FIELD_SIZE
            return f"Player {player_id} moved {inspect.stack()[0][3]}!"
        except KeyError:
            return f"Player {player_id} does not exist!"

    def _down(self, player_id):
        try:
            self._state["players"][player_id]["y"] += PLAYER_SIZE
            if self._state["players"][player_id]["y"] > FIELD_SIZE:
                self._state["players"][player_id]["y"] = 0
            return f"Player {player_id} moved {inspect.stack()[0][3]}!"
        except KeyError:
            return f"Player {player_id} does not exist!"

    def _left(self, player_id):
        try:
            self._state["players"][player_id]["x"] -= PLAYER_SIZE
            if self._state["players"][player_id]["x"] < 0:
                self._state["players"][player_id]["x"] = FIELD_SIZE
            return f"Player {player_id} moved {inspect.stack()[0][3]}!"
        except KeyError:
            return f"Player {player_id} does not exist!"

    def _right(self, player_id):
        try:
            self._state["players"][player_id]["x"] += PLAYER_SIZE
            if self._state["players"][player_id]["x"] > FIELD_SIZE:
                self._state["players"][player_id]["x"] = 0
            return f"Player {player_id} moved {inspect.stack()[0][3]}!"
        except KeyError:
            return f"Player {player_id} does not exist!"

    def check_collision(self, player_id):
        player = self._state["players"][player_id]
        pos = player["x"], player["y"]
        try:
            _ = self._state["fruits"][str(pos)]
            player["p"] += 1
            self.rm_fruit(pos)
            return f"Player {player_id} collected a fruit at {pos}!"
        except KeyError:
            return f"There's no fruit at {pos}!"
Example #3
def start(
    model="gpt2",
    interact=False,
    topK=10,
    feed="",
    seed=0,
    mod=1,
    logPath="Log",
    enableTimer=True,
    enableTests=False,
    runs=1,
    probability=20,
):

    Log.setPath(logPath)

    feedPath = os.path.join("feed", "{}.txt".format(feed))
    checks(model, topK, runs, feedPath, probability)

    if enableTests:
        runTests()
        return

    # feedPath is never empty (it always contains "feed/<name>.txt"), so test
    # the feed name itself; an empty feed leaves text empty, which enables
    # interactive mode in pyRead.start() below.
    text = ""
    if feed != "":
        with open(feedPath, "r", encoding="utf8") as inFile:
            text = inFile.read()

    totalRunTime = None
    if enableTimer:
        t1 = Timer(enableTimer, Log)
        totalRunTime = 0
    Log.setLevel(2)
    out = list()
    for i in range(0, runs):
        Log.Info("STARTING RUN {}".format(i + 1))

        Log.Trace(
            text="Model: {} | Interact: {} | TopK: {} | Text Path: {} | "
            "Log Path: {} | Enable Timer: {} | Run Tests: {}".format(
                model, interact, topK, feed, logPath, enableTimer, enableTests))

        pyRead = pyReadability(model, interact, topK, seed, mod, probability,
                               Log)

        if enableTimer:
            t1.start()
        # Score the input text; an empty string enables interactive mode.
        pyRead.start(text)

        runTime = -1
        if enableTimer:
            t1.end()
            runTime = t1.result()
            totalRunTime += runTime
            Log.Info("Took {} Seconds to Score".format(runTime))

        seed = pyRead.getSeed()
        totalWords, wordsEncoded = pyRead.getEncoder().wordsEncoded()
        percentEncoded = round(wordsEncoded / totalWords * 100, 2)

        out.append([
            seed, totalWords, wordsEncoded, percentEncoded,
            pyRead.getNormScore(),
            pyRead.getUnNormScore(), runTime
        ])

        Log.Info(
            "Words Encoded: {} | Total Words: {} | Norm Score: {}".format(
                wordsEncoded, totalWords, pyRead.getNormScore()))

    Log.Info("Took {} Seconds to Run {} tests".format(totalRunTime, runs))

    fields = [
        'seed', 'total words', 'words encoded', 'percent encoded',
        'Norm Score', 'Unnorm Score', 'time'
    ]
    Log.csvWriter(feed, fields, out)
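For reference, a typical invocation of this entry point might look like the following; the feed name is hypothetical and must exist as feed/sample.txt, and checks, Log, and pyReadability are assumed to come from the surrounding module.

# Hypothetical call: scores feed/sample.txt three times with a larger topK.
# Each run appends [seed, total words, words encoded, percent encoded,
# norm score, unnorm score, time] to the CSV written by Log.csvWriter.
start(model="gpt2", feed="sample", runs=3, topK=20)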
Example #4
class Browser:
    def __init__(self):
        self.window = tkinter.Tk()
        self.canvas = tkinter.Canvas(self.window, width=WIDTH, height=HEIGHT)
        self.canvas.pack(expand=True, fill="both")
        self.display_list = []
        self.scroll = 0
        self.window.bind("<Up>", self.scrollup)
        self.window.bind("<Down>", self.scrolldown)
        self.window.bind("<Configure>", self.windowresize)
        self.window.bind("<Button-1>", self.handle_click)
        self.window.bind("<Key>", self.keypress)
        self.window.bind("<Return>", self.pressenter)
        self.width = WIDTH
        self.height = HEIGHT
        self.hstep = HSTEP
        self.vstep = VSTEP
        self.scroll_step = SCROLL_STEP
        self.history = []
        self.focus = None
        self.address_bar = ""
        self.timer = Timer()
        self.cookies = {}
        # self.cookies["username"] = "******" # Test of security

        # http://www.zggdwx.com/

    def scrollup(self, e):
        self.scroll -= self.scroll_step
        self.scroll = min(self.scroll, self.max_y)
        self.scroll = max(0, self.scroll)
        self.render()

    def scrolldown(self, e):
        self.scroll += self.scroll_step
        self.scroll = min(self.scroll, self.max_y)
        self.scroll = max(0, self.scroll)
        self.render()

    def windowresize(self, e):
        if e.width < 10: return
        if e.width == self.width and e.height == self.height: return
        self.width = e.width
        self.height = e.height
        print("Layout called from windowresize")
        self.layout()

    def keypress(self, e):
        if not (len(e.char) == 1 and 0x20 <= ord(e.char) < 0x7f):
            return
        if not self.focus:
            return
        elif self.focus == "address bar":
            self.address_bar += e.char
            self.render()
        else:
            self.focus.node.attributes["value"] += e.char
            self.dispatch_event("change", self.focus.node)
            print("Layout called from keypress")
            self.reflow(self.focus)

    def pressenter(self, e):
        if self.focus == "address bar":
            self.focus = None
            self.load(self.address_bar)
        elif isinstance(self.focus, InputLayout):
            self.submit_form(self.focus.node)

    def handle_click(self, e):
        self.focus = None
        if e.y < 60:  # Browser chrome
            if 10 <= e.x < 35 and 10 <= e.y < 50:
                self.go_back()
            elif 50 <= e.x < 790 and 10 <= e.y < 50:
                self.focus = "address bar"
                self.address_bar = ""
                self.render()
        else:
            x, y = e.x, e.y + self.scroll - 60
            obj = find_layout(x, y, self.document)
            if not obj: return
            elt = obj.node
            if elt and self.dispatch_event("click", elt): return
            while elt:
                if isinstance(elt, TextNode):
                    pass
                elif is_link(elt):
                    url = relative_url(elt.attributes["href"], self.url)
                    return self.load(url)
                elif elt.tag == "input":
                    elt.attributes["value"] = ""
                    self.focus = obj
                    print("Layout called from handle_click in input elt")
                    return self.reflow(self.focus)
                elif elt.tag == "button":
                    self.submit_form(elt)
                elt = elt.parent

    def submit_form(self, elt):
        while elt and elt.tag != "form":
            elt = elt.parent
        if not elt: return
        if self.dispatch_event("submit", elt): return
        inputs = find_inputs(elt, [])
        body = ""
        for node in inputs:  # renamed from `input` to avoid shadowing the builtin
            name = node.attributes["name"]
            value = node.attributes.get("value", "")
            body += "&" + name + "=" + value.replace(" ", "%20")
        body = body[1:]

        url = relative_url(elt.attributes["action"], self.url)
        self.load(url, body=body)

    def layout(self, tree=None):
        self.timer.start("Layout Initialization")
        if not tree:
            tree = self.cached_tree
        else:
            self.cached_tree = tree
        self.document = DocumentLayout(tree)
        self.reflow(self.document)

    def reflow(self, obj):
        self.timer.start("Style")
        style(obj.node, obj.parent, self.rules)
        self.timer.start("Layout (phase 1A)")
        obj.size()
        self.timer.start("Layout (phase 1B)")
        while obj.parent:
            obj.parent.compute_height()
            obj = obj.parent
        self.timer.start("Layout (phase 2)")
        self.document.position()
        self.timer.start("Display List")
        self.display_list = []
        self.document.draw(self.display_list)
        self.max_y = self.document.h
        self.render()

    def render(self):
        self.canvas.delete("all")
        self.timer.start("Rendering")
        for cmd in self.display_list:
            if cmd.y1 > self.scroll + self.height - 60:
                continue
            if cmd.y2 < self.scroll:
                continue
            cmd.draw(self.scroll - 60, self.canvas)

        self.timer.start("Chrome")
        self.canvas.create_rectangle(0, 0, 800, 60, width=0, fill='light gray')

        self.canvas.create_rectangle(50, 10, 790, 50)
        font = tkinter.font.Font(family="Courier", size=30)
        self.canvas.create_text(55,
                                15,
                                anchor='nw',
                                text=self.address_bar,
                                font=font)

        self.canvas.create_rectangle(10, 10, 35, 50)
        self.canvas.create_polygon(15, 30, 30, 15, 30, 45, fill='black')
        self.timer.stop()

        if self.focus == "address bar":
            w = font.measure(self.address_bar)
            self.canvas.create_line(55 + w, 15, 55 + w, 45)
        elif isinstance(self.focus, InputLayout):
            text = self.focus.node.attributes.get("value", "")
            x = self.focus.x + self.focus.font.measure(text)
            y = self.focus.y - self.scroll + 60
            self.canvas.create_line(x, y, x, y + self.focus.h)

    def cookie_string(self):
        origin = url_origin(self.history[-1])
        cookies = self.cookies.get(origin, {})
        cookie_string = ""
        for key, value in cookies.items():
            cookie_string += "&" + key + "=" + value
        return cookie_string[1:]

    def load(self, url, body=None):
        self.address_bar = url
        self.url = url
        self.history.append(url)
        self.timer.start("Downloading")
        req_headers = {"Cookie": self.cookie_string()}
        headers, body = request(url, headers=req_headers, payload=body)
        if "set-cookie" in headers:
            kv, *params = headers["set-cookie"].split(";")
            key, value = kv.split("=", 1)
            self.cookies[key] = value
            print(f"Received Cookie key={key}, value={value}")
            origin = url_origin(self.history[-1])
            self.cookies.setdefault(origin, {})[key] = value
        self.timer.start("Parsing HTML")
        self.nodes = parse(lex(body))

        self.timer.start("Parsing CSS")
        with open("browser/src/browser.css") as f:
            browser_style = f.read()
            rules = CSSParser(browser_style).parse()
        for link in find_links(self.nodes, []):
            headers, body = request(relative_url(link, url),
                                    headers=req_headers)
            rules.extend(CSSParser(body).parse())

        # tree_to_string(self.nodes)
        rules.sort(key=lambda selector_body: selector_body[0].priority(),
                   reverse=True)
        self.rules = rules

        self.timer.start("Running JS")
        self.setup_js()
        for script in find_scripts(self.nodes, []):
            header, body = request(relative_url(script, self.history[-1]),
                                   headers=req_headers)
            try:
                # print("Script returned: ", self.js_environment.evaljs(body))
                self.js_environment.evaljs(body)
            except dukpy.JSRuntimeError as e:
                print("Script", script, "crashed", e)

        print("Layout called from load")
        self.layout(self.nodes)

    def go_back(self):
        if len(self.history) > 1:
            self.history.pop()
            back = self.history.pop()
            self.load(back)

    def setup_js(self):
        self.node_to_handle = {}
        self.handle_to_node = {}
        self.js_environment = dukpy.JSInterpreter()
        self.js_environment.export_function("log", print)
        self.js_environment.export_function("querySelectorAll",
                                            self.js_querySelectorAll)
        self.js_environment.export_function("getAttribute",
                                            self.js_getAttribute)
        self.js_environment.export_function("innerHTML", self.js_innerHTML)
        self.js_environment.export_function("cookie", self.js_cookie)
        with open("browser/src/runtime.js") as f:
            self.js_environment.evaljs(f.read())

    def js_cookie(self):
        # Same serialization as cookie_string(); exposed to the JS runtime.
        return self.cookie_string()

    def js_querySelectorAll(self, sel):
        selector, _ = CSSParser(sel + "{").selector(0)
        elts = find_selected(self.nodes, selector, [])
        return [self.make_handle(elt) for elt in elts]

    def make_handle(self, elt):
        if id(elt) not in self.node_to_handle:
            handle = len(self.node_to_handle)
            self.node_to_handle[id(elt)] = handle
            self.handle_to_node[handle] = elt
        else:
            handle = self.node_to_handle[id(elt)]
        return handle

    def js_getAttribute(self, handle, attr):
        elt = self.handle_to_node[handle]
        return elt.attributes.get(attr, None)

    def js_innerHTML(self, handle, s):
        doc = parse(lex("<html><body>" + s + "</body></html>"))
        new_nodes = doc.children[0].children
        elt = self.handle_to_node[handle]
        elt.children = new_nodes
        for child in elt.children:
            child.parent = elt
        print("Layout called from js_innerHTML")
        self.reflow(layout_for_node(self.document, elt))

    def dispatch_event(self, type, elt):
        handle = self.make_handle(elt)
        code = "__runHandlers({}, \"{}\")".format(handle, type)
        do_default = self.js_environment.evaljs(code)
        return not do_default
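The listing never shows how the browser is launched. A sketch of an entry point, assuming the free functions (request, parse, lex, CSSParser, and so on) live in the same module and that browser/src/browser.css exists as load() expects:

if __name__ == "__main__":
    import sys
    browser = Browser()
    # Hypothetical default URL; pass any http(s) URL as the first argument.
    browser.load(sys.argv[1] if len(sys.argv) > 1 else "http://example.org/")
    tkinter.mainloop()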
Example #5
class Experiment:
    def __init__(self):
        self.rnn = None
        self.l_data = None
        self.l_data_config = None
        self.data_dict = None
        self.rnn_config = None
        self.info_config = None
        self.train_config = None
        self.timer = None

    def create_rnn(self, l_data, l_data_config):
        set_momentum(self.train_config['batchnorm']['momentum'])
        self.rnn = RNN(l_data)
        self.l_data = l_data
        self.l_data_config = l_data_config

    # Creates an RNN using a modified l_data_config.
    # Used e.g. by incremental sequence training, where l_data_config changes while training.
    def create_modified_model(self, l_data_config, session_idx):
        l_data_config['tr']['in_seq_len'] = self.train_config['mode'][
            'in_seq_len'][session_idx]
        l_data_config['tr']['max_truncation'] = self.train_config['mode'][
            'max_truncation'][session_idx]
        self.data_dict = load_dataset(l_data_config)
        labeled_data = LabeledData(l_data_config, self.data_dict)
        self.create_rnn(labeled_data, l_data_config)

    def train(self, rnn_config, l_data_config, train_config, info_config, run):
        self.rnn_config = rnn_config
        self.info_config = info_config
        self.train_config = train_config
        set_rnn_config(rnn_config)
        set_info_config(info_config)

        self.timer = Timer(info_config['timer']['enabled'])
        print_config(rnn_config, train_config, l_data_config)
        temp_model_path = '../models/temp' + info_config[
            'filename'] + '_' + str(train_config['task_id'])
        pretrained_model_path = '../tr_models/' + str(
            train_config['pretraining']['path'])

        if train_config['mode']['name'] == 'inc_lengths':
            n_sessions = len(train_config['mode']['in_seq_len'])
        elif train_config['mode']['name'] == 'classic':
            n_sessions = 1
        else:
            raise ValueError('training mode not understood')

        self.timer.start()
        set_train_config(train_config)
        # Sessions refer to training with different architectures. If one RNN is used throughout
        # the training process, only one session is created. Training with incremental sequence
        # lengths, for example, requires multiple RNNs, one for each sequence length. Evaluation
        # datasets (validation and test) are always evaluated on a fixed RNN; only the RNN
        # structure used for the training set varies. current_epoch stores the total number of
        # epochs, and epoch the epoch within a session.
        current_epoch = 0
        tau = self.train_config['tau']
        learning_rate = self.train_config['learning_rate']
        best_weight_probs_dict = None
        for session_idx in range(n_sessions):
            tf.reset_default_graph()
            if self.train_config['mode']['name'] == 'inc_lengths':
                max_epochs = self.train_config['mode']['max_epochs'][
                    session_idx]
                min_error = self.train_config['mode']['min_errors'][
                    session_idx]
                self.create_modified_model(l_data_config, session_idx)
            elif self.train_config['mode']['name'] == 'classic':
                self.data_dict = load_dataset(l_data_config)
                l_data = LabeledData(l_data_config, self.data_dict)
                self.create_rnn(l_data, l_data_config)
                max_epochs = self.train_config['mode']['max_epochs']
                min_error = self.train_config['mode']['min_error']
            self.timer.restart('Graph creation')

            # Saver is used for restoring weights for new session if more than one is used for training
            model_saver = tf.train.Saver(var_list=tf.trainable_variables())
            with tf.Session() as sess:
                if info_config['profiling']['enabled']:
                    options = tf.RunOptions(
                        trace_level=tf.RunOptions.FULL_TRACE)
                else:
                    options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
                run_metadata = tf.RunMetadata()
                writer = tf.summary.FileWriter(
                    info_config['tensorboard']['path'] +
                    str(self.train_config['task_id']))
                sess.run(tf.global_variables_initializer())

                if session_idx != 0:
                    #self.optimistic_restore(sess, pretrained_model_path)
                    model_saver.restore(sess, temp_model_path)
                elif self.train_config['pretraining']['enabled']:
                    self.optimistic_restore(sess, pretrained_model_path)
                    sess.run(self.rnn.init_op)
                #sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type="readline")
                #sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
                self.timer.restart('Initialization')

                # Loading datasets into GPU (via tf.Variables)
                for key in self.data_dict.keys():
                    sess.run(self.l_data.data[key]['load'],
                             feed_dict={
                                 self.l_data.data[key]['x_ph']:
                                 self.data_dict[key]['x'],
                                 self.l_data.data[key]['y_ph']:
                                 self.data_dict[key]['y']
                             })

                self.timer.restart('Loading data')

                traces = list()

                for epoch in range(max_epochs):
                    if self.info_config['gradient']['evaluate']:
                        self.save_gradient_variance(sess, epoch, tau)
                        quit()
                    # Evaluate performance on the different datasets and print some results to the console.
                    # Also check potential stopping criteria.
                    if current_epoch % info_config[
                            'calc_performance_every'] == 0:
                        self.rnn.t_metrics.retrieve_results(
                            sess, current_epoch, tau)
                        self.rnn.t_metrics.print(session_idx)
                        #if self.rnn.t_metrics.result_dict['tr_b']['vfe'][-1] < min_error:
                        #break

                    if (current_epoch + 1) % info_config['save_weights'][
                            'save_every'] == 0:
                        self.save_weight_probs(
                            info_config['save_weights']['path'], current_epoch,
                            run, sess.run(self.rnn.get_weights_op))

                    if info_config['save_weights']['save_best']:
                        if self.rnn.t_metrics.best_va['is_current']:
                            best_weight_probs_dict = sess.run(
                                self.rnn.get_weights_op)

                    self.timer.restart('Metrics')

                    # Optionally store tensorboard summaries
                    if info_config['tensorboard']['enabled'] \
                            and current_epoch % info_config['tensorboard']['period'] == 0:
                        if info_config['tensorboard']['weights']:
                            weight_summary = sess.run(
                                self.rnn.weight_summaries,
                                feed_dict={
                                    self.rnn.tau: (tau, ),
                                    self.l_data.batch_idx: 0,
                                    self.rnn.is_training: False
                                })
                            writer.add_summary(weight_summary, current_epoch)
                        if info_config['tensorboard']['gradients']:
                            gradient_summary = sess.run(
                                self.rnn.gradient_summaries,
                                feed_dict={
                                    self.rnn.tau: (tau, ),
                                    self.l_data.batch_idx: 0,
                                    self.rnn.is_training: False
                                })
                            writer.add_summary(gradient_summary, current_epoch)
                        if info_config['tensorboard']['results']:
                            t_result_summaries = sess.run(
                                self.rnn.t_metric_summaries,
                                feed_dict={
                                    self.rnn.tau: (tau, ),
                                    self.l_data.batch_idx: 0,
                                    self.rnn.is_training: False
                                })
                            writer.add_summary(t_result_summaries,
                                               current_epoch)
                        if info_config['tensorboard']['acts']:
                            act_summaries = sess.run(self.rnn.act_summaries,
                                                     feed_dict={
                                                         self.rnn.tau: (tau, ),
                                                         self.l_data.batch_idx:
                                                         0,
                                                         self.rnn.is_training:
                                                         False
                                                     })
                            writer.add_summary(act_summaries, current_epoch)

                    self.timer.restart('Tensorboard')
                    # Train for one full epoch. First shuffle to create new minibatches from the given data and
                    # then do a training step for each minibatch.
                    # Also anneal learning rate and tau if necessary
                    if (current_epoch +
                            1) % self.train_config['learning_rate_tau'] == 0:
                        learning_rate /= 2

                    sess.run(self.l_data.data['tr']['shuffle'])
                    if 'c_ar' in self.train_config[
                            'algorithm'] or 'c_arm' in self.train_config[
                                'algorithm']:
                        sess.run(
                            self.rnn.assign_learning_rate,
                            feed_dict={self.rnn.learning_rate: learning_rate})
                    for minibatch_idx in range(
                            self.l_data.data['tr']['n_minibatches']):
                        if 'c_ar' in self.train_config['algorithm'] or 'c_arm' in self.train_config['algorithm']\
                                or 'log_der' in self.train_config['algorithm']:
                            grads = []
                            for i in range(
                                    self.train_config['carm_iterations']):
                                sess.run(self.rnn.c_arm_sample_op)
                                gradients = sess.run(self.rnn.gradients,
                                                     feed_dict={
                                                         self.l_data.batch_idx:
                                                         minibatch_idx,
                                                         self.rnn.is_training:
                                                         True
                                                     })
                                if len(grads) == 0:
                                    for j in range(len(gradients)):
                                        grads.append(gradients[j][0])
                                else:
                                    for j in range(len(grads)):
                                        if grads[j] is not None:
                                            grads[j] += gradients[j][0]
                            for j in range(len(grads)):
                                if grads[j] is not None:
                                    grads[j] /= self.train_config[
                                        'carm_iterations']
                            sess.run(self.rnn.train_b_op,
                                     feed_dict={
                                         gradient_ph: grad
                                         for gradient_ph, grad in zip(
                                             self.rnn.gradient_ph, grads)
                                     })

                        else:
                            sess.run(self.rnn.train_b_op,
                                     feed_dict={
                                         self.rnn.learning_rate: learning_rate,
                                         self.rnn.tau: (tau, ),
                                         self.l_data.batch_idx: minibatch_idx,
                                         self.rnn.is_training: True
                                     },
                                     options=options,
                                     run_metadata=run_metadata)

                    if info_config['profiling']['enabled']:
                        traces.append(
                            timeline.Timeline(run_metadata.step_stats).
                            generate_chrome_trace_format())
                    current_epoch += 1
                    self.timer.restart('Training')

                # Optionally store profiling results of this epoch in files
                if info_config['profiling']['enabled']:
                    for trace_idx, trace in enumerate(traces):
                        path = info_config['profiling']['path'] + '_' + str(
                            current_epoch) + '_' + str(trace_idx)
                        with open(path + 'training.json', 'w') as f:
                            f.write(trace)

                # TODO: Clean the cell access code
                if info_config['cell_access']:
                    ca_1, ca_2 = sess.run([
                        self.rnn.layers[0].cell_access_mat,
                        self.rnn.layers[1].cell_access_mat
                    ],
                                          feed_dict={self.l_data.batch_idx: 0})
                    np.save(file='../nr/ca_1_' +
                            str(self.train_config['task_id']),
                            arr=ca_1)
                    np.save(file='../nr/ca_2_' +
                            str(self.train_config['task_id']),
                            arr=ca_2)
                model_saver.save(sess, temp_model_path)

        if info_config['save_weights']['save_best']:
            self.save_weight_probs(self.info_config['save_weights']['path'],
                                   'best', run, best_weight_probs_dict)
        writer.close()
        return self.rnn.t_metrics.result_dict

    # Empirically estimates variance of gradient, saves results and quits
    def save_gradient_variance(self, sess, epoch, tau):
        n_gradients = self.info_config['gradient']['samples']
        n_grads_per_sample = self.info_config['gradient']['grad_per_sample']
        tf_grads = []
        tf_vars = []
        e = []
        se = []
        for grad_var in self.rnn.gradients:  # renamed from `tuple` to avoid shadowing the builtin
            if grad_var is not None:
                tf_grads.append(grad_var[0])
                tf_vars.append(grad_var[1])
                e.append(np.zeros(grad_var[0].shape))
                se.append(np.zeros(grad_var[0].shape))

        for gradient_idx in range(n_gradients):
            gradients = []
            for grad in tf_grads:
                gradients.append(np.zeros(grad.shape))

            for sample_idx in range(n_grads_per_sample):
                sess.run(self.rnn.c_arm_sample_op)
                gradient = sess.run(tf_grads,
                                    feed_dict={
                                        self.l_data.batch_idx: 0,
                                        self.rnn.tau: (tau, )
                                    })
                for idx, val in enumerate(gradient):
                    gradients[idx] += val

            for idx in range(len(gradients)):
                gradients[idx] /= n_grads_per_sample
            # gradients is the averaged gradient over n_grads_per_sample;
            # it's a list with one entry per variable.

            for idx in range(len(gradients)):
                e[idx] += gradients[idx]
                se[idx] += np.square(gradients[idx])
            print(gradient_idx)

        for idx in range(len(e)):
            e[idx] /= n_gradients
            se[idx] /= n_gradients
            var = tf_vars[idx]
            suffix = '_' + var.name[:var.name.index('/')] + '_' + var.name[
                var.name.index('/') + 1:-2] + '_' + str(
                    self.train_config['task_id']) + '.npy'
            np.save(file='../nr/ge' + suffix, arr=e[idx])
            np.save(file='../nr/gsqe' + suffix, arr=se[idx])

    def optimistic_restore(self, sess, file):
        reader = tf.train.NewCheckpointReader(file)
        saved_shapes = reader.get_variable_to_shape_map()
        var_names = sorted([(var.name, var.name.split(':')[0])
                            for var in tf.global_variables()
                            if var.name.split(':')[0] in saved_shapes
                            and 'batch_normalization' not in var.name])
        restore_vars = []
        with tf.variable_scope('', reuse=True):
            for var_name, saved_var_name in var_names:
                curr_var = tf.get_variable(saved_var_name)
                var_shape = curr_var.get_shape().as_list()
                if var_shape == saved_shapes[saved_var_name]:
                    restore_vars.append(curr_var)
        opt_saver = tf.train.Saver(restore_vars)
        opt_saver.restore(sess, file)

    def save_weight_probs(self, path, epoch, run, weight_probs_dict):
        for layer_key in weight_probs_dict.keys():
            for var_key in weight_probs_dict[layer_key].keys():
                layer_weights = weight_probs_dict[layer_key]
                if len(layer_weights[var_key].keys()) == 2:
                    # Continuous weight with mean and variance
                    np.save(
                        path + '_r' + str(run) + '_e' + str(epoch) + '_' +
                        layer_key + '_' + var_key + '_m.npy',
                        layer_weights[var_key]['m'])
                    np.save(
                        path + '_r' + str(run) + '_e' + str(epoch) + '_' +
                        layer_key + '_' + var_key + '_v.npy',
                        layer_weights[var_key]['v'])
                else:
                    np.save(
                        path + '_r' + str(run) + '_e' + str(epoch) + '_' +
                        layer_key + '_' + var_key + '_p.npy',
                        layer_weights[var_key]['probs'])
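save_weight_probs distinguishes continuous from discrete weights by the number of entries per variable. A sketch of the nested dict it expects, reconstructed from the two branches above; the layer and variable names and array shapes are placeholders:

import numpy as np

weight_probs_dict = {
    "lstm_0": {                                              # placeholder layer key
        "w": {"m": np.zeros((4, 4)), "v": np.ones((4, 4))},  # 2 entries -> saved as _m.npy / _v.npy
        "b": {"probs": np.full((3, 4), 1 / 3)},              # 1 entry  -> saved as _p.npy
    },
}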