Code example #1
 def __init__(self):
     self.functions_table = FunctionsTable()
     self.current_function = Function("void",
                                      "global", [], {},
                                      function_memory=Memory(
                                          MemoryConstants.GLOBAL_INITIAL))
     self.semantic_cube = SemanticCube()
     self.quadruples = QuadruplesTable()
     self.operators_stack = []
     self.operands_stack = []
     self.jumps_stack = []
     self.types_stack = []
     self.temporal_memory = Memory(MemoryConstants.TEMPORAL_INITIAL)
     self.constant_memory = Memory(MemoryConstants.CONSTANT_INITIAL)
     self.constant_exec_memory = ExecMemory(
         MemoryConstants.CONSTANT_INITIAL)
Code example #2
File: news.py Project: shivani1998cs/ChatBot
 def __init__(self,
              source="google-news",
              apiKey="7488ba8ff8dc43459d36f06e7141c9e5"):
     self.apiKey = apiKey
     self.source = source
     # Build the URL from the constructor arguments instead of hardcoding
     # the defaults (the original ignored both parameters here)
     self.url = ("https://newsapi.org/v1/articles?source={}&sortBy=top"
                 "&apiKey={}".format(source, apiKey))
     self.m = Memory()
Code example #3
    def __init__(self, name: str, access_modifier: str, parent: Optional[SymbolTable] = None):
        """The MethodScope object is responsible for keeping track of the information of a method.

        Arguments:
            - name [str]: The name of the method.
            - access_modifier [str]: Whether the method is public or private.
            - parent [SymbolTable]: The method's parent class attribute_directory, only Optional for the Global Scope.
        """
        self._name = name
        self._access_modifier = access_modifier
        self._parent = parent
        self._local_memory = Memory(Scopes.LOCAL, ScopeRanges.LOCAL)
        self._instruction_pointer = None

        if parent is not None:
            logger.debug(
                f"Created MethodScope {access_modifier} {name}, parent {parent.name}")

        # These are added later, as there could be multiple arguments,
        # so we parse them one by one, and finally add the return type
        # which goes after the arguments
        self._arguments = SymbolTable(f'{name} Arguments', parent)
        self._ordered_arguments = []
        self._return_type = None
        self._return_memory_address = -1

        self._variables_directory = SymbolTable(name, self._arguments)
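
The two-phase setup described in the comment above means a caller constructs the scope first and registers arguments and the return type afterwards. A hypothetical usage sketch (the add_argument and add_return_type method names, and the class_attributes table, are assumptions for illustration, not shown in the snippet):

# Hypothetical usage; these method names are assumed, not confirmed by the snippet.
method = MethodScope("area", "public", parent=class_attributes)
method.add_argument("width", "int")    # arguments are parsed one by one
method.add_argument("height", "int")
method.add_return_type("int")          # the return type comes after the arguments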
Code example #4
	def __init__(self, quad_file):
		self.state = GlobalState(self)
		self.memory = Memory()
		self.jumps = []
		self.returns = []
		self.grabs = []
		self.context = None
		self.quads = self.store_constants(quad_file.read().splitlines())
Code example #5
 def __init__(self):
     self.table = SymbolTable()
     self.oracle = SemanticCube()
     self.quads = QuadGenerator()
     self.memory = Memory()
     self.flow = FlowManager(self.quads, self.memory)
     # Creating the quad for the initial functions jump
     self.create_initial_jump()
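
create_initial_jump is not shown in the snippet; a minimal sketch of the usual pattern, assuming QuadGenerator exposes a generate method and a counter for the last emitted quadruple (both assumed names):

def create_initial_jump(self):
    # Sketch only: emit a GOTO whose target is still unknown, and remember
    # its index so it can be backpatched to main's first quadruple later.
    self.quads.generate('GOTO', None, None, None)
    self.pending_main_jump = self.quads.counter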
Code example #6
 def __init__(self, stop_criteria, memory_size, progress_writer=None):
     self.stop_criteria = stop_criteria
     self.color_permutator = FastColorPermutator()
     self.memory = Memory(memory_size)
     self.progress_writer = progress_writer
     self.root_node = None
     self.best_score = None
     self.best_score_graph = None
     self.color_set = []
Code example #7
def main():
    path = os.getcwd()[:-10]

    env = CarRacingWrapper()
    agent = Agent(env, state_dim=288)
    memory = Memory(num_timesteps=1,
                    batch_size=1,
                    load_model=True,
                    results_dir=path + 'memory/160model_v2')
    vision = VAE(load_model=True, results_dir=path + 'vision')
    episodes = 1
    agent.train(vision, memory, episodes=episodes)
    agent.train(vision, memory, render=True)
Code example #8
def main(robot_name: str, env_monitor: bool = True):
    """
    Main function to learn robot walking using DQL algorithm
    """
    print(robot_name)
    env = gym.make(robot_name)
    env.render(mode=config.MODEL)
    dqn = DeepQNetwork(env=env)
    memory = Memory(param.MEMORY_SIZE)
    if env_monitor:
        env = gym.wrappers.Monitor(
            env=env,
            directory=config.MONITOR_PATH,
            force=True,
            video_callable=lambda episode_id: episode_id % 1 == 0)
    print(f'Start training agent: {robot_name}')
    training(env, dqn, memory)
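
The training function is defined elsewhere; a minimal sketch of a deep Q-learning loop with experience replay, assuming Memory exposes add/sample/__len__ and DeepQNetwork exposes act/learn (all assumed APIs):

def training(env, dqn, memory, episodes=500, batch_size=32):
    for episode in range(episodes):
        state = env.reset()
        done = False
        while not done:
            action = dqn.act(state)  # epsilon-greedy action (assumed API)
            next_state, reward, done, _ = env.step(action)
            memory.add((state, action, reward, next_state, done))
            if len(memory) >= batch_size:
                # Replay a random minibatch to decorrelate updates
                dqn.learn(memory.sample(batch_size))
            state = next_state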
Code example #9
    def __init__(self):
        super().__init__()
        self.memory = Memory()

        self.setupUi(self)
        self.scan_widget.setEnabled(False)

        self.found_table.setModel(FoundAddressModel(self))
        self.found_table.horizontalHeader().setSectionResizeMode(
            0, QHeaderView.Stretch)

        self.new_scan.clicked.connect(self.new_scan_clicked)
        self.next_scan.clicked.connect(self.next_scan_clicked)
        self.actionAttach.triggered.connect(self.attach_triggered)
        self.found_table.doubleClicked.connect(self.found_table_double_clicked)
        self.saved_results.doubleClicked.connect(
            self.saved_model_double_clicked)
Code example #10
File: MbPA.py Project: esgl/MbPA
    def __init__(self, sess, args):
        with tf.variable_scope(args.model_name):
            self.args = args
            self.learning_rate = args.learning_rate
            self.session = sess

            self.x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
            self.y = tf.placeholder(tf.float32, shape=[None, 10], name="y")
            # self.trainable = tf.placeholder(tf.int32, shape=(), name="trainable")
            self.memory_sample_batch = tf.placeholder(
                tf.int16, shape=(), name="memory_sample_batch")

            self.embed = self.embedding(self.x)

            self.M = Memory(args.memory_size,
                            self.embed.get_shape()[-1],
                            self.y.get_shape()[-1])
            embs_and_values = tf.py_func(self.get_memory_sample,
                                         [self.memory_sample_batch],
                                         [tf.float64, tf.float64])

            self.memory_batch_x = tf.to_float(embs_and_values[0])
            self.memory_batch_y = tf.to_float(embs_and_values[1])
            self.xa = tf.concat(values=[self.embed, self.memory_batch_x],
                                axis=0)
            self.ya = tf.concat(values=[self.y, self.memory_batch_y], axis=0)

            self.y_ = self.output_network(self.xa)

            self.cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.ya,
                                                        logits=self.y_))
            self.optim = tf.train.GradientDescentOptimizer(
                self.learning_rate).minimize(self.cross_entropy)
            self.correct_prediction = tf.equal(tf.argmax(self.ya, 1),
                                               tf.argmax(self.y_, 1))
            self.accuracy = tf.reduce_mean(
                tf.cast(self.correct_prediction, tf.float32))

            self.session.run(tf.global_variables_initializer())
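
get_memory_sample is wrapped in tf.py_func above but not shown; a plausible sketch, assuming the Memory object exposes a sample(batch_size) method returning stored embeddings and labels (assumed API):

import numpy as np

def get_memory_sample(self, batch_size):
    # Must return float64 arrays to match the [tf.float64, tf.float64]
    # output types declared in the tf.py_func call above.
    embeddings, labels = self.M.sample(batch_size)
    return embeddings.astype(np.float64), labels.astype(np.float64)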
Code example #11
    def __init__(self,
                 name: str,
                 inherits: Optional["ClassScope"] = None,
                 global_scope: Optional["SymbolTable"] = None):
        """ClassScope is responsible for keeping track of a classes methods, attributes, and local memory.

        Arguments:
            - name [str]: The name of the class.
            - inherits [ClassScope]: The class scope of the parent class if there is one.
            - global_scope [SymbolTable]: The global symbol table, chained to the attribute directory when the class does not inherit.
        """
        self._name = name
        if inherits is None:
            self._method_directory = SymbolTable(name)
            self._attribute_directory = SymbolTable(name, global_scope)
            self._instance_memory = Memory(Scopes.INSTANCE,
                                           ScopeRanges.INSTANCE)
        else:
            self._method_directory = SymbolTable(name,
                                                 inherits.method_directory)
            self._attribute_directory = SymbolTable(
                name, inherits.attribute_directory)
            self._instance_memory = inherits.instance_memory
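
A hypothetical instantiation showing both branches (the global_symbols table is an assumption for illustration):

# Hypothetical usage sketch
base = ClassScope("Shape", global_scope=global_symbols)  # fresh directories
derived = ClassScope("Circle", inherits=base)  # chains to Shape's directories and shares its instance memory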
Code example #12
from semantic_cube.semantic_cube import Cube
from semantic_cube.semantic_cube_helper import (
    code_to_type,
    token_to_code,
    type_to_code,
    type_to_init_value,
    scope_to_code,
    code_to_scope,
)
from error.error_helper import ErrorHelper
# The original file's imports for these helpers are not shown in the snippet;
# the module paths below are assumptions following the pattern above.
from parser_helper.parser_helper import ParserHelper
from quadruple_helper.quadruple_helper import QuadrupleHelper
from memory.memory import Memory

parser_helper = ParserHelper()
quad_helper = QuadrupleHelper()
error_helper = ErrorHelper()
semantic_cube = Cube()
memory = Memory()


def p_program(p):
    """program : PROGRAM ID program1"""
    # print("DIR TABLE: ", parser_helper.procedure_directory)


def p_program1(p):
    """program1 : script program2
        | program2"""


def p_program2(p):
    """program2 : htmltag program2
        | empty"""
Code example #13
File: ddpg.py Project: dkd58/DeepRL
    def __init__(self, env, monitor_path: str, **usercfg) -> None:
        super(DDPG, self).__init__(**usercfg)
        self.env = env
        self.monitor_path: str = monitor_path

        self.config.update(
            n_episodes=100000,
            n_timesteps=env.spec.tags.get(
                "wrapper_config.TimeLimit.max_episode_steps"),
            actor_learning_rate=1e-4,
            critic_learning_rate=1e-3,
            ou_theta=0.15,
            ou_sigma=0.2,
            gamma=0.99,
            batch_size=64,
            tau=0.001,
            l2_loss_coef=1e-2,
            n_actor_layers=2,
            n_hidden_units=64,
            actor_layer_norm=True,
            critic_layer_norm=False,  # Batch norm for critic does not seem to work
            replay_buffer_size=1e6,
            replay_start_size=10000  # Required number of replay buffer entries to start training
        )
        self.config.update(usercfg)

        self.state_shape: list = list(env.observation_space.shape)
        self.n_actions: int = env.action_space.shape[0]
        self.states = tf.placeholder(tf.float32, [None] + self.state_shape,
                                     name="states")
        self.actions_taken = tf.placeholder(tf.float32, [None, self.n_actions],
                                            name="actions_taken")
        self.critic_target = tf.placeholder(tf.float32, [None, 1],
                                            name="critic_target")
        self.is_training = tf.placeholder(tf.bool, name="is_training")

        with tf.variable_scope("actor"):
            self.action_output, self.actor_vars = self.build_actor_network()

        self.target_action_output, actor_target_update = self.build_target_actor_network(
            self.actor_vars)

        self.q_gradient_input = tf.placeholder("float", [None, self.n_actions],
                                               name="q_grad_input")
        self.actor_policy_gradients = tf.gradients(self.action_output,
                                                   self.actor_vars,
                                                   -self.q_gradient_input,
                                                   name="actor_gradients")
        self.actor_train_op = tf.train.AdamOptimizer(
            self.config["actor_learning_rate"],
            name="actor_optimizer").apply_gradients(
                list(zip(self.actor_policy_gradients, self.actor_vars)))

        with tf.variable_scope("critic"):
            self.q_value_output, self.critic_vars = self.build_critic_network()

        self.target_q_value_output, critic_target_update = self.build_target_critic_network(
            self.critic_vars)

        l2_loss = tf.add_n([
            self.config["l2_loss_coef"] * tf.nn.l2_loss(var)
            for var in self.critic_vars
        ])
        self.critic_loss = tf.reduce_mean(
            tf.square(self.critic_target - self.q_value_output)) + l2_loss
        self.critic_train_op = tf.train.AdamOptimizer(
            self.config["critic_learning_rate"],
            name="critic_optimizer").minimize(self.critic_loss)
        self.action_gradients = tf.gradients(self.q_value_output,
                                             self.actions_taken,
                                             name="action_gradients")

        summaries = []
        for v in self.actor_vars + self.critic_vars:
            summaries.append(tf.summary.histogram(v.name, v))
        self.model_summary_op = tf.summary.merge(summaries)

        self.update_targets_op = tf.group(actor_target_update,
                                          critic_target_update,
                                          name="update_targets")

        self.init_op = tf.global_variables_initializer()

        self.action_noise = OrnsteinUhlenbeckActionNoise(
            self.n_actions, self.config["ou_sigma"], self.config["ou_theta"])

        self.replay_buffer = Memory(int(self.config["replay_buffer_size"]))

        self.n_updates = 0

        self.summary_writer = tf.summary.FileWriter(
            os.path.join(self.monitor_path, "summaries"),
            tf.get_default_graph())
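
The constructor above only builds the graph; a sketch of one learning step wiring these ops together, assuming Memory exposes a sample(batch_size) method returning transition arrays (assumed API):

def learn_step(self, sess):
    # Sample a minibatch of transitions from the replay buffer (API assumed).
    states, actions, rewards, next_states, terminals = \
        self.replay_buffer.sample(self.config["batch_size"])

    # Critic target: r + gamma * Q'(s', mu'(s')) from the target networks.
    next_actions = sess.run(self.target_action_output,
                            feed_dict={self.states: next_states,
                                       self.is_training: False})
    next_q = sess.run(self.target_q_value_output,
                      feed_dict={self.states: next_states,
                                 self.actions_taken: next_actions,
                                 self.is_training: False})
    targets = rewards[:, None] + \
        self.config["gamma"] * next_q * (1.0 - terminals[:, None])

    # Update the critic, then the actor along the deterministic policy gradient.
    sess.run(self.critic_train_op,
             feed_dict={self.states: states, self.actions_taken: actions,
                        self.critic_target: targets, self.is_training: True})
    policy_actions = sess.run(self.action_output,
                              feed_dict={self.states: states,
                                         self.is_training: True})
    q_grads = sess.run(self.action_gradients,
                       feed_dict={self.states: states,
                                  self.actions_taken: policy_actions,
                                  self.is_training: True})[0]
    sess.run(self.actor_train_op,
             feed_dict={self.states: states,
                        self.q_gradient_input: q_grads,
                        self.is_training: True})

    sess.run(self.update_targets_op)  # soft-update both target networks
    self.n_updates += 1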
Code example #14
 def setUp(self) -> None:
     self.memory = Memory()
     # just attach to own process, we're overriding the read values anyway
     self.memory.process = psutil.Process()
Code example #15
File: run_z80.py Project: iohe/qaopm
def run():
    parser = argparse.ArgumentParser(description='Run a Z80 assembly program')
    parser.add_argument('sourcefile',
                        metavar='sourcefile',
                        type=str,
                        help='Z80 binary file')
    parser.add_argument('--dumprange',
                        type=str,
                        help='Range of memory to dump',
                        required=False)
    parser.add_argument('--verbose',
                        '-v',
                        help='Enable verbose output',
                        required=False,
                        action='store_true')
    args = parser.parse_args()

    memory = Memory()
    io = IO()
    processor = Processor(memory, io)

    load_memory(memory, args.sourcefile, 0x0000)

    t_states = 0
    while True:
        executed = processor.execute()
        if args.verbose:
            print(executed)
        else:
            print('.', end='')
        t_states += executed.t_states()

        if str(executed) == 'nop':
            break

    print('\n')
    print('Completed program execution in {} t-states'.format(t_states))
    print('Main register states:')
    for reg, value in processor.main_registers.items():
        print('{0:}: {1:#04x}\t\t'.format(reg, value), end='')

    print('\n')
    print('Alternate register states:')
    for reg, value in processor.alternate_registers.items():
        print('{0:}: {1:#04x}\t\t'.format(reg, value), end='')

    print('\n')
    print('Special register states:')
    for reg, value in processor.special_registers.items():
        print('{0:}: {1:#06x}\t\t'.format(reg, value), end='')

    if args.dumprange is not None:
        start = int(args.dumprange.split(':')[0], 16) & 0xffff
        end = int(args.dumprange.split(':')[1], 16) & 0xffff

        print('\n')
        print('Listing of memory values from {0:#06x} to {1:#06x}'.format(
            start, end))
        addr = start
        while True:
            if addr > end:
                break

            values = [0x00] * 8
            for i in range(0, 8):
                values[i] = memory[0xffff & (addr + i)]

            hex_values = ['{:#04x}'.format(val) for val in values]
            chr_values = ['{}'.format(chr(val)) for val in values]

            print('{0:#06x}: {1:}\t\t{2:}'.format(addr, ' '.join(hex_values),
                                                   ' '.join(chr_values)))
            addr += 8
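
A typical invocation of the runner above, dumping a memory range after execution (the binary name is illustrative):

python run_z80.py program.bin --dumprange 4000:40ff --verbose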
Code example #16
File: display_adapter_tests.py Project: iohe/qaopm
 def __init__(self):
     self.memory = Memory()
     self.display_adapter = DisplayAdapter(self.memory)
Code example #17
File: MbPA_test.py Project: esgl/MbPA
    def __init__(self, sess, args):
        self.args = args
        self.session = sess
        self.w = {}
        self.eval_w = {}
        with tf.variable_scope(self.args.model_name):
            self.x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
            self.y = tf.placeholder(tf.float32, shape=[None, 10], name="y")
            with tf.variable_scope("training"):
                with tf.variable_scope("embedding"):
                    self.out = tf.reshape(self.x, [-1, 28, 28, 1])
                    with tf.variable_scope("conv"):
                        #     self.out, self.w["l1_w"], self.w["l1_b"] = conv2d(
                        #         x=self.out,
                        #         output_dim=16,
                        #         kernel_size=[8, 8],
                        #         stride=[4, 4],
                        #         activation_fn=tf.nn.relu,
                        #         name="conv1"
                        #     )
                        #     self.out, self.w["l2_w"], self.w["l2_b"] = conv2d(
                        #         x=self.out,
                        #         output_dim=32,
                        #         kernel_size=[4, 4],
                        #         stride=[2, 2],
                        #         activation_fn=tf.nn.relu,
                        #         name="conv2"
                        #     )
                        self.embed = layers.flatten(self.out)
                        self.embed_dim = self.embed.get_shape()[-1]

                with tf.variable_scope("fc"):
                    self.out = self.embed
                    self.out, self.w["l3_w"], self.w["l3_b"] = linear(
                        input_=self.out,
                        output_size=1024,
                        activation_fn=tf.nn.relu,
                        name="fc_1")
                    self.out, self.w["l4_w"], self.w["l4_b"] = linear(
                        input_=self.out, output_size=10, name="fc_2")
                    self.y_ = self.out

                self.cross_entropy = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(labels=self.y,
                                                            logits=self.y_))

                self.optim = tf.train.GradientDescentOptimizer(
                    self.args.learning_rate).minimize(self.cross_entropy)
                self.correct_prediction = tf.equal(tf.argmax(self.y, 1),
                                                   tf.argmax(self.y_, 1))
                self.accuracy = tf.reduce_mean(
                    tf.cast(self.correct_prediction, tf.float32))

            self.M = Memory(self.args.memory_size,
                            self.embed.get_shape()[-1],
                            self.y.get_shape()[-1])
            # print("self.embed_dim: ", self.embed.get_shape().as_list())
            with tf.variable_scope("prediction"):
                self.x_eval = tf.placeholder(tf.float32,
                                             shape=[None, self.embed_dim],
                                             name="x_test")
                self.y_eval = tf.placeholder(tf.float32,
                                             shape=[None, 10],
                                             name="y_test")
                with tf.variable_scope("test_fc"):
                    self.out = self.x_eval
                    self.out, self.eval_w["l3_w"], self.eval_w[
                        "l3_b"] = linear(input_=self.out,
                                         output_size=1024,
                                         activation_fn=tf.nn.relu,
                                         name="fc_1")
                    self.out, self.eval_w["l4_w"], self.eval_w[
                        "l4_b"] = linear(input_=self.out,
                                         output_size=10,
                                         name="fc_2")
                self.y_eval_ = self.out
                self.cross_entropy_eval = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(
                        labels=self.y_eval, logits=self.y_eval_))
                self.optim_eval = tf.train.GradientDescentOptimizer(
                    self.args.learning_rate / 10).minimize(
                        self.cross_entropy_eval)
                self.correct_prediction_eval = tf.equal(
                    tf.argmax(self.y_eval, 1), tf.argmax(self.y_eval_, 1))
                self.accuracy_eval = tf.reduce_mean(
                    tf.cast(self.correct_prediction_eval, tf.float32))

            with tf.variable_scope("training_to_prediction"):
                self.t_w_input = {}
                self.t_w_assign_op = {}
                for name in self.eval_w.keys():
                    self.t_w_input[name] = tf.placeholder(
                        tf.float32,
                        self.w[name].get_shape().as_list(),
                        name=name)
                    self.t_w_assign_op[name] = self.eval_w[name].assign(
                        self.t_w_input[name])
            self.session.run(tf.global_variables_initializer())
Code example #18
class MemoryTests(unittest.TestCase):
    memory = Memory(3)

    def test_is_in_short_term_memory_method(self):
        self.memory.clear_memory()

        n1 = Node(1, 18)
        n2 = Node(2, 33)
        n3 = Node(3, 52)
        n4 = Node(4, 89)

        self.memory.add_to_memory(n1, 2)
        n1_2_in_memory_first = self.memory.is_in_short_term_memory(n1, 2)
        self.memory.add_to_memory(n2, 3)
        n1_2_in_memory_second = self.memory.is_in_short_term_memory(n1, 2)
        n2_3_in_memory_first = self.memory.is_in_short_term_memory(n2, 3)
        self.memory.add_to_memory(n3, 4)
        n1_2_in_memory_third = self.memory.is_in_short_term_memory(n1, 2)
        n2_3_in_memory_second = self.memory.is_in_short_term_memory(n2, 3)
        n3_4_in_memory_first = self.memory.is_in_short_term_memory(n3, 4)
        self.memory.add_to_memory(n4, 1)
        n1_2_in_memory_fourth = self.memory.is_in_short_term_memory(n1, 2)
        n2_3_in_memory_third = self.memory.is_in_short_term_memory(n2, 3)
        n3_4_in_memory_second = self.memory.is_in_short_term_memory(n3, 4)
        n4_1_in_memory_first = self.memory.is_in_short_term_memory(n4, 1)

        self.assertTrue(n1_2_in_memory_first)
        self.assertTrue(n1_2_in_memory_second)
        self.assertTrue(n2_3_in_memory_first)
        self.assertTrue(n1_2_in_memory_third)
        self.assertTrue(n2_3_in_memory_second)
        self.assertTrue(n3_4_in_memory_first)
        self.assertFalse(n1_2_in_memory_fourth)
        self.assertTrue(n2_3_in_memory_third)
        self.assertTrue(n3_4_in_memory_second)
        self.assertTrue(n4_1_in_memory_first)

    def test_is_in_long_term_memory_method(self):
        self.memory.clear_memory()

        n1 = Node(1)
        n2 = Node(2)
        n3 = Node(3)
        n4 = Node(4)

        self.memory.add_to_memory(n1, 2)
        n1_2_in_memory_first = self.memory.is_in_long_term_memory(n1, 2)
        self.memory.add_to_memory(n2, 3)
        n1_2_in_memory_second = self.memory.is_in_long_term_memory(n1, 2)
        n2_3_in_memory_first = self.memory.is_in_long_term_memory(n2, 3)
        self.memory.add_to_memory(n3, 4)
        n1_2_in_memory_third = self.memory.is_in_long_term_memory(n1, 2)
        n2_3_in_memory_second = self.memory.is_in_long_term_memory(n2, 3)
        n3_4_in_memory_first = self.memory.is_in_long_term_memory(n3, 4)
        self.memory.add_to_memory(n4, 1)
        n1_2_in_memory_fourth = self.memory.is_in_long_term_memory(n1, 2)
        n2_3_in_memory_third = self.memory.is_in_long_term_memory(n2, 3)
        n3_4_in_memory_second = self.memory.is_in_long_term_memory(n3, 4)
        n4_1_in_memory_first = self.memory.is_in_long_term_memory(n4, 1)

        self.assertTrue(n1_2_in_memory_first)
        self.assertTrue(n1_2_in_memory_second)
        self.assertTrue(n2_3_in_memory_first)
        self.assertTrue(n1_2_in_memory_third)
        self.assertTrue(n2_3_in_memory_second)
        self.assertTrue(n3_4_in_memory_first)
        self.assertTrue(n1_2_in_memory_fourth)
        self.assertTrue(n2_3_in_memory_third)
        self.assertTrue(n3_4_in_memory_second)
        self.assertTrue(n4_1_in_memory_first)
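
The tests above pin down the contract: with a memory size of 3, a (node, color) pair falls out of short-term memory once three newer pairs have been added, while long-term memory never forgets. A minimal sketch that satisfies both tests (the internal representation is an assumption):

from collections import deque

class Memory:
    def __init__(self, size):
        self.size = size
        self.clear_memory()

    def clear_memory(self):
        self._short_term = deque(maxlen=self.size)  # evicts the oldest entry
        self._long_term = set()                     # never evicts

    def add_to_memory(self, node, color):
        self._short_term.append((node, color))
        self._long_term.add((node, color))

    def is_in_short_term_memory(self, node, color):
        return (node, color) in self._short_term

    def is_in_long_term_memory(self, node, color):
        return (node, color) in self._long_term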
Code example #19
File: MbPA_new.py Project: esgl/MbPA
    def __init__(self, sess, args):
        self.args = args
        self.session = sess
        self.w = {}
        self.eval_w = {}
        with tf.variable_scope(self.args.model_name):
            self.x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
            self.y = tf.placeholder(tf.float32, shape=[None, 10], name="y")
            self.memory_sample_batch = tf.placeholder(
                tf.int16, shape=(), name="memory_sample_batch")
            with tf.variable_scope("training"):
                with tf.variable_scope("embedding"):
                    self.out = tf.reshape(self.x, [-1, 28, 28, 1])
                    with tf.variable_scope("conv"):
                        #         # self.out, self.w["l1_w"], self.w["l1_b"] = conv2d(
                        #         #     x=self.out,
                        #         #     output_dim=16,
                        #         #     kernel_size=[8, 8],
                        #         #     stride=[4, 4],
                        #         #     activation_fn=tf.nn.relu,
                        #         #     name="conv1"
                        #         # )
                        #         # self.out, self.w["l2_w"], self.w["l2_b"] = conv2d(
                        #         #     x=self.out,
                        #         #     output_dim=32,
                        #         #     kernel_size=[4, 4],
                        #         #     stride=[2, 2],
                        #         #     activation_fn=tf.nn.relu,
                        #         #     name="conv2"
                        #         # )
                        self.embed = layers.flatten(self.out)
                #         self.embed_dim = self.embed.get_shape()[-1]
                self.M = Memory(self.args.memory_size,
                                self.x.get_shape()[-1],
                                self.y.get_shape()[-1])
                embs_and_values = tf.py_func(self.get_memory_sample,
                                             [self.memory_sample_batch],
                                             [tf.float64, tf.float64])
                self.memory_batch_x = tf.to_float(embs_and_values[0])
                self.memory_batch_y = tf.to_float(embs_and_values[1])
                self.xa = tf.concat(values=[self.x, self.memory_batch_x],
                                    axis=0)
                self.ya = tf.concat(values=[self.y, self.memory_batch_y],
                                    axis=0)
                with tf.variable_scope("fc"):
                    self.out = self.xa
                    # self.out, self.w["l3_w"], self.w["l3_b"] = linear(
                    #     input_=self.out,
                    #     output_size=1024,
                    #     activation_fn=tf.nn.relu,
                    #     name="fc_1"
                    # )
                    self.out, self.w["l4_w"], self.w["l4_b"] = linear(
                        input_=self.out, output_size=10, name="fc_2")
                    self.ya_ = self.out

                self.cross_entropy = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits(labels=self.ya,
                                                            logits=self.ya_))

                self.optim = tf.train.GradientDescentOptimizer(
                    self.args.learning_rate).minimize(self.cross_entropy)
                self.correct_prediction = tf.equal(tf.argmax(self.ya, 1),
                                                   tf.argmax(self.ya_, 1))
                self.accuracy = tf.reduce_mean(
                    tf.cast(self.correct_prediction, tf.float32))

            self.session.run(tf.global_variables_initializer())