Example #1
    def _transition_q_learning(self):
        # if self.bucketed_state.as_tuple() not in self.qstore.q:
        #     self.enum.enumerate_state(self.bucketed_state, self.qstore.q)
        if self.state.as_tuple() not in self.qstore.q:
            self.enum.enumerate_state(self.state, self.qstore.q)

        # action_values = self.qstore.q[self.bucketed_state.as_tuple()]
        action_values = self.qstore.q[self.state.as_tuple()]

        if np.random.random() < self.epsilon:
            action = se.State(state_list=action_values['actions'][
                np.random.randint(len(action_values['actions']))])
        else:
            max_q_value = max(action_values['utilities'])
            max_q_indexes = [
                i for i in range(len(action_values['actions']))
                if action_values['utilities'][i] == max_q_value
            ]
            max_actions = [action_values['actions'][i] for i in max_q_indexes]
            action = se.State(
                state_list=max_actions[np.random.randint(len(max_actions))])

        self.state = self.enum.state_action_transition(self.state, action)
        # self.bucketed_state = self.enum.bucket_state(self.state)
        self._post_transition_updates()
    def _transition_q_learning(self):
        ''' Updates self.state according to an epsilon-greedy strategy'''
        if self.bucketed_state.as_tuple() not in self.qstore.q:
            self.enum.enumerate_state(self.bucketed_state, self.qstore.q)

        action_values = self.qstore.q[self.bucketed_state.as_tuple()]
        # epsilon greedy choice
        # TODO: explore new and better exploration strategy
        if np.random.random() < self.epsilon:
            action = se.State(state_list=action_values['actions'][
                np.random.randint(len(action_values['actions']))])
        else:
            max_q_value = max(action_values['utilities'])
            max_q_indexes = [
                i for i in range(len(action_values['actions']))
                if action_values['utilities'][i] == max_q_value
            ]
            max_actions = [action_values['actions'][i] for i in max_q_indexes]
            action = se.State(
                state_list=max_actions[np.random.randint(len(max_actions))])

        self.state = self.enum.state_action_transition(self.state, action)
        self.bucketed_state = self.enum.bucket_state(self.state)

        self._post_transition_updates()
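
Both variants of _transition_q_learning above draw from the same Q-table layout: each state tuple maps to parallel 'actions' and 'utilities' lists, and the agent either explores uniformly or exploits the highest utility with random tie-breaking. The following is a minimal standalone sketch of that epsilon-greedy choice; the function name and the toy Q-table entry are illustrative, not taken from the source.

import numpy as np

def epsilon_greedy(action_values, epsilon, rng=np.random):
    actions = action_values['actions']
    utilities = action_values['utilities']
    if rng.random() < epsilon:
        # explore: pick any successor state uniformly at random
        return actions[rng.randint(len(actions))]
    # exploit: break ties among the maximum-utility actions uniformly
    best = max(utilities)
    best_actions = [a for a, u in zip(actions, utilities) if u == best]
    return best_actions[rng.randint(len(best_actions))]

# toy entry in the same {'actions': [...], 'utilities': [...]} shape
q_entry = {'actions': [('conv',), ('pool',), ('fc',)], 'utilities': [0.5, 0.8, 0.8]}
print(epsilon_greedy(q_entry, epsilon=0.1))
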
    def load_q_values(self, q_csv_path):
        self.q = {}
        q_csv = pd.read_csv(q_csv_path)
        values = [
            'start_inference_type', 'start_premise', 'start_terminate',
            'end_inference_type', 'end_premise', 'end_terminate', 'utility'
        ]

        for row in zip(*[q_csv[col].values.tolist() for col in values]):
            start_state = se.State(inference_type=row[0],
                                   premise_curr=row[1],
                                   terminate=row[2]).as_tuple()

            end_state = se.State(inference_type=row[3],
                                 premise_curr=row[4],
                                 terminate=row[5]).as_tuple()
            utility = row[6]

            if start_state not in self.q:
                self.q[start_state] = {
                    'actions': [end_state],
                    'utilities': [utility]
                }
            else:
                self.q[start_state]['actions'].append(end_state)
                self.q[start_state]['utilities'].append(utility)
    def save_to_csv(self, q_csv_path):
        start_inference_type = []
        start_premise = []
        start_terminate = []
        end_inference_type = []
        end_premise = []
        end_terminate = []
        utility = []

        for start_state_list in self.q.keys():
            start_state = se.State(state_list=start_state_list)
            for to_state_ix in range(len(self.q[start_state_list]['actions'])):
                to_state = se.State(state_list=self.q[start_state_list]
                                    ['actions'][to_state_ix])
                utility.append(
                    self.q[start_state_list]['utilities'][to_state_ix])

                start_inference_type.append(start_state.layer_type)
                start_premise.append(start_state.layer_depth)
                start_terminate.append(start_state.terminate)

                end_inference_type.append(to_state.layer_type)
                end_premise.append(to_state.layer_depth)
                end_terminate.append(to_state.terminate)

        q_csv = pd.DataFrame({
            'start_inference_type': start_inference_type,
            'start_premise': start_premise,
            'start_terminate': start_terminate,
            'end_inference_type': end_inference_type,
            'end_premise': end_premise,
            'end_terminate': end_terminate,
            'utility': utility
        })
        q_csv.to_csv(q_csv_path, index=False)
    def save_to_csv(self, q_csv_path):
        start_layer_type = []
        start_layer_depth = []
        start_filter_depth = []
        start_filter_size = []
        start_stride = []
        start_image_size = []
        start_fc_size = []
        start_terminate = []
        end_layer_type = []
        end_layer_depth = []
        end_filter_depth = []
        end_filter_size = []
        end_stride = []
        end_image_size = []
        end_fc_size = []
        end_terminate = []
        utility = []
        for start_state_list in self.q.keys():
            start_state = se.State(state_list=start_state_list)
            for to_state_ix in range(len(self.q[start_state_list]['actions'])):
                to_state = se.State(state_list=self.q[start_state_list]['actions'][to_state_ix])
                utility.append(self.q[start_state_list]['utilities'][to_state_ix])
                start_layer_type.append(start_state.layer_type)
                start_layer_depth.append(start_state.layer_depth)
                start_filter_depth.append(start_state.filter_depth)
                start_filter_size.append(start_state.filter_size)
                start_stride.append(start_state.stride)
                start_image_size.append(start_state.image_size)
                start_fc_size.append(start_state.fc_size)
                start_terminate.append(start_state.terminate)
                end_layer_type.append(to_state.layer_type)
                end_layer_depth.append(to_state.layer_depth)
                end_filter_depth.append(to_state.filter_depth)
                end_filter_size.append(to_state.filter_size)
                end_stride.append(to_state.stride)
                end_image_size.append(to_state.image_size)
                end_fc_size.append(to_state.fc_size)
                end_terminate.append(to_state.terminate)

        q_csv = pd.DataFrame({'start_layer_type' : start_layer_type,
                              'start_layer_depth' : start_layer_depth,
                              'start_filter_depth' : start_filter_depth,
                              'start_filter_size' : start_filter_size,
                              'start_stride' : start_stride,
                              'start_image_size' : start_image_size,
                              'start_fc_size' : start_fc_size,
                              'start_terminate' : start_terminate,
                              'end_layer_type' : end_layer_type,
                              'end_layer_depth' : end_layer_depth,
                              'end_filter_depth' : end_filter_depth,
                              'end_filter_size' : end_filter_size,
                              'end_stride' : end_stride,
                              'end_image_size' : end_image_size,
                              'end_fc_size' : end_fc_size,
                              'end_terminate' : end_terminate,
                              'utility' : utility})
        q_csv.to_csv(q_csv_path, index=False)
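
The save_to_csv / load_q_values pairs above round-trip the Q-table through a flat CSV with parallel start_* / end_* columns plus a utility column. Below is a self-contained sketch of the same round-trip pattern, using plain tuples in place of se.State and an illustrative file name:

import pandas as pd

# in-memory Q-table: state -> parallel 'actions' / 'utilities' lists
q = {('conv', 1): {'actions': [('pool', 2), ('fc', 3)], 'utilities': [0.7, 0.4]}}

rows = [{'start': str(s), 'end': str(a), 'utility': u}
        for s, entry in q.items()
        for a, u in zip(entry['actions'], entry['utilities'])]
pd.DataFrame(rows).to_csv('q_values.csv', index=False)

# reload into the same nested structure
loaded = {}
for _, r in pd.read_csv('q_values.csv').iterrows():
    entry = loaded.setdefault(r['start'], {'actions': [], 'utilities': []})
    entry['actions'].append(r['end'])
    entry['utilities'].append(r['utility'])
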
Example #6
    def __init__(
        self,
        state_space_parameters,
        epsilon,
        data_path='./MNIST',
        state=None,
        qstore=None,

        # replay_dictionary = pd.DataFrame(columns=['net',
        #                                         'accuracy_best_val',
        #                                         'accuracy_last_val',       # uncomment while actual training
        #                                         'epsilon',
        #                                         'train_flag'])):
        replay_dictionary=pd.DataFrame(columns=[
            'net', 'loss_inverse', 'loss', 'epsilon', 'computeLoss_flag'
        ])):

        self.state_list = []
        self.data_path = data_path
        # self.bucketed_state_list = []
        self.state_space_parameters = state_space_parameters

        self.enum = se.StateEnumerator(state_space_parameters)
        self.stringutils = StateStringUtils(state_space_parameters)

        self.state = se.State('start', 0, 1, 0, 0,
                              state_space_parameters.image_size, 0,
                              0) if not state else state
        # self.bucketed_state = self.enum.bucket_state(self.state)

        self.qstore = QValues() if not qstore else qstore
        self.replay_dictionary = replay_dictionary

        self.epsilon = epsilon
    def __init__(
        self,
        premise,
        hypothesis,
        epsilon,
        state_space_parameters,
        state=None,
        qstore=None,
        replaydict=None,
        WeightInitializer=None,
        device=None,
        replay_dictionary=pd.DataFrame(columns=[
            'path', 'epsilon', 'accuracy_best_val', 'accuracy_last_val',
            'accuracy_best_test', 'accuracy_last_test', 'ix_q_value_update'
        ])):

        self.state_list = []
        self.state_space_parameters = state_space_parameters
        self.enumerator = se.StateEnumerator(state_space_parameters)
        self.state = se.State('start', premise, 0, self.state_list)
        self.qstore = QValues()

        if qstore is not None:
            self.qstore.load_q_values(qstore)
            self.replay_dictionary = pd.read_csv(replaydict, index_col=0)
        else:
            self.replay_dictionary = replay_dictionary

        self.epsilon = epsilon
        self.WeightInitializer = WeightInitializer
        self.device = device
Example #8
    def __init__(
        self,
        state_space_parameters,
        epsilon,
        WeightInitializer=None,
        device=None,
        args=None,
        save_path=None,
        state=None,
        qstore=None,
        replaydict=None,
        replay_dictionary=pd.DataFrame(columns=[
            'net_disc', 'input_size', 'reward', 'epsilon', 'train_flag'
        ])):
        self.state_list = []
        self.state_space_parameters = state_space_parameters
        self.args = args
        self.enum = se.StateEnumerator(state_space_parameters, args)
        self.stringutils = StateStringUtils(state_space_parameters, args)

        self.state = se.State('start', 0, 1, 0, 0, args.patch_size, 0,
                              0) if not state else state
        self.qstore = QValues()
        if qstore is not None:
            self.qstore.load_q_values(qstore)
            self.replay_dictionary = pd.read_csv(replaydict, index_col=0)
        else:
            self.replay_dictionary = replay_dictionary
        self.epsilon = epsilon
        self.WeightInitializer = WeightInitializer
        self.device = device
        self.gpu_mem_0 = GPUMem(torch.device('cuda') == self.device)
        self.save_path = save_path
        # TODO: hard-coded arc no. to resume from if epsilon < 1
        self.count = args.continue_ite - 1
Example #9
    def __init__(self,
                 state_space_parameters,
                 epsilon,
                 state=None,
                 qstore=None,
                 replay_dictionary=pd.DataFrame(columns=[
                     'net', 'accuracy_best_val', 'accuracy_last_val',
                     'accuracy_best_test', 'accuracy_last_test',
                     'ix_q_value_update', 'epsilon'
                 ])):
        self.state_list = []

        self.state_space_parameters = state_space_parameters

        # Class that will expand states for us
        self.enum = se.StateEnumerator(state_space_parameters)
        self.stringutils = StateStringUtils(state_space_parameters)

        # Starting State
        self.state = se.State('start', 0, 1, 0, 0,
                              state_space_parameters.image_size, 0,
                              0) if not state else state
        self.bucketed_state = self.enum.bucket_state(self.state)

        # Cached Q-Values -- used for q learning update and transition
        self.qstore = QValues() if not qstore else qstore
        self.replay_dictionary = replay_dictionary

        self.epsilon = epsilon  # epsilon: parameter for epsilon greedy strategy
    def _reset_for_new_walk(self):
        '''Reset the state for a new random walk'''
        # Architecture String
        self.state_list = []

        # Starting State
        self.state = se.State('start', 0, 1, 0, 0, self.state_space_parameters.image_size, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        self.bucketed_state = self.enum.bucket_state(self.state)
    def load_q_values(self, q_csv_path):
        self.q = {}
        q_csv = pd.read_csv(q_csv_path)
        for row in zip(*[q_csv[col].values.tolist() for col in ['start_layer_type',
                                              'start_layer_depth',
                                              'start_filter_depth',
                                              'start_filter_size',
                                              'start_stride',
                                              'start_image_size',
                                              'start_fc_size',
                                              'start_terminate',
                                              'end_layer_type',
                                              'end_layer_depth',
                                              'end_filter_depth',
                                              'end_filter_size',
                                              'end_stride',
                                              'end_image_size',
                                              'end_fc_size',
                                              'end_terminate',
                                              'utility']]):
            start_state = se.State(layer_type = row[0],
                                   layer_depth = row[1],
                                   filter_depth = row[2],
                                   filter_size = row[3],
                                   stride = row[4],
                                   image_size = row[5],
                                   fc_size = row[6],
                                   terminate = row[7]).as_tuple()
            end_state = se.State(layer_type = row[8],
                                 layer_depth = row[9],
                                 filter_depth = row[10],
                                 filter_size = row[11],
                                 stride = row[12],
                                 image_size = row[13],
                                 fc_size = row[14],
                                 terminate = row[15]).as_tuple()
            utility = row[16]

            if start_state not in self.q:
                self.q[start_state] = {'actions': [end_state], 'utilities': [utility]}
            else:
                self.q[start_state]['actions'].append(end_state)
                self.q[start_state]['utilities'].append(utility)
    def _reset_for_new_walk(self):
        '''Reset the state for a new random walk'''

        # Inference Path String
        self.state_list = []

        # Starting State
        # TODO: randomly sample an inference alignment with high confidence
        self.state = se.State('start', 0, 1, 0, 0,
                              self.state_space_parameters.image_size, 0, 0)
        self.bucketed_state = self.enum.bucket_state(self.state)
    def convert_model_string_to_states(self, parsed_list, start_state=None):
        '''Takes a parsed model string and returns a recursive list of states.'''

        states = [start_state] if start_state else [se.State('start', 0, 1, 0, 0, self.image_size, 0, 0)]

        for layer in parsed_list:
            if layer[0] == 'conv':
                states.append(se.State(layer_type='conv',
                                    layer_depth=states[-1].layer_depth + 1,
                                    filter_depth=layer[1],
                                    filter_size=layer[2],
                                    stride=layer[3],
                                    image_size=self.enum._calc_new_image_size(states[-1].image_size, layer[2], layer[3]),
                                    fc_size=0,
                                    terminate=0))
            elif layer[0] == 'gap':
                states.append(se.State(layer_type='gap',
                                        layer_depth=states[-1].layer_depth + 1,
                                        filter_depth=0,
                                        filter_size=0,
                                        stride=0,
                                        image_size=1,
                                        fc_size=0,
                                        terminate=0))
            elif layer[0] == 'pool':
                states.append(se.State(layer_type='pool',
                                    layer_depth=states[-1].layer_depth + 1,
                                    filter_depth=0,
                                    filter_size=layer[1],
                                    stride=layer[2],
                                    image_size=self.enum._calc_new_image_size(states[-1].image_size, layer[1], layer[2]),
                                    fc_size=0,
                                    terminate=0))
            elif layer[0] == 'fc':
                states.append(se.State(layer_type='fc',
                                    layer_depth=states[-1].layer_depth + 1,
                                    filter_depth=len([state for state in states if state.layer_type == 'fc']),
                                    filter_size=0,
                                    stride=0,
                                    image_size=0,
                                    fc_size=layer[1],
                                    terminate=0))
            elif layer[0] == 'dropout':
                states.append(se.State(layer_type='dropout',
                                        layer_depth=states[-1].layer_depth,
                                        filter_depth=layer[1],
                                        filter_size=0,
                                        stride=0,
                                        image_size=states[-1].image_size,
                                        fc_size=layer[2],
                                        terminate=0))
            elif layer[0] == 'softmax':
                termination_state = states[-1].copy() if states[-1].layer_type != 'dropout' else states[-2].copy()
                termination_state.terminate=1
                termination_state.layer_depth += 1
                states.append(termination_state)

        return states
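
The layer tuples consumed above follow a (layer_type, *params) layout that can be read off the indexing in each branch (e.g. conv uses layer[1]=filter_depth, layer[2]=filter_size, layer[3]=stride). A hypothetical input in that layout; the exact parser output format is an assumption, not taken from the source:

parsed_list = [
    ('conv', 64, 3, 1),   # filter_depth, filter_size, stride
    ('pool', 2, 2),       # filter_size, stride
    ('fc', 128),          # fc_size
    ('dropout', 1, 128),  # filter_depth, fc_size
    ('softmax',),         # converts the last layer into a terminal state
]
# states = string_utils.convert_model_string_to_states(parsed_list)
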
    def __init__(self, state_space_parameters, network_number, qstore=None):

        self.state_list = []

        self.state_space_parameters = state_space_parameters

        # Class that will expand states for us
        self.enum = se.StateEnumerator(state_space_parameters)
        self.stringutils = StateStringUtils(state_space_parameters)

        # Starting State
        self.state = se.State('start', 0, 1, 0, 0, state_space_parameters.image_size, 0, 0, 0, 0, 0, 0, 0, 0, 0)# if not state else state
        self.bucketed_state = self.enum.bucket_state(self.state)

        # Cached Q-Values -- used for q learning update and transition
        self.qstore = QValues() if not qstore else qstore
        # self.replay_dictionary = replay_dictionary

        # self.epsilon=epsilon # epsilon: parameter for epsilon greedy strategy
        self.network_number = network_number
    def convert_model_string_to_states(self, parsed_list, start_state=None):
        """
        Takes a parsed model string and returns a recursive list of states
        """

        states = [start_state] if start_state else [
            se.State('start', 0, 1, 0, 0, self.image_size, 0, 0)
        ]
        """
            NOTE:
            flag = 0 => WRN
            flag = 1 => Conv
            flag = 2 => SPP 
        """
        # TODO: fix parser for wrn and spp, currently being saved as conv with a flag
        for layer in parsed_list:
            if layer[0] == 'conv' and layer[4] == 1:
                states.append(
                    se.State(layer_type='conv',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=layer[1],
                             filter_size=layer[2],
                             stride=layer[3],
                             image_size=states[-1].image_size
                             if self.ssp.conv_padding == 'SAME' else
                             self.enum._calc_new_image_size(
                                 states[-1].image_size, layer[2]),
                             fc_size=0,
                             terminate=0))
            elif layer[0] == 'conv' and layer[4] == 0:
                # TODO: fix, filter size of 3 and stride of 1 in wrn hard-coded
                states.append(
                    se.State(layer_type='wrn',
                             layer_depth=states[-1].layer_depth + 2,
                             filter_depth=layer[1],
                             filter_size=3,
                             stride=1,
                             image_size=states[-1].image_size,
                             fc_size=0,
                             terminate=0))
            elif layer[0] == 'conv' and layer[4] == 2:
                # TODO: fix, filter size of 3 and stride of 1 in wrn hard-coded
                states.append(
                    se.State(layer_type='spp',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=layer[1],
                             filter_size=layer[2],
                             stride=0,
                             image_size=int(layer[2] * (layer[2] + 1) *
                                            (2 * layer[2] + 1) / 6.),
                             fc_size=0,
                             terminate=0))
            elif layer[0] == 'gap':
                states.append(
                    se.State(layer_type='gap',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=0,
                             filter_size=0,
                             stride=0,
                             image_size=1,
                             fc_size=0,
                             terminate=0))
            elif layer[0] == 'pool':
                states.append(
                    se.State(layer_type='pool',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=0,
                             filter_size=layer[1],
                             stride=layer[2],
                             image_size=self.enum._calc_new_image_size_pool(
                                 states[-1].image_size, layer[1], layer[2]),
                             fc_size=0,
                             terminate=0))
            elif layer[0] == 'fc':
                states.append(
                    se.State(layer_type='fc',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=len([
                                 state for state in states
                                 if state.layer_type == 'fc'
                             ]),
                             filter_size=0,
                             stride=0,
                             image_size=0,
                             fc_size=layer[1],
                             terminate=0))
            elif layer[0] == 'dropout':
                states.append(
                    se.State(layer_type='dropout',
                             layer_depth=states[-1].layer_depth,
                             filter_depth=layer[1],
                             filter_size=0,
                             stride=0,
                             image_size=states[-1].image_size,
                             fc_size=layer[2],
                             terminate=0))
            elif layer[0] == 'softmax':
                termination_state = states[-1].copy(
                ) if states[-1].layer_type != 'dropout' else states[-2].copy()
                termination_state.terminate = 1
                termination_state.layer_depth += 1
                states.append(termination_state)

        return states
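
The spp branch above sets image_size to int(n * (n + 1) * (2n + 1) / 6) with n = layer[2], which is the closed form for 1^2 + 2^2 + ... + n^2, i.e. the total number of pooling bins over pyramid levels of size 1..n. A quick check:

# closed-form bin count for an n-level spatial pyramid (levels of 1x1 .. nxn)
def spp_bins(n):
    return n * (n + 1) * (2 * n + 1) // 6

assert spp_bins(4) == sum(k * k for k in range(1, 5))  # 1 + 4 + 9 + 16 = 30
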
Example #16
    def _reset_for_new_walk(self):

        self.state_list = []
        self.state = se.State('start', 0, 1, 0, 0, self.args.patch_size, 0, 0)
    def convert_model_string_to_states(self, parsed_list, start_state=None):
        '''Takes a parsed model string and returns a recursive list of states.'''

        states = [start_state] if start_state else [
            se.State('start', 0, 1, 0, 0, self.image_size, 0, 0)
        ]
        activation_list = ['None', 'tf.nn.relu']
        first_layer = 1

        batchsize = random.randrange(1, 129, 1)
        input_image_size = random.randrange(1, 513, 1)
        input_image_channels = random.randrange(1, 3, 1)

        total_conv_filters = 0
        total_conv_kernelsizes = 0
        total_conv_strides = 0
        total_conv_paddings = 0
        total_conv_acts = 0
        total_conv_bias = 0

        total_pool_sizes = 0
        total_pool_strides = 0
        total_pool_paddings = 0

        total_fc_units = 0
        total_fc_acts = 0
        total_fc_bias = 0

        time_list = []
        time_max = None
        time_min = None
        time_median = None
        time_mean = None
        time_trim_mean = None

        tf.reset_default_graph()
        op = None
        for layer in parsed_list:
            if layer[0] == 'conv':
                if first_layer == 1:
                    first_layer = 0
                    op = tf.Variable(
                        tf.random_normal([
                            batchsize, input_image_size, input_image_size,
                            input_image_channels
                        ]))

                op = tf.layers.conv2d(
                    op,
                    filters=layer[1],
                    kernel_size=[layer[2], layer[2]],
                    strides=(layer[3], layer[3]),
                    padding=('SAME' if layer[4] == 1 else 'VALID'),
                    activation=eval(activation_list[layer[5]]),
                    use_bias=layer[6],
                    name='convolution_%d' % (states[-1].layer_depth + 1))

                total_conv_filters = total_conv_filters + layer[1]
                total_conv_kernelsizes = total_conv_kernelsizes + layer[2]**2
                total_conv_strides = total_conv_strides + layer[3]**2
                total_conv_paddings = total_conv_paddings + layer[4]
                total_conv_acts = total_conv_acts + layer[5]
                total_conv_bias = total_conv_bias + layer[6]

                states.append(
                    se.State(layer_type='conv',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=layer[1],
                             filter_size=layer[2],
                             stride=layer[3],
                             image_size=states[-1].image_size,
                             fc_size=0,
                             terminate=0,
                             conv_padding=layer[4],
                             conv_act=layer[5],
                             conv_bias=layer[6],
                             pool_padding=0,
                             fc_act=0,
                             fc_bias=0,
                             state_list=0))
            # elif layer[0] == 'gap':
            #     states.append(se.State(layer_type='gap',
            #                             layer_depth=states[-1].layer_depth + 1,
            #                             filter_depth=0,
            #                             filter_size=0,
            #                             stride=0,
            #                             image_size=1,
            #                             fc_size=0,
            #                             terminate=0))
            elif layer[0] == 'pool':
                if first_layer == 1:
                    first_layer = 0
                    input_image_size = states[-1].image_size**2
                    op = tf.Variable(
                        tf.random_normal([
                            batchsize, input_image_size, input_image_size,
                            input_image_channels
                        ]))

                op = tf.layers.max_pooling2d(
                    op,
                    pool_size=(layer[1], layer[1]),
                    strides=(layer[2], layer[2]),
                    padding=('SAME' if layer[3] == 1 else 'VALID'),
                    name='pooling_%d' % (states[-1].layer_depth + 1))

                total_pool_sizes = total_pool_sizes + layer[1]**2
                total_pool_strides = total_pool_strides + layer[2]**2
                total_pool_paddings = total_pool_paddings + layer[3]

                states.append(
                    se.State(layer_type='pool',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=0,
                             filter_size=layer[1],
                             stride=layer[2],
                             image_size=self.enum._calc_new_image_size(
                                 states[-1].image_size, layer[1], layer[2]),
                             fc_size=0,
                             terminate=0,
                             conv_padding=0,
                             conv_act=0,
                             conv_bias=0,
                             pool_padding=layer[3],
                             fc_act=0,
                             fc_bias=0,
                             state_list=0))
            elif layer[0] == 'fc':
                if first_layer == 1:
                    first_layer = 0
                    input_image_size = states[-1].image_size**2
                    op = tf.Variable(
                        tf.random_normal([
                            batchsize, input_image_size * input_image_size *
                            input_image_channels
                        ]))

                op = tf.layers.dense(
                    inputs=op,
                    units=layer[1],
                    kernel_initializer=tf.ones_initializer(),
                    activation=eval(activation_list[layer[2]]),
                    use_bias=layer[3],
                    name='dense_%d' % (states[-1].layer_depth + 1))

                total_fc_units = total_fc_units + layer[1]
                total_fc_acts = total_fc_acts + layer[2]
                total_fc_bias = total_fc_bias + layer[3]

                states.append(
                    se.State(layer_type='fc',
                             layer_depth=states[-1].layer_depth + 1,
                             filter_depth=len([
                                 state for state in states
                                 if state.layer_type == 'fc'
                             ]),
                             filter_size=0,
                             stride=0,
                             image_size=0,
                             fc_size=layer[1],
                             terminate=0,
                             conv_padding=0,
                             conv_act=0,
                             conv_bias=0,
                             pool_padding=0,
                             fc_act=layer[2],
                             fc_bias=layer[3],
                             state_list=0))
            # elif layer[0] == 'dropout':
            #     states.append(se.State(layer_type='dropout',
            #                             layer_depth=states[-1].layer_depth,
            #                             filter_depth=layer[1],
            #                             filter_size=0,
            #                             stride=0,
            #                             image_size=states[-1].image_size,
            #                             fc_size=layer[2],
            #                             terminate=0))
            # elif layer[0] == 'softmax':
            #     termination_state = states[-1].copy() if states[-1].layer_type != 'dropout' else states[-2].copy()
            #     termination_state.terminate=1
            #     termination_state.layer_depth += 1
            #     states.append(termination_state)
        sess = tf.Session()
        if int((tf.__version__).split('.')[1]) < 12 and int(
            (tf.__version__).split('.')[0]) < 1:
            init = tf.initialize_all_variables()
        else:
            init = tf.global_variables_initializer()
        sess.run(init)
        # Warm-up run
        for _ in range(5):  #args.iter_warmup):
            sess.run(op)
        # Benchmark run
        for _ in range(10):  #args.iter_benchmark):
            start_time = time.time()
            sess.run(op)
            time_list.append(((time.time() - start_time) * 1000))

        np_array_parameters = np.array(time_list)
        time_max = np.amax(np_array_parameters)
        time_min = np.amin(np_array_parameters)
        time_median = np.median(np_array_parameters)
        time_mean = np.mean(np_array_parameters)
        time_trim_mean = stats.trim_mean(np_array_parameters, 0.1)

        result_dict = {
            'batchsize': batchsize,
            'input_image_size': input_image_size**2,
            'input_image_channels': input_image_channels,
            'total_conv_filters': total_conv_filters,
            'total_conv_kernelsizes': total_conv_kernelsizes,
            'total_conv_strides': total_conv_strides,
            'total_conv_paddings': total_conv_paddings,
            'total_conv_acts': total_conv_acts,
            'total_conv_bias': total_conv_bias,
            'total_pool_sizes': total_pool_sizes,
            'total_pool_strides': total_pool_strides,
            'total_pool_paddings': total_pool_paddings,
            'total_fc_units': total_fc_units,
            'total_fc_acts': total_fc_acts,
            'total_fc_bias': total_fc_bias,
            'time_max': time_max,
            'time_min': time_min,
            'time_median': time_median,
            'time_mean': time_mean,
            'time_trim_mean': time_trim_mean,
        }
        print(result_dict)
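
The timing logic above follows a common op-benchmarking pattern: a few untimed warm-up runs, a timed loop, then summary statistics including a 10% trimmed mean. A standalone sketch of that pattern applied to an arbitrary callable; iteration counts and names here are illustrative:

import time
import numpy as np
from scipy import stats

def benchmark(fn, warmup=5, iters=10):
    for _ in range(warmup):          # warm-up runs are not timed
        fn()
    times_ms = []
    for _ in range(iters):
        start = time.time()
        fn()
        times_ms.append((time.time() - start) * 1000)
    t = np.array(times_ms)
    return {'time_max': np.amax(t), 'time_min': np.amin(t),
            'time_median': np.median(t), 'time_mean': np.mean(t),
            'time_trim_mean': stats.trim_mean(t, 0.1)}  # drop top/bottom 10%

print(benchmark(lambda: sum(range(10000))))
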
Example #18
    def _reset_for_new_walk(self):

        self.state_list = []
        self.bucketed_state_list = []
        self.state = se.State('start', 0, 1, 0, 0,
                              self.state_space_parameters.image_size, 0, 0)