    def get_result(self):

        print('processing ' + os.path.split(self.dst_output_path)[-1])

        list_feature_file_path = os.path.join(self.dst_output_path,
                                              'features_379.npy')
        dense_adj_mat_path = os.path.join(self.dst_output_path,
                                          'adj_matrix.npy')
        dense_node_feat_path = os.path.join(self.dst_output_path,
                                            'node_features.npy')
        adj_mat_path = os.path.join(self.dst_output_path, 'adj_matrix.npz')
        node_feat_path = os.path.join(self.dst_output_path,
                                      'node_features.npz')
        if (os.path.exists(list_feature_file_path)
                and os.path.exists(adj_mat_path)
                and os.path.exists(node_feat_path)):
            return STATUS_OK
        if (os.path.exists(list_feature_file_path)
                and os.path.exists(dense_adj_mat_path)
                and os.path.exists(dense_node_feat_path)):
            # Convert previously saved dense arrays to compressed sparse
            # matrices and remove the redundant dense files.
            sp.save_npz(adj_mat_path,
                        dense_to_sparse(np.load(dense_adj_mat_path)))
            sp.save_npz(node_feat_path,
                        dense_to_sparse(np.load(dense_node_feat_path)))
            os.remove(dense_adj_mat_path)
            os.remove(dense_node_feat_path)
            return STATUS_OK
        if self.__check_prerequisites() != STATUS_OK:
            print('prerequisites not satisfied')
            return STATUS_ERR
        if self.include_permissions_147 \
                and self.__fetch_permissions() != STATUS_OK:
            print('fetch permissions failed')
            return STATUS_ERR
        if (self.include_intent_actions_126 or self.include_intent_actions_110) \
                and self.__fetch_intent_actions() != STATUS_OK:
            print('fetch intent actions failed')
            return STATUS_ERR
        if self.include_sensitive_apis_106 \
                and self.__fetch_sensitive_apis() != STATUS_OK:
            print('fetch sensitive apis failed')
            return STATUS_ERR

        np.save(list_feature_file_path,
                np.array(self.feature_list, dtype=np.uint8))

        if os.path.exists(list_feature_file_path):
            return STATUS_OK
        else:
            return STATUS_ERR
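Both this example and Example #5 below pass the result of dense_to_sparse straight to sp.save_npz, which only accepts scipy sparse matrices. A minimal sketch of such a helper, assuming it simply wraps scipy.sparse.csr_matrix (the name dense_to_sparse follows the examples; the body is an assumption):

import numpy as np
import scipy.sparse as sp

def dense_to_sparse(dense_mat):
    # Assumed implementation: store a dense NumPy array in CSR form so it
    # can be written with sp.save_npz and restored later with sp.load_npz.
    return sp.csr_matrix(np.asarray(dense_mat))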
Example #2
 def _build_train_op(self, logits, Y):
     # self.global_step = tf.Variable(0, trainable=False)
     # tf.nn.ctc_loss expects its labels argument as a tf.SparseTensor
     Y = utils.dense_to_sparse(Y)
     loss = tf.nn.ctc_loss(
         labels=Y,
         inputs=logits,
         sequence_length=self.seq_len,
         # preprocess_collapse_repeated = True,
         # ctc_merge_repeated=False,
     )
     #all_vars   = tf.trainable_variables()
     #lossL2 = tf.add_n([ tf.nn.l2_loss(v) for v in all_vars ]) * FLAGS.decay_weight
     #self.loss = tf.reduce_mean(loss) + lossL2
     self.loss = tf.reduce_mean(loss)
     tf.summary.scalar('loss', self.loss)
     tf.summary.scalar('learning_rate', self.lrn_rate)
     # self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lrn_rate,
     #                                            momentum=FLAGS.momentum).minimize(self.cost,
     #                                                                              global_step=self.global_step)
     # self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.lrn_rate,
     #                                             momentum=FLAGS.momentum,
     #                                             use_nesterov=True).minimize(self.cost,
     #                                                                         global_step=self.global_step)
     grads = self.opt.compute_gradients(self.loss)
     # Option 2: tf.nn.ctc_beam_search_decoder
     # (it's slower but you'll get better results)
     decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits,
                                                       self.seq_len,
                                                       beam_width=5,
                                                       merge_repeated=False)
     dense_decoded = tf.sparse_tensor_to_dense(decoded[0], default_value=-1)
     return dense_decoded, grads
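Here utils.dense_to_sparse must turn the dense batch of label sequences Y into the tf.SparseTensor that tf.nn.ctc_loss requires. A sketch of one common way to write it, assuming 0 is the padding value (the helper's actual body is not shown in the example):

import tensorflow as tf

def dense_to_sparse(dense, pad_value=0):
    # Indices of every non-padding entry, their values, and the original
    # dense shape together define the equivalent tf.SparseTensor.
    indices = tf.where(tf.not_equal(dense, pad_value))
    values = tf.gather_nd(dense, indices)
    shape = tf.shape(dense, out_type=tf.int64)
    return tf.SparseTensor(indices, values, shape)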
Example #3
    def __init__(self, A, n_features, hidden_dim=32, code_dim=16):
        super().__init__()
        self.hidden = l.KipfAndWillingConv(n_features, hidden_dim)
        self.encoder = l.KipfAndWillingConv(hidden_dim, code_dim)

        transform = l.KipfAndWillingConv.compute_transform(A)
        self.transform = u.dense_to_sparse(transform)
Example #4
    def __init__(self, A, n_features, n_samples, hidden_dim=32, code_dim=16):
        super().__init__()
        self.hidden = l.KipfAndWillingConv(n_features, hidden_dim)

        self.means_encoder = l.KipfAndWillingConv(hidden_dim, code_dim)
        self.log_std_encoder = l.KipfAndWillingConv(hidden_dim, code_dim)

        transform = l.KipfAndWillingConv.compute_transform(A)

        self.transform = u.dense_to_sparse(transform)
        self.n_samples = A.shape[0]  # number of nodes in the graph
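Examples #3 and #4 store the normalized graph transform as a sparse tensor via u.dense_to_sparse. Since the surrounding code is PyTorch-style, a plausible sketch (the body is an assumption, not the library's actual helper) builds a COO tensor from the nonzero entries:

import torch

def dense_to_sparse(matrix):
    # Transpose the (nnz, 2) nonzero-index matrix to the (2, nnz) layout
    # that torch.sparse_coo_tensor expects, then attach the values.
    indices = matrix.nonzero(as_tuple=False).t()
    values = matrix[indices[0], indices[1]]
    return torch.sparse_coo_tensor(indices, values, matrix.shape)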
Example #5
    def __build_node_features(self):
        # Read the list of functions extracted for this sample
        all_funcs = []
        read_file_to_list(all_funcs,
                          os.path.join(self.dst_output_path, 'all_funcs.txt'))
        node_features = []
        for _ in range(len(all_funcs)):
            feature_list = []
            get_filtered_vector(
                feature_list, all_funcs,
                CONSTANTS['SENSITIVE_APIS_106']['REFERENCE_LIST'])
            node_features.append(feature_list)
        # Save the node-feature matrix in compressed sparse form
        sp.save_npz(
            os.path.join(self.dst_output_path, 'node_features_api.npz'),
            dense_to_sparse(np.array(node_features)))
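For completeness, matrices saved this way can be read back with scipy.sparse.load_npz and densified on demand, for example:

import scipy.sparse as sp

node_features = sp.load_npz('node_features_api.npz')
dense = node_features.toarray()  # back to a plain NumPy array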