Example #1
    def info2tensor(self, parent_idx, frag_type):
        # Wrap the scalar parent index and the fragment type id in lists
        # so data2tensor can convert them into tensors; the fragment type
        # is first mapped to its numeric id through self._type_dict.
        parent_idx = [parent_idx]
        parent_idx = data2tensor(parent_idx)
        frag_type = [self._type_dict[frag_type]]
        frag_type = data2tensor(frag_type,
                                tensor_type="Float")
        return parent_idx, frag_type
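
info2tensor, as well as the PyTorch examples further down (Examples #4, #5, #7, #8), relies on a data2tensor helper that is not shown here (the Paddle examples use a separate utils.data2tensor with a different signature). A minimal PyTorch sketch of what such a helper could look like; the batch dimension and the default tensor type are assumptions, not the original implementation:

import torch

def data2tensor(data, tensor_type="Long"):
    # Turn a plain Python list into a batch-of-one tensor of the requested dtype.
    dtype = torch.float if tensor_type == "Float" else torch.long
    return torch.tensor(data, dtype=dtype).unsqueeze(0)
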
Example #2
def infer_net(test_reader, use_gpu, model_path=None):
    """
    Inference function
    """
    if model_path is None:
        print(str(model_path) + " cannot be found")
        return
    # set place, executor
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # load the saved model
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(model_path, exe)

        for data in test_reader():
            # infer a batch
            pred = exe.run(inference_program,
                           feed=utils.data2tensor(data, place),
                           fetch_list=fetch_targets,
                           return_numpy=True)
            for i, val in enumerate(data):
                class3_label, class2_label = utils.get_predict_label(
                    pred[0][i, 1])
                pos_prob = pred[0][i, 1]
                neg_prob = 1 - pos_prob
                print("predict label: %d, pos_prob: %f, neg_prob: %f" %
                      (class3_label, pos_prob, neg_prob))
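
A plausible way to drive infer_net, assuming old-style PaddlePaddle readers; the toy samples, batch size, and model path below are illustrative assumptions, not part of the original example. paddle.batch wraps a sample-level reader into a batch-level one, which matches the for data in test_reader() loop above.

import paddle

def make_reader(samples):
    # Each sample is assumed to be a (word_id_list, label) pair, matching how
    # these examples consume a batch of samples.
    def reader():
        for sample in samples:
            yield sample
    return reader

toy_samples = [([8, 1, 3, 9], 0), ([2, 7, 4], 2)]  # purely illustrative
test_reader = paddle.batch(make_reader(toy_samples), batch_size=2)
infer_net(test_reader, use_gpu=False, model_path="models/epoch0")
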
Example #3
def infer(test_reader, use_cuda, model_path=None):
    """
    inference function
    """
    if model_path is None:
        print(str(model_path) + " cannot be found")
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(model_path, exe)

        total_acc = 0.0
        total_count = 0
        for data in test_reader():
            acc = exe.run(inference_program,
                          feed=utils.data2tensor(data, place),
                          fetch_list=fetch_targets,
                          return_numpy=True)
            total_acc += acc[0] * len(data)
            total_count += len(data)

        avg_acc = total_acc / total_count
        print("model_path: %s, avg_acc: %f" % (model_path, avg_acc))
Example #4
    def gen_code(self, printer, model):
        stack = []
        ins_cnt = 0
        (seed_name, root, model_input) = self.prepare_seed(model)
        frag, hidden, parent_idx, frag_type = model_input

        while parent_idx is not None:
            # Check max insertion condition
            if ins_cnt >= self._max_ins:
                return None
            else:
                ins_cnt += 1

            frag = data2tensor(frag)
            valid_type = frag_type
            parent_idx, frag_type = self.info2tensor(parent_idx, frag_type)
            outputs, hidden = model.run(frag, hidden, parent_idx, frag_type)

            _, cand_tensor = torch.topk(outputs[0][0], self._top_k)
            cand_list = cand_tensor.data.tolist()

            (found, frag, parent_idx,
             frag_type) = self.append_frag(cand_list, valid_type, root, stack)
            if not found:
                msg = 'Failed to select valid frag at %d' % ins_cnt
                print_msg(msg, 'WARN')
                return None

        harness_list = self._harness.get_list(seed_name)
        self.resolve_errors(root, harness_list)

        root = self.postprocess(root, harness_list)
        js_path = printer.ast2code(root)
        return js_path
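
gen_code returns either the path of the printed JS file or None when the insertion budget (self._max_ins) is exhausted or no valid fragment is found, so a caller would typically retry in a loop. A hypothetical fuzzing driver; generator and run_target are assumed names, not part of the original example.

while True:
    js_path = generator.gen_code(printer, model)
    if js_path is None:
        continue  # fragment selection failed or the insertion budget was hit; retry
    run_target(js_path)  # hypothetical: execute the generated JS in the engine under test
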
Example #5
    def prepare_seed(self, model):
        # Prepare AST
        seed_name, frag_seq = self.select_seed()
        (root, pre_seq, parent_idx,
         frag_type) = self.build_seed_tree(seed_name, frag_seq)

        # Prepare input for the model
        frag = [pre_seq[-1]]
        pre_seq = pre_seq[:-1]
        model_input = data2tensor(pre_seq)
        hidden = model.run(model_input)
        model_input = (frag, hidden, parent_idx, frag_type)
        seed_name = trim_seed_name(seed_name)
        return seed_name, root, model_input
Example #6
def eval_net(test_reader, use_gpu, model_path=None):
    """
    Evaluation function
    """
    if model_path is None:
        print(str(model_path) + " cannot be found")
        return
    # set place, executor
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # load the saved model
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(model_path, exe)

        # compute 2class and 3class accuracy
        class2_acc, class3_acc = 0.0, 0.0
        total_count, neu_count = 0, 0

        for data in test_reader():
            # infer a batch
            pred = exe.run(inference_program,
                           feed=utils.data2tensor(data, place),
                           fetch_list=fetch_targets,
                           return_numpy=True)
            for i, val in enumerate(data):
                class3_label, class2_label = utils.get_predict_label(
                    pred[0][i, 1])
                true_label = val[1]
                if class2_label == true_label:
                    class2_acc += 1
                if class3_label == true_label:
                    class3_acc += 1
                if true_label == 1.0:
                    neu_count += 1

            total_count += len(data)

        class2_acc = class2_acc / (total_count - neu_count)
        class3_acc = class3_acc / total_count
        print("[test info] model_path: %s, class2_acc: %f, class3_acc: %f" %
              (model_path, class2_acc, class3_acc))
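
eval_net and infer_net both lean on utils.get_predict_label, which is not shown in these examples. A plausible sketch of such a helper, consistent with how its outputs are used above; the 0.55 neutral band and the exact label encoding are assumptions, not the original implementation.

def get_predict_label(pos_prob):
    neg_prob = 1 - pos_prob
    # 3-class label: 0 = negative, 1 = neutral, 2 = positive (assumed encoding)
    if pos_prob > 0.55:
        class3_label = 2
    elif neg_prob > 0.55:
        class3_label = 0
    else:
        class3_label = 1
    # The 2-class label skips the neutral band, which is why eval_net divides
    # class2_acc by (total_count - neu_count).
    class2_label = 2 if pos_prob >= neg_prob else 0
    return class3_label, class2_label
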
Example #7
    def prepare_seed(self, model):
        """ Prepare model input by
            - randomly select an AST
            - construct $pre_seq
            - run the model to produce $hidden
            - build $model_input

        :param model:
        :return:
        """
        seed_name, frag_seq = self.select_seed()
        (root,
         pre_seq,
         parent_idx,
         frag_type) = self.build_seed_tree(seed_name, frag_seq)

        # Prepare input for the model
        frag = [pre_seq[-1]]
        pre_seq = pre_seq[:-1]
        model_input = data2tensor(pre_seq)
        hidden = model.run(model_input)
        model_input = (frag, hidden, parent_idx, frag_type)
        seed_name = trim_seed_name(seed_name)
        return seed_name, root, model_input
Example #8
  def run_epoch(self, model, batches, epoch,
                optimizer=None, scheduler=None, mode=None):
    total_cross_entropy = 0.0
    total_diff = 0.0
    total_acc = 0.0
    num_val = 0
    is_train = optimizer is not None
    if is_train:
      batch_iter = tqdm(batches)
      model.train()
    else:
      batch_iter = batches
      model.eval()

    for batch in batch_iter:
      padded_batch = pad_input(batch)
      (input_frag_chunks,
       pfrag_chunks, type_chunks,
       output_chunks) = map(self.split_batch, padded_batch[:4])
      seq_len_chunks = padded_batch[4]

      num_val += sum(seq_len_chunks)

      hidden = None
      seq_len_chunks = self.split_length(seq_len_chunks)
      data_chunks = zip(input_frag_chunks,
                        pfrag_chunks, output_chunks,
                        seq_len_chunks, type_chunks)

      for data_chunk in data_chunks:
        # Zero out grads
        model.zero_grad()

        (input_frag_chunk,
         pfrag_chunk, output_chunk,
         seq_len_chunk) = map(data2tensor, data_chunk[:4])
        type_chunk = data2tensor(data_chunk[4],
                                 tensor_type='Float')

        # Forward pass
        res = model(input_frag_chunk,
                    pfrag_chunk, type_chunk,
                    hidden, output_chunk, seq_len_chunk)

        hidden, pred, cross_entropy_loss, top_k_loss = res
        hidden = repackage_hidden(hidden)
        if is_train:
          loss = top_k_loss + cross_entropy_loss
          self.backward_pass(loss, optimizer)
        total_diff += float(torch.sum(top_k_loss))
        total_cross_entropy += float(torch.sum(cross_entropy_loss))
        total_acc += float(torch.sum(pred))

    if is_train:
      scheduler.step()

    total_loss = (total_diff + total_cross_entropy) / num_val
    pplx = np.exp(total_cross_entropy / num_val)
    acc = total_acc / num_val
    total_diff = total_diff / num_val

    self.print_metrics(mode, epoch,
                       total_loss, pplx, total_diff, acc)
    return pplx
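
A hypothetical outer training loop around run_epoch; the optimizer, scheduler, epoch count, and the trainer/model/batch names are assumptions rather than part of the original example. run_epoch itself calls scheduler.step() and switches between model.train() and model.eval() depending on whether an optimizer is passed.

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

for epoch in range(30):
    trainer.run_epoch(model, train_batches, epoch,
                      optimizer=optimizer, scheduler=scheduler, mode='train')
    with torch.no_grad():
        val_pplx = trainer.run_epoch(model, valid_batches, epoch, mode='valid')
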