def __init__(self):
        super(self.__class__, self).__init__()

        # set target_names
        self.target_names = ['background'] + \
            [datum['name']
             for datum in jsk_apc2016_common.get_object_data()]
        n_class = len(self.target_names)
        assert n_class == 40

        # load model
        self.gpu = rospy.get_param('~gpu', 0)
        chainermodel = rospy.get_param('~chainermodel')
        self.model = FCN32s(n_class=n_class)
        S.load_hdf5(chainermodel, self.model)
        if self.gpu != -1:
            self.model.to_gpu(self.gpu)
        jsk_logwarn('>> Model is loaded <<')

        while True:
            self.tote_contents = rospy.get_param('~tote_contents', None)
            if self.tote_contents is not None:
                break
            logwarn_throttle(10, 'param ~tote_contents is not set. Waiting..')
            rospy.sleep(0.1)
        self.label_names = rospy.get_param('~label_names')
        jsk_logwarn('>> Param is set <<')

        self.pub = self.advertise('~output', Image, queue_size=1)
        self.pub_debug = self.advertise('~debug', Image, queue_size=1)
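
Every example on this page follows the same Chainer idiom: rebuild the network with exactly the architecture (and n_class) it was trained with, fill in its parameters with serializers.load_hdf5, and optionally move it to the GPU. Below is a minimal, self-contained sketch of that round trip, assuming Chainer 2.x with h5py installed; TinyNet is a toy stand-in for FCN32s/VGG16 and is not part of any snippet here.

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import serializers


class TinyNet(chainer.Chain):
    """Toy stand-in for the real models used in these examples."""

    def __init__(self, n_class=40):
        super(TinyNet, self).__init__()
        with self.init_scope():
            self.fc = L.Linear(16, n_class)

    def __call__(self, x):
        return F.softmax(self.fc(x))


model = TinyNet(n_class=40)
serializers.save_hdf5('tiny.chainermodel', model)      # the training script's job

restored = TinyNet(n_class=40)                         # must match the saved architecture
serializers.load_hdf5('tiny.chainermodel', restored)   # fill in the trained parameters
# restored.to_gpu(0)                                   # only when a GPU is available
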
Example #2
def test(args):
  trace('loading model ...')
  src_vocab = Vocabulary.load(args.model + '.srcvocab')
  trg_vocab = Vocabulary.load(args.model + '.trgvocab')
  attmt = AttentionMT.load_spec(args.model + '.spec')
  if args.use_gpu:
    attmt.to_gpu()
  serializers.load_hdf5(args.model + '.weights', attmt)
  
  trace('generating translation ...')
  generated = 0

  with open(args.target, 'w') as fp:
    for src_batch in gens.batch(gens.word_list(args.source), args.minibatch):
      src_batch = fill_batch(src_batch)
      K = len(src_batch)

      trace('sample %8d - %8d ...' % (generated + 1, generated + K))
      hyp_batch = forward(src_batch, None, src_vocab, trg_vocab, attmt, False, args.generation_limit)

      for hyp in hyp_batch:
        hyp.append('</s>')
        hyp = hyp[:hyp.index('</s>')]
        print(' '.join(hyp), file=fp)

      generated += K

  trace('finished.')
Example #3
    def __init__(self, gpu, chainermodel=None):
        self.gpu = gpu

        self.target_names = fcn.pascal.SegmentationClassDataset.target_names
        self.n_class = len(self.target_names)

        if chainermodel is None:
            chainermodel = osp.join(fcn.data_dir,
                                    'fcn8s_from_caffe.chainermodel')
            self.model_name = 'fcn8s'
            self.model = FCN8s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn8s'):
            self.model_name = 'fcn8s'
            self.model = FCN8s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn16s'):
            self.model_name = 'fcn16s'
            self.model = FCN16s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn32s'):
            self.model_name = 'fcn32s'
            self.model = FCN32s(n_class=self.n_class)
        else:
            raise ValueError(
                'Chainer model filename must start with fcn8s, '
                'fcn16s or fcn32s: {0}'.format(osp.basename(chainermodel)))

        S.load_hdf5(chainermodel, self.model)
        if self.gpu != -1:
            self.model.to_gpu(self.gpu)
    def _load_chainer_model(self):
        model_name = rospy.get_param('~model_name')
        if rospy.has_param('~model_h5'):
            rospy.logwarn('Rosparam ~model_h5 is deprecated;'
                          ' please use ~model_file instead.')
            model_file = rospy.get_param('~model_h5')
        else:
            model_file = rospy.get_param('~model_file')
        n_class = len(self.target_names)
        if model_name == 'fcn32s':
            self.model = fcn.models.FCN32s(n_class=n_class)
        elif model_name == 'fcn16s':
            self.model = fcn.models.FCN16s(n_class=n_class)
        elif model_name == 'fcn8s':
            self.model = fcn.models.FCN8s(n_class=n_class)
        elif model_name == 'fcn8s_at_once':
            self.model = fcn.models.FCN8sAtOnce(n_class=n_class)
        else:
            raise ValueError('Unsupported ~model_name: {}'.format(model_name))
        rospy.loginfo('Loading trained model: {0}'.format(model_file))
        if model_file.endswith('.npz'):
            S.load_npz(model_file, self.model)
        else:
            S.load_hdf5(model_file, self.model)
        rospy.loginfo('Finished loading trained model: {0}'.format(model_file))
        if self.gpu != -1:
            self.model.to_gpu(self.gpu)
        if LooseVersion(chainer.__version__) < LooseVersion('2.0.0'):
            self.model.train = False
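
_load_chainer_model above also illustrates the usual extension-based dispatch between Chainer's NPZ and HDF5 serializers. Here is a hedged, minimal helper capturing just that branch; the function name load_params is ours, not from the snippet, and HDF5 loading still needs h5py installed.

from chainer import serializers


def load_params(model_file, model):
    # .npz files go through the NumPy serializer, everything else
    # (.h5, .chainermodel, ...) through the HDF5 one.
    if model_file.endswith('.npz'):
        serializers.load_npz(model_file, model)
    else:
        serializers.load_hdf5(model_file, model)
    return model
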
Example #5
def test(args):
  trace('loading model ...')
  word_vocab = Vocabulary.load(args.model + '.words')
  phrase_vocab = Vocabulary.load(args.model + '.phrases')
  semiterminal_vocab = Vocabulary.load(args.model + '.semiterminals')
  parser = Parser.load_spec(args.model + '.spec')
  if args.use_gpu:
    parser.to_gpu()
  serializers.load_hdf5(args.model + '.weights', parser)

  embed_cache = {}
  parser.reset()

  trace('generating parse trees ...')
  with open(args.source) as fp:
    for l in fp:
      word_list = to_vram_words(convert_word_list(l.split(), word_vocab))
      tree = combine_xbar(
          restore_labels(
              parser.forward(word_list, None, args.unary_limit, embed_cache),
              phrase_vocab,
              semiterminal_vocab))
      print('( ' + tree_to_string(tree) + ' )')

  trace('finished.')
Example #6
    def __init__(self, gpu, target_names, chainermodel=None):
        self.gpu = gpu

        self.target_names = target_names
        self.n_class = len(self.target_names)

        if chainermodel is None:
            print('Please specify chainermodel.', file=sys.stderr)
            sys.exit(1)
        elif osp.basename(chainermodel).startswith('fcn8s'):
            self.model_name = 'fcn8s'
            self.model = FCN8s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn16s'):
            self.model_name = 'fcn16s'
            self.model = FCN16s(n_class=self.n_class)
        elif osp.basename(chainermodel).startswith('fcn32s'):
            self.model_name = 'fcn32s'
            self.model = FCN32s(n_class=self.n_class)
        else:
            raise ValueError(
                'Chainer model filename must start with fcn8s, '
                'fcn16s or fcn32s: {0}'.format(osp.basename(chainermodel)))

        S.load_hdf5(chainermodel, self.model)
        if self.gpu != -1:
            self.model.to_gpu(self.gpu)
    def test(self):
        trace('loading model ...')
        src_vocab = Vocabulary.load(self.model + '.srcvocab')
        trg_vocab = Vocabulary.load(self.model + '.trgvocab')
        encdec = EncoderDecoder.load_spec(self.model + '.spec')
        serializers.load_hdf5(self.model + '.weights', encdec)

        trace('generating translation ...')
        generated = 0

        with open(self.target, 'w') as fp:
            for src_batch in gens.batch(gens.word_list(self.source), self.minibatch):
                src_batch = fill_batch(src_batch)
                K = len(src_batch)

                trace('sample %8d - %8d ...' % (generated + 1, generated + K))
                hyp_batch = self.forward(src_batch, None, src_vocab, trg_vocab, encdec, False, self.generation_limit)

                source_count = 0
                for hyp in hyp_batch:
                    hyp.append('</s>')
                    hyp = hyp[:hyp.index('</s>')]
                    print("src : " + "".join(src_batch[source_count]).replace("</s>", ""))
                    print('hyp : ' + ''.join(hyp))
                    print(' '.join(hyp), file=fp)
                    source_count += 1

                generated += K

        trace('finished.')
def load_model(path):
    model_base_path = os.path.dirname(path)
    model_def_path = os.path.join(model_base_path, MODEL_DEF_NAME)
    with open(model_def_path, 'rb') as f:
        model = pickle.load(f)  # load model definition
        load_hdf5(path, model)  # load parameters
    return model
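
load_model() above expects two artifacts per model directory: a pickled model definition (MODEL_DEF_NAME) and an HDF5 parameter file. A hedged sketch of the matching save side follows; the helper name and the 'model.pkl' value are assumptions, since the snippet only shows the constant's name.

import os
import pickle

from chainer import serializers

MODEL_DEF_NAME = 'model.pkl'  # assumed value; the loader only references the constant


def save_model(path, model):
    # Pickle the model definition next to the HDF5 parameter file,
    # mirroring what load_model() reads back.
    model_def_path = os.path.join(os.path.dirname(path), MODEL_DEF_NAME)
    with open(model_def_path, 'wb') as f:
        pickle.dump(model, f)
    serializers.save_hdf5(path, model)
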
Example #9
def test(args):
    source_vocab = Vocab.load(args.model_path+SRC_VOCAB_NAME)
    target_vocab = Vocab.load(args.model_path+TAR_VOCAB_NAME)
    vocab_size, hidden_size, maxout_hidden_size, embed_size = Backup.load(args.model_path+HPARAM_NAME)

    att_encdec = ABED(vocab_size, hidden_size, maxout_hidden_size, embed_size)
    if args.use_gpu:
        att_encdec.to_gpu()
    serializers.load_hdf5(args.model_path+str(args.epochs)+'.attencdec', att_encdec)

    with open(args.output+str(args.epochs), 'w') as fp:
        source_gen = word_list(args.source)
        target_gen = word_list(args.target)
        batch_gen = batch(sort(source_gen, target_gen, 100*args.minibatch), args.minibatch) 
        for source_batch, target_batch in batch_gen: 
            source_batch = fill_batch_end(source_batch)
            target_batch = fill_batch_end(target_batch) 
            if args.beam_search:
                hyp_batch = forward_beam(source_batch, None, source_vocab, target_vocab, att_encdec, False, args.limit, args.beam_size)
            else:
                hyp_batch = forward(source_batch, None, source_vocab, target_vocab, att_encdec, False, args.limit)
            for i, hyp in enumerate(hyp_batch):
                hyp.append(END)
                hyp = hyp[:hyp.index(END)]
                show(source_batch[i], target_batch[i], hyp, "TEST")
                fwrite(source_batch[i], target_batch[i], hyp, fp)
Example #10
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('raw_path')
    parser.add_argument('model_path')
    args = parser.parse_args()

    raw_path = args.raw_path
    model_path = args.model_path

    mask_path = raw_to_mask_path(raw_path)
    raw = imread(raw_path)
    mask = imread(mask_path)
    y_min, x_min, y_max, x_max = mask_to_roi(mask)

    im = raw[y_min:y_max, x_min:x_max]

    model = VGG_mini_ABN()
    serializers.load_hdf5(model_path, model)

    im = resize(im, (128, 128), preserve_range=True)
    x_data = np.array([im_to_blob(im)], dtype=np.float32)
    x = Variable(x_data, volatile=True)
    model.train = False
    y = model(x)
    y_data = y.data
    print(OBJECT_CLASSES[np.argmax(y_data[0])])
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('chainermodel')
    parser.add_argument('--gpu', type=int, default=0)
    args = parser.parse_args()

    chainermodel = args.chainermodel
    gpu = args.gpu

    save_dir = 'test_result'
    if not osp.exists(save_dir):
        os.makedirs(save_dir)

    dataset = apc2016.APC2016Dataset()
    n_class = len(dataset.target_names)

    model = VGG16(n_class=n_class)
    S.load_hdf5(chainermodel, model)
    if gpu != -1:
        model.to_gpu(gpu)

    batch_size = 25
    index = 0
    sum_accuracy = 0
    label_true_all = []
    label_pred_all = []
    for index_start in xrange(0, len(dataset.test), batch_size):
        indices = range(index_start,
                        min(len(dataset.test), index_start + batch_size))
        x, t = dataset.next_batch(batch_size, type='test',
                                  type_indices=indices)
        if gpu != -1:
            x = cuda.to_gpu(x, gpu)
            t = cuda.to_gpu(t, gpu)
        x = Variable(x, volatile=True)
        t = Variable(t, volatile=True)
        model(x, t)

        x_data = cuda.to_cpu(x.data)
        accuracy = float(cuda.to_cpu(model.acc.data))
        sum_accuracy += accuracy * len(x_data)
        label_true = cuda.to_cpu(t.data)
        label_pred = cuda.to_cpu(model.pred.data).argmax(axis=1)
        label_true_all.extend(label_true.tolist())
        label_pred_all.extend(label_pred.tolist())

        fname = '{0}_{1}-{2}_{3:.2}.png'.format(
            osp.basename(chainermodel), indices[0], indices[-1], accuracy)
        fname = osp.join(save_dir, fname)
        draw_test_result(dataset, fname, x_data,
                         label_true, label_pred, n_class)
        print('Saved {0}.'.format(fname))
    mean_accuracy = sum_accuracy / len(dataset.test)
    print('Accuracy: {0}'.format(mean_accuracy))
    print(classification_report(
        y_true=label_true_all,
        y_pred=label_pred_all,
        labels=np.arange(len(dataset.target_names)),
        target_names=dataset.target_names,
    ))
Example #12
def get_model_optimizer(args):
    model = get_model(args)

    if 'opt' in args:
        # prepare optimizer
        if args.opt == 'AdaGrad':
            optimizer = optimizers.AdaGrad(lr=args.lr)
        elif args.opt == 'MomentumSGD':
            optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
        elif args.opt == 'Adam':
            optimizer = optimizers.Adam()
        else:
            raise Exception('No optimizer is selected')

        optimizer.setup(model)

        if args.resume_opt is not None:
            serializers.load_hdf5(args.resume_opt, optimizer)
            args.epoch_offset = int(
                re.search('epoch-([0-9]+)', args.resume_opt).groups()[0])

        return model, optimizer

    else:
        print('No optimizer generated.')
        return model
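
The resume branch above only works if checkpoint filenames carry an epoch-<N> token for the regular expression to recover. Below is a hedged sketch of a compatible save step; the filename pattern is our assumption, chosen only to satisfy that regex.

from chainer import serializers


def save_checkpoint(model, optimizer, epoch, prefix='snapshot'):
    # Filenames embed "epoch-<N>" so that get_model_optimizer() can
    # parse the epoch offset back out when resuming.
    serializers.save_hdf5('{0}_epoch-{1}.model'.format(prefix, epoch), model)
    serializers.save_hdf5('{0}_epoch-{1}.state'.format(prefix, epoch), optimizer)
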
Example #13
def main():
    # Settings
    act = F.relu

    # Model
    ae = AutoEncoder(act)

    # Load
    fpath = "/home/kzk/tmp/cnn/ae_00010.h5py"
    serializers.load_hdf5(fpath, ae)

    # Data
    home = os.environ.get("HOME")
    train_path = os.path.join(home, "datasets/mnist/train.npz")
    data = np.load(train_path)

    # Generate random vector(s)
    idx = 150
    x = data["x"][idx, :].reshape(1, 1, 28, 28).astype(np.float32)
    x = (x - 127.5) / 127.5
    y = ae.encoder(x)
    #y = F.softmax(y)

    # Generate sample(s)
    x = ae.decoder(y, test=True)
    x = x.data * 127.5 + 127.5

    cv2.imwrite("./dec_mnist_{:05d}.png".format(0), x.reshape(28, 28))
    def __judge_print(self):
        """
        Judge the Slack call for chainer.
        Example:
            chainer:{your sentence}
                chainer returns the sentence
            chainer_train:{your sentence}
                start training
        """
        if len(self.data) >= 1 and "text" in self.data[0]:
            print(self.data[0]["text"])
            if "chainer:" in self.data[0]["text"]:
                # input sentence
                src_batch = self.__input_sentence()
                # predict
                hyp_batch = self.__predict_sentence(src_batch)
                # show the predicted words
                word = ''.join(hyp_batch[0]).replace("</s>", "")
                print(self.slack_channel.api_call("chat.postMessage", user=self.usr, channel=self.chan, text=word))
            if "chainer_train" in self.data[0]["text"]:
                self.__setting_parameter()
                model = AttentionDialogue.load_spec(self.model_name + '.spec', self.XP)
                dialogue = EncoderDecoderModelAttention(self.parameter)
                serializers.load_hdf5(self.model_name + '.weights', model)
                dialogue.attention_dialogue = model
                dialogue.word2vecFlag = False
                dialogue.train()
Example #15
def inspect(image_path, mean, model_path, label, network, gpu=-1):
    network = network.split(os.sep)[-1]
    model_name = re.sub(r"\.py$", "", network)
    model_module = load_module(os.path.dirname(model_path), model_name)
    mean_image = pickle.load(open(mean, 'rb'))
    model = model_module.Network()
    serializers.load_hdf5(model_path, model)
    if gpu >= 0:
        cuda.check_cuda_available()
        cuda.get_device(gpu).use()
        model.to_gpu()
    cropwidth = 256 - model.insize
    img = read_image(image_path, model, mean_image, cropwidth)
    x = np.ndarray((1, 3, model.insize, model.insize), dtype=np.float32)
    x[0] = img
    if gpu >= 0:
        x = cuda.to_gpu(x)
    score = model.predict(x)
    score = cuda.to_cpu(score.data)
    categories = np.loadtxt(label, str, delimiter="\t")
    top_k = 20
    prediction = zip(score[0].tolist(), categories)
    prediction.sort(cmp=lambda x, y:cmp(x[0], y[0]), reverse=True)
    ret = []
    for rank, (score, name) in enumerate(prediction[:top_k], start=1):
        ret.append({"rank": rank, "name": name, "score": "{0:4.1f}%".format(score*100)})
    return ret
Example #16
def init_model():
    global model, optimizer
    model = PBLLogi()
    optimizer = optimizers.Adam()
    optimizer.setup(model)

    serializers.load_hdf5('pbllogi.model', model)
    serializers.load_hdf5('pbllogi.state', optimizer)
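
init_model() restores both the model parameters and the optimizer state, so both must have been serialized after training. A minimal hedged counterpart, reusing the 'pbllogi.*' filenames from the snippet:

from chainer import serializers


def save_state(model, optimizer):
    # Persist both halves so init_model() can resume training exactly
    # where it stopped (Adam's moment estimates live in the .state file).
    serializers.save_hdf5('pbllogi.model', model)
    serializers.save_hdf5('pbllogi.state', optimizer)
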
Example #17
    def __predict_sentence(self, src_batch):
        dialogue = EncoderDecoderModelForwardSlack(self.parameter)
        src_vocab = Vocabulary.load(self.model_name + '.srcvocab')
        trg_vocab = Vocabulary.load(self.model_name + '.trgvocab')
        model = EncoderDecoder.load_spec(self.model_name + '.spec')
        serializers.load_hdf5(dialogue.model + '.weights', model)
        hyp_batch = dialogue.forward(src_batch, None, src_vocab, trg_vocab, model, False, self.generation_limit)
        return hyp_batch
Example #18
    def load_model(self):
        model_fn = os.path.basename(self.args.model)
        self.model = imp.load_source(
            model_fn.split('.')[0], self.args.model).model
        self.model.train = False
        serializers.load_hdf5(self.args.param, self.model)
        if self.args.gpu >= 0:
            self.model.to_gpu()
Example #19
def load_model(args):
    model_fn = os.path.basename(args.model)
    model_name = model_fn.split('.')[0]
    model = imp.load_source(model_name, args.model).model
    serializers.load_hdf5(args.param, model)
    model.train = False

    return model
Example #20
def load_model(filename, model):
    print('Loading pretrained model...')
    try:
        serializers.load_hdf5(filename, model)
        print('Loaded pretrained model')
    except OSError as err:
        print('OS error: {}'.format(err))
        print('Could not find a pretrained model. Proceeding with a randomly initialized model.')
Example #21
	def load(self):
		filename = "fc.model"
		if os.path.isfile(filename):
			serializers.load_hdf5(filename, self.fc)
			print "model loaded successfully."
		filename = "fc.optimizer"
		if os.path.isfile(filename):
			serializers.load_hdf5(filename, self.optimizer_fc)
			print "optimizer loaded successfully."
Example #22
	def load(self):
		filename = "conv.model"
		if os.path.isfile(filename):
			serializers.load_hdf5(filename, self.conv)
			print "convolutional network loaded."
		if self.fcl_eliminated is False:
			filename = "fc.model"
			if os.path.isfile(filename):
				serializers.load_hdf5(filename, self.fc)
				print "fully-connected network loaded."
Example #23
def load_model(filename, model):
    """Load the model with the file data."""
    print('Loading pretrained model...')
    try:
        serializers.load_hdf5(filename, model)
        print('Successfully loaded pretrained model')
    except OSError as err:
        print('OS error: {}'.format(err))
        print('Could not find a pretrained model. '
              'Proceeding with a randomly initialized model.')
Example #24
def main():
    rospy.init_node('cae_param_node')

    model = CAEOnes(n_param=1)
    S.load_hdf5('cae_ones_model_inbin_trained.h5', model)

    model.train = False
    CAENode(model)

    rospy.spin()
Example #25
    def __init__(self, net_size, model_filename, optimizer_filename):
        """ Create the underlying neural network model """
        self.model = L.Classifier(BaseNetwork(net_size))
        if (model_filename != ""):
            serializers.load_hdf5(model_filename, self.model)

        """ Create the underlying optimizer """
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
        if (optimizer_filename != ""):
            serializers.load_hdf5(optimizer_filename, self.optimizer)
Example #26
  def __init__(self, args):
    trace('loading model ...')
    self.args = args
    self.src_vocab = Vocabulary.load(args.model + '.srcvocab')
    self.trg_vocab = Vocabulary.load(args.model + '.trgvocab')
    self.encdec = EncoderDecoder.load_spec(args.model + '.spec')
    if args.use_gpu:
      self.encdec.to_gpu()
    serializers.load_hdf5(args.model + '.weights', self.encdec)

    trace('generating translation ...')
Example #27
    def load_model(self, model_filename):
        """Load a network model from a file."""
        serializers.load_hdf5(model_filename, self.model)
        copy_param.copy_param(target_link=self.model,
                              source_link=self.shared_model)
        opt_filename = model_filename + '.opt'
        if os.path.exists(opt_filename):
            serializers.load_hdf5(opt_filename, self.optimizer)
        else:
            print('WARNING: {0} was not found, so loaded only a model'.format(
                opt_filename))
Example #28
def load_latest(train=True):
    model = load_new(train)

    params = os.listdir('speaker-model')
    params.sort()

    if len(params) > 0:
        serializers.load_hdf5(os.path.join('speaker-model', params[-1]), model)
        return model, params[-1]
    else:
        return model, None
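
load_latest() picks the newest checkpoint by lexicographically sorting the filenames in speaker-model/, which is only chronological if the names are zero-padded. A hedged sketch of a compatible save call; the filename pattern and padding width are our assumptions.

import os

from chainer import serializers


def save_epoch(model, epoch, model_dir='speaker-model'):
    # Zero-pad the epoch so that a plain sort() puts the latest file last.
    fname = 'params-{0:06d}.hdf5'.format(epoch)
    serializers.save_hdf5(os.path.join(model_dir, fname), model)
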
Example #29
def get_model(name):
    if name == 'vgg16':
        model = VGG16()
    elif name == 'caffenet':
        model = CaffeNet()
    elif name == 'vgg_cnn_m_1024':
        model = VGG_CNN_M_1024()
    else:
        raise ValueError('Unsupported model name: %s' % name)
    S.load_hdf5('models/%s.chainermodel' % name, model)
    model.to_gpu()
    return model
Example #30
def test(args):
    """ 予測を行うメソッド
    """

    batchsize = args.batchsize  # batch size

    # load the vocabulary dictionary
    src_vocab2id, src_id2vocab, vocab_size = util.load_vocab(args.model + ".srcvocab")

    # load the model
    model = NLM.load_spec(args.model + ".spec")

    # whether to use the GPU
    if args.use_gpu:
        cuda.check_cuda_available()
        cuda.get_device(1).use()
        model.to_gpu()

    xp = cuda.cupy if args.use_gpu else np  # use cupy on the GPU, numpy otherwise
    serializers.load_hdf5(args.model + ".weights", model)

    # Source sequence for test
    print 'loading source data for test...'
    # load the dataset
    test_src_dataset = util.load_test_src_data(args.src, src_vocab2id)

    generated = 0
    N = len(test_src_dataset)  # number of test examples

    word_list = src_vocab2id.keys()

    # get the embedding for each word
    word_id_list = Variable(xp.asarray([src_vocab2id[word] for word in word_list ], dtype=xp.int32))
    embedding_list = model.get_embedding(word_id_list)
    src_embed = embedding_list.data[word_list.index(args.src_word)]
    #print model.embed.W.data.shape

    print "src word:", args.src_word
    print src_embed
    #src_embed = model.embed.W.data[src_vocab2id[args.src_word]]

    trg_embed_list = {}
    for _word, _id in src_vocab2id.items():
        trg_embed = embedding_list.data[word_list.index(_word)]
        #trg_embed = model.embed.W.data[src_vocab2id[_word]]
        trg_embed_list[_word] = 1 - scipy.spatial.distance.cosine(src_embed, trg_embed)

    # show the top 10 most similar words
    for i, (word, sim) in enumerate(sorted(trg_embed_list.items(), key=lambda x:x[1], reverse=True)):
        print word, sim

        if i == 10:
            break
Example #31
    def load_model(self, filename):
        serializers.load_hdf5(filename, self)
Example #32

if __name__ == '__main__':
    args = parse()
    vocab = pickle.load(open(args.vocab, 'rb'))

    print("Loading Model...")
    if args.mode == 'naive':
        from Recursive_model import RecursiveNet
        model = RecursiveNet(len(vocab), args.embed_size, args.unit,
                             args.label)
        if args.gpu >= 0:
            cuda.get_device(args.gpus).use()
            model.to_gpu(args.gpu)
            xp = cuda.cupy
        serializers.load_hdf5(args.model, model)
        print("Begin reordering...")
        for i, fp in enumerate(args.reorderfile):
            with codecs.open(fp.split('/')[-1] + '.re', 'w', 'utf-8') as fre:
                for tree in read_reorder(fp, vocab, args.tree_type):
                    _, pred, order = traverse(model,
                                              tree,
                                              0,
                                              train=False,
                                              pred=True)
                    if args.output_format == 'text':
                        print(' '.join(pred), file=fre)
                    elif args.output_format == 'order':
                        hd = {}
                        for oi, o in enumerate(order):
                            hd[o] = oi
Example #33
    def load(self, filename):
        if os.path.isfile(filename):
            print("loading {} ...".format(filename))
            serializers.load_hdf5(filename, self)
        else:
            print(filename, "not found.")
Example #34
def main(params):
    target_save_dir = osp.join(params['save_dir'], 'prepro',
                               params['dataset'] + '_' + params['splitBy'])
    model_dir = osp.join(params['save_dir'], 'model',
                         params['dataset'] + '_' + params['splitBy'])

    if params['old']:
        params['data_json'] = 'old' + params['data_json']
        params['data_h5'] = 'old' + params['data_h5']
        params['image_feats'] = 'old' + params['image_feats']
        params['ann_feats'] = 'old' + params['ann_feats']
        params['id'] = 'old' + params['id']

    if params['dataset'] in ['refcoco', 'refcoco+', 'refcocog']:
        global_shapes = (224, 224)
        image_root = params['coco_image_root']
    elif params['dataset'] == 'refgta':
        global_shapes = (480, 288)
        image_root = params['gta_image_root']

    with open(target_save_dir + params["split"] + '_' + params['id'] +
              params['id2'] + str(params['beam_width']) + '.json') as f:
        data = json.load(f)
    ref_to_beams = {item['ref_id']: item['beam'] for item in data}

    # add ref_id to each beam
    for ref_id, beams in ref_to_beams.items():
        for beam in beams:
            beam['ref_id'] = ref_id  # make up ref_id in beam

    loader = DataLoader(params)
    featsOpt = {
        'sp_ann': osp.join(target_save_dir, params['sp_ann_feats']),
        'ann_input': osp.join(target_save_dir, params['ann_feats']),
        'img': osp.join(target_save_dir, params['image_feats']),
        'shapes': osp.join(target_save_dir, params['ann_shapes'])
    }
    loader.loadFeats(featsOpt)
    loader.shuffle('train')
    loader.loadFeats(featsOpt)
    chainer.config.train = False
    chainer.config.enable_backprop = False

    gpu_id = params['gpu_id']
    cuda.get_device(gpu_id).use()
    xp = cuda.cupy

    rl_crit = ListenerReward(len(loader.ix_to_word),
                             global_shapes=global_shapes).to_gpu(gpu_id)
    serializers.load_hdf5(osp.join(model_dir, params['id'] + ".h5"), rl_crit)
    #serializers.load_hdf5(osp.join(model_dir, "attn_rank.h5"), rl_crit)
    img_to_ref_ids, img_to_ref_confusion = calc_confusion(
        loader, data, ref_to_beams, rl_crit, params, xp)

    sys.path.insert(0, osp.join('pyutils', 'refer2'))
    sys.path.insert(0, osp.join('pyutils', 'refer2', 'evaluation'))
    from refer import REFER
    from refEvaluation import RefEvaluation
    from crossEvaluation import CrossEvaluation
    refer = REFER(params['data_root'],
                  image_root,
                  params['dataset'],
                  params['splitBy'],
                  old_version=params['old'])

    if params['dataset'] == 'refcoco':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refcoco+':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refcocog':
        lambda1 = 5
        lambda2 = 5
    elif params['dataset'] == 'refgta':
        lambda1 = 5
        lambda2 = 5
    else:
        error('No such dataset option for ', params['dataset'])

    # compute unary potential, img_to_ref_unary
    # let's firstly try one image
    Res = []
    for image_id in img_to_ref_confusion:
        # ref_ids and confusion matrices for this image
        img_ref_ids = img_to_ref_ids[image_id]
        ref_to_confusion = img_to_ref_confusion[image_id]
        # compute unary potential for each ref_id
        for ref_id in img_ref_ids:
            confusion = ref_to_confusion[ref_id]  # (beam_size, #img_ref_ids)
            beams = ref_to_beams[ref_id]  # [{ppl, sent, logp}] of beam_size
            compute_unary(ref_id, beams, confusion, img_ref_ids, lambda1,
                          lambda2)

        # here's more preparation
        ref_beam_to_ix, ix_to_ref_beam, all_beams = make_index(
            img_ref_ids, ref_to_beams)

        # compute pairwise potentials
        pairwise_ref_beam_ids = compute_pairwise(img_ref_ids, ref_to_beams)

        # call cplex
        res = bilp(img_ref_ids, ref_to_beams, all_beams, pairwise_ref_beam_ids,
                   ref_beam_to_ix, loader)
        Res += res
    # evaluate
    eval_cider_r = params['dataset'] == 'refgta'
    refEval = RefEvaluation(refer, Res, eval_cider_r=eval_cider_r)
    refEval.evaluate()
    overall = {}
    for metric, score in refEval.eval.items():
        overall[metric] = score
    print(overall)

    if params['write_result'] > 0:
        refToEval = refEval.refToEval
        for res in Res:
            ref_id, sent = res['ref_id'], res['sent']
            refToEval[ref_id]['sent'] = sent
        with open('' + params['id'] + params['id2'] + '_out.json',
                  'w') as outfile:
            json.dump({'overall': overall, 'refToEval': refToEval}, outfile)

    # CrossEvaluation takes as input [{ref_id, sent}]
    ceval = CrossEvaluation(refer, Res)
    ceval.cross_evaluate()
    ceval.make_ref_to_evals()
    ref_to_evals = ceval.ref_to_evals  # ref_to_evals = {ref_id: {ref_id: {method: score}}}

    # compute cross score
    xcider = ceval.Xscore('CIDEr')
def output_file(data_path, model_path):
    """
    :param data_path: the path of corpus you made model learn
    :param model_path: the path of model you made learn
    :return:
    """
    # call dictionary class
    corpus = JaConvCorpus(create_flg=False)
    corpus.load(load_dir=data_path)
    print('Vocabulary Size (number of words) :', len(corpus.dic.token2id))
    print('')

    # rebuild seq2seq model
    model = FineTuneSeq2Seq(all_vocab_size=len(corpus.dic.token2id), emotion_vocab_size=len(corpus.emotion_set),
                            feature_num=args.feature_num, hidden_num=args.hidden_num,
                            label_num=args.label_num, label_embed_num=args.label_embed, batch_size=1, gpu_flg=args.gpu)
    serializers.load_hdf5(model_path, model)
    emo_label_index = [index for index in range(args.label_num)]
    topic_label_index = [index for index in range(args.topic_num)]

    # run conversation system
    r_label = re.compile("(__label__)([0-9]+)")
    pattern = "(.+?)(\t)(.+?)(\n|\r\n)"
    r = re.compile(pattern)
    for line in open(T2V_OUTPUT, 'r', encoding='utf-8'):
        m = r.search(line)
        if m is not None:
            topic_label = m.group(1)
            sentence = m.group(3)

            # check a topic tag
            label_info = r_label.search(topic_label)
            if int(label_info.group(2)) < len(topic_label_index):
                topic_label_id = int(label_info.group(2))
            else:
                print('The domain label exceeds the number of domains.')
                raise ValueError

            # parse text by mecab
            input_vocab = [unicodedata.normalize('NFKC', word.lower()) for word in parse_ja_text(sentence)]
            input_vocab_rev = input_vocab[::-1]

            # convert word into ID
            input_sentence = [corpus.dic.token2id[word] for word in input_vocab if not corpus.dic.token2id.get(word) is None]
            input_sentence_rev = [corpus.dic.token2id[word] for word in input_vocab_rev if not corpus.dic.token2id.get(word) is None]

            print("input -> ", sentence, "domain:", topic_label_id)
            model.initialize(batch_size=1)
            for emo_label in range(LABEL_NUM):
                sentence = model.generate(input_sentence, input_sentence_rev, sentence_limit=len(input_sentence) + 20,
                                          emo_label_id=emo_label, topic_label_id=topic_label_id,
                                          word2id=corpus.dic.token2id, id2word=corpus.dic)
                if emo_label == 0:
                    print("neg -> ", sentence)
                elif emo_label == 1:
                    print("neu -> ", sentence)
                elif emo_label == 2:
                    print("pos -> ", sentence)
                else:
                    raise ValueError
            print('')
def interpreter(data_path, model_path):
    """
    Run this function if you want to talk to the seq2seq model.
    Type "exit" to end the conversation.
    :param data_path: the path of corpus you made model learn
    :param model_path: the path of model you made learn
    :return:
    """
    # call dictionary class
    corpus = JaConvCorpus(create_flg=False)
    corpus.load(load_dir=data_path)
    print('Vocabulary Size (number of words) :', len(corpus.dic.token2id))
    print('')

    # rebuild seq2seq model
    model = FineTuneSeq2Seq(all_vocab_size=len(corpus.dic.token2id), emotion_vocab_size=len(corpus.emotion_set),
                            feature_num=args.feature_num, hidden_num=args.hidden_num,
                            label_num=args.label_num, label_embed_num=args.label_embed, batch_size=1, gpu_flg=args.gpu)
    serializers.load_hdf5(model_path, model)
    emo_label_index = [index for index in range(args.label_num)]
    topic_label_index = [index for index in range(args.topic_num)]

    # run conversation system
    print('The system is ready to run, please talk to me!')
    print('( If you want to end a talk, please type "exit". )')
    print('')
    while True:
        print('>> ', end='')
        sentence = input()
        if sentence == 'exit':
            print('See you again!')
            break

        # check a sentiment tag
        input_vocab = sentence.split(' ')
        emo_label_id = input_vocab.pop(-1)
        topic_label_id = input_vocab.pop(-1)
        label_false_flg = 1

        for index in emo_label_index:
            if emo_label_id == str(index):
                emo_label_id = index               # TODO: mind the label index; currently 3-way classification (0, 1, 2)
                label_false_flg = 0
                break
        if label_false_flg:
            print('caution: you did not set any valid tag! (emotion label)')
            emo_label_id = -1

        # check a topic tag                        # TODO: this should really come from tweet2vec's prediction, not from the user
        label_false_flg = 1
        for index in topic_label_index:
            if topic_label_id == str(index):
                topic_label_id = index             # TODO: mind the label index; currently 3-way classification (0, 1, 2)
                label_false_flg = 0
                break
        if label_false_flg:
            print('caution: you did not set any valid tag! (topic label)')
            topic_label_id = -1

        input_vocab = [unicodedata.normalize('NFKC', word.lower()) for word in parse_ja_text(sentence)]
        input_vocab_rev = input_vocab[::-1]

        # convert word into ID
        input_sentence = [corpus.dic.token2id[word] for word in input_vocab if not corpus.dic.token2id.get(word) is None]
        input_sentence_rev = [corpus.dic.token2id[word] for word in input_vocab_rev if not corpus.dic.token2id.get(word) is None]

        model.initialize(batch_size=1)
        if args.beam_search:
            hypotheses = model.beam_search(model.initial_state_function, model.generate_function,
                                           input_sentence, input_sentence_rev, start_id=corpus.dic.token2id['<start>'],
                                           end_id=corpus.dic.token2id['<eos>'], emo_label_id=emo_label_id,
                                           topic_label_id=topic_label_id)
            for hypothesis in hypotheses:
                generated_indices = hypothesis.to_sequence_of_values()
                generated_tokens = [corpus.dic[i] for i in generated_indices]
                print("--> ", " ".join(generated_tokens))
        else:
            sentence = model.generate(input_sentence, input_sentence_rev, sentence_limit=len(input_sentence) + 20,
                                      emo_label_id=emo_label_id, topic_label_id=topic_label_id,
                                      word2id=corpus.dic.token2id, id2word=corpus.dic)
        print("-> ", sentence)
        print('')
Example #37
#try to load from here
item_path = "./items"
item_files = glob.glob(item_path + "/*")
image_file = args.path

labels = []

input_height, input_width = (224, 224)
for i, item in enumerate(item_files):
    labels.append(item.split("/")[-1].split(".")[0])
    print(item.split("/")[-1].split(".")[0])

weight_file_test = "./backup/1001.model"
model_test = Darknet19Predictor(Darknet19())
serializers.load_hdf5(weight_file_test, model_test)  # load saved model
model_test.predictor.train = False

# read image and process on it
img1 = cv2.imread(image_file)
img2 = cv2.imread("./items/schweps.png")
img3 = cv2.imread("./items/coca-zero.png")

# forward
x = []
img_list = []

img_list.append(img1)
img_list.append(img2)
img_list.append(img3)
cv2.imshow('test feed', img1)
Example #38
n_epoch = 81

# check the save destination
if os.path.exists('models/' + args.dir):
    print('selected dir exists!')
else:
    print('selected dir does not exist! making dir.')
    os.mkdir('models/' + args.dir)

# load the various required parameters

# load the VGG model
if args.usevgg == 1:
    print('loading VGG model...')
    vgg = VGGNet()
    serializers.load_hdf5('/tmp/VGG.model', vgg)
    print('loaded VGG!')


# ############ precompute parameters in advance to speed up training ###################
if args.usevgg == 1:
    print('preprocessing vgg data')
    vgg_img_param = []

    start = time.time()
    if os.path.isfile('features/vggparam_leather.pickle'):
        print('vgg checkpoint pickle exists! loading pickle')
        with open('features/vggparam_leather.pickle', mode='rb') as f:
            vgg_img_param = pickle.load(f)
    else:
        for filename in filenames:
Example #39
vocab_size = 3000
seq_length = 30
start_token = 0

if args.ae_pretrain:
    encoder = SeqEncoder(vocab_size=vocab_size, emb_dim=args.gen_emb_dim, hidden_dim=args.gen_hidden_dim,
                         sequence_length=seq_length)
else:
    encoder = None

# generator
generator = SeqGAN(vocab_size=vocab_size, emb_dim=args.gen_emb_dim, hidden_dim=args.gen_hidden_dim,
                   sequence_length=seq_length, start_token=start_token, lstm_layer=args.num_lstm_layer,
                   dropout=args.dropout, free_pretrain=args.free_pretrain, encoder=encoder).to_gpu()
if args.gen:
    serializers.load_hdf5(args.gen, generator)

# discriminator
discriminator = TextCNN(num_classes=2, vocab_size=vocab_size, embedding_size=args.dis_embedding_dim,
                        filter_sizes=[int(n) for n in args.dis_filter_sizes.split(',')],
                        num_filters=[int(n) for n in args.dis_num_filters.split(',')]
                        ).to_gpu()
if args.dis:
    serializers.load_hdf5(args.dis, discriminator)

# set optimizer
if args.ae_pretrain:
    enc_optimizer = optimizers.Adam(alpha=args.gen_lr)
    enc_optimizer.setup(encoder)
    enc_optimizer.add_hook(chainer.optimizer.GradientClipping(args.gen_grad_clip))
Example #40
sty_len[sty_idx] = lengths

# Count all token frequencies
tok_idx, freq = np.unique(flattened, return_counts=True)
term_frequency = np.zeros(n_vocab, dtype='int32')
term_frequency[tok_idx] = freq

model = LDA2Vec(n_stories=n_stories,
                n_story_topics=n_story_topics,
                n_units=n_units,
                n_vocab=n_vocab,
                counts=term_frequency,
                n_samples=15)
if os.path.exists('lda2vec.hdf5'):
    print "Reloading from saved"
    serializers.load_hdf5("lda2vec.hdf5", model)
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
clip = chainer.optimizer.GradientClipping(5.0)
optimizer.add_hook(clip)

author_id = story_id
j = 0
epoch = 0
fraction = batchsize * 1.0 / flattened.shape[0]
for epoch in range(5000):
    ts = prepare_topics(
        cuda.to_cpu(model.mixture_sty.weights.W.data).copy(),
        cuda.to_cpu(model.mixture_sty.factors.W.data).copy(),
        cuda.to_cpu(model.sampler.W.data).copy(), words)
Example #41
momentum = 0.9
weight_decay = 0.0005

# load image generator
print("loading image generator...")
generator = ImageGenerator(item_path, background_path)

with open(label_file, "r") as f:
    labels = f.read().strip().split("\n")

# load model
print("loading model...")
model = Darknet19Predictor(Darknet19())
backup_file = "%s/backup.model" % (backup_path)
if os.path.isfile(backup_file):
    serializers.load_hdf5(initial_weights_file, model)  # load saved model
model.predictor.train = True
model.predictor.finetune = True
cuda.get_device(0).use()
model.to_gpu()  # for gpu

optimizer = optimizers.MomentumSGD(lr=learning_rate, momentum=momentum)
optimizer.use_cleargrads()
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay))

# start to train
print("start training")
for batch in range(max_batches):
    # generate sample
    x, t = generator.generate_samples(n_samples=batch_size,
Example #42
                transport,
                n_update,
                k=k,
                lr=args.lr,
                optmode=args.optmode)
            fileobj.write("n_update:{}\n".format(n_update))
            fileobj.write("IS:{}pm{}\n".format(inception_mean, inception_std))
            fileobj.write("FID:{}\n\n".format(fid))


if __name__ == '__main__':
    args = parse_args()
    if not os.path.exists("scores"):
        os.mkdir("scores")
    evmodel = Inception()
    serializers.load_hdf5('metric/inception_score.model', evmodel)
    G, D, data = load_GD(args.G, args.D)
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        evmodel.to_gpu()
        G.to_gpu()
        D.to_gpu()
    G, D = DOT.thermalize_spectral_norm(G, D)
    if args.k == None:
        k = DOT.eff_k(G, D)
    else:
        k = args.k * xp.ones([1])
    main(args,
         G,
         D,
         data,