Example #1
def image_grab(id):
    try:
        # Use the cached image-feature file if it exists on disk
        new_image = get_image(id, path=coco_dir + '/processed_flatten/')
    except IOError:
        # Otherwise recompute the features from the raw image
        new_image = predict_image(id)[1]
    try:
        new_class = get_image(id, path=coco_dir + '/processed_predictions/')
    except IOError:
        new_class = predict_image(id)[2]
    return new_class, new_image
Example #2
def image_grab(id):
    try:
        new_image = get_image(id, path='/extra' + '/processed_flatten/')
    except IOError:
        new_image = predict_image(id)[1]
    try:
        new_class = get_image(id,
                              path='/extra' + '/processed_predictions/')
    except IOError:
        new_class = predict_image(id)[2]
    return new_class, new_image
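
A hypothetical call, assuming the cached feature files for the zero-padded COCO id '000000000431' (an id that appears later in this listing) exist under the paths above:

    new_class, new_image = image_grab('000000000431')
    print(new_image.shape, new_class.shape)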
Example #3
    def image_grab(id):

        #word_to_idx, idx_to_word = load_refexp_dicts()

        #classes, images, alt_classes,alt_images, partial_captions, next_words_one_hot, \
        #  vocab_size, idx_to_word, word_to_idx = load_stream(stream_num=cur_stream_num, stream_size=stream_size, preprocess=preproc,
        #                                                     max_caption_len=max_caption_len, word_to_idx=word_to_idx)

        #return images[:1],classes[:1],alt_images[:1],alt_classes[:1]
        try:
            new_image = get_image(id, path='/extra' + '/processed_flatten/')
        except IOError:
            new_image = predict_image(id)[1]
        try:
            new_class = get_image(id,
                                  path='/extra' + '/processed_predictions/')
        except IOError:
            new_class = predict_image(id)[2]
        return new_class, new_image
Example #4
def load_stream(stream_num, stream_size, preprocess, max_caption_len,
                word_to_idx):
    # Preprocess the data if necessary
    if preprocess:
        preprocess_refexp_images(stream_num=stream_num,
                                 stream_size=stream_size,
                                 word_to_idx=word_to_idx,
                                 max_cap_len=max_caption_len,
                                 coco_dir=coco_dir,
                                 out_file=data_path)

    with open(data_path, 'rb') as handle:
        X, next_words = pickle.load(handle)

    # Disabled filter: keep only pairs whose partial caption ends exactly five
    # slots before max_caption_len, then drop those five trailing padding slots.
    if False:

        new_X1 = []
        new_X0 = []
        new_y = []
        for i, x in enumerate(X[1]):
            if x[-5] == 0 and x[-6] != 0:
                new_X1.append(x[:-5])
                new_X0.append(X[0][i])
                new_y.append(next_words[i])
        next_words = np.asarray(new_y)
        X[0] = np.asarray(new_X0)
        X[1] = np.asarray(new_X1)

    print([([idx_to_word[x] for x in X[1][p]], idx_to_word[next_words[p]])
           for (p, q) in enumerate(X[1][:50])])

    image_ids = X[0]
    #bounding_boxes = [x[1] for x in image_ids]
    #image_ids = [x[0] for x in image_ids]
    partial_captions = X[1]
    vocab_size = len(word_to_idx)

    # Load the CNN feature representation of each image
    images = []
    classes = []
    alt_images = []
    alt_classes = []
    for image_id in image_ids:
        number = str(('_0000000000000' + str(image_id[0]))[-12:])

        try:
            x_whole = get_image(number, path='/extra' + '/processed_flatten/')
            class_whole = get_image(number,
                                    path='/extra' + '/processed_predictions/')
            x_region = get_image(number,
                                 path='/extra' + '/processed_flatten_b/')
            class_region = get_image(number,
                                     path='/extra' +
                                     '/processed_predictions_b/')

            images.append(x_region)
            classes.append(class_region)
            alt_images.append(x_whole)
            alt_classes.append(class_whole)
        except IOError:
            x = predict_image(image_id[0], image_id[1])
            images.append(x[4])
            classes.append(x[5])
            alt_images.append(x[1])
            alt_classes.append(x[2])

    images = np.asarray(images).transpose((1, 0, 2))[0]
    alt_images = np.asarray(alt_images).transpose((1, 0, 2))[0]
    classes = np.asarray(classes).transpose((1, 0, 2))[0]
    alt_classes = np.asarray(alt_classes).transpose((1, 0, 2))[0]

    # Convert next words to one hot vectors
    new_next_words = []
    for x in next_words:
        a = np.zeros(vocab_size)
        a[x] = 1
        new_next_words.append(a)
    next_words_one_hot = np.asarray(new_next_words)

    return classes, images, alt_classes, alt_images, partial_captions, next_words_one_hot, \
        vocab_size, idx_to_word, word_to_idx
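
The one-hot loop at the end of load_stream can also be written as a single NumPy indexing step; a minimal equivalent sketch, assuming next_words is a 1-D array of integer word indices all smaller than vocab_size:

    import numpy as np

    # Row i is the one-hot vector for word index next_words[i]
    next_words_one_hot = np.eye(vocab_size)[np.asarray(next_words)]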
Example #5
    # print([idx_to_word[n] if n!=0 else "null" for n in y],"NEXT_WORDS")

    # print("SANITY CHECK",idx_to_word[word_to_idx['the']],word_to_idx[idx_to_word[1]])

    # print([idx_to_word[x] if x!=0 else "null" for x in X[1][0]])
    # print(idx_to_word[y[0]])

    images = []
    for image_id in image_ids:
        number = str(('_0000000000000' + str(image_id))[-12:])

        try:
            x = get_image(number, path='../external/coco/processed/')
        except IOError:
            x = predict_image(str(image_id))

        images.append(x)

    X[0] = np.asarray(images).transpose((1, 0, 2))[0]
    partial_captions = X[1]  # (batch_size,16)
    max_caption_len = partial_captions.shape[1]

    next_words = y  # vocab_size
    new_next_words = []
    for x in next_words:
        # print x
        a = np.zeros(vocab_size)
        a[x] = 1
        new_next_words.append(a)
    next_words = np.asarray(new_next_words)
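
The expression ('_0000000000000' + str(image_id))[-12:] left-pads the numeric id to the 12-digit file-name format used for the COCO images; for ids of up to 12 digits, str.zfill gives the same result:

    image_id = 431
    # Both evaluate to '000000000431'
    assert ('_0000000000000' + str(image_id))[-12:] == str(image_id).zfill(12)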
Example #6
        cap = ['$START$']
        while len(cap) < max_caption_len:
            result = model.predict([new_class,new_image, words_to_caption(cap,word_to_idx,max_caption_len)])
            out = idx_to_word[np.argmax(result[0])]
            cap.append(out)
        return cap

    print(unroll('000000000431'))



    p = '000000000431'
    try:
        new_image = get_image(p,path=coco_dir+'/processed_flatten/')
    except IOError:
        new_image = predict_image(p)[1]
    try:
        new_class = get_image(p,path=coco_dir+'/processed_predictions/')
    except IOError:
        new_class = predict_image(p)[2]
    #new_image = np.zeros((1,num_img_features))
    cap = ['$START$']
    while len(cap) < max_caption_len:
        result = model.predict([new_class,new_image, words_to_caption(cap,word_to_idx,max_caption_len)])
        #m = max(result[0])
        # print(result)
        #out = idx_to_word[[i for i, j in enumerate(result[0]) if j == m][0]]
        
        out = idx_to_word[np.argmax(result[0])]
        cap.append(out)
        print(cap)
    # # # cap =

    # # # print(words_to_caption(cap,word_to_idx))
    # # result = model.predict([new_image, inp])
    # result2 = model.predict([new_image, words_to_caption(cap,word_to_idx, max_caption_len)])
    # # print("EQUAL?",np.array_equal(result,result2))
    # print(result[0][np.argmax(result[0])],"PROB DIST")
    # result = idx_to_word[np.argmax(result[0])]
    # # print(result)

    #new_image = images[0].reshape((1, num_img_features))
    try:
        new_image = get_image('000000000036',
                              path=coco_dir + '/processed_flatten/')
    except IOError:
        new_image = predict_image('000000000036')[1]
    #new_image = np.zeros((1,num_img_features))
    cap = ['$START$']
    while len(cap) < max_caption_len:
        result = model.predict(
            [new_image,
             words_to_caption(cap, word_to_idx, max_caption_len)])
        m = max(result[0])
        # print(result)
        out = idx_to_word[[i for i, j in enumerate(result[0]) if j == m][0]]
        # out = idx_to_word[sample(result[0])]
        cap.append(out)
        print(cap)
        # if out == STOP_TOKEN:
        #     break
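
The while-loops above implement greedy decoding: at each step the model predicts a distribution over the vocabulary and the argmax word is appended to the caption. A minimal sketch of the same idea as a standalone helper, assuming model, words_to_caption, word_to_idx, idx_to_word, max_caption_len and np are defined as in the surrounding code, and that stop_token (if given) matches the STOP_TOKEN referenced in the commented-out check:

    def greedy_caption(new_class, new_image, stop_token=None):
        cap = ['$START$']
        while len(cap) < max_caption_len:
            result = model.predict([new_class, new_image,
                                    words_to_caption(cap, word_to_idx, max_caption_len)])
            # Append the most probable next word
            out = idx_to_word[np.argmax(result[0])]
            cap.append(out)
            if stop_token is not None and out == stop_token:
                break
        return cap
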
def load_stream(stream_num, stream_size, preprocess, max_caption_len, word_to_idx):
    # Preprocess the data if necessary
    if preprocess:
        preprocess_captioned_images(stream_num=stream_num, stream_size=stream_size, word_to_idx=word_to_idx,
                                    max_cap_len=max_caption_len, coco_dir=coco_dir, category_name='person',
                                    out_file=data_path)

    with open(data_path, 'rb') as handle:
        X, next_words = pickle.load(handle)

    if False:

        new_X1 = []
        new_X0 = []
        new_y = []
        for i,x in enumerate(X[1]):
            if x[-5] == 0 and x[-6] != 0:
                new_X1.append(x[:-5])
                new_X0.append(X[0][i])
                new_y.append(next_words[i])
        next_words = np.asarray(new_y)
        X[0] = np.asarray(new_X0)
        X[1] = np.asarray(new_X1)

    print([([idx_to_word[x] for x in X[1][p]],idx_to_word[next_words[p]]) for (p,q) in enumerate(X[1][:50])])

    image_ids = X[0]
    partial_captions = X[1]
    vocab_size = len(word_to_idx)



    # Load the CNN feature representation of each image
    images = []
    for image_id in image_ids:
        number = str(('_0000000000000'+str(image_id))[-12:])

        try:
            x = get_image(number,path=coco_dir+'/processed/')
        except IOError:
            x = predict_image(str(image_id))[0]

        images.append(x)

    images = np.asarray(images).transpose((1,0,2))[0]

    classes = []
    for image_id in image_ids:
        number = str(('_0000000000000'+str(image_id))[-12:])

        try:
            x = get_image(number,path='../external/coco/processed_predictions/')
        except IOError:
            x = predict_image(str(image_id))[2]

        classes.append(x)

    classes = np.asarray(classes).transpose((1,0,2))[0]

    # Convert next words to one hot vectors
    new_next_words = []
    for x in next_words:
        a = np.zeros(vocab_size)
        a[x] = 1
        new_next_words.append(a)
    next_words_one_hot = np.asarray(new_next_words)

    return classes, images, partial_captions, next_words_one_hot, \
        vocab_size, idx_to_word, word_to_idx
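
The np.asarray(...).transpose((1, 0, 2))[0] pattern above stacks N arrays into an (N, F) matrix, assuming each get_image / predict_image result has shape (1, F) (which the transpose implies); under that assumption np.vstack is equivalent:

    images = np.vstack(images)  # (N, F), same as np.asarray(images).transpose((1, 0, 2))[0]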
    # # # cap = 

    # # # print(words_to_caption(cap,word_to_idx))
    # # result = model.predict([new_image, inp])
    # result2 = model.predict([new_image, words_to_caption(cap,word_to_idx, max_caption_len)])
    # # print("EQUAL?",np.array_equal(result,result2))
    # print(result[0][np.argmax(result[0])],"PROB DIST")
    # result = idx_to_word[np.argmax(result[0])]
    # # print(result)

    #new_image = images[0].reshape((1, num_img_features))
    p = '000000000094'
    try:
        new_image = get_image(p,path=coco_dir+'/processed/')
    except IOError:
        new_image = predict_image(p)[0]
    try:
        new_class = get_image(p,path=coco_dir+'/processed_predictions/')
    except IOError:
        new_class = predict_image(p)[2]
    #new_image = np.zeros((1,num_img_features))
    cap = ['$START$']
    while len(cap) < max_caption_len:
        result = model.predict([new_class,new_image, words_to_caption(cap,word_to_idx,max_caption_len)])
        m = max(result[0])
        # print(result)
        out = idx_to_word[[i for i, j in enumerate(result[0]) if j == m][0]]
        # out = idx_to_word[sample(result[0])]
        cap.append(out)
        print(cap)
        # if out == STOP_TOKEN: