def cifar_generator():
    """Yield endless shuffled (features, labels) batches from CIFAR-100.

    Relies on module-level globals defined elsewhere in the file:
    `batch_size`, `inp_w`/`inp_h`/`inp_c` (input width/height/channels),
    `perm` (permutation function), `preprocess`, `prepare_output_data`,
    `c100` (keras.datasets.cifar100 — TODO confirm) and `np`.

    Yields:
        batch_features : np.ndarray, shape (batch_size, inp_w, inp_h, inp_c)
        batch_labels   : np.ndarray, shape (batch_size, 100) one-hot labels
    """
    (X_train, y_train), (X_test, y_test) = c100.load_data()  # Load Data
    y_train, y_test = prepare_output_data(y_train, y_test)  # prepare y_label
    data_size = len(X_train)
    # Floor division: any trailing partial batch is simply dropped each epoch.
    batch_iter_per_epoch = data_size // batch_size
    while True:
        # Reshuffle the whole training set at the start of every epoch.
        shuffle_idx = perm(np.arange(data_size))
        print('\n')
        print("*" * 30)
        print("Data Size : {}".format(data_size))
        print("Batch Size : {}".format(batch_size))
        print("Batch iterations per Epoch : {}".format(batch_iter_per_epoch))
        print("*" * 30)
        print(shuffle_idx[0:10])

        for b in range(batch_iter_per_epoch):
            batch_features = np.zeros((batch_size, inp_w, inp_h, inp_c))
            batch_labels = np.zeros((batch_size, 100))

            for b_i, i in enumerate(
                    range(b * batch_size, (b + 1) * batch_size)):
                # Fill the batch in shuffled order.
                batch_features[b_i] = preprocess(X_train[shuffle_idx[i]],
                                                 color_type='RGB')
                batch_labels[b_i] = y_train[shuffle_idx[i]]
            yield batch_features, batch_labels
    # NOTE: the original ended with an unreachable print after the infinite
    # loop; that dead statement has been removed.
# ===== Example #2 (scraped separator, score: 0) =====
def _batch(chunk, is_test=False):
    """Convert one parsed annotation chunk into net input and loss targets.

    chunk layout: [jpg_name, [width, height, [[class, xmin, ymin, xmax, ymax], ...]]]
    e.g. ['006098.jpg', [375, 500, [['boat', 92, 74, 292, 178],
          ['bird', 239, 88, 276, 133], ['bird', 93, 100, 142, 140]]]]

    Returns (inp_feed_val, loss_feed_val): the preprocessed image and a dict
    of YOLO-style regression targets, or (None, None) if any object's center
    falls outside the S x S grid.
    """
    # S: grid cells per side, B: anchor boxes per cell, C: number of classes.
    S, B = cfg.cell_size, cfg.boxes_per_cell
    C, labels = cfg.num_classes, cfg.classes_name

    # preprocess
    jpg = chunk[0]
    w, h, allobj_ = chunk[1]
    # deepcopy so the in-place coordinate rewrites below don't corrupt the
    # caller's parsed-annotation cache.
    allobj = deepcopy(allobj_)
    if not is_test:
        path = os.path.join(cfg.imageset_location, jpg)
    else:
        path = os.path.join(cfg.test_imageset_location, jpg)
    img = preprocess(path, allobj)

    # Calculate regression target
    # cellx/celly: size of one grid cell in pixels.
    cellx = 1. * w / S
    celly = 1. * h / S
    for obj in allobj:
        centerx = .5 * (obj[1] + obj[3])  #xmin, xmax
        centery = .5 * (obj[2] + obj[4])  #ymin, ymax
        # Box center expressed in grid-cell units.
        cx = centerx / cellx
        cy = centery / celly
        if cx >= S or cy >= S: return None, None
        # obj is rewritten IN PLACE, and the order matters:
        # first obj[3]/obj[4] become sqrt of width/height normalized by the
        # image size (YOLO v1 loss uses sqrt(w), sqrt(h)), ...
        obj[3] = float(obj[3] - obj[1]) / w
        obj[4] = float(obj[4] - obj[2]) / h
        obj[3] = np.sqrt(obj[3])
        obj[4] = np.sqrt(obj[4])
        # ... then obj[1]/obj[2] become the center offset within its cell.
        obj[1] = cx - np.floor(cx)  # centerx
        obj[2] = cy - np.floor(cy)  # centery
        # Append the flat index of the responsible grid cell as obj[5].
        obj += [int(np.floor(cy) * S + np.floor(cx))]

    # show(im, allobj, S, w, h, cellx, celly) # unit test

    # Calculate placeholders' values
    probs = np.zeros([S * S, C])
    confs = np.zeros([S * S, B])
    coord = np.zeros([S * S, B, 4])
    proid = np.zeros([S * S, C])
    prear = np.zeros([S * S, 4])
    for obj in allobj:
        # print(type(obj), obj) # <class 'list'> ['horse', 0.024000000000000021, 0.48952095808383245, 0.92303846073714613, 0.85995404416970578, 24]
        # One-hot class probability for the responsible cell.
        probs[obj[5], :] = [0.] * C
        probs[obj[5], labels.index(obj[0])] = 1.
        proid[obj[5], :] = [1] * C
        # Same coord target replicated across all B boxes of the cell.
        coord[obj[5], :, :] = [obj[1:5]] * B
        # Corner coordinates in cell units: obj[3]**2 / obj[4]**2 undo the
        # sqrt above, * S converts image-relative extent to cell units.
        prear[obj[5], 0] = obj[1] - obj[3]**2 * .5 * S  # xleft
        prear[obj[5], 1] = obj[2] - obj[4]**2 * .5 * S  # yup
        prear[obj[5], 2] = obj[1] + obj[3]**2 * .5 * S  # xright
        prear[obj[5], 3] = obj[2] + obj[4]**2 * .5 * S  # ybot
        confs[obj[5], :] = [1.] * B

    # Finalise the placeholders' values
    # Broadcast corners/areas to shape (S*S, B, ...) for the per-box IoU
    # computation in the loss layer.
    upleft = np.expand_dims(prear[:, 0:2], 1)
    botright = np.expand_dims(prear[:, 2:4], 1)
    wh = botright - upleft
    area = wh[:, :, 0] * wh[:, :, 1]
    upleft = np.concatenate([upleft] * B, 1)
    botright = np.concatenate([botright] * B, 1)
    areas = np.concatenate([area] * B, 1)

    # value for placeholder at input layer
    inp_feed_val = img
    # value for placeholder at loss layer
    loss_feed_val = {
        'probs': probs,
        'confs': confs,
        'coord': coord,
        'proid': proid,
        'areas': areas,
        'upleft': upleft,
        'botright': botright
    }

    return inp_feed_val, loss_feed_val
# ===== Example #3 (scraped separator, score: 0) =====
# Run the detector over a video file, sampling every `frameSteps`-th frame,
# and collect detections as DB_Item rows. Relies on module-level names
# defined elsewhere: video_path, maxFrame, frameSteps, test_threshold,
# videoId, model, preprocess, post_progress, DB_Item, cv2, np.
frameNum = -1
items = []
cap = cv2.VideoCapture(video_path)
try:
    while True:
        ret, frame = cap.read()
        frameNum += 1

        # Stop at the frame budget or when the stream is exhausted
        # (ret is False once no frame can be read).
        if frameNum > maxFrame or not ret:
            break

        # Subsample: only process every frameSteps-th frame.
        if frameNum % frameSteps != 0:
            continue

        print("FrameNum : {}".format(frameNum))
        img = preprocess(frame)
        batch = np.expand_dims(img, axis=0)  # add batch dim for the model
        net_out = model.predict(batch)
        out_img, objects, is_object = post_progress(net_out[0],
                                                    im=frame,
                                                    is_save=False,
                                                    threshold=test_threshold)

        if is_object:
            # Each detection presumably unpacks to class/box/score fields —
            # TODO confirm against DB_Item's constructor.
            for each_object in objects:
                items.append(
                    DB_Item(videoId, frameNum, each_object[0], each_object[1],
                            each_object[2], each_object[3], each_object[4],
                            each_object[5]))

        if len(items) >= 10:
            # NOTE(review): the source is truncated here — the body of this
            # `if` (presumably a batched DB flush of `items`) and the rest of
            # the try/except are missing from this chunk.
# ===== Example #4 (scraped separator, score: 0) =====
# # In[7]:

# Notebook-style single-image test. Alternative test images below —
# uncomment exactly one imagePath line to switch inputs.
# imagePath = './test/my_testset/001618.jpg'
# imagePath = './test/my_testset/000892.jpg'
# imagePath = './test/my_testset/000906.jpg'
# imagePath = './test/my_testset/000467.jpg'
# imagePath = './test/my_testset/000386.jpg'
# imagePath = './test/my_testset/many_person.jpg'
imagePath = './test/my_testset/person.jpg'
# imagePath = './test/my_testset/person_car3.jpg'
# imagePath = './test/my_testset/person.jpg'
# imagePath = './test/my_testset/car1.jpg'
image = cv2.imread(imagePath)  # BGR ndarray, or None if the path is wrong
print("1", image.shape)
# Project-local preprocessing — presumably resize/normalize for the net;
# TODO confirm against the preprocess definition.
image = preprocess(image)

batch = np.expand_dims(image, axis=0)  # add batch dimension: (1, H, W, C)
print("4", batch.shape)

# # In[8]:

# out = model.predict(batch)
# print("5", out.shape)

# # In[9]:

# out_img = post_progress(out[0], im=image, is_save=False, threshold=0.1)
# print("6", out_img.shape)
# out_img = cv2.cvtColor(out_img, cv2.COLOR_BGR2RGB)
# plt.imshow(out_img)