Example #1
        def _get_one_triplet(input_data, input_labels):
            input_labels = np.array(input_labels)
            index = np.random.choice(n_labels, 2, replace=False)
            label_positive = index[0]
            label_negative = index[1]

            indexes = utils.get_index(input_labels, index[0])
            np.random.shuffle(indexes)
            # print(indexes[0])
            data_anchor = input_data[indexes[0], :, :, :]
            data_anchor = utils.prewhiten(data_anchor)
            data_anchor = utils.flip(data_anchor, random_flip=True)
            data_anchor = utils.random_crop(data_anchor, image_size=299)
            data_anchor = utils.random_rotate_image(data_anchor)

            data_positive = input_data[indexes[1], :, :, :]
            data_positive = utils.prewhiten(data_positive)
            data_positive = utils.flip(data_positive, random_flip=True)
            data_positive = utils.random_crop(data_positive, image_size=299)
            data_positive = utils.random_rotate_image(data_positive)

            indexes = utils.get_index(input_labels, index[1])
            # print(indexes)
            np.random.shuffle(indexes)
            data_negative = input_data[indexes[0], :, :, :]
            data_negative = utils.prewhiten(data_negative)
            data_negative = utils.flip(data_negative, random_flip=True)
            data_negative = utils.random_crop(data_negative, image_size=299)
            data_negative = utils.random_rotate_image(data_negative)
            # print(np.shape(data_negative))


            # the anchor and positive samples share the same label
            return data_anchor, data_positive, data_negative, \
                   label_positive, label_positive, label_negative
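The sampler above relies on `n_labels` from the enclosing scope and on a `utils.get_index` helper that returns the indices of every sample carrying a given label. A minimal sketch of such a helper, assuming NumPy label arrays (the real utils function may differ in name or details):

import numpy as np

def get_index(labels, value):
    # Indices of every sample whose label equals `value`.
    return np.where(np.asarray(labels) == value)[0]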
Example #2
def correct_img_torch(x_s, scale, r, s, device, for_dag=True, eps=1e-9, pad='circular'):
    conv_shape = (s.shape[2] + r.shape[2] - 1, s.shape[3] + r.shape[3] - 1)
    S = utils.fft2(s/s.sum(), conv_shape[1], conv_shape[0])
    R = utils.fft2(utils.flip(r)/r.sum(), conv_shape[1], conv_shape[0])
    Q_unscaled = utils.mul_complex(R, S)
    q_unscaled = torch.irfft(Q_unscaled, signal_ndim=2, normalized=False, onesided=False)
    q = q_unscaled[:,:,np.mod(q_unscaled.shape[2], scale)::scale, np.mod(q_unscaled.shape[3], scale)::scale]
    Q = torch.rfft(q, signal_ndim=2, normalized=False, onesided=False)

    # Q_star = utils.conj(Q)
    # abs2_Q = utils.abs2(Q)
    # H = torch.cat( (Q_star[:,:,:,:,0:1]/(abs2_Q[:,:,:,:,0:1] + eps), Q_star[:,:,:,:,1:2]/(abs2_Q[:,:,:,:,0:1] + eps)), dim=4)

    H = utils.inv_complex(Q, eps)

    h_ = torch.irfft(H, signal_ndim=2, normalized=False, onesided=False)
    h = utils.roll_y(utils.roll_x(h_/h_.sum(), -1), -1)

    x_h = utils.filter_2D_torch(x_s, utils.flip(h), device, pad=pad)

    if(for_dag):
        x_h = utils.bicubic_up(x_h, scale, device)
        x_h = utils.downsample_bicubic(x_h, scale, device)

    return x_h
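Example #2 is written against the pre-1.8 `torch.rfft`/`torch.irfft` API, where a complex tensor is stored as a real tensor with a trailing dimension of size 2. A rough modern equivalent of the two two-sided calls, assuming that representation (a sketch, not a drop-in replacement for the whole function):

import torch

def irfft_twosided(x):
    # Old torch.irfft(x, signal_ndim=2, onesided=False): inverse 2-D FFT, keep the real part.
    return torch.fft.ifft2(torch.view_as_complex(x.contiguous())).real

def rfft_twosided(x):
    # Old torch.rfft(x, signal_ndim=2, onesided=False): full 2-D FFT stored as a (..., 2) real tensor.
    return torch.view_as_real(torch.fft.fft2(x))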
Example #3
def _main_():
    args = argparser.parse_args()
    config_path = args.config
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    weights_path = args.weights_path
    sm_model = SMModel(config['model'])
    sm_model.model.summary()
    sm_model.model.load_weights(weights_path)
    test_generator = DataGenerator(config=config['test'],
                                   preprocessing=sm_model.preprocessing,
                                   n_class=sm_model.n_class,
                                   split='test')
    encoded_pixels = []
    image_id_class_id = []
    for X, filenames in tqdm(list(test_generator)):
        preds = sm_model.model.predict_on_batch(X)
        if config['test']['tta']:
            for flip_type in ['ud', 'lr', 'udlr']:
                X_temp = flip(X.copy(), flip_type)
                pred_temp = sm_model.model.predict_on_batch(X_temp)
                preds += flip(pred_temp, flip_type)
            preds /= 4
        preds = postprocess(preds, config['postprocess'], True)
        for i in range(len(preds)):
            for j in range(4):
                encoded_pixels.append(run_length_encode(preds[i, :, :, j]))
                image_id_class_id.append(filenames[i] + '_{}'.format(j + 1))
    df = pd.DataFrame(data=encoded_pixels,
                      index=image_id_class_id,
                      columns=['EncodedPixels'])
    df.index.name = 'ImageId_ClassId'
    df.to_csv('submission.csv')
Example #4
    def valid_epoch(self):

        self.net.eval()
        gt = []
        pred = []
        for i, (X1, X2, y_true) in enumerate(self.verifyLoader):

            if cuda.is_available():
                X1, X2, y_true = X1.cuda(), X2.cuda(), y_true.cuda()

            feat1 = torch.cat([self.net(X1), self.net(flip(X1))],
                              dim=1).view(-1)
            feat2 = torch.cat([self.net(X2), self.net(flip(X2))],
                              dim=1).view(-1)
            # cosine = distCosine(feat1, feat2)

            dist = torch.exp(-torch.norm(feat1 - feat2)**2)

            gt += [y_true]
            pred += [dist.detach()]

        gt = torch.cat(gt, dim=0).cpu().numpy()
        pred = torch.cat([p.unsqueeze(0) for p in pred], dim=0).cpu().numpy()
        thresh, acc, f1 = cvSelectThreshold(pred, gt)

        return thresh, acc, f1
Example #5
def rotateImageCallBack():
    data.matrix = utils.flip(data.matrix)
    data.frameA = utils.flip(data.frameA)
    data.frameB = utils.flip(data.frameB)
    img = Image.fromarray(data.matrix)
    data.image = img
    logScreen("Imagen rotada.")
    previewImageCallBack(1)
Example #6
def main(datapath='../../data/lfw-Aligned',
         modelpath='../MobileFacenet_best.pkl',
         thresh=0.3):

    dataset = LFWPairs(datapath=datapath)
    dataloader = DataLoader(dataset)
    net = load_net(modelpath)  # TODO
    if cuda.is_available(): net.cuda()

    f = open("../../cosine_score_py.txt", 'w')
    gt = []
    pred = []
    print('\033[2J')
    print('\033[1;1H')

    net.eval()
    for i, (X1, X2, y_true) in enumerate(dataloader):

        if cuda.is_available():
            X1 = X1.cuda()
            X2 = X2.cuda()
            y_true = y_true.cuda()

        feat1 = torch.cat([net.get_feature(X1),
                           net.get_feature(flip(X1))],
                          dim=1).view(-1)
        feat2 = torch.cat([net.get_feature(X2),
                           net.get_feature(flip(X2))],
                          dim=1).view(-1)
        cosine = distCosine(feat1, feat2)

        gt += [y_true.cpu().numpy()]
        pred += [cosine.detach().cpu().numpy()]

        line = "{:d} {:d} {:f}\n".format(i, int(gt[i]), float(pred[i]))
        f.write(line)

        print('\033[0;0H\033[K')
        print('[{:4d}]/[{:4d}] {:s}'.format(i, len(dataset), line))

    gt = np.array(gt)
    pred = np.array(pred)
    pd = np.zeros(len(pred))
    pd[pred > thresh] = 1

    gt = gt.reshape(-1)
    pd = pd.reshape(-1)
    acc = accuracy_score(gt, pd)
    p = precision_score(gt, pd)
    r = recall_score(gt, pd)

    print('accuracy: {:.4f}, precision: {:.4f}, recall: {:.4f}'.format(
        acc, p, r))

    f.close()
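Example #6 (and the commented-out line in Example #4) compares the two concatenated feature vectors with `distCosine`. A minimal cosine-similarity sketch for 1-D feature tensors, assuming that is what the helper computes:

import torch

def dist_cosine(feat1, feat2, eps=1e-8):
    # Cosine similarity between two 1-D feature vectors.
    return torch.dot(feat1, feat2) / (feat1.norm() * feat2.norm() + eps)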
Example #7
    def new_split(self, rect):
        if rect.w > MIN_BSP_SIZE * 2 and rect.h > MIN_BSP_SIZE * 2:
            if utils.flip() and rect.x + rect.w / 2 + MIN_BSP_SIZE < len(
                    self.map) and rect.y + rect.h / 2 + MIN_BSP_SIZE < len(self.map[0]):

                x = libtcod.random_get_int(0, rect.x + MIN_BSP_SIZE, rect.x + rect.w - MIN_BSP_SIZE)
                w = (rect.x + rect.w) - x

                baby = Rect(rect.w - w, rect.h, rect.x, rect.y)
                baby.bsp(rect)
                baby_2 = Rect(w - 1, rect.h, x, rect.y)
                baby_2.bsp(rect)

                rect.babies.append(baby)
                rect.babies.append(baby_2)
            else:
                y = libtcod.random_get_int(0, rect.y + MIN_BSP_SIZE, rect.y + rect.h - MIN_BSP_SIZE)

                h = (rect.y + rect.h) - y

                baby = Rect(rect.w, rect.h - h, rect.x, rect.y)
                baby.bsp(rect)
                baby_2 = Rect(rect.w, h - 1, rect.x, y)
                baby_2.bsp(rect)

                rect.babies.append(baby)
                rect.babies.append(baby_2)
        else:
            rect.end = True

        for baby in rect.babies:
            if baby.end != True:
                self.new_split(baby)
Example #8
    def epsilon_linear_policy(self, epsilon, w, s):
        best = np.argmax(
            [np.dot(w, self.phi(s, a)) for a in range(self.nactions)])
        if flip(epsilon):
            return pr.choice(self.actions)
        else:
            return best
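The epsilon-greedy policy above treats `flip(epsilon)` as a biased coin that comes up True with probability `epsilon`, in which case a random action is taken instead of the greedy one. A minimal sketch of that convention (the real helper may differ):

import random

def flip(p=0.5):
    # Return True with probability p.
    return random.random() < p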
Example #9
    def split(self, rect):
        if rect.w - 1 >= MIN_BSP_SIZE * 1.5 or rect.h - 1 >= MIN_BSP_SIZE * 1.5:
            if utils.flip() and rect.w >= MIN_BSP_SIZE:
                x = libtcod.random_get_int(0, rect.x + MIN_BSP_SIZE, rect.x + rect.w - MIN_BSP_SIZE)
                w = (rect.x + rect.w) - x

                tries = 0
                while x + w >= len(self.map) or tries <= 3:
                    print(x, w, "horz out of bounds")
                    x = rect.x + 1
                    w = rect.w // 2 - 1
                    tries += 1  # count attempts so the retry loop terminates

                baby = Rect(rect.w - w, rect.h, rect.x, rect.y)
                baby.bsp(rect)
                baby_2 = Rect(w, rect.h, x, rect.y)
                baby_2.bsp(rect)

                rect.babies.append(baby)
                rect.babies.append(baby_2)
            else:
                if rect.w < MIN_BSP_SIZE:
                    y = libtcod.random_get_int(0, rect.y + MIN_BSP_SIZE, rect.y + rect.h - MIN_BSP_SIZE)
                    h = (rect.y + rect.h) - y

                    tries = 0
                    while y + h > len(self.map[0]) or tries <= 3:
                        print(y, h, "vert out of bounds")
                        y = rect.y + 1
                        h = rect.h // 2
                        tries += 1  # count attempts so the retry loop terminates

                    baby = Rect(rect.w, rect.h - h, rect.x, rect.y)
                    baby.bsp(rect)
                    rect.babies.append(baby)

                else:
                    y = libtcod.random_get_int(0, rect.y + MIN_BSP_SIZE, rect.y + rect.h - MIN_BSP_SIZE)

                    h = (rect.y + rect.h) - y

                    tries = 0
                    while y + h > len(self.map[0]) or tries <= 3:
                        print(y, h, "vert out of bounds")
                        y = rect.y + 1
                        h = rect.h // 2
                        tries += 1  # count attempts so the retry loop terminates

                    baby = Rect(rect.w, rect.h - h, rect.x, rect.y)
                    baby.bsp(rect)
                    baby_2 = Rect(rect.w, h, rect.x, y)
                    baby_2.bsp(rect)

                    rect.babies.append(baby)
                    rect.babies.append(baby_2)

        else:
            rect.end = True

        for baby in rect.babies:
            if baby.end != True:
                self.split(baby)
Example #10
    def _read_image(im):
        im = cv2.imread(im)
        im = utils.prewhiten(im)
        im = utils.flip(im, random_flip=True)
        im = utils.random_crop(im, image_size=299)
        im = cv2.resize(im, (128, 128))
        im = utils.random_rotate_image(im)
        return im
Example #11
def singleton_detection_nso(U_slice, **kwargs):
    n = kwargs.get("n")
    chunks = sign(np.reshape(U_slice, (len(U_slice) // (n + 1), n + 1)))
    chunks = (np.mod((chunks.T + chunks[:, 0]).T, 2)).astype(dtype=int)[:, 1:]
    choices = np.vstack(
        (np.sum(chunks, axis=0), np.sum([flip(c) for c in chunks], axis=0)))
    nso_k = np.argmin(choices, axis=0)
    return nso_k, 1
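In Example #11, `flip(c)` is applied to 0/1 chunks, so the two rows of `choices` count agreements with a candidate sign pattern and with its bitwise complement. A sketch under the assumption that `flip` simply inverts each bit:

import numpy as np

def flip(bits):
    # Invert each bit of a 0/1 array.
    return 1 - np.asarray(bits)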
Example #12
    def train_load_and_preprocess_from_path_label_c(self, path, label,
                                                    crop_bool, flip_bool):
        image = load_and_preprocess_image(path, self.IMG_X_SIZE,
                                          self.IMG_Y_SIZE)
        if flip_bool:
            image = flip(image)
        if crop_bool:
            image = crop(image, self.IMG_Y_SIZE, self.IMG_X_SIZE)
        return image, tf.one_hot(indices=[label], depth=self.N_CLASSES)[0]
Example #13
    def extract_hidden_layers(self, input, hidden, index_target):

        input_f, input_b = input

        index_target_f = index_target
        index_target_b = len(input_f) - index_target + 1

        emb_f = self.drop(self.encoder(input_f[:index_target_f]))
        emb_b = self.drop(self.encoder(input_b[:index_target_b]))

        hidden_f = hidden[0]
        hidden_b = hidden[1]

        output_f, hidden_f = self.forward_lstm(emb_f, hidden_f)
        output_b, hidden_b = self.backward_lstm(emb_b, hidden_b)

        predictive_hidden_layers = []
        for i in range(len(hidden_f)):
            f = hidden_f[i][0]
            b = utils.flip(hidden_b[i][0], dim=0)
            output_i = torch.cat((f, b), dim=2)
            predictive_hidden_layers.append(output_i.squeeze(0).squeeze(0))

        hidden = self.init_hidden(1)

        emb_f = self.drop(self.encoder(input_f[:index_target_f + 1]))
        emb_b = self.drop(self.encoder(input_b[:index_target_b + 1]))

        hidden_f = hidden[0]
        hidden_b = hidden[1]

        output_f, hidden_f = self.forward_lstm(emb_f, hidden_f)
        output_b, hidden_b = self.backward_lstm(emb_b, hidden_b)

        current_hidden_layers = []
        for i in range(len(hidden_f)):
            f = hidden_f[i][0]
            b = utils.flip(hidden_b[i][0], dim=0)
            output_i = torch.cat((f, b), dim=2)
            current_hidden_layers.append(output_i.squeeze(0).squeeze(0))

        return predictive_hidden_layers, current_hidden_layers
Example #14
    def __getitem__(self, index):
        images = self.loader(self.images[index])
        targets = torch.Tensor(self.boxes[index])

        if self.aug:
            images = photometric_distort(images)
            if random.random() < 0.5:
                images, targets = flip(images, targets)

        inputs = self.transform(images)
        width, height = images.size
        targets = self.target_transform(targets, (width, height))

        return inputs, targets
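Example #14's `flip(images, targets)` has to mirror both the PIL image and its bounding boxes. A sketch of that transform, assuming `targets` holds pixel-space [xmin, ymin, xmax, ymax] rows (the repo's own helper may handle additional fields):

import torch
from PIL import Image

def flip(image, boxes):
    # Horizontally mirror a PIL image and its [xmin, ymin, xmax, ymax] boxes.
    image = image.transpose(Image.FLIP_LEFT_RIGHT)
    boxes = boxes.clone()
    boxes[:, 0], boxes[:, 2] = image.width - boxes[:, 2], image.width - boxes[:, 0]
    return image, boxes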
Example #15
def process_csv_line(line):
    """
    Given a csv line, it loads the image and performs some modifications
    with the purpose of equalizing steering angles distribution.
    """
    img, angle = load_img_from_csv_line(line)

    # image perturbations
    img, angle = translate(img, angle)
    img = perturb_brightness(img)
    img = add_shadow(img)
    if do_flip():
        img, angle = flip(img, angle)

    return img, angle
Example #16
def batch_generator(images, angles, augment_data=True, batch_size=64):
    batch_images = []
    batch_angles = []
    sample_count = 0
    while True:
        for i in np.random.permutation(images.shape[0]):
            center_path = 'data/'+images.iloc[i,0]
            left_path = 'data/'+images.iloc[i,1]
            right_path = 'data/'+images.iloc[i,2]
            center_path = center_path.replace(" ", "")
            left_path = left_path.replace(" ", "")
            right_path = right_path.replace(" ", "")

            center_image = utils.load_image(center_path)
            angle = float(angles.iloc[i])
            batch_images.append(center_image)
            batch_angles.append(angle)

            sample_count += 1

            if augment_data:
                flipped_image = utils.flip(center_path)
                flipped_angle = -1.0 * angle
                batch_images.append(flipped_image)
                batch_angles.append(flipped_angle)

                tint_image = utils.tint_image(center_path)
                tint_angle = angle
                batch_images.append(tint_image)
                batch_angles.append(tint_angle)

                jittered_image, jitter_angle = utils.jitter_image(center_path,angle)
                batch_images.append(jittered_image)
                batch_angles.append(jitter_angle)

                left_image = utils.load_image(left_path)
                left_angle = min(1.0, angle+ SIDE_STEERING_CONSTANT)
                batch_images.append(left_image)
                batch_angles.append(left_angle)

                right_image = utils.load_image(right_path)
                right_angle = max(-1.0, angle - SIDE_STEERING_CONSTANT)
                batch_images.append(right_image)
                batch_angles.append(right_angle)

            if (sample_count % batch_size == 0) or (sample_count % len(images) == 0):
                yield np.array(batch_images), np.array(batch_angles)
                batch_images, batch_angles = [], []
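In the batch generator above, the flip augmentation pairs a mirrored image with a negated steering angle (here `utils.flip` appears to take the image path). A standalone sketch of the same idea operating directly on a NumPy image:

import cv2

def flip_image_and_angle(image, angle):
    # Mirror the image left-right and negate the steering angle.
    return cv2.flip(image, 1), -angle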
Example #17
    def forward(self, input, hidden):
        input_f, input_b = input
        emb_f = self.drop(self.encoder(input_f))
        emb_b = self.drop(self.encoder(input_b))

        hidden_f = hidden[0]
        hidden_b = hidden[1]

        output_f, hidden_f = self.forward_lstm(emb_f, hidden_f)
        output_b, hidden_b = self.backward_lstm(emb_b, hidden_b)

        output = output_f + utils.flip(
            output_b, dim=0)  # output is sum of forward and backward

        decoded = self.decoder(
            output.view(output.size(0) * output.size(1), output.size(2)))
        return decoded.view(output.size(0), output.size(1),
                            decoded.size(1)), (hidden_f, hidden_b)
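Examples #13 and #17 use `utils.flip(tensor, dim)` to reverse the backward LSTM's outputs along the time dimension so they line up with the forward pass. In current PyTorch this is exactly `torch.flip`; a one-line sketch assuming that equivalence:

import torch

def flip(x, dim):
    # Reverse tensor x along dimension `dim`.
    return torch.flip(x, dims=[dim])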
Example #18
def saveImageThermalCallBack():
    root.filename = tkinter.filedialog.asksaveasfilename(
        initialdir=dir,
        title="Select file",
        filetypes=(("pdf files", "*.pdf"), ("all files", "*.*")))
    data.filename = root.filename
    thermal_matrix = np.array(data.thermal_matrix)
    numrows, numcols = thermal_matrix.shape

    def format_coord(x, y):
        z = -150.0 + thermal_matrix[y, x] * 200 / 255.0
        return z

    imgt = Image.fromarray(thermal_matrix)

    y = np.arange(0, numrows - 1, 1)
    x = np.arange(0, numcols - 1, 1)
    X, Y = np.meshgrid(x, y)

    # Calculating the output and storing it in the array Z
    Z = format_coord(X, Y)
    plt.figure(figsize=(8, 6))
    img = plt.imshow(utils.flip(Z),
                     cmap='gnuplot2',
                     interpolation='none',
                     origin='upper')
    plt.grid(True)
    plt.title(r"Temperatura ($^\circ$C )")
    plt.colorbar(img)
    plt.savefig(root.filename, dpi=300)
    plt.show()
    '''
    fig, ax = plt.subplots(dpi = 300)
    cax = ax.imshow(imgt, cmap='jet')
    ax.format_coord = format_coord
    ax.grid(True)
    cbar = fig.colorbar(cax)
    '''
    logScreen("Imagen guardada en " + root.filename)
Example #19
    def new_split(self, rect):
        if rect.w > MIN_BSP_SIZE * 2 and rect.h > MIN_BSP_SIZE * 2:
            if utils.flip() and rect.x + rect.w / 2 + MIN_BSP_SIZE < len(
                    self.map) and rect.y + rect.h / 2 + MIN_BSP_SIZE < len(
                        self.map[0]):

                x = libtcod.random_get_int(0, rect.x + MIN_BSP_SIZE,
                                           rect.x + rect.w - MIN_BSP_SIZE)
                w = (rect.x + rect.w) - x

                baby = Rect(rect.w - w, rect.h, rect.x, rect.y)
                baby.bsp(rect)
                baby_2 = Rect(w - 1, rect.h, x, rect.y)
                baby_2.bsp(rect)

                rect.babies.append(baby)
                rect.babies.append(baby_2)
            else:
                y = libtcod.random_get_int(0, rect.y + MIN_BSP_SIZE,
                                           rect.y + rect.h - MIN_BSP_SIZE)

                h = (rect.y + rect.h) - y

                baby = Rect(rect.w, rect.h - h, rect.x, rect.y)
                baby.bsp(rect)
                baby_2 = Rect(rect.w, h - 1, rect.x, y)
                baby_2.bsp(rect)

                rect.babies.append(baby)
                rect.babies.append(baby_2)
        else:
            rect.end = True

        for baby in rect.babies:
            if baby.end != True:
                self.new_split(baby)
Example #20
def generate_hdf5(ftxt, output, fname, argument=False):

    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []
    EN_imgs = []
    EN_landmarks = []
    NM_imgs = []
    NM_landmarks = []

    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)  # CV_LOAD_IMAGE_GRAYSCALE in legacy OpenCV
        assert(img is not None)
        logger("process %s" % imgPath)
        # F
        f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
        f_face = img[f_bbox.top:f_bbox.bottom+1,f_bbox.left:f_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > -1:
            ### flip
            face_flipped, landmark_flipped = flip(f_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (39, 39))
            F_imgs.append(face_flipped.reshape((1, 39, 39)))
            F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), 5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), -5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))

        f_face = cv2.resize(f_face, (39, 39))
        en_face = f_face[:31, :]
        nm_face = f_face[8:, :]

        f_face = f_face.reshape((1, 39, 39))
        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

        # EN
        # en_bbox = bbox.subBBox(-0.05, 1.05, -0.04, 0.84)
        # en_face = img[en_bbox.top:en_bbox.bottom+1,en_bbox.left:en_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(en_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape((1, 31, 39))
            landmark_flipped = landmark_flipped[:3, :].reshape((6))
            EN_imgs.append(face_flipped)
            EN_landmarks.append(landmark_flipped)

        en_face = cv2.resize(en_face, (31, 39)).reshape((1, 31, 39))
        en_landmark = landmarkGt[:3, :].reshape((6))
        EN_imgs.append(en_face)
        EN_landmarks.append(en_landmark)

        # NM
        # nm_bbox = bbox.subBBox(-0.05, 1.05, 0.18, 1.05)
        # nm_face = img[nm_bbox.top:nm_bbox.bottom+1,nm_bbox.left:nm_bbox.right+1]

        ## data augmentation
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(nm_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape((1, 31, 39))
            landmark_flipped = landmark_flipped[2:, :].reshape((6))
            NM_imgs.append(face_flipped)
            NM_landmarks.append(landmark_flipped)

        nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 31, 39))
        nm_landmark = landmarkGt[2:, :].reshape((6))
        NM_imgs.append(nm_face)
        NM_landmarks.append(nm_landmark)

    #imgs, landmarks = process_images(ftxt, output)

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    EN_imgs, EN_landmarks = np.asarray(EN_imgs), np.asarray(EN_landmarks)
    NM_imgs, NM_landmarks = np.asarray(NM_imgs),np.asarray(NM_landmarks)

    F_imgs = processImage(F_imgs)
    shuffle_in_unison_scary(F_imgs, F_landmarks)
    EN_imgs = processImage(EN_imgs)
    shuffle_in_unison_scary(EN_imgs, EN_landmarks)
    NM_imgs = processImage(NM_imgs)
    shuffle_in_unison_scary(NM_imgs, NM_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)

    # eye and nose
    base = join(OUTPUT, '1_EN')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = EN_imgs.astype(np.float32)
        h5['landmark'] = EN_landmarks.astype(np.float32)

    # nose and mouth
    base = join(OUTPUT, '1_NM')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = NM_imgs.astype(np.float32)
        h5['landmark'] = NM_landmarks.astype(np.float32)
Example #21
def main(args):

    IMG_SIZE = args.IMG_SIZE
    print("Image size: " + str(IMG_SIZE))

    save_dir = '../Data/' + str(IMG_SIZE)
    landmark_save_dir = '../Data/' + str(IMG_SIZE) + '/landmark'
    record_save_dir = '../Data/' + str(IMG_SIZE) + '/landmark_record.pkl'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    if not os.path.exists(landmark_save_dir):
        os.mkdir(landmark_save_dir)

    f = open(RECORD_PATH, 'rb')
    info = pkl.load(f)
    f.close()

    idx = 0
    aug_list = []

    for i in range(len(info)):

        pic_info = info[i]
        image_name = pic_info[0]
        roi = pic_info[1]
        landmark = pic_info[2]

        print("Processing the image: " + image_name)
        print(str(idx) + " images generated. ")

        image = cv2.imread(
            os.path.join('../Data/CelebA/Img/img_celeba', image_name))
        height, width, channel = image.shape

        # Crop out the face area
        x1, y1, x2, y2 = roi
        face = image[y1:y2, x1:x2]
        w = x2 - x1 + 1
        h = y2 - y1 + 1
        # Calculate the relative position of each landmark
        transfered_landmark = np.zeros((6, 2))
        for k in range(6):
            transfered_landmark[k][0] = (landmark[k * 2] - x1) / (x2 - x1)
            transfered_landmark[k][1] = (landmark[k * 2 + 1] - y1) / (y2 - y1)
        '''Verify
        temp = face.copy()
        for k in range(6):
            cv2.circle(temp, (int(transfered_landmark[k][0] * (x2 - x1)), int(transfered_landmark[k][1] * (y2 - y1))), 2, (0, 255, 0), 1)
        cv2.imshow('t', temp)
        cv2.waitKey()
        cv2.destroyAllWindows() # Verified
        del temp
        '''
        # Save the resized face image
        resized_face = cv2.resize(face, (IMG_SIZE, IMG_SIZE),
                                  interpolation=cv2.INTER_LINEAR)
        saving_name = str(idx) + '.jpg'
        saving_path = os.path.join(landmark_save_dir, saving_name)
        success = cv2.imwrite(saving_path, resized_face)
        if not success:
            raise Exception("Landmark picture " + str(idx) +
                            " saving failed. ")
        aug_list.append([
            'landmark/' + saving_name, -2,
            np.array([-1, -1, -1, -1]),
            np.squeeze(np.array(transfered_landmark.reshape(1, -1)))
        ])
        idx = idx + 1

        # Mirror
        flipped_face, flipped_landmark = flip(face, transfered_landmark)
        '''Verify
        temp = flipped_face.copy()
        for k in range(6):
            cv2.circle(temp, (int(flipped_landmark[k][0] * (x2 - x1)), int(flipped_landmark[k][1] * (y2 - y1))), 2, (0, 255, 0), 1)
        cv2.imshow('t', temp)
        cv2.waitKey()
        cv2.destroyAllWindows() # Verified
        del temp
        '''
        # Save the resized face image
        resized_flipped_face = cv2.resize(flipped_face, (IMG_SIZE, IMG_SIZE),
                                          interpolation=cv2.INTER_LINEAR)
        saving_name = str(idx) + '.jpg'
        saving_path = os.path.join(landmark_save_dir, saving_name)
        success = cv2.imwrite(saving_path, resized_flipped_face)
        if not success:
            raise Exception("Landmark picture " + str(idx) +
                            " saving failed. ")
        aug_list.append([
            'landmark/' + saving_name, -2,
            np.array([-1, -1, -1, -1]),
            np.squeeze(np.array(flipped_landmark.reshape(1, -1)))
        ])
        idx = idx + 1

        # Drop the faces that are too small or exceed the boundaries
        if max(w, h) < 40 or x1 < 0 or y1 < 0:
            continue

        # Augment the cropped face
        for j in range(10):

            # Randomly pick a new size & shifts
            new_size = np.random.randint(
                int(min(w, h) * 0.8),
                np.ceil(max(w, h) * 1.25))  # new_size = width/height - 1
            delta_x = np.random.randint(-w * 0.15, w * 0.15)
            delta_y = np.random.randint(-h * 0.15, h * 0.15)

            new_x1 = int(max(0, x1 + w / 2 - new_size / 2 + delta_x))
            new_y1 = int(max(0, y1 + h / 2 - new_size / 2 + delta_y))
            new_x2 = new_x1 + new_size
            new_y2 = new_y1 + new_size

            if new_x2 > width or new_y2 > height:
                continue

            crop_box = np.array([new_x1, new_y1, new_x2, new_y2])
            iou = IoU(crop_box, np.array(roi).reshape(1, -1))

            if iou > 0.65:

                cropped_image = image[new_y1:new_y2 + 1, new_x1:new_x2 + 1]
                resized_image = cv2.resize(cropped_image, (IMG_SIZE, IMG_SIZE),
                                           interpolation=cv2.INTER_LINEAR)
                transfered_landmark = np.zeros((6, 2))
                for k in range(6):
                    transfered_landmark[k][0] = (landmark[k * 2] -
                                                 new_x1) / new_size
                    transfered_landmark[k][1] = (landmark[k * 2 + 1] -
                                                 new_y1) / new_size
                '''Verify
                temp = cropped_image.copy()
                for k in range(6):
                    cv2.circle(temp, (int(transfered_landmark[k][0] * new_size), int(transfered_landmark[k][1] * new_size)), 2, (0, 255, 0), 1)
                cv2.imshow('t', temp)
                cv2.waitKey()
                cv2.destroyAllWindows() # Verified
                del temp
                '''
                saving_name = str(idx) + '.jpg'
                saving_path = os.path.join(landmark_save_dir, saving_name)
                success = cv2.imwrite(saving_path, resized_image)
                if not success:
                    raise Exception("Landmark picture " + str(idx) +
                                    " saving failed. ")
                aug_list.append([
                    'landmark/' + saving_name, -2,
                    np.array([-1, -1, -1, -1]),
                    np.squeeze(np.array(transfered_landmark.reshape(1, -1)))
                ])
                idx = idx + 1

                # Mirror
                # if random.choice([0,1]) == 1:
                flipped_image, flipped_landmark = flip(cropped_image,
                                                       transfered_landmark)
                '''Verify
                temp = flipped_image.copy()
                for k in range(6):
                    cv2.circle(temp, (int(flipped_landmark[k][0] * new_size), int(flipped_landmark[k][1] * new_size)), 2, (0, 255, 0), 1)
                    cv2.imshow('t', temp)
                    cv2.waitKey()
                    cv2.destroyAllWindows() # Verified
                del temp
                '''
                resized_image = cv2.resize(flipped_image, (IMG_SIZE, IMG_SIZE),
                                           interpolation=cv2.INTER_LINEAR)
                saving_name = str(idx) + '.jpg'
                saving_path = os.path.join(landmark_save_dir, saving_name)
                success = cv2.imwrite(saving_path, resized_image)
                if not success:
                    raise Exception("Landmark picture " + str(idx) +
                                    " saving failed. ")
                aug_list.append([
                    'landmark/' + saving_name, -2,
                    np.array([-1, -1, -1, -1]),
                    np.squeeze(np.array(flipped_landmark.reshape(1, -1)))
                ])
                idx = idx + 1

                # Anti-Clockwise Rotate
                # if random.choice([0,1]) == 1:
                # a. Anti-clockwise rotate
                theta = np.random.randint(5, 15)
                rotated_face, rotated_landmark = rotate(
                    image, crop_box, landmark, theta
                )  # rotated_landmark here has not been transfered yet!
                resized_rotated_face = cv2.resize(
                    rotated_face, (IMG_SIZE, IMG_SIZE),
                    interpolation=cv2.INTER_LINEAR)
                transfered_rotated_landmark = np.zeros((6, 2))
                for k in range(6):
                    transfered_rotated_landmark[k][0] = (
                        rotated_landmark[k][0] - new_x1) / new_size
                    transfered_rotated_landmark[k][1] = (
                        rotated_landmark[k][1] - new_y1) / new_size
                '''Verify
                temp = rotated_face.copy()
                for k in range(6):
                    cv2.circle(temp, (int(transfered_rotated_landmark[k][0] * new_size), int(transfered_rotated_landmark[k][1] * new_size)), 2, (0, 255, 0), 1)
                    cv2.imshow('t', temp)
                    cv2.waitKey()
                    cv2.destroyAllWindows() # Verified
                del temp
                '''
                saving_name = str(idx) + '.jpg'
                saving_path = os.path.join(landmark_save_dir, saving_name)
                success = cv2.imwrite(saving_path, resized_rotated_face)
                if not success:
                    raise Exception("Landmark picture " + str(idx) +
                                    " saving failed. ")
                aug_list.append([
                    'landmark/' + saving_name, -2,
                    np.array([-1, -1, -1, -1]),
                    np.squeeze(
                        np.array(transfered_rotated_landmark.reshape(1, -1)))
                ])
                idx = idx + 1

                # b. Anti-clockwise rotate & mirror
                flipped_rotated_face, flipped_transfered_rotated_landmark = flip(
                    rotated_face, transfered_rotated_landmark)
                '''Verify
                temp = flipped_rotated_face.copy()
                for k in range(6):
                    cv2.circle(temp, (int(flipped_transfered_rotated_landmark[k][0] * new_size), int(flipped_transfered_rotated_landmark[k][1] * new_size)), 2, (0, 255, 0), 1)
                    cv2.imshow('t', temp)
                    cv2.waitKey()
                    cv2.destroyAllWindows() # Verified
                del temp
                '''
                resized_flipped_rotated_face = cv2.resize(
                    flipped_rotated_face, (IMG_SIZE, IMG_SIZE),
                    interpolation=cv2.INTER_LINEAR)
                saving_name = str(idx) + '.jpg'
                saving_path = os.path.join(landmark_save_dir, saving_name)
                success = cv2.imwrite(saving_path,
                                      resized_flipped_rotated_face)
                if not success:
                    raise Exception("Landmark picture " + str(idx) +
                                    " saving failed. ")
                aug_list.append([
                    'landmark/' + saving_name, -2,
                    np.array([-1, -1, -1, -1]),
                    np.squeeze(
                        np.array(
                            flipped_transfered_rotated_landmark.reshape(1,
                                                                        -1)))
                ])
                idx = idx + 1

                # Clockwise Rotate
                # if random.choice([0,1]) == 1:
                # a. Clockwise rotate
                theta = np.random.randint(5, 15)
                rotated_face, rotated_landmark = rotate(
                    image, crop_box, landmark, -theta
                )  # rotated_landmark here has not been transfered yet!
                resized_rotated_face = cv2.resize(
                    rotated_face, (IMG_SIZE, IMG_SIZE),
                    interpolation=cv2.INTER_LINEAR)
                transfered_rotated_landmark = np.zeros((6, 2))
                for k in range(6):
                    transfered_rotated_landmark[k][0] = (
                        rotated_landmark[k][0] - new_x1) / new_size
                    transfered_rotated_landmark[k][1] = (
                        rotated_landmark[k][1] - new_y1) / new_size
                '''Verify
                temp = rotated_face.copy()
                for k in range(6):
                    cv2.circle(temp, (int(transfered_rotated_landmark[k][0] * new_size), int(transfered_rotated_landmark[k][1] * new_size)), 2, (0, 255, 0), 1)
                    cv2.imshow('t', temp)
                    cv2.waitKey()
                    cv2.destroyAllWindows() # Verified
                del temp
                '''
                saving_name = str(idx) + '.jpg'
                saving_path = os.path.join(landmark_save_dir, saving_name)
                success = cv2.imwrite(saving_path, resized_rotated_face)
                if not success:
                    raise Exception("Landmark picture " + str(idx) +
                                    " saving failed. ")
                aug_list.append([
                    'landmark/' + saving_name, -2,
                    np.array([-1, -1, -1, -1]),
                    np.squeeze(
                        np.array(transfered_rotated_landmark.reshape(1, -1)))
                ])
                idx = idx + 1

                # b. Clockwise rotate & mirror
                flipped_rotated_face, flipped_transfered_rotated_landmark = flip(
                    rotated_face, transfered_rotated_landmark)
                '''Verify
                temp = flipped_rotated_face.copy()
                for k in range(6):
                    cv2.circle(temp, (int(flipped_transfered_rotated_landmark[k][0] * new_size), int(flipped_transfered_rotated_landmark[k][1] * new_size)), 2, (0, 255, 0), 1)
                    cv2.imshow('t', temp)
                    cv2.waitKey()
                    cv2.destroyAllWindows() # Verified
                del temp
                '''
                resized_flipped_rotated_face = cv2.resize(
                    flipped_rotated_face, (IMG_SIZE, IMG_SIZE),
                    interpolation=cv2.INTER_LINEAR)
                saving_name = str(idx) + '.jpg'
                saving_path = os.path.join(landmark_save_dir, saving_name)
                success = cv2.imwrite(saving_path,
                                      resized_flipped_rotated_face)
                if not success:
                    raise Exception("Landmark picture " + str(idx) +
                                    " saving failed. ")
                aug_list.append([
                    'landmark/' + saving_name, -2,
                    np.array([-1, -1, -1, -1]),
                    np.squeeze(
                        np.array(
                            flipped_transfered_rotated_landmark.reshape(1,
                                                                        -1)))
                ])
                idx = idx + 1

    # Save the augmentation list
    file = open(record_save_dir, 'wb+')
    pkl.dump(aug_list, file)
    file.close()

    print("Processing Finished. ")
Example #22
def get_train_test_loader(Data_Band_Scaler, GroundTruth, class_num,
                          shot_num_per_class):
    print(Data_Band_Scaler.shape)  # (610, 340, 103)
    [nRow, nColumn, nBand] = Data_Band_Scaler.shape
    '''label start'''
    num_class = int(np.max(GroundTruth))
    data_band_scaler = utils.flip(Data_Band_Scaler)
    groundtruth = utils.flip(GroundTruth)
    del Data_Band_Scaler
    del GroundTruth

    HalfWidth = 4
    G = groundtruth[nRow - HalfWidth:2 * nRow + HalfWidth,
                    nColumn - HalfWidth:2 * nColumn + HalfWidth]
    data = data_band_scaler[nRow - HalfWidth:2 * nRow + HalfWidth,
                            nColumn - HalfWidth:2 * nColumn + HalfWidth, :]

    [Row, Column] = np.nonzero(G)  # (10249,) (10249,)
    # print(Row)
    del data_band_scaler
    del groundtruth

    nSample = np.size(Row)
    print('number of sample', nSample)

    # Sampling samples
    train = {}
    test = {}
    da_train = {}  # Data Augmentation
    m = int(np.max(G))  # 9
    nlabeled = TEST_LSAMPLE_NUM_PER_CLASS
    print('labeled number per class:', nlabeled)
    print((200 - nlabeled) / nlabeled + 1)
    print(math.ceil((200 - nlabeled) / nlabeled) + 1)

    for i in range(m):
        indices = [
            j for j, x in enumerate(Row.ravel().tolist())
            if G[Row[j], Column[j]] == i + 1
        ]
        np.random.shuffle(indices)
        nb_val = shot_num_per_class
        train[i] = indices[:nb_val]
        da_train[i] = []
        for j in range(math.ceil((200 - nlabeled) / nlabeled) + 1):
            da_train[i] += indices[:nb_val]
        test[i] = indices[nb_val:]

    train_indices = []
    test_indices = []
    da_train_indices = []
    for i in range(m):
        train_indices += train[i]
        test_indices += test[i]
        da_train_indices += da_train[i]
    np.random.shuffle(test_indices)

    print('the number of train_indices:', len(train_indices))  # 520
    print('the number of test_indices:', len(test_indices))  # 9729
    print('the number of train_indices after data augmentation:',
          len(da_train_indices))  # 520
    print('labeled sample indices:', train_indices)

    nTrain = len(train_indices)
    nTest = len(test_indices)
    da_nTrain = len(da_train_indices)

    imdb = {}
    imdb['data'] = np.zeros(
        [2 * HalfWidth + 1, 2 * HalfWidth + 1, nBand, nTrain + nTest],
        dtype=np.float32)  # (9,9,100,n)
    imdb['Labels'] = np.zeros([nTrain + nTest], dtype=np.int64)
    imdb['set'] = np.zeros([nTrain + nTest], dtype=np.int64)

    RandPerm = train_indices + test_indices

    RandPerm = np.array(RandPerm)

    for iSample in range(nTrain + nTest):
        imdb['data'][:, :, :,
                     iSample] = data[Row[RandPerm[iSample]] -
                                     HalfWidth:Row[RandPerm[iSample]] +
                                     HalfWidth + 1, Column[RandPerm[iSample]] -
                                     HalfWidth:Column[RandPerm[iSample]] +
                                     HalfWidth + 1, :]
        imdb['Labels'][iSample] = G[Row[RandPerm[iSample]],
                                    Column[RandPerm[iSample]]].astype(np.int64)

    imdb['Labels'] = imdb['Labels'] - 1  # 1-16 0-15
    imdb['set'] = np.hstack(
        (np.ones([nTrain]), 3 * np.ones([nTest]))).astype(np.int64)
    print('Data is OK.')

    train_dataset = utils.matcifar(imdb, train=True, d=3, medicinal=0)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=class_num *
                                               shot_num_per_class,
                                               shuffle=False,
                                               num_workers=0)
    del train_dataset

    test_dataset = utils.matcifar(imdb, train=False, d=3, medicinal=0)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=100,
                                              shuffle=False,
                                              num_workers=0)
    del test_dataset
    del imdb

    # Data Augmentation for target domain for training
    imdb_da_train = {}
    imdb_da_train['data'] = np.zeros(
        [2 * HalfWidth + 1, 2 * HalfWidth + 1, nBand, da_nTrain],
        dtype=np.float32)  # (9,9,100,n)
    imdb_da_train['Labels'] = np.zeros([da_nTrain], dtype=np.int64)
    imdb_da_train['set'] = np.zeros([da_nTrain], dtype=np.int64)

    da_RandPerm = np.array(da_train_indices)
    for iSample in range(da_nTrain):  # radiation_noise,flip_augmentation
        imdb_da_train['data'][:, :, :, iSample] = utils.radiation_noise(
            data[Row[da_RandPerm[iSample]] -
                 HalfWidth:Row[da_RandPerm[iSample]] + HalfWidth + 1,
                 Column[da_RandPerm[iSample]] -
                 HalfWidth:Column[da_RandPerm[iSample]] + HalfWidth + 1, :])
        imdb_da_train['Labels'][iSample] = G[
            Row[da_RandPerm[iSample]],
            Column[da_RandPerm[iSample]]].astype(np.int64)

    imdb_da_train['Labels'] = imdb_da_train['Labels'] - 1  # 1-16 0-15
    imdb_da_train['set'] = np.ones([da_nTrain]).astype(np.int64)
    print('ok')

    return train_loader, test_loader, imdb_da_train, G, RandPerm, Row, Column, nTrain
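In Example #22, `utils.flip` cannot be a plain mirror: the slice `[nRow - HalfWidth:2 * nRow + HalfWidth, ...]` only makes sense if the array has been extended to three times its height and width with the original data in the centre block, so that patches centred near the border can still be cut out. A sketch of that mirror-tiling scheme, inferred from the indexing (an assumption about the actual utils implementation):

import numpy as np

def flip(data):
    # Extend `data` to 3x its height and width by mirror reflection;
    # the original array ends up in the centre block.
    row = np.concatenate((data[:, ::-1, ...], data, data[:, ::-1, ...]), axis=1)
    return np.concatenate((row[::-1, ...], row, row[::-1, ...]), axis=0)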
Example #23
def process_data(data, size, augmentation):
    face_images = []
    face_landmarks = []
    idx = 0
    for d in data:
        image_file = d['image_file']
        bbox = d['bbox']
        points = d['landmark']

        img = cv2.imread(image_file)
        im_h, im_w, _ = img.shape
        x1, y1, x2, y2 = bbox
        face_roi = img[y1:y2, x1:x2]
        face_roi = resize(face_roi, size)

        # normalize the landmark coordinates
        landmark = normalize_landmark(points, x1, x2, y1, y2)
        face_images.append(face_roi)
        face_landmarks.append(np.array(landmark).reshape(10))

        # data augmentation
        if augmentation:
            idx = idx + 1
            
            # gt's width
            bbox_w = x2 - x1 + 1
            # gt's height
            bbox_h = y2 - y1 + 1
            if max(bbox_w, bbox_h) < 40 or x1 < 0 or y1 < 0:
                continue
            # random shift
            for i in range(10):
                bbox_size, nx1, nx2, ny1, ny2 = new_bbox(bbox_h, bbox_w, x1, y1)
                if nx2 > im_w or ny2 > im_h:
                    continue

                crop_box = np.array([nx1, ny1, nx2, ny2])
                cropped_im = img[ny1:ny2 + 1, nx1:nx2 + 1, :]

                _iou = iou(crop_box, np.expand_dims(bbox, 0))

                if _iou > 0.65:
                    resize_im = resize(cropped_im, size)
                    face_images.append(resize_im)
                    # normalize
                    landmark = normalize_landmark2(bbox_size, points, nx1, ny1)
                    face_landmarks.append(landmark.reshape(10))

                    landmark_ = landmark.reshape(-1, 2)

                    nbbox = nx1, ny1, nx2, ny2

                    # mirror
                    if random.choice([0, 1]) > 0:
                        face_flipped, landmark_flipped = flip(resize_im, landmark_)
                        face_flipped = resize(face_flipped, size)
                        # c*h*w
                        face_images.append(face_flipped)
                        face_landmarks.append(landmark_flipped.reshape(10))
                    # rotate
                    if random.choice([0, 1]) > 0:
                        _landmark = reproject_landmark(nbbox, landmark_)
                        face_rotated_by_alpha, landmark_rotated = rotate(img, nbbox, _landmark, 5)
                        # landmark_offset
                        landmark_rotated = project_landmark(nbbox, landmark_rotated)

                        face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size, size))
                        face_images.append(face_rotated_by_alpha)
                        face_landmarks.append(landmark_rotated.reshape(10))

                        # flip
                        face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                        face_flipped = cv2.resize(face_flipped, (size, size))
                        face_images.append(face_flipped)
                        face_landmarks.append(landmark_flipped.reshape(10))

                    # clockwise rotation (negative angle)
                    if random.choice([0, 1]) > 0:
                        _landmark = reproject_landmark(nbbox, landmark_)
                        face_rotated_by_alpha, landmark_rotated = rotate(img, nbbox, _landmark, -5)  # rotate clockwise

                        landmark_rotated = project_landmark(nbbox, landmark_rotated)
                        face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size, size))
                        face_images.append(face_rotated_by_alpha)
                        face_landmarks.append(landmark_rotated.reshape(10))

                        face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                        face_flipped = cv2.resize(face_flipped, (size, size))
                        face_images.append(face_flipped)
                        face_landmarks.append(landmark_flipped.reshape(10))

    return face_images, face_landmarks
Example #24
def generateSampleBox(img_path,
                      bndbox,
                      part,
                      nJoints,
                      imgset,
                      scale_factor,
                      dataset,
                      train=True):

    nJoints_coco = 17
    nJoints_mpii = 16
    img = load_image(img_path)
    if train:
        img[0].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        img[1].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        img[2].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)

    ori_img = img.clone()
    img[0].add_(-0.406)
    img[1].add_(-0.457)
    img[2].add_(-0.480)

    upLeft = torch.Tensor((int(bndbox[0][0]), int(bndbox[0][1])))
    bottomRight = torch.Tensor((int(bndbox[0][2]), int(bndbox[0][3])))
    ht = bottomRight[1] - upLeft[1]
    width = bottomRight[0] - upLeft[0]
    imght = img.shape[1]
    imgwidth = img.shape[2]
    scaleRate = random.uniform(*scale_factor)

    upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
    upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
    bottomRight[0] = min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2)
    bottomRight[1] = min(imght - 1, bottomRight[1] + ht * scaleRate / 2)

    # Doing Random Sample
    if opt.addDPG:
        PatchScale = random.uniform(0, 1)
        if PatchScale > 0.85:
            ratio = ht / width
            if width < ht:
                patchWidth = PatchScale * width
                patchHt = patchWidth * ratio
            else:
                patchHt = PatchScale * ht
                patchWidth = patchHt / ratio

            xmin = upLeft[0] + random.uniform(0, 1) * (width - patchWidth)
            ymin = upLeft[1] + random.uniform(0, 1) * (ht - patchHt)

            xmax = xmin + patchWidth + 1
            ymax = ymin + patchHt + 1
        else:
            xmin = max(
                1,
                min(upLeft[0] + np.random.normal(-0.0142, 0.1158) * width,
                    imgwidth - 3))
            ymin = max(
                1,
                min(upLeft[1] + np.random.normal(0.0043, 0.068) * ht,
                    imght - 3))
            xmax = min(
                max(xmin + 2,
                    bottomRight[0] + np.random.normal(0.0154, 0.1337) * width),
                imgwidth - 3)
            ymax = min(
                max(ymin + 2,
                    bottomRight[1] + np.random.normal(-0.0013, 0.0711) * ht),
                imght - 3)

        upLeft[0] = xmin
        upLeft[1] = ymin
        bottomRight[0] = xmax
        bottomRight[1] = ymax

    # Counting Joints number
    jointNum = 0
    if imgset == 'coco':
        for i in range(17):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
                    and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                jointNum += 1
    else:
        for i in range(16):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
                    and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                jointNum += 1

    # Doing Random Crop
    if opt.addDPG:
        if jointNum > 13 and train:
            switch = random.uniform(0, 1)
            if switch > 0.96:
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.92:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.88:
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.84:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.80:
                bottomRight[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.76:
                upLeft[0] = (upLeft[0] + bottomRight[0]) / 2
            elif switch > 0.72:
                bottomRight[1] = (upLeft[1] + bottomRight[1]) / 2
            elif switch > 0.68:
                upLeft[1] = (upLeft[1] + bottomRight[1]) / 2

    ori_inp = cropBox(ori_img, upLeft, bottomRight, opt.inputResH,
                      opt.inputResW)
    inp = cropBox(img, upLeft, bottomRight, opt.inputResH, opt.inputResW)
    if jointNum == 0:
        inp = torch.zeros(3, opt.inputResH, opt.inputResW)

    out_bigcircle = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    out_smallcircle = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    out = torch.zeros(nJoints, opt.outputResH, opt.outputResW)
    setMask = torch.zeros(nJoints, opt.outputResH, opt.outputResW)

    # Draw Label
    if imgset == 'coco':
        for i in range(nJoints_coco):
            if part[i][0] > 0 and part[i][0] > upLeft[0] and part[i][1] > upLeft[1] \
               and part[i][0] < bottomRight[0] and part[i][1] < bottomRight[1]:
                out_bigcircle[i] = drawBigCircle(
                    out_bigcircle[i],
                    transformBox(part[i], upLeft, bottomRight, opt.inputResH,
                                 opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss * 2)
                out_smallcircle[i] = drawSmallCircle(
                    out_smallcircle[i],
                    transformBox(part[i], upLeft, bottomRight, opt.inputResH,
                                 opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss)
                out[i] = drawGaussian(
                    out[i],
                    transformBox(part[i], upLeft, bottomRight, opt.inputResH,
                                 opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss)
            setMask[i].add_(1)
    elif imgset == 'mpii':
        for i in range(nJoints_coco, nJoints_coco + nJoints_mpii):
            if part[i - nJoints_coco][0] > 0 and part[i - nJoints_coco][0] > upLeft[0] and part[i - nJoints_coco][1] > upLeft[1] \
               and part[i - nJoints_coco][0] < bottomRight[0] and part[i - nJoints_coco][1] < bottomRight[1]:
                out_bigcircle[i] = drawBigCircle(
                    out_bigcircle[i],
                    transformBox(part[i - nJoints_coco], upLeft, bottomRight,
                                 opt.inputResH, opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss * 2)
                out_smallcircle[i] = drawSmallCircle(
                    out_smallcircle[i],
                    transformBox(part[i - nJoints_coco], upLeft, bottomRight,
                                 opt.inputResH, opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss)
                out[i] = drawGaussian(
                    out[i],
                    transformBox(part[i - nJoints_coco], upLeft, bottomRight,
                                 opt.inputResH, opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss)
            setMask[i].add_(1)
    else:
        for i in range(nJoints_coco, nJoints_coco + nJoints_mpii):
            if part[i - nJoints_coco][0] > 0 and part[i - nJoints_coco][0] > upLeft[0] and part[i - nJoints_coco][1] > upLeft[1] \
               and part[i - nJoints_coco][0] < bottomRight[0] and part[i - nJoints_coco][1] < bottomRight[1]:
                out_bigcircle[i] = drawBigCircle(
                    out_bigcircle[i],
                    transformBox(part[i - nJoints_coco], upLeft, bottomRight,
                                 opt.inputResH, opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss * 2)
                out_smallcircle[i] = drawSmallCircle(
                    out_smallcircle[i],
                    transformBox(part[i - nJoints_coco], upLeft, bottomRight,
                                 opt.inputResH, opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss)
                out[i] = drawGaussian(
                    out[i],
                    transformBox(part[i - nJoints_coco], upLeft, bottomRight,
                                 opt.inputResH, opt.inputResW, opt.outputResH,
                                 opt.outputResW), opt.hmGauss)
            if i != 6 + nJoints_coco and i != 7 + nJoints_coco:
                setMask[i].add_(1)

    if opt.debug:
        preds_hm, preds_img, preds_scores = getPrediction(
            out.unsqueeze(0), upLeft.unsqueeze(0), bottomRight.unsqueeze(0),
            opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
        tmp_preds = preds_hm.mul(opt.inputResH / opt.outputResH)
        drawCOCO(ori_inp.unsqueeze(0), tmp_preds, preds_scores)

    if train:
        # Flip
        if random.uniform(0, 1) < 0.5:
            inp = flip(inp)
            ori_inp = flip(ori_inp)
            out_bigcircle = shuffleLR(flip(out_bigcircle), dataset)
            out_smallcircle = shuffleLR(flip(out_smallcircle), dataset)
            out = shuffleLR(flip(out), dataset)
        # Rotate
        r = rnd(opt.rotate)
        if random.uniform(0, 1) < 0.6:
            r = 0
        if r != 0:
            inp = cv_rotate(inp, r, opt.inputResW, opt.inputResH)
            out_bigcircle = cv_rotate(out_bigcircle, r, opt.outputResW,
                                      opt.outputResH)
            out_smallcircle = cv_rotate(out_smallcircle, r, opt.outputResW,
                                        opt.outputResH)
            out = cv_rotate(out, r, opt.outputResW, opt.outputResH)

    return inp, out_bigcircle, out_smallcircle, out, setMask
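
In the flip branch above, shuffleLR must swap left/right joint channels after the heatmaps are mirrored, otherwise the "left" heatmaps would end up labeling right-side joints. A minimal sketch of that pair of helpers, assuming PyTorch tensors and a hypothetical FLIP_PAIRS list of matched joint indices (the real pairing comes from the dataset definition):

import torch

# Hypothetical left/right joint index pairs; the actual list depends on the dataset.
FLIP_PAIRS = [(1, 2), (3, 4), (5, 6)]

def flip(x):
    # Mirror an image (C, H, W) or heatmap (J, H, W) tensor along its width.
    return torch.flip(x, dims=[x.dim() - 1])

def shuffleLR(hm, dataset=None, pairs=FLIP_PAIRS):
    # After a horizontal flip, swap each left/right channel pair so that,
    # e.g., the "left shoulder" heatmap still marks the left shoulder.
    hm = hm.clone()
    for a, b in pairs:
        hm[[a, b]] = hm[[b, a]]
    return hm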
Example #25
0
    def generate_dataset(self, mode='train'):
        assert mode in ['train', 'test']
        try:
            if mode == 'train':
                os.mkdir(self.train_path)
                os.mkdir(os.path.join(self.train_path, 'images'))
            else:
                os.mkdir(self.test_path)
                os.mkdir(os.path.join(self.test_path, 'images'))
            print(f'created data/{mode} folder')
        except FileExistsError:
            print(
                f"data/{mode} folder already exists; delete it to generate a new dataset"
            )
            return

        lines = self.train_lines if mode == 'train' else self.test_lines
        save_path = self.train_path if mode == 'train' else self.test_path

        # annotation strings for the whole train/test dataset
        all_annotations_str = []

        k = 0
        for annotations_line in lines:
            # read annotations
            annotations = self.read_annotations(annotations_line)
            image_full_path = annotations['path']
            image = self.read_image(image_full_path)
            rect = annotations['rect']
            landmarks = annotations['landmarks']
            attributes = annotations['attributes']

            # ============= Data Augmentation =================
            all_images = []
            all_landmarks = []

            if mode == 'test':
                image, landmarks, skip = self.crop_face_landmarks(
                    image, landmarks, False)
                if skip:
                    continue
                all_images = [image]
                all_landmarks = [landmarks]
            else:
                for i in range(self.n_augumentation):
                    angle = np.random.randint(-30, 30)

                    augument_image, augument_landmarks = self.rotate(
                        np.copy(image), np.copy(landmarks), angle)
                    augument_image, augument_landmarks, skip = self.crop_face_landmarks(
                        augument_image, augument_landmarks)
                    if skip:
                        continue

                    if np.random.choice((True, False)):
                        augument_image, augument_landmarks = flip(
                            augument_image, augument_landmarks)

                    # # visualize
                    # img = np.copy(augument_image)
                    # for point in augument_landmarks:
                    #     point = (int(point[0]), int(point[1]))
                    #     cv2.circle(img, point, 1, (0,255,0), -1)
                    # # img = cv2.resize(img, (300,300))
                    # cv2.imshow("image", img)
                    # if cv2.waitKey(0) == 27:
                    #     exit(0)
                    # print("*"*80)

                    all_images.append(augument_image)
                    all_landmarks.append(augument_landmarks)

            # for every augmented image
            for i, img in enumerate(all_images):
                img = all_images[i]
                landmark = all_landmarks[i] / 112

                # generate euler angles from landmarks
                _, _, euler_angles = self.euler_estimator.eular_angles_from_landmarks(
                    landmark)
                euler_str = ' '.join(
                    [str(round(angle, 2)) for angle in euler_angles])

                # get image name
                new_image_path = self.save_image(
                    img, image_full_path, k, i,
                    save_path)  # id should be unique for every img

                # convert landmarks to string
                landmarks_list = landmark.reshape(196, ).tolist()
                landmarks_str = ' '.join([str(l) for l in landmarks_list])

                # attributes list to string
                attributes_str = ' '.join(
                    [str(attribute) for attribute in attributes])

                # annotation string = image_name + 98 landmarks + attributes + euler
                new_annotation = ' '.join(
                    [new_image_path, landmarks_str, attributes_str, euler_str])
                all_annotations_str.append(new_annotation)
                # print(new_annotation)

            k += 1
            if k % 100 == 0:
                print(f'{mode} dataset: {k} generated data')

        # ========= Save annotations ===============
        one_annotation_str = '\n'.join(
            [annotation for annotation in all_annotations_str])
        annotations_path = os.path.join(save_path, 'annotations.txt')
        annotations_file = open(annotations_path, 'w')
        annotations_file.write(one_annotation_str)
        annotations_file.close()
        print(
            '*' * 60,
            f'\n\t {mode} annotations are saved @ data/{mode}/annotations.txt')
        time.sleep(2)
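
The flip(augument_image, augument_landmarks) call in the augmentation loop above mirrors the crop and its landmark points together. A minimal sketch of such a helper, assuming absolute pixel coordinates of shape (N, 2); a full WFLW-style implementation would also swap the left/right point indices:

import cv2
import numpy as np

def flip(image, landmarks):
    # Mirror the image horizontally and remap the landmark x-coordinates.
    flipped = cv2.flip(image, 1)
    mirrored = np.asarray(landmarks, dtype=np.float32).copy()
    mirrored[:, 0] = image.shape[1] - 1 - mirrored[:, 0]
    return flipped, mirrored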
Example #26
0
def generate_hdf5(ftxt, output, fname, argument=False):

    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []
    EN_imgs = []
    EN_landmarks = []
    NM_imgs = []
    NM_landmarks = []

    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)  # read the image as grayscale
        assert (img is not None)
        logger("process %s" % imgPath)
        # F
        f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
        f_face = img[f_bbox.top:f_bbox.bottom + 1,
                     f_bbox.left:f_bbox.right + 1]  # crop the face region

        ## data argument
        if argument and np.random.rand() > -1:
            ### flip
            face_flipped, landmark_flipped = flip(
                f_face, landmarkGt)  # flip image and landmarks together for augmentation
            face_flipped = cv2.resize(face_flipped, (39, 39))  # resize to the fixed input size
            # Caffe expects input shaped ['batch_size', 'channel', 'height', 'width']
            F_imgs.append(face_flipped.reshape((1, 39, 39)))
            F_landmarks.append(landmark_flipped.reshape(10))  # flatten the 10 landmark values into a 1-D vector
            ### rotation
            # if np.random.rand() > 0.5:
            #     face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox,
            #         bbox.reprojectLandmark(landmarkGt), 5)
            #     landmark_rotated = bbox.projectLandmark(landmark_rotated)
            #     face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
            #     F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
            #     F_landmarks.append(landmark_rotated.reshape(10))
            #     ### flip with rotation
            #     face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
            #     face_flipped = cv2.resize(face_flipped, (39, 39))
            #     F_imgs.append(face_flipped.reshape((1, 39, 39)))
            #     F_landmarks.append(landmark_flipped.reshape(10))
            # ### rotation
            # if np.random.rand() > 0.5:
            #     face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox,
            #         bbox.reprojectLandmark(landmarkGt), -5)
            #     landmark_rotated = bbox.projectLandmark(landmark_rotated)
            #     face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (39, 39))
            #     F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
            #     F_landmarks.append(landmark_rotated.reshape(10))
            #     ### flip with rotation
            #     face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
            #     face_flipped = cv2.resize(face_flipped, (39, 39))
            #     F_imgs.append(face_flipped.reshape((1, 39, 39)))
            #     F_landmarks.append(landmark_flipped.reshape(10))

        f_face = cv2.resize(f_face, (39, 39))
        en_face = f_face[:31, :]
        nm_face = f_face[8:, :]

        f_face = f_face.reshape((1, 39, 39))
        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

        # EN
        # en_bbox = bbox.subBBox(-0.05, 1.05, -0.04, 0.84)
        # en_face = img[en_bbox.top:en_bbox.bottom+1,en_bbox.left:en_bbox.right+1]

        ## data argument
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(en_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape(
                (1, 31, 39))
            landmark_flipped = landmark_flipped[:3, :].reshape((6))
            EN_imgs.append(face_flipped)
            EN_landmarks.append(landmark_flipped)

        en_face = cv2.resize(en_face, (31, 39)).reshape((1, 31, 39))
        en_landmark = landmarkGt[:3, :].reshape((6))
        EN_imgs.append(en_face)
        EN_landmarks.append(en_landmark)

        # NM
        # nm_bbox = bbox.subBBox(-0.05, 1.05, 0.18, 1.05)
        # nm_face = img[nm_bbox.top:nm_bbox.bottom+1,nm_bbox.left:nm_bbox.right+1]

        ## data argument
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(nm_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape(
                (1, 31, 39))
            landmark_flipped = landmark_flipped[2:, :].reshape((6))
            NM_imgs.append(face_flipped)
            NM_landmarks.append(landmark_flipped)

        nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 31, 39))
        nm_landmark = landmarkGt[2:, :].reshape((6))
        NM_imgs.append(nm_face)
        NM_landmarks.append(nm_landmark)

    #imgs, landmarks = process_images(ftxt, output)

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    EN_imgs, EN_landmarks = np.asarray(EN_imgs), np.asarray(EN_landmarks)
    NM_imgs, NM_landmarks = np.asarray(NM_imgs), np.asarray(NM_landmarks)

    F_imgs = processImage(F_imgs)  # normalize the image data
    shuffle_in_unison_scary(F_imgs, F_landmarks)  # shuffle images and landmarks with the same permutation
    EN_imgs = processImage(EN_imgs)
    shuffle_in_unison_scary(EN_imgs, EN_landmarks)
    NM_imgs = processImage(NM_imgs)
    shuffle_in_unison_scary(NM_imgs, NM_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)  #D:.\deep_landmark\dataset\train\1_F\train.h5
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)

    # eye and nose
    base = join(OUTPUT, '1_EN')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = EN_imgs.astype(np.float32)
        h5['landmark'] = EN_landmarks.astype(np.float32)

    # nose and mouth
    base = join(OUTPUT, '1_NM')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = NM_imgs.astype(np.float32)
        h5['landmark'] = NM_landmarks.astype(np.float32)
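
shuffle_in_unison_scary, used after normalization above, shuffles the image and landmark arrays with one shared permutation so pairs stay aligned. A minimal in-place sketch, assuming two arrays of equal length along the first axis:

import numpy as np

def shuffle_in_unison_scary(a, b):
    # Shuffle both arrays in place with the same random permutation,
    # so that a[i] and b[i] remain a matching pair after shuffling.
    state = np.random.get_state()
    np.random.shuffle(a)
    np.random.set_state(state)
    np.random.shuffle(b)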
Example #27
0
def fog_train(args,
              model,
              fog_graph,
              nodes,
              X_trains,
              y_trains,
              device,
              epoch,
              loss_fn,
              consensus,
              rounds,
              radius,
              d2d,
              factor=10,
              alpha_store={},
              prev_grad=0,
              shuffle_worker_data=False):
    # fog learning with model averaging
    if loss_fn == 'nll':
        loss_fn_ = F.nll_loss
    elif loss_fn == 'hinge':
        loss_fn_ = multiClassHingeLoss()

    log = []
    log_head = []
    if args.var_theta:
        if args.true_eps:
            log_head.append('est')
        log_head += ['div', 'true_grad']
        if args.dynamic_alpha:
            log_head += [
                'D', 'mu', args.delta_or_psi, 'eta', 'grad', 'omega', 'N', 'L',
                'phi'
            ]
        log_head += ['rounds', 'agg', 'rho', 'sig', 'cls_n']
        log_head.append('rounded')
    log.append(log_head)

    model.train()

    worker_data = {}
    worker_targets = {}
    worker_num_samples = {}
    worker_models = {}
    worker_optims = {}
    worker_losses = {}

    # send data, model to workers
    # setup optimizer for each worker
    if shuffle_worker_data:
        data = list(zip(X_trains, y_trains))
        shuffle(data)
        X_trains, y_trains = zip(*data)

    workers = [_ for _ in nodes.keys() if 'L0' in _]
    for w, x, y in zip(workers, X_trains, y_trains):
        worker_data[w] = x.send(nodes[w])
        worker_targets[w] = y.send(nodes[w])
        worker_num_samples[w] = x.shape[0]

    for w in workers:
        worker_models[w] = model.copy().send(nodes[w])
        node_model = worker_models[w].get()
        worker_optims[w] = optim.SGD(
            params=node_model.parameters(),
            lr=args.lr * np.exp(-0.01 * epoch) if args.nesterov else args.lr,
            weight_decay=args.decay if loss_fn == 'hinge' else 0,
        )
        data = worker_data[w].get()
        target = worker_targets[w].get()
        dataloader = get_dataloader(data, target, args.batch_size)

        for data, target in dataloader:
            data, target = data.to(device), target.to(device)
            worker_optims[w].zero_grad()
            output = node_model(data)
            loss = loss_fn_(output, target)
            loss.backward()
            worker_optims[w].step()
        worker_models[w] = node_model.send(nodes[w])
        worker_losses[w] = loss.item()

    num_rounds = []
    num_div = []
    var_radius = type(radius) == list
    for l in range(1, len(args.num_clusters) + 1):

        aggregators = [_ for _ in nodes.keys() if 'L{}'.format(l) in _]
        N = len(aggregators)
        cluster_rounds = []
        cluster_div = []
        for a in aggregators:
            agg_log = []

            worker_models[a] = model.copy().send(nodes[a])
            worker_num_samples[a] = 1
            children = fog_graph[a]

            for child in children:
                worker_models[child].move(nodes[a])

            if consensus == 'averaging' or flip(1 - d2d):
                model_sum = averaging_consensus(children, worker_models,
                                                worker_num_samples)
                worker_models[a].load_state_dict(model_sum)
            elif consensus == 'laplacian':
                num_nodes_in_cluster = len(children)
                V = consensus_matrix(
                    num_nodes_in_cluster,
                    radius if not var_radius else radius[l - 1], factor,
                    args.topology)
                eps = get_cluster_eps(children, worker_models,
                                      worker_num_samples, nodes, fog_graph)
                if args.true_eps:
                    est_eps = eps
                    agg_log.append(est_eps)
                    eps = get_true_cluster_eps(children, worker_models,
                                               worker_num_samples, nodes,
                                               fog_graph)
                agg_log.append(eps)
                cluster_div.append(eps)

                if args.var_theta:
                    Z = V - (1 / num_nodes_in_cluster)
                    eig, dump = np.linalg.eig(Z)
                    lamda = eig.max()
                    true_grad = estimate_true_gradient(prev_grad, args.omega)
                    agg_log.append(true_grad)
                    if args.dynamic_alpha:
                        if true_grad:
                            phi = sum(args.num_clusters)
                            L = len(args.num_clusters) + 1
                            num_params = get_num_params(model)
                            if args.delta_or_psi == 'delta':
                                alpha, alpha_log = get_alpha_closed_form(
                                    args, true_grad, phi, N, L, num_params, l)
                            elif args.delta_or_psi == 'psi':
                                alpha, alpha_log = get_alpha_using_psi(
                                    args, phi, N, L, num_params, l)
                            agg_log += alpha_log
                            rounds = estimate_rounds(alpha,
                                                     num_nodes_in_cluster, eps,
                                                     lamda)
                        else:
                            rounds = 50
                            agg_log += [''] * 9
                            alpha = 'N/A'
                    else:
                        alpha_store = get_alpha(num_nodes_in_cluster, eps, a,
                                                alpha_store, args.alpha,
                                                args.dynamic_alpha)
                        alpha = alpha_store[a]
                        rounds = estimate_rounds(alpha, num_nodes_in_cluster,
                                                 eps, lamda)
                    agg_log += [rounds, a, lamda, alpha, num_nodes_in_cluster]
                    try:
                        rounds = int(np.ceil(rounds))
                    except TypeError:
                        rounds = 50
                    if rounds > 50:
                        rounds = 50
                    elif rounds < 1:
                        rounds = 1
                    cluster_rounds.append(rounds)
                    agg_log.append(rounds)
                model_sum = laplacian_consensus(children, worker_models,
                                                worker_num_samples,
                                                V.to(device), rounds)
                agg_model = worker_models[a].get()
                agg_model.load_state_dict(model_sum)
                worker_models[a] = agg_model.send(nodes[a])
            else:
                raise ValueError('unknown consensus: {}'.format(consensus))
            log.append(agg_log)

        num_rounds.append(cluster_rounds)
        num_div.append(cluster_div)

    table = AsciiTable(log)
    print(table.table)
    assert len(aggregators) == 1
    master = get_model_weights(worker_models[aggregators[0]].get(),
                               1 / args.num_train)

    grad = model_gradient(model.state_dict(), master, args.lr)
    model.load_state_dict(master)

    if epoch % args.log_interval == 0:
        loss = np.array([_ for dump, _ in worker_losses.items()])
        print('Train Epoch: {}({}) \tLoss: {:.6f} +- {:.6f} \tGrad: {}'.format(
            epoch, len(dataloader), loss.mean(), loss.std(),
            dict(grad).values()))

    return grad, num_rounds, num_div, alpha_store
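
The flip(1 - d2d) call above is a probabilistic switch that decides whether a cluster falls back to plain model averaging. A minimal sketch of such a Bernoulli helper; with its default of 0.5 it would also match the no-argument utils.flip() and flip(epsilon) calls in the examples that follow:

import random

def flip(p=0.5):
    # Return True with probability p (a biased coin flip).
    return random.random() < p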
Example #28
0
    def split(self, rect):
        if rect.w - 1 >= MIN_BSP_SIZE * 1.5 or rect.h - 1 >= MIN_BSP_SIZE * 1.5:
            if utils.flip() and rect.w >= MIN_BSP_SIZE:
                x = libtcod.random_get_int(0, rect.x + MIN_BSP_SIZE,
                                           rect.x + rect.w - MIN_BSP_SIZE)
                w = (rect.x + rect.w) - x

                tries = 0
                while (x + w >= len(self.map)) and tries <= 3:
                    print(x, w, "horz out of bounds")
                    x = rect.x + 1
                    w = rect.w // 2 - 1
                    tries += 1

                baby = Rect(rect.w - w, rect.h, rect.x, rect.y)
                baby.bsp(rect)
                baby_2 = Rect(w, rect.h, x, rect.y)
                baby_2.bsp(rect)

                rect.babies.append(baby)
                rect.babies.append(baby_2)
            else:
                if rect.w < MIN_BSP_SIZE:
                    y = libtcod.random_get_int(0, rect.y + MIN_BSP_SIZE,
                                               rect.y + rect.h - MIN_BSP_SIZE)
                    h = (rect.y + rect.h) - y

                    tries = 0
                    while (y + h > len(self.map[0])) and tries <= 3:
                        print(y, h, "vert out of bounds")
                        y = rect.y + 1
                        h = rect.h // 2
                        tries += 1

                    baby = Rect(rect.w, rect.h - h, rect.x, rect.y)
                    baby.bsp(rect)
                    rect.babies.append(baby)

                else:
                    y = libtcod.random_get_int(0, rect.y + MIN_BSP_SIZE,
                                               rect.y + rect.h - MIN_BSP_SIZE)

                    h = (rect.y + rect.h) - y

                    tries = 0
                    while (y + h > len(self.map[0])) and tries <= 3:
                        print(y, h, "vert out of bounds")
                        y = rect.y + 1
                        h = rect.h // 2
                        tries += 1

                    baby = Rect(rect.w, rect.h - h, rect.x, rect.y)
                    baby.bsp(rect)
                    baby_2 = Rect(rect.w, h, rect.x, y)
                    baby_2.bsp(rect)

                    rect.babies.append(baby)
                    rect.babies.append(baby_2)

        else:
            rect.end = True

        for baby in rect.babies:
            if not baby.end:
                self.split(baby)
Example #29
0
# 
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.

import sys
sys.path.insert(0, './extras/')
sys.path.insert(0, './')

import apt
import utils
import numpy as np
import scipy.signal

from PIL import Image

matrix = apt.decode('wav/am_demod/sample.wav')


utils.plot_histogram(matrix,'Imagen completa')
utils.plot_image(matrix,'Imagen completa')

# Flip the image 180 degrees because the satellite was travelling from South to North
frameA = utils.flip(utils.get_frame(matrix,"A"))
frameB = utils.flip(utils.get_frame(matrix,"B"))

#
utils.plot_histogram(frameB,'Histograma Banda Infrarroja', save = True)
utils.plot_histogram(frameA,'Histograma Espectro visible', save = True)
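
utils.flip here turns the decoded frame upside down because the recorded pass ran from South to North. A minimal NumPy sketch; reversing both axes is equivalent to a 180-degree rotation:

import numpy as np

def flip(frame):
    # Rotate the image 180 degrees by reversing rows and columns.
    return np.flipud(np.fliplr(frame))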
Example #30
0
 def epsilon_linear_policy(self, epsilon, w, s):
     best = np.argmax([np.dot(w,self.phi(s,a)) for a in range(self.nactions)])
     if flip(epsilon):
         return pr.choice(self.actions)
     else:
         return best
Example #31
0
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X_scaled, 
                                                    Y, 
                                                    A,
                                                    test_size = 0.2,
                                                    random_state=0,
                                                    stratify=Y)

# Work around indexing bug
X_train = X_train.reset_index(drop=True)
A_train = A_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
A_test = A_test.reset_index(drop=True)
# A_test = A_test.map({ 0:"female", 1:"male"})

# flip labels across the different sensitive-attribute groups
Y_noised = flip(Y_train, A_train, error_rate=error_rate)
noise_matrix = generate_noise_matrix(Y_noised, Y_train)
est_error_rate = estimation(X_train.values, Y_noised, A_train.values, ngroups=2**args.ngroups)
print(f"True error rate is {error_rate}.\nEstimated error rate is {est_error_rate}.")

# Learning with Noisy Labels
lnl = LearningWithNoisyLabels(clf=LogisticRegression())
lnl.fit(X=X_train.values, s=Y_noised, noise_matrix=noise_matrix)
Y_lnlt = lnl.predict(X_train.values).astype(int)
lnl.fit(X=X_train.values, s=Y_noised)
Y_lnle = lnl.predict(X_train.values).astype(int)


def run_corrupt(fairness_constraints):
    all_results = {}
    all_results['eps'] = fairness_constraints
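
Here flip(Y_train, A_train, error_rate=error_rate) injects label noise whose rate may depend on the sensitive attribute. A hypothetical sketch of that kind of group-conditional flipping for binary labels (the real signature and semantics live in the surrounding project):

import numpy as np

def flip(y, groups, error_rate=0.1, seed=0):
    # Flip each binary label independently; error_rate may be a scalar
    # or a {group_value: rate} mapping for group-dependent noise.
    rng = np.random.RandomState(seed)
    y = np.asarray(y).copy()
    groups = np.asarray(groups)
    for g in np.unique(groups):
        rate = error_rate[g] if isinstance(error_rate, dict) else error_rate
        mask = (groups == g) & (rng.rand(len(y)) < rate)
        y[mask] = 1 - y[mask]
    return y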
Example #32
0
    for epoch in range(1, args.max_epoch + 1):
        lr_scheduler.step()

        model.train()

        tl = Averager()
        ta = Averager()

        for i, batch in enumerate(train_loader, 1):
            data, _ = [_.cuda() for _ in batch]
            p = args.shot * args.train_way
            qq = p + args.query * args.train_way
            data_shot, data_query = data[:p], data[p:qq]

            if args.shot == 1:
                data_shot = torch.cat((data_shot, flip(data_shot, 3)), dim=0)

            proto = model(data_shot)
            proto = proto.reshape(shot_num, args.train_way, -1)
            proto = torch.transpose(proto, 0, 1)
            hyperplanes, mu = projection_pro.create_subspace(proto, args.train_way, shot_num)

            label = torch.arange(args.train_way).repeat(args.query)
            label = label.type(torch.cuda.LongTensor)

            logits, disc = projection_pro.projection_metric(model(data_query), hyperplanes, mu=mu)
            loss = F.cross_entropy(logits, label) + 0.05*disc
            acc = count_acc(logits, label)


            tl.add(loss.item())
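
flip(data_shot, 3) above mirrors the support images along their width so a 1-shot episode gets a second, flipped copy of each support sample. A minimal sketch of a dimension-wise tensor flip, written with index_select as many few-shot repositories do:

import torch

def flip(x, dim):
    # Reverse tensor x along dimension dim; for an NCHW image batch,
    # dim=3 is a horizontal (left-right) flip.
    indices = torch.arange(x.size(dim) - 1, -1, -1,
                           device=x.device, dtype=torch.long)
    return x.index_select(dim, indices)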
Example #33
0
def generate_hdf5(ftxt, output, fname, argument=False):

    data = getDataFromTxt(ftxt)
    F_imgs = []
    F_landmarks = []
    EN_imgs = []
    EN_landmarks = []
    NM_imgs = []
    NM_landmarks = []

    for (imgPath, bbox, landmarkGt) in data:
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        assert (img is not None)
        logger("process %s" % imgPath)
        # F
        f_bbox = bbox.subBBox(-0.05, 1.05, -0.05, 1.05)
        f_face = img[f_bbox.top:f_bbox.bottom + 1,
                     f_bbox.left:f_bbox.right + 1]

        ## data argument
        if argument and np.random.rand() > -1:
            ### flip
            face_flipped, landmark_flipped = flip(f_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (39, 39))
            F_imgs.append(face_flipped.reshape((1, 39, 39)))
            F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), 5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha,
                                                   (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha,
                                                      landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))
            ### rotation
            if np.random.rand() > 0.5:
                face_rotated_by_alpha, landmark_rotated = rotate(img, f_bbox, \
                    bbox.reprojectLandmark(landmarkGt), -5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha,
                                                   (39, 39))
                F_imgs.append(face_rotated_by_alpha.reshape((1, 39, 39)))
                F_landmarks.append(landmark_rotated.reshape(10))
                ### flip with rotation
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha,
                                                      landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (39, 39))
                F_imgs.append(face_flipped.reshape((1, 39, 39)))
                F_landmarks.append(landmark_flipped.reshape(10))

        f_face = cv2.resize(f_face, (39, 39))
        en_face = f_face[:31, :]
        nm_face = f_face[8:, :]

        f_face = f_face.reshape((1, 39, 39))
        f_landmark = landmarkGt.reshape((10))
        F_imgs.append(f_face)
        F_landmarks.append(f_landmark)

        # EN
        # en_bbox = bbox.subBBox(-0.05, 1.05, -0.04, 0.84)
        # en_face = img[en_bbox.top:en_bbox.bottom+1,en_bbox.left:en_bbox.right+1]

        ## data argument
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(en_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape(
                (1, 31, 39))
            landmark_flipped = landmark_flipped[:3, :].reshape((6))
            EN_imgs.append(face_flipped)
            EN_landmarks.append(landmark_flipped)

        en_face = cv2.resize(en_face, (31, 39)).reshape((1, 31, 39))
        en_landmark = landmarkGt[:3, :].reshape((6))
        EN_imgs.append(en_face)
        EN_landmarks.append(en_landmark)

        # NM
        # nm_bbox = bbox.subBBox(-0.05, 1.05, 0.18, 1.05)
        # nm_face = img[nm_bbox.top:nm_bbox.bottom+1,nm_bbox.left:nm_bbox.right+1]

        ## data argument
        if argument and np.random.rand() > 0.5:
            ### flip
            face_flipped, landmark_flipped = flip(nm_face, landmarkGt)
            face_flipped = cv2.resize(face_flipped, (31, 39)).reshape(
                (1, 31, 39))
            landmark_flipped = landmark_flipped[2:, :].reshape((6))
            NM_imgs.append(face_flipped)
            NM_landmarks.append(landmark_flipped)

        nm_face = cv2.resize(nm_face, (31, 39)).reshape((1, 31, 39))
        nm_landmark = landmarkGt[2:, :].reshape((6))
        NM_imgs.append(nm_face)
        NM_landmarks.append(nm_landmark)

    #imgs, landmarks = process_images(ftxt, output)

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    EN_imgs, EN_landmarks = np.asarray(EN_imgs), np.asarray(EN_landmarks)
    NM_imgs, NM_landmarks = np.asarray(NM_imgs), np.asarray(NM_landmarks)

    F_imgs = processImage(F_imgs)
    shuffle_in_unison_scary(F_imgs, F_landmarks)
    EN_imgs = processImage(EN_imgs)
    shuffle_in_unison_scary(EN_imgs, EN_landmarks)
    NM_imgs = processImage(NM_imgs)
    shuffle_in_unison_scary(NM_imgs, NM_landmarks)

    # full face
    base = join(OUTPUT, '1_F')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = F_imgs.astype(np.float32)
        h5['landmark'] = F_landmarks.astype(np.float32)

    # eye and nose
    base = join(OUTPUT, '1_EN')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = EN_imgs.astype(np.float32)
        h5['landmark'] = EN_landmarks.astype(np.float32)

    # nose and mouth
    base = join(OUTPUT, '1_NM')
    createDir(base)
    output = join(base, fname)
    logger("generate %s" % output)
    with h5py.File(output, 'w') as h5:
        h5['data'] = NM_imgs.astype(np.float32)
        h5['landmark'] = NM_landmarks.astype(np.float32)
Example #34
0
File: example.py Project: gobelc/sdr-apt
        satellite = sat.NOAA_15()
    '''
    Display Raw Image (flipped if needed..)
    '''

    display = True
    if display == True:
        img = Image.fromarray(matrix)
        img.show()
        img.histogram()
        img.save(image_dir + prefix + '_raw_image.png')
    '''
    Sobel Filter
    '''
    if flip_condition:
        matrix_filtered = utils.flip(matrix)
    else:
        matrix_filtered = matrix
    img_filtered = utils.sobel_filter(matrix_filtered)
    img_filtered = img_filtered.astype(np.uint8)

    display = True
    if display == True:
        img = Image.fromarray(img_filtered)
        if img.mode != 'RGB':
            img = img.convert('RGB')
        img.show()
        img.save(image_dir + prefix + '_sobel_filter.png')
    '''
    Normalize image with Telemetry Frame
    '''

    frame = "A"