Example #1
from PIL import Image


def check_hat(args):
    """ Helper function that handles hat manipulations, like flip and scale """
    try:
        folder = get_imgs("crimmis_hats/")  # mapping of hat keys to image files
        hat = Image.open(folder.get('0'))   # hat '0' is the default
        for arg in args:
            if arg.startswith('type='):
                value = arg.split('=')[1]
                hat = Image.open(folder.get(value))

        w_offset, h_offset = 150, 0
        hat_width, hat_height = 350, 300
        for arg in args:
            if arg == 'flip':
                hat = hat.transpose(Image.FLIP_LEFT_RIGHT)
                w_offset, h_offset = 0, 0
            if arg.startswith('scale='):
                value = float(arg.split('=')[1])
                hat_width, hat_height = int(hat.width * value), int(hat.height * value)

        hat = hat.resize((hat_width, hat_height))
    except Exception:  # any failure (bad key, missing file) means no hat is drawn
        return None, None, None

    return hat, w_offset, h_offset
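A hat returned by check_hat is meant to be pasted onto some base image. A minimal usage sketch, assuming a Pillow image loaded from a hypothetical avatar.png and a hat PNG with an alpha channel to serve as the paste mask (neither is shown in the original):

base = Image.open("avatar.png").convert("RGBA")  # hypothetical base image
hat, w_offset, h_offset = check_hat(["type=1", "flip", "scale=0.5"])
if hat is not None:
    # pass the hat itself as the mask so its transparency is preserved
    base.paste(hat, (w_offset, h_offset), hat)
    base.save("avatar_with_hat.png")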
Example #2
import cv2
from joblib import dump
from sklearn.cluster import MiniBatchKMeans

from utils import get_imgs

N_CLUSTER = 100000  # assumed default; Example #6 loads kmeans100000.joblib


def train(n_clusters=N_CLUSTER):
    imgs = get_imgs("database/")  # dict mapping word -> list of images
    sift = cv2.xfeatures2d.SIFT_create()

    desc_list = []
    for word, img_list in imgs.items():
        for img in img_list:
            kp, desc = sift.detectAndCompute(img, None)
            if desc is None:  # images with no detectable keypoints yield no descriptors
                continue
            desc_list.extend(desc)

    kmeans = MiniBatchKMeans(n_clusters=n_clusters, n_init=1)
    kmeans.fit(desc_list)

    dump(kmeans, f'kmeans{n_clusters}.joblib')
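Once trained, the codebook is typically used to quantize each image's SIFT descriptors into a bag-of-visual-words histogram. A minimal sketch of that step (not part of the original example), assuming a fitted MiniBatchKMeans:

import numpy as np

def bovw_histogram(descriptors, kmeans):
    # assign every descriptor to its nearest cluster centre ("visual word")
    words = kmeans.predict(descriptors)
    # count occurrences of each visual word in this image
    hist, _ = np.histogram(words, bins=np.arange(kmeans.n_clusters + 1))
    return hist / max(hist.sum(), 1)  # L1-normalize so image size does not matter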
Example #3
dataset = FLAGS.dataset
img_size = (640, 640) if dataset == 'DRIVE' else (720, 720)  # (h, w); original sizes: DRIVE (584, 565), STARE (605, 700)
img_out_dir = "{}/segmentation_results_{}_{}".format(FLAGS.dataset, FLAGS.discriminator, FLAGS.ratio_gan2seg)
model_out_dir = "{}/model_{}_{}".format(FLAGS.dataset, FLAGS.discriminator, FLAGS.ratio_gan2seg)
auc_out_dir = "{}/auc_{}_{}".format(FLAGS.dataset, FLAGS.discriminator, FLAGS.ratio_gan2seg)
train_dir = "../data/{}/training/".format(dataset)
test_dir = "../data/{}/test/".format(dataset)
for out_dir in (img_out_dir, model_out_dir, auc_out_dir):
    os.makedirs(out_dir, exist_ok=True)

# set training and validation datasets
train_imgs, train_vessels = utils.get_imgs(train_dir, augmentation=True, img_size=img_size, dataset=dataset)
train_vessels = np.expand_dims(train_vessels, axis=3)
n_all_imgs = train_imgs.shape[0]
n_train_imgs = int((1 - val_ratio) * n_all_imgs)
train_indices = np.random.choice(n_all_imgs, n_train_imgs, replace=False)
train_batch_fetcher = utils.TrainBatchFetcher(train_imgs[train_indices, ...], train_vessels[train_indices, ...], batch_size)
val_indices = np.delete(np.arange(n_all_imgs), train_indices)
val_imgs, val_vessels = train_imgs[val_indices, ...], train_vessels[val_indices, ...]
# set test dataset
test_imgs, test_vessels, test_masks = utils.get_imgs(test_dir, augmentation=False, img_size=img_size, dataset=dataset, mask=True)

# create networks
g = generator(img_size, n_filters_g)
if FLAGS.discriminator == 'pixel':
    d, d_out_shape = discriminator_pixel(img_size, n_filters_d, init_lr)
elif FLAGS.discriminator == 'patch1':
    d, d_out_shape = discriminator_patch1(img_size, n_filters_d, init_lr)
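The split above is plain index bookkeeping; a self-contained toy version (with made-up sizes) shows what the np.random.choice / np.delete pair is doing:

import numpy as np

n_all_imgs, val_ratio = 20, 0.1
n_train_imgs = int((1 - val_ratio) * n_all_imgs)                # 18
train_indices = np.random.choice(n_all_imgs, n_train_imgs, replace=False)
val_indices = np.delete(np.arange(n_all_imgs), train_indices)   # the remaining 2
assert not set(train_indices) & set(val_indices)                # disjoint by construction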
Example #4
    # (this excerpt opens inside an earlier `if` branch that loads cached HOG features)
    _, Y_train, _, _ = utils.load_data()
    X_train = np.load(hog_train_imgs_file)
    X_test = np.load(hog_test_imgs_file)
elif (mode == 'train'
      or not os.path.exists(utils.train_imgs_file) or not os.path.exists(utils.train_labels_file)
      or not os.path.exists(utils.test_imgs_file) or not os.path.exists(utils.test_labels_file)
      or not os.path.exists(hog_train_imgs_file) or not os.path.exists(hog_test_imgs_file)):
    print('Train BEGIN')
    utils.repartition_data(
        father_path=os.path.abspath(os.path.join(os.getcwd(), "..")),
        test_rate=0)
    imgs_train, Y_train, _, _ = utils.load_data()
    X_train = utils.get_hog_features(imgs_train)
    # X_val = utils.get_hog_features(imgs_val)
    imgs_test = utils.get_imgs([test_imgs_path],
                               max_pool=True,
                               homomorphic=False,
                               file_type_list=['.bmp', '.png'],
                               equalize=False,
                               morphology=False)
    X_test = utils.get_hog_features(imgs_test)  # obtain the HOG feature vectors
    np.save(hog_train_imgs_file, X_train)
    np.save(hog_test_imgs_file, X_test)
else:
    raise Exception('Mode not defined')

if not os.path.exists('Q1/linear_svc.model') or mode == 'repartition':
    linear_svc = svm.LinearSVC(loss='hinge', max_iter=1000)
    linear_svc.fit(X_train, Y_train)
    pickle.dump(linear_svc, open('Q1/linear_svc.model', 'wb'))
linear_svc = pickle.load(open('Q1/linear_svc.model', 'rb'))
pred_test = linear_svc.predict(X_test)
files_name = os.listdir(test_imgs_path)
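The excerpt stops right after predicting. A plausible follow-up (an assumption, not in the original) is pairing each prediction with its file name, provided get_imgs reads the directory in sorted order:

for fname, label in zip(sorted(files_name), pred_test):
    print('%s -> %s' % (fname, label))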
Example #5
Basic strategy: first shrink the image with max pooling and run UNet prediction, then linearly interpolate the predicted image back to its original size.
"""
import sys
import os
import numpy as np
import UNet
import utils
import cv2

mode = sys.argv[1]
PREDICT = 'predict'
TRAIN = 'train'
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))  # kernel for morphological operations (dilation, erosion, etc.)

# load the raw training images and the mask images
imgs = utils.get_imgs(['init_imgs'], file_type_list=['.bmp', '.png'], homomorphic=True,
                      max_pool=False, morphology=True, bit_wise=True)
mask = utils.get_imgs(['mask_imgs'], file_type_list=['.bmp', '.png'], max_pool=False,
                      morphology=False, bit_wise=True)

# preprocess the training inputs and mask images, then save them
if not os.path.exists('Q2'):
    mode = TRAIN
    os.mkdir('Q2')
if not os.path.exists('Q2/imgs'):
    os.mkdir('Q2/imgs')
if not os.path.exists('Q2/mask_imgs'):
    os.mkdir('Q2/mask_imgs')
for i in range(imgs.shape[0]):
    tmp = imgs[i].astype(np.uint8)
    tmp = utils.adjust_gamma(tmp, gamma=6)
    tmp = cv2.erode(tmp, kernel)
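The strategy from the module docstring (shrink, predict with UNet, interpolate back up) can be sketched in a few lines, reusing the cv2 and numpy imports from the example above; the pooling factor and the Keras-style model.predict call are assumptions, not taken from the original:

def predict_full_size(model, img, factor=2):
    h, w = img.shape[:2]
    # downscale in place of max pooling (cv2.INTER_AREA as a stand-in)
    small = cv2.resize(img, (w // factor, h // factor), interpolation=cv2.INTER_AREA)
    pred = model.predict(small[np.newaxis, ..., np.newaxis])[0, ..., 0]
    # linear interpolation back to the original resolution, as the docstring describes
    return cv2.resize(pred, (w, h), interpolation=cv2.INTER_LINEAR)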
Example #6
from joblib import dump, load
from utils import get_imgs
import cv2

kmeans = load("kmeans100000.joblib")

imgs = get_imgs("database/")

sift = cv2.xfeatures2d.SIFT_create()

desc_list = []
for word, img_list in imgs.items():
    for img in img_list:
        kp, desc = sift.detectAndCompute(img, None)
        if desc is None:  # skip images with no detectable keypoints
            continue
        # keep descriptors grouped per image (Example #2 flattens them with extend)
        desc_list.append(desc)

Example #7
    def eval_model(self):
        """Extract DFCN and DCNN features for all training images and save them to HDF5."""
        device = self.device
        model = self.model.to(device)
        # model.load_state_dict(torch.load(self.path))
        # model.eval()

        dfcn_feats, dcnn_feats = [], []
        names = []
        h5f1 = h5py.File('featsDCNN.h5', 'w')
        h5f2 = h5py.File('featsDFCN.h5', 'w')
        imgs = get_imgs('net/train')

        train_data = OneData(config=self.config,
                             data_dir=self.train_dir,
                             transform=train_transform(self.config))
        train_loader = DataLoader(dataset=train_data,
                                  batch_size=self.batch_size,
                                  shuffle=True,
                                  num_workers=self.config.num_workers)
        for idx, (inputs, labels) in enumerate(train_loader):
            inputs = inputs.to(device)
            with torch.no_grad():  # inference only: no gradients needed
                output, dfcn, dcnn = model(inputs)
            # flatten each per-image feature tensor into a plain list of floats
            for it in dfcn:
                dfcn_feats.append([float(i) for i in it])
            for it in dcnn:
                dcnn_feats.append([float(i) for i in it])
            print("extracting feature from image No. %d , %d images in total" %
                  ((idx + 1), len(train_loader)))
            for name in labels:
                names.append(name)
            # names.append(name)
            # if idx == 9:
            #     break
        # print(dcnn_feats)
        # print(np.array(dcnn_feats).shape)

        # stack the collected features and write both feature sets plus names to HDF5
        feats = np.array(dcnn_feats)
        h5f1.create_dataset('dataset_1', data=feats)
        feats = np.array(dfcn_feats)
        h5f2.create_dataset('dataset_1', data=feats)
        names = [name.encode() for name in names]
        h5f1.create_dataset('dataset_2', data=np.string_(names))
        h5f2.create_dataset('dataset_2', data=np.string_(names))
        h5f1.close()
        h5f2.close()
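The two HDF5 files written here would later be read back for retrieval. A minimal sketch (none of this is in the original method), ranking stored images against a query vector by cosine similarity:

import h5py
import numpy as np

h5f = h5py.File('featsDCNN.h5', 'r')
feats = np.array(h5f['dataset_1'])              # shape (n_images, feat_dim)
names = [n.decode() for n in h5f['dataset_2']]
h5f.close()

def top_k(query, k=5):
    # cosine similarity between the query and every stored feature vector
    sims = feats @ query / (np.linalg.norm(feats, axis=1) * np.linalg.norm(query) + 1e-8)
    order = np.argsort(-sims)[:k]
    return [(names[i], float(sims[i])) for i in order]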