Example no. 1
    def __init__(self, name, item_list, marked_email_list):
        self.name = name
        self.google_item_list = item_list
        self.marked_email_list = marked_email_list
        self.node_mln_list = []
        self.aff_word_list = []
        self.resource_dir = CONSTANT_PATH['resource']
        self.input_person_list_path = os.path.join(self.resource_dir, 'input_person_list.json')
        self.person_mln_dir = os.path.join(CONSTANT_PATH['mln_by_product_dir'], self.name.replace(' ', '_'))
        self.grounding_file_path = os.path.join(self.person_mln_dir, 'evidence.db')
        self.raw_item_file_path = os.path.join(CONSTANT_PATH['google_item_dir'], self.name + '.json')
        self.processed_item_file_path = os.path.join(self.person_mln_dir, 'google_item_list.json')
        self.valid_email_json_path = os.path.join(self.person_mln_dir, 'valid_email.json')

        create_dir_if_not_exist(self.person_mln_dir)
        self.get_aff_word_list()
        self.get_node_mln_list()
Example no. 2
    def __init__(self, name, item_list, marked_email_list):
        self.name = name
        self.google_item_list = item_list
        self.marked_email_list = marked_email_list
        self.node_mln_list = []
        self.aff_word_list = []
        self.resource_dir = CONSTANT_PATH['resource']
        self.input_person_list_path = os.path.join(self.resource_dir,
                                                   'input_person_list.json')
        self.person_mln_dir = os.path.join(CONSTANT_PATH['mln_by_product_dir'],
                                           self.name.replace(' ', '_'))
        self.grounding_file_path = os.path.join(self.person_mln_dir,
                                                'evidence.db')
        self.raw_item_file_path = os.path.join(
            CONSTANT_PATH['google_item_dir'], self.name + '.json')
        self.processed_item_file_path = os.path.join(self.person_mln_dir,
                                                     'google_item_list.json')
        self.valid_email_json_path = os.path.join(self.person_mln_dir,
                                                  'valid_email.json')

        create_dir_if_not_exist(self.person_mln_dir)
        self.get_aff_word_list()
        self.get_node_mln_list()
Example no. 3
import os

import numpy as np
from PIL import Image
from tqdm import tqdm

from util import create_dir_if_not_exist


def create_patch(whole_slide_dir, patch_dir, patch_size):
    # Create dirs ("1st_manual" files are saved under responder_dir, so it
    # must exist before the save below).
    responder_dir = patch_dir + "/1st_manual"
    non_responder_dir = patch_dir
    create_dir_if_not_exist(responder_dir)
    create_dir_if_not_exist(non_responder_dir)
    create_dir_if_not_exist("processed")

    # Iterate through files to split and group them
    image_files = os.listdir(whole_slide_dir)
    print(image_files)
    print(len(image_files), "slide images found")
    total = 0
    skipped = []

    # The ground-truth patch listing depends only on patch_dir, so read it
    # once up front instead of re-listing the directory for every patch.
    if 'train' in patch_dir:
        processed_GT_file = os.listdir("test_patches/HRF128/train_GT")
    elif 'valid' in patch_dir:
        processed_GT_file = os.listdir("test_patches/HRF128/valid_GT")
    else:
        processed_GT_file = os.listdir("test_patches/HRF128/test_GT")

    for image_file in tqdm(image_files, desc="Splitting images"):
        if "DS_Store" not in image_file:
            image = Image.open(whole_slide_dir + "/" + image_file)
            width, height = image.size
            file_well_num = image_file[:image_file.rindex(".")]

            save_dir = responder_dir if "1st_manual" in image_file else non_responder_dir

            # Round to lowest multiple of target width and height.
            # Will lead to a loss of image data around the edges, but ensures split images are all the same size.
            rounded_width = patch_size * (width // patch_size)
            rounded_height = patch_size * (height // patch_size)
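            # e.g. width = 1000 and patch_size = 128 gives
            # rounded_width = 128 * (1000 // 128) = 896, dropping 104 edge pixels.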

            # Split and save
            xs = range(0, rounded_width, patch_size)
            ys = range(0, rounded_height, patch_size)
            for i_x, x in enumerate(xs):
                for i_y, y in enumerate(ys):
                    box = (x, y, x + patch_size, y + patch_size)
                    cropped_data = image.crop(box)
                    # print(cropped_data)
                    cropped_image = Image.new('RGB', (patch_size, patch_size),
                                              255)
                    cropped_image.paste(cropped_data)
                    np_data = np.array(cropped_image)
                    # print(np_data.shape)
                    # if np.mean(np_data[:, :, :1]) == 0:
                    #     continue

                    naming_string = (file_well_num.zfill(5) +
                                     "_x" + str(i_x).zfill(2) +
                                     "_y" + str(i_y).zfill(2) + ".png")
                    if "GT" not in whole_slide_dir:
                        # Keep only patches that have a matching GT patch;
                        # record misses so they can be reported at the end.
                        if naming_string not in processed_GT_file:
                            skipped.append(naming_string)
                            continue
                    cropped_image.save(save_dir + "/" + naming_string)
                    total += 1

    print('Created', total, 'split images')
    if skipped:
        print('Labels not found for', skipped, 'so they were skipped')
Example no. 4
import os
from util import create_dir_if_not_exist

CONSTANT_PATH = {
    'google_item_dir': os.path.join('..', 'resource', 'google_items'),
    'result': os.path.join('..', 'result'),
    'resource': os.path.join('..', 'resource'),
    'by_product_dir': os.path.join('..', 'by_product'),
    'mln_by_product_dir': os.path.join('..', 'by_product', 'mln_by_product'),
    'svm_by_product_dir': os.path.join('..', 'by_product', 'svm_by_product'),
    'fgm_by_product_dir': os.path.join('..', 'by_product', 'fgm_by_product'),
}

for path in CONSTANT_PATH.values():
    create_dir_if_not_exist(path)
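
Every example calls util.create_dir_if_not_exist, but its body never appears in these excerpts. A minimal sketch of what it plausibly looks like, assuming it simply wraps os.makedirs (the body below is inferred, not taken from the source):

import os

def create_dir_if_not_exist(path):
    # Create the directory (and any missing parents) if it does not exist.
    if not os.path.exists(path):
        os.makedirs(path)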
Example no. 5
    filenames = [
        os.path.join(args.data_dir, f) for f in filenames if f.endswith('.png')
    ]

    # Sort, then shuffle with a fixed seed, so the split is reproducible.
    random.seed(SEED)
    filenames.sort()
    random.shuffle(filenames)

    # Split into training and validation sets.
    split_idx = int(TRAIN_SPLIT * len(filenames))
    train_filenames = filenames[:split_idx]
    val_filenames = filenames[split_idx:]
    split_sets = {'train': train_filenames, 'val': val_filenames}

    create_dir_if_not_exist(args.output_dir)

    for split in ['train', 'val']:
        # Output layout per split:
        #   X       - original images
        #   Y       - segmented images
        #   labeled - segmented images with colors converted to class labels
        output_dir_split = os.path.join(args.output_dir, split)
        output_dir_labeled = os.path.join(output_dir_split, 'labeled')
        output_dir_X = os.path.join(output_dir_split, 'X')
        output_dir_Y = os.path.join(output_dir_split, 'Y')
        create_dir_if_not_exist(output_dir_split)
        create_dir_if_not_exist(output_dir_labeled)
        create_dir_if_not_exist(output_dir_X)
        create_dir_if_not_exist(output_dir_Y)
Example no. 6
import os
import sys
from os.path import abspath, join

from util import get_ip_address
from util import create_dir_if_not_exist

# Python 2 idiom: force UTF-8 as the default string encoding.
reload(sys)
sys.setdefaultencoding('utf8')
sys.dont_write_bytecode = True

# server
SERVER_IP = '127.0.0.1'
# SERVER_IP = get_ip_address()
SERVER_PORT = 5000

# db
DB_DIR = join(abspath(os.path.dirname(__file__)), 'db')
create_dir_if_not_exist(DB_DIR)


class Config():

    # CSRF
    CSRF_ENABLED = True
    SECRET_KEY = 'CSSUNB'
    SECURITY_PASSWORD_SALT = 'CSSUNB23333'

    # DB settings
    APP_DB = 'ACE.db'
    TEST_DB = 'test'
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_MIGRATE_REPO = join(DB_DIR, 'db_repo')
    SQLALCHEMY_DATABASE_URI = 'sqlite:///%s' % (join(DB_DIR, APP_DB))
Example no. 7
import os
from random import randint

import numpy as np
from PIL import Image
from tqdm import tqdm

from util import create_dir_if_not_exist


def create_patch(image_path,
                 gt_path,
                 patch_dir,
                 patch_size,
                 patch_per_image,
                 inside=True):
    # Create dirs
    gt_dir = patch_dir + "_GT"
    image_dir = patch_dir
    create_dir_if_not_exist(gt_dir)
    create_dir_if_not_exist(image_dir)
    create_dir_if_not_exist("random")

    # Iterate through files to split and group them
    image_files = os.listdir(image_path)
    print(len(image_files), "slide images found")
    iter_tot = 0
    for image_file in tqdm(image_files, desc="Splitting images"):
        if "DS_Store" not in image_file:
            image = Image.open(image_path + "/" + image_file)
            image_np = np.asarray(image)
            # print(image_np.shape)

            # Swap the image extension for .png to find the matching GT file.
            gt = Image.open(gt_path + "/" +
                            os.path.splitext(image_file)[0] + '.png')
            gt_np = np.asarray(gt)
            # print(gt_np.shape)
            gt_np = np.reshape(gt_np, (gt_np.shape[0], gt_np.shape[1], 1))
            # print(gt_np.shape)

            width, height = image.size

            save_dir_image = image_dir
            save_dir_gt = gt_dir

            # Round to lowest multiple of target width and height.
            # Will lead to a loss of image data around the edges, but ensures split images are all the same size.
            rounded_width = patch_size * (width // patch_size)
            rounded_height = patch_size * (height // patch_size)
            # Patch sampling adapted from:
            # https://github.com/QTIM-Lab/retinaunet/blob/master/lib/extract_patches.py

            half = patch_size // 2
            k = 0
            patches = []
            patches_gt = []
            while k < patch_per_image:
                # Pick a random patch center far enough from the borders
                # that the patch fits entirely inside the image.
                x_center = randint(half, width - half)
                y_center = randint(half, height - half)
                # check whether the patch is fully contained in the FOV
                # if inside:
                #     if not is_patch_inside_FOV(x_center, y_center, width,
                #                                height, patch_size):
                #         continue
                # NumPy arrays are indexed [row, col] == [y, x], so the height
                # axis takes y_center and the width axis takes x_center.
                patch = image_np[y_center - half:y_center + half,
                                 x_center - half:x_center + half, :]
                patch_mask = gt_np[y_center - half:y_center + half,
                                   x_center - half:x_center + half, :]
                patches.append(patch)
                patches_gt.append(patch_mask)

                box = (x_center - half, y_center - half,
                       x_center + half, y_center + half)
                cropped_data = image.crop(box)
                cropped_data_gt = gt.crop(box)

                cropped_image = Image.new('RGB', (patch_size, patch_size), 255)
                cropped_image.paste(cropped_data)

                cropped_image_gt = Image.new('RGB', (patch_size, patch_size),
                                             255)
                cropped_image_gt.paste(cropped_data_gt)

                # Optionally skip patches whose ground truth is all background:
                # if np.mean(np.asarray(cropped_image_gt)) == 0:
                #     continue

                iter_tot += 1  # total
                k += 1  # per full_img

                cropped_image.save(save_dir_image + "/" +
                                   str(iter_tot).zfill(5) + ".png")

                cropped_image_gt.save(save_dir_gt + "/" +
                                      str(iter_tot).zfill(5) + ".png")

            # The accumulated patches could also be returned:
            # return patches, patches_gt
    print('Created', iter_tot, 'split images')
Example no. 8
    def __init__(self, tot_data_num):
        self.tot_data_num = tot_data_num
        # self.svm_name is expected to be defined as a class attribute.
        self.root_path = '../' + self.svm_name
        self.data_root_path = '../' + self.svm_name + '/email/'
        self.result_root_path = '../' + self.svm_name + '/result/'
        self.train_dir_path = self.data_root_path + 'train/'
        self.test_dir_path = self.data_root_path + 'test/'
        self.model_dir_path = self.data_root_path + 'model/'
        self.pred_dir_path = self.data_root_path + 'prediction/'
        self.result_test_dir_path = self.result_root_path + 'test/'
        self.result_compare_dir_path = self.result_root_path + 'compare/'
        create_dir_if_not_exist(self.root_path)
        create_dir_if_not_exist(self.data_root_path)
        create_dir_if_not_exist(self.result_root_path)
        create_dir_if_not_exist(self.test_dir_path)
        create_dir_if_not_exist(self.train_dir_path)
        create_dir_if_not_exist(self.model_dir_path)
        create_dir_if_not_exist(self.pred_dir_path)
        create_dir_if_not_exist(self.result_test_dir_path)
        create_dir_if_not_exist(self.result_compare_dir_path)
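
The nine explicit calls could equally be driven by a loop; a sketch with the same attributes in the same order (behavior identical, purely a style alternative):

        for path in (self.root_path, self.data_root_path,
                     self.result_root_path, self.test_dir_path,
                     self.train_dir_path, self.model_dir_path,
                     self.pred_dir_path, self.result_test_dir_path,
                     self.result_compare_dir_path):
            create_dir_if_not_exist(path)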
Example no. 9
        y.unsqueeze_(0)
        X = X.to(device)  # Variable() is unnecessary in modern PyTorch

        outputs = model(X)
        _, preds = torch.max(outputs.data, 1)
        fname = os.path.basename(os.path.normpath(path))
        preds, y = preds.cpu().numpy(), y.numpy()

        image = plot_fast(preds, y)
        imageio.imwrite('preds/' + fname, image.astype(np.uint8))


def plot_validation(model):
    """Plots predictions for whole validation set"""
    test_loader = data_loader.fetch_dataloader('data/val', batch_size=8)
    with torch.no_grad():
        for images, y, _, fname in tqdm(test_loader):
            images = images.to(device)
            outputs = model(images)
            _, preds = torch.max(outputs.data, 1)
            preds, y = preds.cpu().numpy(), y.numpy()
            for b in range(preds.shape[0]):
                image = plot_fast(preds[b], y[b])
                imageio.imwrite('preds/' + fname[b], image.astype(np.uint8))


if __name__ == '__main__':
    model = load_model(device)
    create_dir_if_not_exist('preds')
    plot_validation(model)