# Ejemplo n.º 1 (score: 0)
    def LoadData(self, updatefunction=None):
        """Load all configured dictionaries and build the segmenter.

        Called when first starting the program, or when a preference
        change sets dirtyDicts.

        Args:
            updatefunction: optional callback forwarded to
                segmenter.Dictionary (presumably a progress hook --
                TODO confirm against segmenter).

        Side effects:
            Populates self.dicts and self.seg; emits progress text via
            self.addMessage.
        """
        config = self.config
        # Fall back to simplified characters when no charset is configured.
        if config['charset']:
            charset = config['charset']
        else:
            charset = 'simplified'

        self.dicts = []

        for dictname in config['dictionaries']:
            self.addMessage("Loading dictionary %s ..." % dictname)
            dictFile = os.path.join(config.appDir, 'dict', dictname)
            # Renamed from 'dict' to avoid shadowing the builtin.
            dictionary = segmenter.Dictionary(dictFile,
                                              format='cedict',
                                              verbose=True,
                                              updatefunction=updatefunction)

            # 'is not None' instead of '!= None' (PEP 8 identity check).
            if dictionary.messages is not None:
                for elem in dictionary.messages:
                    self.addMessage(elem)
                # add a blank line
                # TODO(review): the comment above has no matching code;
                # possibly self.addMessage("") was intended here -- confirm.

            self.addMessage("Loaded dictionary %s, %d words" %
                            (dictname, dictionary.getWordCount()))
            self.dicts.append(dictionary)

        self.seg = segmenter.Segmenter(charset, self.dicts, self.stats)
        self.addMessage("")
# Ejemplo n.º 2 (score: 0)
def main(tds, vds):
    """Build data loaders from the given datasets and run the segmenter.

    Depending on expConfig flags this validates all checkpoints, makes
    predictions, or trains.

    Args:
        tds: training dataset (wrapped in a shuffling DataLoader).
        vds: validation dataset; also reused as the challenge
            validation set.
    """

    # Setup experiment logging to comet.ml.
    if expConfig.LOG_COMETML:
        hyper_params = {
            "experimentName": expConfig.EXPERIMENT_NAME,
            "epochs": expConfig.EPOCHS,
            "batchSize": expConfig.BATCH_SIZE,
            "channels": expConfig.CHANNELS,
            # NOTE(review): key typo ("virualBatchsize") kept on purpose --
            # renaming it would break continuity of logged experiments.
            "virualBatchsize": expConfig.VIRTUAL_BATCHSIZE
        }
        expConfig.experiment.log_parameters(hyper_params)
        expConfig.experiment.add_tags(
            [expConfig.EXPERIMENT_NAME, "ID{}".format(expConfig.id)])
        if hasattr(expConfig, "EXPERIMENT_TAGS"):
            expConfig.experiment.add_tags(expConfig.EXPERIMENT_TAGS)
        print(bcolors.OKGREEN + "Logging to comet.ml" + bcolors.ENDC)
    else:
        print(bcolors.WARNING + "Not logging to comet.ml" + bcolors.ENDC)

    # Log trainable parameter count (thousands separated by apostrophes).
    if expConfig.LOG_PARAMCOUNT:
        paramCount = sum(p.numel() for p in expConfig.net.parameters()
                         if p.requires_grad)
        print("Parameters: {:,}".format(paramCount).replace(",", "'"))

    # Load data. (Removed the unused 'randomCrop' local: it was computed
    # but never passed anywhere -- the datasets arrive pre-built here.)
    trainloader = torch.utils.data.DataLoader(
        tds,
        batch_size=expConfig.BATCH_SIZE,
        shuffle=True,
        pin_memory=True,
        num_workers=expConfig.DATASET_WORKERS)

    valloader = torch.utils.data.DataLoader(
        vds,
        batch_size=1,
        shuffle=False,
        pin_memory=True,
        num_workers=expConfig.DATASET_WORKERS)

    # NOTE(review): the challenge loader wraps the same vds with identical
    # settings -- presumably a distinct challenge dataset was intended;
    # verify against the caller.
    challengeValloader = torch.utils.data.DataLoader(
        vds,
        batch_size=1,
        shuffle=False,
        pin_memory=True,
        num_workers=expConfig.DATASET_WORKERS)

    seg = segmenter.Segmenter(expConfig, trainloader, valloader,
                              challengeValloader)
    if hasattr(expConfig, "VALIDATE_ALL") and expConfig.VALIDATE_ALL:
        seg.validateAllCheckpoints()
    elif hasattr(expConfig, "PREDICT") and expConfig.PREDICT:
        seg.makePredictions()
    else:
        seg.train()
# Ejemplo n.º 3 (score: 0)
def main():
    """Get data as a string from read_in() and write out the segments."""
    line = read_in()

    # Renamed from 's' so the loop variable below no longer shadows the
    # segmenter instance (latent bug if the instance were reused later).
    segmenter = seg.Segmenter()

    segments = get_segments(line, segmenter)

    # Capitalize each flattened segment and print it as a sentence.
    for segment in flatten(segments):
        print("%s%s" % (segment[0].upper(), segment[1:]) + '.')
# Ejemplo n.º 4 (score: 0)
def main(args):
    """Look up the experiment config selected by args.exp and run it.

    Mirrors the other training entry points: optional comet.ml logging,
    parameter-count report, BraTS data loaders, then
    validate-all / predict / train depending on expConfig flags.

    Args:
        args: parsed CLI namespace; args.exp indexes the exps registry.
    """
    expConfig = exps[args.exp]
    # setup experiment logging to comet.ml
    if expConfig.LOG_COMETML:
        hyper_params = {"experimentName": expConfig.EXPERIMENT_NAME,
                        "epochs": expConfig.EPOCHS,
                        "batchSize": expConfig.BATCH_SIZE,
                        "channels": expConfig.CHANNELS,
                        "virualBatchsize": expConfig.VIRTUAL_BATCHSIZE}
        # BUG FIX: 'log_' is not a valid Experiment method; the sibling
        # entry points in this file call log_parameters(hyper_params).
        expConfig.experiment.log_parameters(hyper_params)
        expConfig.experiment.add_tags([expConfig.EXPERIMENT_NAME, "ID{}".format(expConfig.id)])
        if hasattr(expConfig, "EXPERIMENT_TAGS"):
            expConfig.experiment.add_tags(expConfig.EXPERIMENT_TAGS)
        print(bcolors.OKGREEN + "Logging to comet.ml" + bcolors.ENDC)
    else:
        print(bcolors.WARNING + "Not logging to comet.ml" + bcolors.ENDC)

    # log parameter count
    if expConfig.LOG_PARAMCOUNT:
        paramCount = sum(p.numel() for p in expConfig.net.parameters() if p.requires_grad)
        print("Parameters: {:,}".format(paramCount).replace(",", "'"))

    # load data (no random crop in this variant)
    randomCrop = None
    trainset = bratsDataset.BratsDataset(systemsetup.BRATS_PATH, expConfig, mode="train", randomCrop=randomCrop)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=expConfig.BATCH_SIZE, shuffle=True, pin_memory=False)

    valset = bratsDataset.BratsDataset(systemsetup.BRATS_PATH, expConfig, mode="validation")
    valloader = torch.utils.data.DataLoader(valset, batch_size=1, shuffle=False, pin_memory=False)

    # Challenge set has no ground-truth masks; offsets are returned so
    # predictions can be placed back into the original volume.
    challengeValset = bratsDataset.BratsDataset(systemsetup.BRATS_VAL_PATH, expConfig, hasMasks=False, mode="validation", returnOffsets=True)
    challengeValloader = torch.utils.data.DataLoader(challengeValset, batch_size=1, shuffle=False, pin_memory=True)

    seg = segmenter.Segmenter(expConfig, trainloader, valloader, challengeValloader)
    if hasattr(expConfig, "VALIDATE_ALL") and expConfig.VALIDATE_ALL:
        seg.validateAllCheckpoints()
    elif hasattr(expConfig, "PREDICT") and expConfig.PREDICT:
        seg.makePredictions()
    else:
        seg.train()
# Ejemplo n.º 5 (score: 0)
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    processedimage = cv2.adaptiveThreshold(gray, 255,
                                           cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                           cv2.THRESH_BINARY, 11, 2)
    processedimage = cv2.medianBlur(processedimage, 3)
    return processedimage


def deskew_image(im, pim):
    """Detect document corners on the processed image, visualize them on
    the original, and return the perspective-corrected result.

    Args:
        im: original (color) image; drawn on in place for visualization.
        pim: preprocessed (binarized) image used for corner detection.

    Returns:
        The deskewed image produced by deskew.Deskewer.
    """
    detector = cd.CornerDetector(pim)
    corners = detector.detect_corners()
    count = len(corners)
    for idx, corner in enumerate(corners):
        # Wrap around so the last corner connects back to the first.
        following = corners[(idx + 1) % count]
        # Filled marker plus an outlined ring at each corner, then the
        # edge to the next corner.
        cv2.circle(im, (corner[0], corner[1]), 25, (210, 210, 210), -1)
        cv2.circle(im, (corner[0], corner[1]), 27, (130, 210, 210), 4)
        cv2.line(im, tuple(corner), tuple(following), (130, 210, 210), 5)
    plt.imshow(im, cmap='gray')
    plt.show()
    # 1.414 ~ sqrt(2): presumably the A-series paper aspect ratio -- confirm.
    deskewer = deskew.Deskewer(im, corners, 1.414)
    return deskewer.deskew()


# Load the form photo and convert OpenCV's BGR channel order to RGB.
img = cv2.cvtColor(cv2.imread('form5.jpg'), cv2.COLOR_BGR2RGB)
# Binarize for corner detection, deskew the original using those corners,
# then re-binarize the deskewed image before segmentation.
im = preprocess_image(img)
img = deskew_image(img, im)
pim = preprocess_image(img)
segm = seg.Segmenter(img, pim)
segm.display_segments()
# Ejemplo n.º 6 (score: 0)
def main():
    """Set up experiment logging, build the BraTS data loaders, and run
    the segmenter (validate-all / predict / train per expConfig flags)."""

    # Experiment logging to comet.ml (optional).
    if expConfig.LOG_COMETML:
        expConfig.experiment.log_parameters({
            "experimentName": expConfig.EXPERIMENT_NAME,
            "epochs": expConfig.EPOCHS,
            "batchSize": expConfig.BATCH_SIZE,
            "channels": expConfig.CHANNELS,
            "virualBatchsize": expConfig.VIRTUAL_BATCHSIZE,
        })
        tags = [expConfig.EXPERIMENT_NAME, "ID{}".format(expConfig.id)]
        expConfig.experiment.add_tags(tags)
        if hasattr(expConfig, "EXPERIMENT_TAGS"):
            expConfig.experiment.add_tags(expConfig.EXPERIMENT_TAGS)
        print(bcolors.OKGREEN + "Logging to comet.ml" + bcolors.ENDC)
    else:
        print(bcolors.WARNING + "Not logging to comet.ml" + bcolors.ENDC)

    # Report trainable parameter count, thousands separated by apostrophes.
    if expConfig.LOG_PARAMCOUNT:
        n_params = sum(p.numel() for p in expConfig.net.parameters()
                       if p.requires_grad)
        print("Parameters: {:,}".format(n_params).replace(",", "'"))

    # Optional averaged/mixup volume shared by all dataset splits.
    if expConfig.AVERAGE_DATA:
        average_data, _, _ = utils.load_nii(
            "/home/liujing/data/MICCAI_BraTS/2019/training/MixupData.nii.gz")
    else:
        average_data = None

    crop = getattr(expConfig, "RANDOM_CROP", None)

    def _make_loader(dataset, batch_size, shuffle):
        # All three loaders share pinned memory and the configured workers.
        return torch.utils.data.DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            pin_memory=True,
            num_workers=expConfig.DATASET_WORKERS)

    trainset = bratsDataset.BratsDataset(systemsetup.BRATS_PATH,
                                         expConfig,
                                         mode="train",
                                         randomCrop=crop,
                                         average_data=average_data)
    valset = bratsDataset.BratsDataset(systemsetup.BRATS_PATH,
                                       expConfig,
                                       mode="validation",
                                       average_data=average_data)
    # Challenge set has no masks; offsets let predictions map back to the
    # original volume coordinates.
    challengeValset = bratsDataset.BratsDataset(systemsetup.BRATS_VAL_PATH,
                                                expConfig,
                                                mode="validation",
                                                hasMasks=False,
                                                returnOffsets=True,
                                                average_data=average_data)

    seg = segmenter.Segmenter(expConfig,
                              _make_loader(trainset, expConfig.BATCH_SIZE, True),
                              _make_loader(valset, 1, False),
                              _make_loader(challengeValset, 1, False),
                              trainset)
    if hasattr(expConfig, "VALIDATE_ALL") and expConfig.VALIDATE_ALL:
        seg.validateAllCheckpoints()
    elif hasattr(expConfig, "PREDICT") and expConfig.PREDICT:
        seg.makePredictions()
    else:
        seg.train(is_mixup=False)