Example No. 1
def extractBrainPart(imgDir, n_divisions=3, x_part=0, y_part=0, z_part=0):
    imgPath = os.path.join(imgDir, "*")
    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir, "brainpart_" + str(n_divisions) + "_" + str(x_part) +
        "_" + str(y_part) + "_" + str(z_part) + "_" +
        imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        data = pickle.load(save)
        save.close()
        return data
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    data = []
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()
        # Resize to a <scale> proportion of the original size; scale is
        # assumed to be a module-level constant not shown in this snippet
        imgData_resized = sp.ndimage.interpolation.zoom(imgData, scale)
        imgData_flipped = np.flip(imgData_resized, 0)

        # Assumed: the feature is the mirror-image similarity of the resized
        # volume, using the same mse helper as extractFlipSim (Example No. 8)
        similarity = mse(imgData_resized, imgData_flipped)
        data.append([similarity])
        printProgress(i + 1, n_samples)
    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(data, output)
    output.close()
    print "Done"
    return data
Example No. 2
def extractCompleteBrain(imgDir):
    imgPath = os.path.join(imgDir, "*")
    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir, "complete_" + imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        data = pickle.load(save)
        save.close()
        return data
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    data = []
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()
        single_brain = imgData.flatten().tolist()
        data.append(single_brain)
        printProgress(i + 1, n_samples)
    print "\n!!!!!NOT Storing the features !!!!"
    #output = open(outputFileName,"wb")
    #pickle.dump(data,output)
    #output.close()
    #print "Done"
    return data
Example No. 3
def extractColorPercentage(imgDir, upperDark, upperGray, firstColor,
                           secondColor):
    imgPath = os.path.join(imgDir, "*")

    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir, "2ColorPercentage_" + str(upperDark) + "_" +
        str(upperGray) + "_" + str(firstColor) + "_" + str(secondColor) + "_" +
        imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        zoneAverages = pickle.load(save)
        save.close()
        return zoneAverages

    # Fetch all directory listings of set_train and sort them on the image number
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    printProgress(0, n_samples)

    allPercentages = []
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()
        imgDataSlice = imgData[:, :, imgData.shape[2] // 2, 0]

        COUNTS = []
        voxelsCounted = 0.0
        blackCounted = 0
        grayCounted = 0
        whiteCounted = 0
        for j in range(0, imgData.shape[0]):
            for k in range(0, imgData.shape[1]):
                value = imgDataSlice[j][k]
                if value > 0:
                    voxelsCounted += 1
                    if value <= upperDark:
                        blackCounted += 1
                    elif value <= upperGray:
                        grayCounted += 1
                    else:
                        whiteCounted += 1

        if voxelsCounted > 0:
            COUNTS.append(blackCounted)
            COUNTS.append(grayCounted)
            COUNTS.append(whiteCounted)

        # Float division: both counts are integers
        allPercentages.append(float(COUNTS[firstColor]) / COUNTS[secondColor])
        printProgress(i + 1, n_samples)

    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(allPercentages, output)
    output.close()
    print "Done"
    return allPercentages
Example No. 4
def output_components(path, client):
    i = 0

    limit = 3000  # maximum number of components returned by the query

    directory = client.get_json(
        "query/components?q=SELECT component_name__v, checksum__v, component_type__v, LONGTEXT(mdl_definition__v), LONGTEXT(json_definition__v) FROM vault_component__v WHERE status__v='active__v' ORDER BY component_type__v LIMIT {0}"
        .format(limit))

    l = directory["responseDetails"]["size"]
    printProgress.printProgress(i,
                                l,
                                prefix='Progress:',
                                suffix='Complete',
                                barLength=50)

    csv = []
    Helpers.append_line(csv, 'Component Type,Name,Checksum')

    for component in directory["data"]:
        name = component["component_name__v"]
        component_type = component["component_type__v"]  # avoid shadowing the type() builtin
        checksum = component["checksum__v"]
        mdl = component["mdl_definition__v"]
        json_def = component["json_definition__v"]

        Helpers.append_line(
            csv, '{0},{1},{2}'.format(component_type,
                                      component_type + "." + name, checksum))

        type_folder = (path + "/%s") % (component_type)
        if not os.path.exists(type_folder):
            os.makedirs(type_folder)

        #json_folder = (type_folder + "/json/")
        #if not os.path.exists(json_folder):
        #    os.makedirs(json_folder)

        mdl_folder = (type_folder)  # + "/mdl")
        if not os.path.exists(mdl_folder):
            os.makedirs(mdl_folder)

        #Helpers.dump_json_file(name, json_def, json_folder)

        if component_type == "Workflow":
            # Workflows are saved as JSON; don't open a .txt handle that
            # would be left behind as an empty file
            Helpers.dump_json_file(name + ".json", json_def, mdl_folder + "/")
        else:
            with open(mdl_folder + "/" + name + ".txt", "w") as f:
                f.write(mdl.encode('utf-8'))
        i += 1
        printProgress.printProgress(i,
                                    l,
                                    prefix='Progress:',
                                    suffix='Complete' + " - " + name,
                                    barLength=50)

    return csv
Example No. 5
def extractColoredZone(imgDir, minColor, maxColor, nPartitions=1):
    imgPath = os.path.join(imgDir, "*")

    allColoredZones = []

    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir,
        "coloredzones2d_" + str(nPartitions) + "_" + str(minColor) + "_" +
        str(maxColor) + "_" + imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        zoneAverages = pickle.load(save)
        save.close()
        return zoneAverages

    # Fetch all directory listings of set_train and sort them on the image number
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    printProgress(0, n_samples)
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()
        imgDataDisected = imgData[20:148, 35:163, 88, 0]

        # The slice above is 2-D, so the zone grid must be 2-D as well: with
        # the original 3-D shape, colZones[partX][partY] += 1 bumped a whole
        # row of the array instead of a single counter
        colZones = np.zeros((nPartitions, nPartitions), dtype=int)
        # imgDataDisected is 128x128, so both dimensions divide evenly
        # by any nPartitions that is a power of two
        for x in range(imgDataDisected.shape[0]):
            for y in range(imgDataDisected.shape[1]):
                val = imgDataDisected[x][y]
                partX = int((x * nPartitions) / imgDataDisected.shape[0])
                partY = int((y * nPartitions) / imgDataDisected.shape[1])
                if minColor <= val <= maxColor:
                    colZones[partX][partY] += 1

        allColoredZones.append(colZones.flatten().tolist())
        printProgress(i + 1, n_samples)

    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(allColoredZones, output)
    output.close()
    print "Done"
    return allColoredZones
Example No. 6
def extractHistograms(imgDir, maxValue=4000, nBins=-1, nPartitions=1):
    if nBins == -1: nBins = maxValue

    # The number of different intensities per point of the histogram
    binSize = math.ceil((maxValue * 1.) / nBins)
    imgPath = os.path.join(imgDir, "*")

    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    # nPartitions is part of the cache key; otherwise features computed with
    # a different partitioning would be read back from the same file
    outputFileName = os.path.join(
        featuresDir, "histograms_" + str(nPartitions) + "_" + str(nBins) +
        "-" + str(maxValue) + "_" + imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        histograms = pickle.load(save)
        save.close()
        return histograms

    # Fetch all directory listings of set_train and sort them on the image number
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    histograms = []
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    printProgress(0, n_samples)
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()

        # Count occurrences of each intensity below the maxValue
        single_brain = np.array([[[[0] * nBins] * nPartitions] * nPartitions] *
                                nPartitions)
        for x in range(imgData.shape[0]):
            for y in range(imgData.shape[1]):
                for z in range(imgData.shape[2]):
                    val = imgData[x][y][z][0]
                    partX = int((x * nPartitions) / imgData.shape[0])
                    partY = int((y * nPartitions) / imgData.shape[1])
                    partZ = int((z * nPartitions) / imgData.shape[2])
                    if 0 < val < maxValue:
                        c = int(val / binSize)
                        single_brain[partX][partY][partZ][c] += 1
        histograms.append(single_brain.flatten().tolist())
        printProgress(i + 1, n_samples)

    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(histograms, output)
    output.close()
    print "Done"

    return histograms
Example No. 7
def output_components(path, client, includeWorkflow):

    comps = VaultService.get_component_types(client)

    i = 0
    l = 0

    # count the component names first so the progress bar has a total
    for component_json in comps:
        l += len(component_json["names"])

    printProgress.printProgress(i,
                                l,
                                prefix='Progress:',
                                suffix='Complete',
                                barLength=50)

    for component_json in comps:
        component_type = component_json["type"]
        type_folder = (path + "/%s") % (component_type)

        for component_name in component_json["names"]:

            if not os.path.exists(type_folder):
                os.makedirs(type_folder)

            name = component_type + "." + component_name
            if includeWorkflow and component_type == "Workflow":
                wf = VaultService.get_workflow(client, component_name)
                Helpers.dump_json_file(component_name, wf, type_folder + "/")
            else:
                with open(type_folder + "/" + component_name + ".mdl",
                          "w") as f:
                    mdl = VaultService.get_component(client, name)
                    f.write(mdl.encode('utf-8'))

            i += 1
            printProgress.printProgress(i,
                                        l,
                                        prefix='Progress:',
                                        suffix='Complete' + " - " + name,
                                        barLength=50)
Example No. 8
def extractFlipSim(imgDir, nPartitions=8, exponent=50):
    imgPath = os.path.join(imgDir, "*")

    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir, "flipsim_" + str(nPartitions) + "-" + str(exponent) +
        "_" + imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        data = pickle.load(save)
        save.close()
        return data

    # Fetch all directory listings of set_train and sort them on the image number
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    data = []
    printProgress(0, n_samples)
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = np.asarray(img.get_data()[:, :, :, 0])
        imgData_flipped = np.flip(imgData, 0)
        part_size_x = int(round(float(imgData.shape[0]) / nPartitions))
        part_size_y = int(round(float(imgData.shape[1]) / nPartitions))
        part_size_z = int(round(float(imgData.shape[2]) / nPartitions))
        partsSim = []
        for x in range(nPartitions):
            for y in range(nPartitions):
                for z in range(nPartitions):
                    x_start = x * part_size_x
                    if (x == nPartitions - 1): x_stop = imgData.shape[0]
                    else: x_stop = (x + 1) * part_size_x

                    y_start = y * part_size_y
                    if (y == nPartitions - 1): y_stop = imgData.shape[1]
                    else: y_stop = (y + 1) * part_size_y

                    z_start = z * part_size_z
                    if (z == nPartitions - 1): z_stop = imgData.shape[2]
                    else: z_stop = (z + 1) * part_size_z

                    imgPart = imgData[x_start:x_stop, y_start:y_stop,
                                      z_start:z_stop]
                    imgPart_flipped = imgData_flipped[x_start:x_stop,
                                                      y_start:y_stop,
                                                      z_start:z_stop]

                    #ssim_val = ssim(imgPart,imgPart_flipped)
                    mse_val = mse(imgPart, imgPart_flipped)
                    #similarity = [ssim_val**exponent,mse_val]
                    partsSim.append(mse_val)
        data.append(partsSim)
        printProgress(i + 1, n_samples)

    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(data, output)
    output.close()
    print "Done"

    return data
Example No. 9
def extractThreeColors(imgDir,
                       darkColor,
                       grayColor,
                       whiteColor,
                       nPartitions=1):
    imgPath = os.path.join(imgDir, "*")

    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir, "threeColors_" + str(nPartitions) + "_" + str(darkColor) +
        "_" + str(grayColor) + "_" + str(whiteColor) + "_" +
        imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        zoneAverages = pickle.load(save)
        save.close()
        return zoneAverages

    # Fetch all directory listings of set_train and sort them on the image number
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    printProgress(0, n_samples)

    allPercentages = []
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()
        imgDataDisected = imgData[:, :, :, 0]

        percentages = []
        voxelsCounted = 0.0
        blackCounted = 0
        grayCounted = 0
        whiteCounted = 0
        for j in range(0, imgData.shape[0]):
            for k in range(0, imgData.shape[1]):
                for l in range(0, imgData.shape[2]):
                    value = imgDataDisected[j][k][l]
                    if value > 0:
                        voxelsCounted += 1
                        if value <= darkColor:
                            blackCounted += 1
                        elif value <= grayColor:
                            grayCounted += 1
                        elif value <= whiteColor:
                            whiteCounted += 1

        if voxelsCounted > 0:
            percentages.append(blackCounted / voxelsCounted)
            percentages.append(grayCounted / voxelsCounted)
            percentages.append(whiteCounted / voxelsCounted)

        allPercentages.append(percentages)
        printProgress(i + 1, n_samples)

    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(allPercentages, output)
    output.close()
    print "Done"
    return allPercentages
Example No. 10
def extractZoneAverages(imgDir, nPartitions=1):
    imgPath = os.path.join(imgDir, "*")

    allZoneAverages = []

    # This is the cache for the feature, used to make sure we don't do the heavy computations more often than necessary
    outputFileName = os.path.join(
        featuresDir, "zoneavg_" + str(nPartitions) + "_" +
        imgDir.replace(os.sep, "-") + ".feature")
    if os.path.isfile(outputFileName):
        save = open(outputFileName, 'rb')
        zoneAverages = pickle.load(save)
        save.close()
        return zoneAverages

    # Fetch all directory listings of set_train and sort them on the image number
    allImageSrc = sorted(glob.glob(imgPath), key=extractImgNumber)
    n_samples = len(allImageSrc)
    print "Found " + str(n_samples) + " images!"
    print "Preparing the data"
    printProgress(0, n_samples)
    for i in range(0, n_samples):
        img = nib.load(allImageSrc[i])
        imgData = img.get_data()

        imgDataDisected = imgData[20:148, 35:163, 30:158, 0]
        # Size should be same for all dimensions, imgData should
        # have same dimensions for x, y, z all such that they can be
        # divided by nPartitions
        size = imgDataDisected.shape[0] // nPartitions  # integer zone width
        '''
		zoneAverages = []
		for j in range(0,nPartitions):
			for k in range(0, nPartitions):
				for l in range(0, nPartitions):
					xStart = j*size
					yStart = k*size
					zStart = l*size

					totalSum = 0
					totalVoxels = size*size*size
					for x in range(xStart, xStart+size):
						for y in range(yStart, yStart+size):
							for z in range(zStart, zStart+size):
								totalSum += imgDataDisected[x][y][z]

					mean = totalSum/totalVoxels
					zoneAverages.append(mean)	
					#print "brain " +str(i)+" zone " + str(j) + ", " + str(k) + ", " + str(l) + " with mean " +str(mean)
		'''
        # Use a float array so the per-zone averages written back below
        # aren't truncated to integers
        zoneAverages = np.zeros((nPartitions, nPartitions, nPartitions))
        totalVoxels = size * size * size
        for x in range(imgDataDisected.shape[0]):
            for y in range(imgDataDisected.shape[1]):
                for z in range(imgDataDisected.shape[2]):
                    val = imgDataDisected[x][y][z]
                    partX = int((x * nPartitions) / imgDataDisected.shape[0])
                    partY = int((y * nPartitions) / imgDataDisected.shape[1])
                    partZ = int((z * nPartitions) / imgDataDisected.shape[2])
                    if val > 0:
                        zoneAverages[partX][partY][partZ] += val
        for j in range(zoneAverages.shape[0]):
            for k in range(zoneAverages.shape[1]):
                for l in range(zoneAverages.shape[2]):
                    zoneAverages[j][k][l] /= totalVoxels
        allZoneAverages.append(zoneAverages.flatten().tolist())
        printProgress(i + 1, n_samples)

    print "\nStoring the features in " + outputFileName
    output = open(outputFileName, "wb")
    pickle.dump(allZoneAverages, output)
    output.close()
    print "Done"

    return allZoneAverages
Example No. 11
def main():
    print("""
▓█████  ██▀███   ██▀███   ▒█████   ██▀███    ██████ 
▓█   ▀ ▓██ ▒ ██▒▓██ ▒ ██▒▒██▒  ██▒▓██ ▒ ██▒▒██    ▒ 
▒███   ▓██ ░▄█ ▒▓██ ░▄█ ▒▒██░  ██▒▓██ ░▄█ ▒░ ▓██▄   
▒▓█  ▄ ▒██▀▀█▄  ▒██▀▀█▄  ▒██   ██░▒██▀▀█▄    ▒   ██▒
░▒████▒░██▓ ▒██▒░██▓ ▒██▒░ ████▓▒░░██▓ ▒██▒▒██████▒▒
░░ ▒░ ░░ ▒▓ ░▒▓░░ ▒▓ ░▒▓░░ ▒░▒░▒░ ░ ▒▓ ░▒▓░▒ ▒▓▒ ▒ ░
 ░ ░  ░  ░▒ ░ ▒░  ░▒ ░ ▒░  ░ ▒ ▒░   ░▒ ░ ▒░░ ░▒  ░ ░
   ░     ░░   ░   ░░   ░ ░ ░ ░ ▒    ░░   ░ ░  ░  ░  
   ░  ░   ░        ░         ░ ░     ░           ░  
                                                    
    """)

    i = 0
    l = 0

    use_cache = True

    client = VaultService.get_client()
    instance_name = datetime.datetime.now()

    components = mdl.get_component_type_names(client, use_cache)
    l = len(components)
    printProgress.printProgress(i,
                                l,
                                prefix='Progress:',
                                suffix='Complete',
                                barLength=50)

    first = True
    for component_type in components:

        error_url = 'configuration/{0}/errors'.format(component_type)
        try:
            errors_resp = client.get_json(error_url)

            if first:

                create_markdown_file('common-errors', errors_resp, True)

                header = [
                    'Type', 'Code', 'Reason', 'Type', 'Developer message',
                    'End user message\n'
                ]
                Helpers.save_as_file('errors', ','.join(header),
                                     'output/errors/csv2', 'csv')

                #export_csv('errors', errors_resp, True)

                i += 1
                first = False

            create_markdown_file('{0}-errors'.format(component_type),
                                 errors_resp)
            #export_csv('errors', errors_resp)

            printProgress.printProgress(i,
                                        l,
                                        prefix='Progress:',
                                        suffix='Complete' + " - " +
                                        component_type,
                                        barLength=50)

        except ApiClient.ApiException:
            print('Failed to get errors for {0} from the API'.format(
                component_type))

        i += 1

    print('Done!')