Code example #1
import os
import pickle
import sys

import input_data_LIDC  # project-local LIDC dataset loader

if len(sys.argv) <= 2:
  sys.exit("Specify the patient to process, e.g. 'LIDC-IDRI-0001'.")
patId = sys.argv[1]
imageType = sys.argv[2] # bin or orig

# If the pickled dataset joining X & y values doesn't exist, create it; otherwise, use it.
pickle_file_name = "Evans-MacBook-Pro.local-8x8_edge_patches-"+imageType+"-"+patId+".pickle"
if os.path.isfile(pickle_file_name):
  input_data_LIDC.esprint("Unpickling: " + pickle_file_name)
  with open(pickle_file_name, "rb") as pickle_file:
    dataset_input = pickle.load(pickle_file)
else:
  dataset_input = input_data_LIDC.read_data_sets(
    '../../../LIDC_Complete_20141106/LIDC-IDRI-edge_patches/'+patId,
    (os.sep + imageType + os.sep),
    '*.tiff',
    '../../../LIDC_Complete_20141106/Extracts/master_join4.csv',
    '../../../LIDC_Complete_20141106/Extracts/DICOM_metadata_extracts/',
    '*.csv')
  input_data_LIDC.esprint("Pickling: " + pickle_file_name)
  with open(pickle_file_name, "wb") as pickle_file:
    pickle.dump(dataset_input, pickle_file)
esprint("Done pickling.")



# Randomize the image & label set in-place, taking care to maintain array correspondence.
# First, re-merge the training, validation, and test sets into a single set.
train_images, train_labels = dataset_input[0]
# validation_images, validation_labels = dataset_input[1]
test_images, test_labels = dataset_input[1]
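The example breaks off before the shuffle that its closing comment describes. A minimal sketch of that step, assuming the images and labels are NumPy arrays of equal length (the use of NumPy, and rebuilding the arrays rather than shuffling strictly in place, are assumptions, not part of the original example):

import numpy as np

# Merge the splits back together, then shuffle both arrays with one shared
# permutation so every image stays paired with its label.
all_images = np.concatenate([train_images, test_images])
all_labels = np.concatenate([train_labels, test_labels])
perm = np.random.permutation(len(all_images))
all_images = all_images[perm]
all_labels = all_labels[perm]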
Code example #2
import os
import pickle

import input_data_LIDC  # project-local LIDC dataset loader

# y_len = 5



### Read LIDC images and labels ###
# Memoization: If we previously joined images to malignancy values and saved the results to a pickle file, then load that pickle file.
pickle_file_name = cfg["pickle_file_name"]
if os.path.isfile(pickle_file_name):
  input_data_LIDC.esprint("Unpickling: " + pickle_file_name)
  with open(pickle_file_name, "rb") as pickle_file:
    dataset_input = pickle.load(pickle_file)
# Else, we haven't computed the join yet, so compute it and create the pickle file. This will take a while and use all your CPU cores.
else:
  dataset_input = input_data_LIDC.read_data_sets(
    cfg["images_dir_path"],
    cfg["images_file_glob"],
    cfg["master_join4_path"],
    cfg["DICOM_metadata_extracts_dir_path"],
    cfg["DICOM_metadata_extracts_file_glob"])
  input_data_LIDC.esprint("Pickling: " + pickle_file_name)
  with open(pickle_file_name, "wb") as pickle_file:
    pickle.dump(dataset_input, pickle_file)
# Specify the input characteristics for samples (X) and labels (y).
img_px_len_x = cfg["image_len_x"]
img_px_len_y = img_px_len_x
X_len = img_px_len_x * img_px_len_y
y_len = cfg["y_len"]



# Randomize the image & label set in-place, taking care to maintain array correspondence.
# First, re-merge the training, validation, and test sets into a single set.
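Examples #2 and #4 read every path and size from a cfg mapping that is never defined in the snippets. A plausible sketch of such a configuration, using only the keys the code actually looks up; loading it from JSON, the file name config.json, and the sample values are assumptions, not taken from the original:

import json

# Hypothetical configuration file whose keys mirror the cfg[...] lookups above.
with open("config.json") as f:
  cfg = json.load(f)

# config.json might look like:
# {
#   "pickle_file_name": "LIDC_edge_patches.pickle",
#   "images_dir_path": "../../../LIDC_Complete_20141106/LIDC-IDRI-edge_patches/",
#   "images_file_glob": "*.tiff",
#   "master_join4_path": "../../../LIDC_Complete_20141106/Extracts/master_join4.csv",
#   "DICOM_metadata_extracts_dir_path": "../../../LIDC_Complete_20141106/Extracts/DICOM_metadata_extracts/",
#   "DICOM_metadata_extracts_file_glob": "*.csv",
#   "image_len_x": 8,
#   "y_len": 5
# }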
Code example #3
import os
import pickle
import sys

import input_data_LIDC  # project-local LIDC dataset loader

if len(sys.argv) <= 2:
    sys.exit("Specify the patient to process, e.g. 'LIDC-IDRI-0001'.")
patId = sys.argv[1]
imageType = sys.argv[2]  # bin or orig

# If the pickled dataset joining X & y values doesn't exist, create it; otherwise, use it.
pickle_file_name = "Evans-MacBook-Pro.local-8x8_edge_patches-" + imageType + "-" + patId + ".pickle"
if os.path.isfile(pickle_file_name):
    input_data_LIDC.esprint("Unpickling: " + pickle_file_name)
    with open(pickle_file_name, "rb") as pickle_file:
        dataset_input = pickle.load(pickle_file)
else:
    dataset_input = input_data_LIDC.read_data_sets(
        "../../../LIDC_Complete_20141106/LIDC-IDRI-edge_patches/" + patId,
        (os.sep + imageType + os.sep),
        "*.tiff",
        "../../../LIDC_Complete_20141106/Extracts/master_join4.csv",
        "../../../LIDC_Complete_20141106/Extracts/DICOM_metadata_extracts/",
        "*.csv",
    )
    input_data_LIDC.esprint("Pickling: " + pickle_file_name)
    with open(pickle_file_name, "wb") as pickle_file:
        pickle.dump(dataset_input, pickle_file)
esprint("Done pickling.")


# Randomize the image & label set in-place, taking care to maintain array correspondance.
# First, re-merge the training, validation, and test sets into a single set.
train_images, train_labels = dataset_input[0]
# validation_images, validation_labels = dataset_input[1]
test_images, test_labels = dataset_input[1]
Code example #4
import os
import pickle

import input_data_LIDC  # project-local LIDC dataset loader

# img_px_len_x = 236
# img_px_len_y = img_px_len_x
# X_len = img_px_len_x * img_px_len_y
# y_len = 5

### Read LIDC images and labels ###
# Memoization: If we previously joined images to malignancy values and saved the results to a pickle file, then load that pickle file.
pickle_file_name = cfg["pickle_file_name"]
if os.path.isfile(pickle_file_name):
    input_data_LIDC.esprint("Unpickling: " + pickle_file_name)
    with open(pickle_file_name, "rb") as pickle_file:
        dataset_input = pickle.load(pickle_file)
# Else, we haven't computed the join yet, so compute it and create the pickle file. This will take a while and use all your CPU cores.
else:
    dataset_input = input_data_LIDC.read_data_sets(
        cfg["images_dir_path"], cfg["images_file_glob"],
        cfg["master_join4_path"], cfg["DICOM_metadata_extracts_dir_path"],
        cfg["DICOM_metadata_extracts_file_glob"])
    input_data_LIDC.esprint("Pickling: " + pickle_file_name)
    with open(pickle_file_name, "wb") as pickle_file:
        pickle.dump(dataset_input, pickle_file)
# Specify the input characteristics for samples (X) and labels (y).
img_px_len_x = cfg["image_len_x"]
img_px_len_y = img_px_len_x
X_len = img_px_len_x * img_px_len_y
y_len = cfg["y_len"]

# Randomize the image & label set in-place, taking care to maintain array correspondence.
# First, re-merge the training, validation, and test sets into a single set.
train_images, train_labels = dataset_input[0]
validation_images, validation_labels = dataset_input[1]
test_images, test_labels = dataset_input[2]
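As in example #1, the snippet stops at the merge-and-shuffle step its closing comment describes. The same idea extended to the three-way split, again assuming NumPy arrays; the fixed seed and the re-split back into sets of the original sizes are illustrative choices, not from the original:

import numpy as np

n_train, n_val = len(train_images), len(validation_images)
images = np.concatenate([train_images, validation_images, test_images])
labels = np.concatenate([train_labels, validation_labels, test_labels])
# One shared permutation keeps every image aligned with its label.
perm = np.random.default_rng(0).permutation(len(images))
images, labels = images[perm], labels[perm]
# Re-split into train / validation / test with the original sizes.
train_images, train_labels = images[:n_train], labels[:n_train]
validation_images, validation_labels = images[n_train:n_train + n_val], labels[n_train:n_train + n_val]
test_images, test_labels = images[n_train + n_val:], labels[n_train + n_val:]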