def load_process_digits(pathlist):
    """Load each dataset as large b/w, small b/w, and thickness-adjusted digits."""

    black_white = []
    black_white_small = []
    optimal_lw = []

    labels = np.array([])

    for p in pathlist:
        digits_databw, labels_data = utils.load_image_data(p,
                                                           side=200,
                                                           padding=40,
                                                           bw=True)
        digits_databw_small, _ = utils.load_image_data(p,
                                                       side=20,
                                                       padding=4,
                                                       bw=True)

        digits_data, _ = utils.load_image_data(p,
                                               side=200,
                                               padding=40,
                                               bw=False)
        digits_data = change_thickness_individual(digits_data, 15)

        labels = np.concatenate((labels, labels_data))
        black_white.append(digits_databw)
        black_white_small.append(digits_databw_small)
        optimal_lw.append(digits_data)

    black_white = np.concatenate(black_white, axis=0)
    black_white_small = np.concatenate(black_white_small, axis=0)
    optimal_lw = np.concatenate(optimal_lw, axis=0)
    return black_white, black_white_small, optimal_lw, labels
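A minimal usage sketch for the variant above (the folder names are hypothetical; assumes the same `utils` module is importable as in the snippet):

pathlist = ["images/digits_set_a", "images/digits_set_b"]  # hypothetical paths
bw, bw_small, lw, labels = load_process_digits(pathlist)
print(bw.shape, bw_small.shape, lw.shape, labels.shape)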
Example n. 2
def load_process_digits(pathlist):
    """Build per-thickness variants of each dataset and save them to disk."""

    labels = np.array([])
    combined_data_dict = {}
    for t in range(min_thickness, max_thickness + 1):
        combined_data_dict[t] = []

    dataset_index = 0
    start_time = time.time()
    for p in pathlist:
        dataset_index += 1
        digits_data, labels_data = utils.load_image_data(p,
                                                         side=200,
                                                         padding=40)
        labels = np.concatenate((labels, labels_data))
        #digits_data = normalize_digit_thickness(digits_data)
        #digits_data = change_thickness(digits_data,min_thickness)
        tau_data_dict = build_thickness_data(digits_data, min_thickness,
                                             max_thickness)
        processed_digit_copy = copy.deepcopy(tau_data_dict)
        processed_digit_copy["labels"] = labels_data
        filename = 'Dataset-%i-save' % dataset_index
        utils.save_processed_data(processed_digit_copy, filename)

        for k in tau_data_dict.keys():
            d = tau_data_dict[k]
            combined_data_dict[k].append(d)
        print(labels.shape)

    for t in combined_data_dict.keys():
        combined_data_dict[t] = np.concatenate(combined_data_dict[t], axis=0)
    elapsed = time.time() - start_time
    print("This ran for : %s" % (datetime.timedelta(seconds=elapsed)))
    return combined_data_dict, labels
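A usage sketch for this second variant, which reads the module-level `min_thickness`/`max_thickness` globals; the values below are illustrative only:

min_thickness, max_thickness = 1, 5  # illustrative globals read by the function
data_by_thickness, labels = load_process_digits(["images/digits_set_a"])  # hypothetical path
print(sorted(data_by_thickness.keys()), labels.shape)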
Example n. 3
def main():
    parser = argparse.ArgumentParser(description='model')
    parser.add_argument('--input_image_paths',
                        dest='input_image_paths',
                        default=['inputs_128_128_1'],
                        nargs='+',
                        help='input image paths, separated by spaces')
    parser.add_argument('--max_input_count',
                        dest='max_input_count',
                        type=int,
                        default=10000,
                        help='max input image count for train')
    parser.add_argument('--model_parameter_path',
                        dest='model_parameter_path',
                        default='model_parameter')
    parser.add_argument('--dump_detail',
                        dest='dump_detail',
                        default=False,
                        action='store_true')

    args = parser.parse_args()

    train_x, train_y, test_x, test_y = load_image_data(args.input_image_paths,
                                                       args.max_input_count)
    model = create_model(0.0001)
    load_latest_model_parameter(model, args.model_parameter_path)
    test_model(model, train_x, train_y, args.dump_detail)
    test_model(model, test_x, test_y, args.dump_detail)
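Since `--input_image_paths` is declared with `nargs='+'`, the parsed value is a list; a quick self-contained sketch of that behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input_image_paths', nargs='+', default=['inputs_128_128_1'])
args = parser.parse_args(['--input_image_paths', 'a', 'b'])
print(args.input_image_paths)  # ['a', 'b'] -- a list, one entry per space-separated path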
Example n. 4
def upload_study_me(file_path, host, port):
    file_dict = []
    headers = {'Content-Type': 'multipart/related; '}
    request_json = {
        'request': 'post',
        'route': '/',
        'inference_command': 'get-bounding-box-2d'
    }

    images = load_image_data(file_path)
    images = sort_images(images)

    width = 0
    height = 0
    count = 0
    for image in images:
        try:
            dcm_file = pydicom.dcmread(image.path)
            if width == 0 or height == 0:
                width = dcm_file.Columns
                height = dcm_file.Rows
            count += 1
            field = str(count)
            with open(image.path, 'rb') as f:
                fo = f.read()
            filename = os.path.basename(os.path.normpath(image.path))
            file_dict.append((field, (filename, fo, 'application/dicom')))
        except Exception:
            print('File {} is not a DICOM file'.format(image.path))
            continue

    print('Sending {} files...'.format(count))
    request_json['depth'] = count
    request_json['height'] = height
    request_json['width'] = width

    file_dict.insert(
        0,
        ('request_json',
         ('request', json.dumps(request_json).encode('utf-8'), 'text/json')))

    me = MultipartEncoder(fields=file_dict)
    boundary = me.content_type.split('boundary=')[1]
    headers['Content-Type'] = headers['Content-Type'] + 'boundary="{}"'.format(
        boundary)

    r = requests.post('http://' + host + ':' + port + '/',
                      data=me,
                      headers=headers)

    if r.status_code != 200:
        print("Got error status code ", r.status_code)
        exit(1)

    multipart_data = decoder.MultipartDecoder.from_response(r)

    json_response = json.loads(multipart_data.parts[0].text)
    print("JSON response:", json_response)
Example n. 5
def _get_images_and_masks(dicom_images, inference_results):
    if isinstance(dicom_images, str):
        images = load_image_data(dicom_images)
        images = sort_images(images)
    else:
        images = dicom_images

    if isinstance(inference_results, str):
        masks = [np.fromfile(inference_results, dtype=np.uint8)]
    else:
        masks = inference_results
    return (images, masks)
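A usage sketch for the helper above, showing both accepted call forms (paths are hypothetical; `load_image_data` and `sort_images` must be importable as in the snippet):

images, masks = _get_images_and_masks('studies/ct_series', 'output/output_masks_1.npy')  # from disk
images, masks = _get_images_and_masks(images, masks)  # already-loaded objects pass straight through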
Example n. 6
def main():
    parser = argparse.ArgumentParser(description='model')
    parser.add_argument('--input_image_paths',
                        dest='input_image_paths',
                        default=['inputs_128_128_1'],
                        nargs='+',
                        help='input image paths, separated by spaces')
    parser.add_argument('--model_parameter_path',
                        dest='model_parameter_path',
                        default='model_parameter')
    parser.add_argument('--max_input_count',
                        dest='max_input_count',
                        type=int,
                        default=10000,
                        help='max input image count for train')
    parser.add_argument('--epoch_num',
                        dest='epoch_num',
                        type=int,
                        default=20,
                        help='epoch_num')
    parser.add_argument('--learning_rate',
                        dest='learning_rate',
                        type=float,
                        default=0.0001,
                        help='learning rate')

    args = parser.parse_args()
    train_x, train_y, test_x, test_y = load_image_data(args.input_image_paths,
                                                       args.max_input_count)

    model = create_model(args.learning_rate)
    latest_model_index = load_latest_model_parameter(model,
                                                     args.model_parameter_path)
    while True:
        train_model(model, args.epoch_num, train_x, train_y, test_x, test_y)
        latest_model_index += 1
        save_model_parameter(model, args.model_parameter_path,
                             latest_model_index)
        time.sleep(3)
Example n. 7
def upload_study_me(file_path, is_segmentation_model, host, port):
    file_dict = []
    headers = {'Content-Type': 'multipart/related; '}
    request_json = {
        'request': 'post',
        'route': '/',
        'inference_command': ('get-probability-mask' if is_segmentation_model
                              else 'get-bounding-box-2d')
    }

    images = load_image_data(file_path)
    # images = sort_images(images)

    width = 0
    height = 0
    count = 0
    for image in images:
        try:
            dcm_file = pydicom.dcmread(image.path)
            if width == 0 or height == 0:
                width = dcm_file.Columns
                height = dcm_file.Rows
            count += 1
            field = str(count)
            with open(image.path, 'rb') as f:
                fo = f.read()
            filename = os.path.basename(os.path.normpath(image.path))
            file_dict.append((field, (filename, fo, 'application/dicom')))
        except Exception:
            print('File {} is not a DICOM file'.format(image.path))
            continue

    print('Sending {} files...'.format(count))
    request_json['depth'] = count
    request_json['height'] = height
    request_json['width'] = width

    file_dict.insert(
        0,
        ('request_json',
         ('request', json.dumps(request_json).encode('utf-8'), 'text/json')))

    me = MultipartEncoder(fields=file_dict)
    boundary = me.content_type.split('boundary=')[1]
    headers['Content-Type'] = headers['Content-Type'] + 'boundary="{}"'.format(
        boundary)

    r = requests.post('http://' + host + ':' + port + '/',
                      data=me,
                      headers=headers)

    if r.status_code != 200:
        print("Got error status code ", r.status_code)
        exit(1)

    multipart_data = decoder.MultipartDecoder.from_response(r)

    json_response = json.loads(multipart_data.parts[0].text)
    print("JSON response:", json_response)
    mask_count = len(json_response["parts"])

    masks = [
        np.frombuffer(p.content, dtype=np.uint8)
        for p in multipart_data.parts[1:mask_count + 1]
    ]

    if is_segmentation_model:
        output_folder = 'output'

        if images[0].position is None:
            # We must sort the images by their instance UID based on the order of the response:
            identifiers = [
                part['dicom_image']['SOPInstanceUID']
                for part in json_response["parts"]
            ]
            filtered_images = []
            for uid in identifiers:
                image = next(img for img in images if img.instanceUID == uid)
                filtered_images.append(image)
            test_inference_mask.generate_images_for_single_image_masks(
                filtered_images, masks, output_folder)
        else:
            test_inference_mask.generate_images_with_masks(
                images, masks, output_folder)

        print("Segmentation mask images generated in folder: {}".format(
            output_folder))
        print("Saving output masks to files 'output/output_masks_*.npy")
        for index, mask in enumerate(masks):
            mask.tofile('output/output_masks_{}.npy'.format(index + 1))
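Note that `ndarray.tofile` writes raw bytes with no header despite the `.npy` suffix, which is why Example n. 5 reads masks back with `np.fromfile` rather than `np.load`; a round-trip sketch:

import numpy as np

mask = np.array([0, 255, 128], dtype=np.uint8)
mask.tofile('output_masks_1.npy')  # raw buffer, not the .npy container format
restored = np.fromfile('output_masks_1.npy', dtype=np.uint8)
assert np.array_equal(mask, restored)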
Example n. 8
print("====== START TRAINING NEURAL NETWORK MODEL ======")
model = ann.parse_model_js(network_model)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=['accuracy'])
model.fit(xtrain,
          ytrain,
          verbose=1,
          validation_data=(xval, yval),
          epochs=epochs)
print("====== CALCULATING MNIST ACCURACY ======")
mnist_accuracy = ann.test_model(model, xtest, ytest, "accuracy")

print("====== LOAD CUSTOM DIGITS FROM OLEKSANDR AND HENRY ======")
xm_digits, xm_labels = utils.load_image_data(xm_digits_path,
                                             side=286,
                                             padding=40,
                                             unpad=False)
ob_digits, ob_labels = utils.load_image_data(ob_digits_path,
                                             side=286,
                                             padding=40,
                                             unpad=False)

xm_labels = utils.create_one_hot(xm_labels)
ob_labels = utils.create_one_hot(ob_labels)

combined_data = np.concatenate((xm_digits, ob_digits))
combined_labels = np.concatenate((xm_labels, ob_labels))

print("Analysing Henry's Digits")
xm_r, xm_acc, xm_tau, xm_new_dig = thickness_sim(model, xm_digits, xm_labels,
                                                 mnist_linethickness)
Example n. 9
def upload_study_me(file_path,
                    model_type,
                    host,
                    port,
                    output_folder,
                    attachments,
                    override_inference_command=None,
                    send_study_size=False):
    file_dict = []
    headers = {'Content-Type': 'multipart/related; '}

    images = load_image_data(file_path)
    images = sort_images(images)

    if model_type == BOUNDING_BOX:
        print("Performing bounding box prediction")
        inference_command = 'get-bounding-box-2d'
    elif model_type == SEGMENTATION_MODEL:
        if images[0].position is None:
            # No spatial information available. Perform 2D segmentation
            print("Performing 2D mask segmentation")
            inference_command = 'get-probability-mask-2D'
        else:
            print("Performing 3D mask segmentation")
            inference_command = 'get-probability-mask-3D'
    else:
        inference_command = 'other'

    if override_inference_command:
        inference_command = override_inference_command

    request_json = {
        'request': 'post',
        'route': '/',
        'inference_command': inference_command
    }

    count = 0
    width = 0
    height = 0
    for att in attachments:
        count += 1
        field = str(count)
        with open(att, 'rb') as f:
            fo = f.read()
        filename = os.path.basename(os.path.normpath(att))
        file_dict.append((field, (filename, fo, 'application/octet-stream')))

    for image in images:
        try:
            dcm_file = pydicom.dcmread(image.path)
            if width == 0 or height == 0:
                width = dcm_file.Columns
                height = dcm_file.Rows
            count += 1
            field = str(count)
            with open(image.path, 'rb') as f:
                fo = f.read()
            filename = os.path.basename(os.path.normpath(image.path))
            file_dict.append((field, (filename, fo, 'application/dicom')))
        except Exception:
            print('File {} is not a DICOM file'.format(image.path))
            continue

    print('Sending {} files...'.format(len(images)))
    if send_study_size:
        request_json['depth'] = count
        request_json['height'] = height
        request_json['width'] = width

    file_dict.insert(
        0,
        ('request_json',
         ('request', json.dumps(request_json).encode('utf-8'), 'text/json')))

    me = MultipartEncoder(fields=file_dict)
    boundary = me.content_type.split('boundary=')[1]
    headers['Content-Type'] = headers['Content-Type'] + 'boundary="{}"'.format(
        boundary)

    r = requests.post('http://' + host + ':' + port + '/',
                      data=me,
                      headers=headers)

    if r.status_code != 200:
        print("Got error status code ", r.status_code)
        exit(1)

    multipart_data = decoder.MultipartDecoder.from_response(r)

    json_response = json.loads(multipart_data.parts[0].text)
    print("JSON response:", json_response)

    if model_type == SEGMENTATION_MODEL:
        mask_count = len(json_response["parts"])

        # Assert that we get one binary part for each object in 'parts'
        # The additional two multipart object are: JSON response and request:response digests
        assert mask_count == len(multipart_data.parts) - 2, \
            "The server must return one binary buffer for each object in `parts`. Got {} buffers and {} 'parts' objects" \
            .format(len(multipart_data.parts) - 2, mask_count)

        masks = [
            np.frombuffer(p.content, dtype=np.uint8)
            for p in multipart_data.parts[1:mask_count + 1]
        ]

        if images[0].position is None:
            # We must sort the images by their instance UID based on the order of the response:
            identifiers = [
                part['dicom_image']['SOPInstanceUID']
                for part in json_response["parts"]
            ]
            filtered_images = []
            for uid in identifiers:
                image = next(img for img in images if img.instanceUID == uid)
                filtered_images.append(image)
            test_inference_mask.generate_images_for_single_image_masks(
                filtered_images, masks, json_response, output_folder)
        else:
            test_inference_mask.generate_images_with_masks(
                images, masks, json_response, output_folder)

        print("Segmentation mask images generated in folder: {}".format(
            output_folder))
        print("Saving output masks to files '{}/output_masks_*.npy".format(
            output_folder))
        for index, mask in enumerate(masks):
            mask.tofile('{}/output_masks_{}.npy'.format(
                output_folder, index + 1))
    elif model_type == BOUNDING_BOX:
        boxes = json_response['bounding_boxes_2d']
        test_inference_boxes.generate_images_with_boxes(
            images, boxes, output_folder)

    with open(os.path.join(output_folder, 'response.json'), 'w') as outfile:
        json.dump(json_response, outfile)
"""


def test_model(model, test_data, test_labels):
    predictions = model.predict(test_data)
    correct = np.equal(np.argmax(predictions, 1), np.argmax(test_labels, 1))
    accuracy = np.mean(correct)
    return accuracy
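The accuracy computation assumes both predictions and labels arrive as one-hot (or probability) rows; a tiny self-contained check of the argmax logic:

import numpy as np

preds = np.array([[0.1, 0.9], [0.8, 0.2]])  # stand-in model outputs
labels = np.array([[0, 1], [1, 0]])         # one-hot ground truth
correct = np.equal(np.argmax(preds, 1), np.argmax(labels, 1))
print(np.mean(correct))  # 1.0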


utils.setup_gpu_session()

pathXM = os.path.join(".", "images", "XiaoMing_Digits")
pathOB = os.path.join(".", "images", "60 Images")

imgsXM, labelsXM = utils.load_image_data(pathXM)
imgsOB, labelsOB = utils.load_image_data(pathOB)

xtrain, ytrain, xtest, ytest = utils.load_mnist(normalize=True)
xtrain = xtrain.reshape(60000, 28, 28, 1)
xtest = xtest.reshape(10000, 28, 28, 1)

img_size = imgsXM.shape[0]
ximgXM = imgsXM.reshape(img_size, 28, 28, 1)
ximgXM = utils.normalize_data(ximgXM)
yimgXM = utils.create_one_hot(labelsXM)
img_size = imgsOB.shape[0]
ximgOB = imgsOB.reshape(img_size, 28, 28, 1)
ximgOB = utils.normalize_data(ximgOB)
yimgOB = utils.create_one_hot(labelsOB)
imgs_combined = np.concatenate((ximgXM, ximgOB))
Example n. 11
def main(argv=None):
    tf.set_random_seed(1237)
    np.random.seed(1237)

    # Load data
    x_train, sorted_x_train = utils.load_image_data(
        FLAGS.dataset, n_xl, n_channels, FLAGS.mbs)
    xshape = (-1, n_xl, n_xl, n_channels)
    print('Data shape = {}'.format(x_train.shape))

    x_train = x_train * 2 - 1
    sorted_x_train = sorted_x_train * 2 - 1

    # Make some data
    is_training = tf.placeholder_with_default(False,
                                              shape=[],
                                              name='is_training')
    generator = get_generator(FLAGS.dataset, FLAGS.arch,
                              n_code if FLAGS.arch == 'ae' else n_x, n_xl,
                              n_channels, n_z, ngf, is_training,
                              'transformation')
    if FLAGS.arch == 'adv':
        discriminator = get_discriminator(FLAGS.dataset, FLAGS.arch, n_x, n_xl,
                                          n_channels, n_f, ngf // 2,
                                          is_training)
        decoder = get_generator(FLAGS.dataset, FLAGS.arch, n_x, n_xl,
                                n_channels, n_f, ngf, is_training, 'decoder')

    # Define training/evaluation parameters
    run_name = 'results/{}_{}_{}_{}_c{}_mbs{}_bs{}_lr{}_t0{}'.format(
        FLAGS.dataset, FLAGS.arch, FLAGS.dist, FLAGS.match, n_code, FLAGS.mbs,
        FLAGS.bs, FLAGS.lr0, FLAGS.t0)

    if not os.path.exists(run_name):
        os.makedirs(run_name)

    # Build the computation graph
    if FLAGS.arch == 'ae':
        ae = ConvAE(x_train, (None, n_xl, n_xl, n_channels), ngf)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ae.train(sess)
            x_code = ae.encode(x_train, sess)
            sorted_x_code = ae.encode(sorted_x_train, sess)

        model = MyPMD(x_code, sorted_x_code, xshape, generator, run_name, ae)
    elif FLAGS.arch == 'adv':
        model = MyPMD(x_train,
                      sorted_x_train,
                      xshape,
                      generator,
                      run_name,
                      F=discriminator,
                      D=decoder)
    else:
        model = MyPMD(x_train, sorted_x_train, xshape, generator, run_name)

    # Run the inference
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if FLAGS.arch == 'ae':
            ae.train(sess)

        print('Training...')
        model.train(sess,
                    gen_dict={
                        model.batch_size_ph: FLAGS.mbs,
                        is_training: False
                    },
                    opt_dict={
                        model.batch_size_ph: FLAGS.bs,
                        is_training: True
                    },
                    iters=((x_train.shape[0] - 1) // FLAGS.mbs) + 1)
Example n. 12
print("====== START TRAINING NEURAL NETWORK MODEL ======")
model = ann.parse_model_js(network_model)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=['accuracy'])
model.fit(xtrain,
          ytrain,
          verbose=1,
          validation_data=(xval, yval),
          epochs=epochs)
print("====== CALCULATING MNIST ACCURACY ======")
mnist_accuracy = ann.test_model(model, xtest, ytest, "accuracy")

print("====== LOAD CUSTOM DIGITS FROM OLEKSANDR AND HENRY ======")
xm_digits, xm_labels = utils.load_image_data(xm_digits_path,
                                             side=286,
                                             padding=57)
ob_digits, ob_labels = utils.load_image_data(ob_digits_path,
                                             side=286,
                                             padding=57)

xm_labels = utils.create_one_hot(xm_labels)
ob_labels = utils.create_one_hot(ob_labels)

combined_data = np.concatenate((xm_digits, ob_digits))
combined_labels = np.concatenate((xm_labels, ob_labels))

print("Analysing Henry's Digits")
xm_r, xm_acc, xm_tau, xm_new_dig = thickness_sim(model, xm_digits, xm_labels,
                                                 mnist_linethickness)
print("== FINISH ==\n")