Example #1
def test(path):
    model = Model()
    model.to("cuda:0")
    model.eval()
    checkpoint = torch.load("./model.pth")
    model.load_state_dict(checkpoint["model"])
    img = np.array(Image.open(path).resize([448, 448]))[np.newaxis]
    img = np.transpose(img, axes=[0, 3, 1, 2]) / 255
    img = torch.tensor(img, dtype=torch.float32).to("cuda:0")
    preds = model(img).cpu().detach().numpy()
    cell_h, cell_w = IMG_H / S, IMG_W / S
    x, y = np.meshgrid(range(S), range(S))
    preds_xywhs = []
    for i in range(B):
        preds_x = (preds[0, :, :, i * 4] + x) * cell_w
        preds_y = (preds[0, :, :, i * 4 + 1] + y) * cell_h
        preds_w = preds[0, :, :, i * 4 + 2] * IMG_W
        preds_h = preds[0, :, :, i * 4 + 3] * IMG_H
        preds_xywh = np.dstack((preds_x, preds_y, preds_w, preds_h))
        preds_xywhs.append(preds_xywh)
    preds_xywhs = np.dstack(preds_xywhs)
    preds_xywhs = np.reshape(preds_xywhs, [-1, 4])
    preds_class = preds[0, :, :, 10:]
    preds_class = np.reshape(preds_class, [-1, 20])
    preds_c = preds[0, :, :, 8:10]
    preds_c = np.reshape(preds_c, [-1, 1])
    max_arg = np.argmax(preds_c, axis=0)
    print("max confidence: %f" % (preds_c[max_arg]))
    max_arg_ = np.argmax(preds_class[int(max_arg // 2)])
    print("class confidence: %f" % (preds_class[max_arg // 2, max_arg_]))
    print("class category: %s" % (CLASSES[int(max_arg_)]))
    Image.fromarray(
        np.uint8(
            draw_bboxes(np.array(Image.open(path).resize([448, 448])),
                        preds_xywhs[max_arg[0]:max_arg[0] + 1]))).show()
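A minimal usage sketch (not from the source): the function assumes that Model, IMG_H, IMG_W, S, B, CLASSES and draw_bboxes are defined at module level, along with the usual torch, numpy and PIL.Image imports. The constants below are typical YOLOv1-style values, used here only for illustration.

# Assumed context; illustrative values only, not taken from the original project.
# import numpy as np; import torch; from PIL import Image
IMG_W, IMG_H = 448, 448   # network input size, matching the resize above
S, B = 7, 2               # grid cells per side and bounding boxes per cell
# CLASSES would hold the 20 class names indexed by preds[..., 10:]

test("./sample.jpg")      # run inference on any RGB test image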
Example #2
def main():
    start = time()

    # load data
    print('loading data')
    mndata = mnist.MNIST(sys.argv[1])
    train_x, train_y = mndata.load_training()
    train_x = np.array(train_x).astype('float32') / 255

    print('time to load data:', time() - start)
    start = time()

    # set dims
    in_dim = train_x[0].shape[0]
    hid_dim1 = 128
    hid_dim2 = 64
    out_dim = 10

    # create and train classifier
    print('start training')
    model = MLP2(in_dim, hid_dim1, hid_dim2, out_dim)
    train_data, dev_data = train_dev_split(train_x, train_y, size=0.2)
    print('all:', len(train_x), ', train:', len(train_data), 'dev:', len(dev_data))
    train_classifier(train_data, dev_data, model)

    # blind test
    print('start blind test')
    test_x, test_y = mndata.load_testing()
    test_x = np.array(test_x).astype('float32') / 255
    test_acc, test_loss = model.check_on_dataset(list(zip(test_x, test_y)))
    print('test-acc:', test_acc, 'test-loss:', test_loss)

    train_acc, train_loss = model.check_on_dataset(train_data)
    print('train-acc:', train_acc, 'train-loss:', train_loss)

    pickle.dump(model.get_params(), open('nn_params/model_{}_{}.params'.format(int(train_acc * 100), int(test_acc * 100)), 'wb'))
    log.write('\ntrain: accuracy: {} | loss: {}\ntest: accuracy: {} | loss: {}'.format(
        train_acc, train_loss, test_acc, test_loss))
    with open('nn_params/log_{}_{}.txt'.format(int(train_acc * 100), int(test_acc * 100)), 'w') as f:
        f.write(log.getvalue())

    print('time to train:', time() - start)
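A minimal sketch of the train_dev_split helper assumed above (hypothetical, not the project's implementation): pair features with labels, shuffle, and hold out the requested fraction as the dev set.

import random

def train_dev_split(train_x, train_y, size=0.2):
    # Zip features with labels, shuffle, and cut off the last `size` fraction for dev.
    data = list(zip(train_x, train_y))
    random.shuffle(data)
    cut = int(len(data) * (1 - size))
    return data[:cut], data[cut:]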
Example #3
def extract_patches(image, reference, patch_size, stride):
    window_shape = patch_size
    window_shape_array = (window_shape, window_shape, image.shape[2])
    window_shape_ref = (window_shape, window_shape)
    patches_array = np.array(
        view_as_windows(image, window_shape_array, step=stride))

    patches_ref = np.array(
        view_as_windows(reference, window_shape_ref, step=stride))

    print('Patches extracted')
    print(patches_array.shape)
    num_row, num_col, p, row, col, depth = patches_array.shape

    print('reshaping')
    patches_array = patches_array.reshape(num_row * num_col, row, col, depth)
    print(patches_array.shape)
    patches_ref = patches_ref.reshape(num_row * num_col, row, col)
    print(patches_ref.shape)

    return patches_array, patches_ref
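A usage sketch under assumed shapes (illustrative, not from the source): a 256x256, 3-channel image and a matching single-channel reference mask, cut into 64x64 patches with stride 32.

import numpy as np
from skimage.util import view_as_windows  # also required inside extract_patches

image = np.random.rand(256, 256, 3).astype('float32')    # H x W x channels
reference = np.random.randint(0, 2, size=(256, 256))     # H x W label mask
patches, patches_ref = extract_patches(image, reference, patch_size=64, stride=32)
# patches.shape     -> (49, 64, 64, 3), i.e. 7 x 7 window positions flattened
# patches_ref.shape -> (49, 64, 64)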
Example #4
def contrast_filter(array):
    s = np.shape(array)
    contrast = 0.125 * np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
    if len(s) == 2:
        return signal.convolve2d(array, contrast, boundary='symm', mode='same')
    if len(s) == 3:
        for k in range(s[0]):
            array[k] = signal.convolve2d(array[k],
                                         contrast,
                                         boundary='symm',
                                         mode='same')
        return array
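The 3x3 kernel above is a scaled Laplacian-style high-pass filter (its coefficients sum to zero), so flat regions map to roughly zero while edges are emphasized. A quick self-contained check, assuming only numpy and scipy:

import numpy as np
from scipy import signal  # required inside contrast_filter

img = np.zeros((8, 8))
img[:, 4:] = 1.0                  # vertical step edge
out = contrast_filter(img)
# Columns far from the edge stay at 0; columns 3 and 4 carry the strongest response.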
Example #5
def solve_model(header_params, params, header_features, features, debugmsg):
    #Extracts each parameter
    fs = params[header_params.index('Fs')]
    rvent = params[header_params.index('Rvent')]
    c = params[header_params.index('C')]
    rins = params[header_params.index('Rins')]
    rexp = rins  # params[4]
    peep = params[header_params.index('PEEP')]
    sp = params[header_params.index('SP')]
    trigger_type = features[header_features.index('Triggertype')]
    trigger_arg = params[header_params.index('Triggerarg')]
    rise_type = features[header_features.index('Risetype')]
    rise_time = params[header_params.index('Risetime')]
    cycle_off = params[header_params.index('Cycleoff')]
    rr = params[header_params.index('RR')]
    pmus_type = features[header_features.index('Pmustype')]
    pp = params[header_params.index('Pp')]
    tp = params[header_params.index('Tp')]
    tf = params[header_params.index('Tf')]
    noise = params[header_params.index('Noise')]
    e2 = params[header_params.index('E2')]
    model = features[header_features.index('Model')]

    # Note: RR and Fs (uppercase) appear to be module-level ranges of possible
    # respiratory rates and sampling frequencies, giving a worst-case signal length.
    expected_len = int(np.floor(180.0 / np.min(RR) * np.max(Fs)) + 1)
    
    # Assigns pmus profile
    pmus = pmus_profile(fs, rr, pmus_type, pp, tp, tf)
    pmus = pmus + peep #adjusts PEEP
    pmus = np.concatenate((np.array([0]), pmus)) #sets the first value to zero

    
    #Unit conversion from cmH2O.s/L to cmH2O.s/mL
    rins = rins / 1000.0
    rexp = rexp / 1000.0
    rvent = rvent / 1000.0


    #Generates time, flow, volume, insex and paw waveforms
    time = np.arange(0, np.floor(60.0 / rr * fs) + 1, 1) / fs
    time = np.concatenate((np.array([0]), time))
    flow = np.zeros(len(time))
    volume = np.zeros(len(time))
    insex = np.zeros(len(time))
    paw = np.zeros(len(time)) + peep #adjusts PEEP
    len_time = len(time)

    #Peak flow detection
    peak_flow = flow[0]
    detect_peak_flow = False

    #Support detection
    detect_support = False
    time_support = -1

    #Expiration detection
    detect_exp = False
    time_exp = -1

    if trigger_type == 'flow':
        # units conversion from L/min to mL/s
        trigger_arg = trigger_arg / 60.0 * 1000.0

    for i in range(1, len(time)):
        # period until the respiratory effort beginning
        if (((trigger_type == 'flow' and flow[i] < trigger_arg) or
             (trigger_type == 'pressure' and paw[i] > trigger_arg + peep) or
             (trigger_type == 'delay' and time[i] < trigger_arg)) and
                (not detect_support) and (not detect_exp)):
            paw[i] = peep
            y0 = volume[i - 1]
            tspan = [time[i - 1], time[i]]
            args = (paw[i], pmus[i], model, c, e2, rins)
            sol = odeint(flow_model, y0, tspan, args=args)
            volume[i] = sol[-1]
            flow[i] = flow_model(volume[i], time[i], paw[i], pmus[i], model, c, e2, rins)
            if debugmsg:
                print('volume[i]= {:.2f}, flow[i]= {:.2f}, paw[i]= {:.2f}, waiting'.format(volume[i], flow[i], paw[i]))

            if (((trigger_type == 'flow' and flow[i] >= trigger_arg) or
                 (trigger_type == 'pressure' and paw[i] <= trigger_arg + peep) or
                 (trigger_type == 'delay' and time[i] >= trigger_arg))):
                detect_support = True
                time_support = time[i+1]
                continue

        # detection of inspiratory effort
        # ventilator starts to support the patient
        elif (detect_support and (not detect_exp)):
            if rise_type == 'step':
                paw[i] = sp + peep
            elif rise_type == 'exp':
                rise_type = rise_type if np.random.random() > 0.01 else 'linear'
                if paw[i] < sp + peep:
                    paw[i] = (1.0 - np.exp(-(time[i] - time_support) / rise_time )) * sp + peep
                if paw[i] >= sp + peep:
                    paw[i] = sp + peep
            elif rise_type == 'linear':
                rise_type = rise_type if np.random.random() > 0.01 else 'exp'
                if paw[i] < sp + peep:
                    paw[i] = (time[i] - time_support) / rise_time * sp + peep
                if paw[i] >= sp + peep:
                    paw[i] = sp + peep

            y0 = volume[i - 1]
            tspan = [time[i - 1], time[i]]
            args = (paw[i], pmus[i], model, c, e2, rins)
            sol = odeint(flow_model, y0, tspan, args=args)
            volume[i] = sol[-1]
            flow[i] = flow_model(volume[i], time[i], paw[i], pmus[i], model, c, e2, rins)
            if debugmsg:
                print('volume[i]= {:.2f}, flow[i]= {:.2f}, paw[i]= {:.2f}, supporting'.format(volume[i], flow[i], paw[i]))

            if flow[i] >= flow[i - 1]:
                peak_flow = flow[i]
                detect_peak_flow = False
            elif flow[i] < flow[i - 1]:
                detect_peak_flow = True

            if (flow[i] <= cycle_off * peak_flow) and detect_peak_flow and i<len_time:
                detect_exp = True
                time_exp = i+1    
                try:
                    paw[i + 1] = paw[i]
                except IndexError:
                    pass

        elif detect_exp:
            if rise_type == 'step':
                paw[i] = peep
            elif rise_type == 'exp':
                if paw[i - 1] > peep:
                    paw[i] = sp * (np.exp(-(time[i] - time[time_exp-1]) / rise_time )) + peep
                if paw[i - 1] <= peep:
                    paw[i] = peep
            elif rise_type == 'linear':
                rise_type = rise_type if np.random.random() > 0.01 else 'exp'
                if paw[i - 1] > peep:
                    paw[i] = sp * (1 - (time[i] - time[time_exp-1]) / rise_time) + peep
                if paw[i - 1] <= peep:
                    paw[i] = peep

            y0 = volume[i - 1]
            tspan = [time[i - 1], time[i]]
            args = (paw[i], pmus[i], model, c, e2, rexp + rvent)
            sol = odeint(flow_model, y0, tspan, args=args)
            volume[i] = sol[-1]
            flow[i] = flow_model(volume[i], time[i], paw[i], pmus[i], model, c, e2, rexp + rvent)
            if debugmsg:
                print('volume[i]= {:.2f}, flow[i]= {:.2f}, paw[i]= {:.2f}, exhaling'.format(volume[i], flow[i], paw[i]))

    #Generates InsEx trace
    if time_exp > -1:
        insex = np.concatenate((np.ones(time_exp), np.zeros(len(time) - time_exp)))

    #Drops the first element
    flow = flow[1:] / 1000.0 * 60.0  # converts back to L/min
    volume = volume[1:]
    paw = paw[1:]
    pmus = pmus[1:] - peep  # readjust PEEP again
    insex = insex[1:]

    flow, volume, pmus, insex, paw = generate_cycle(expected_len, flow, volume, pmus, insex, paw, peep=peep)

    # paw = generate_cycle(expected_len, paw, peep=peep)[0]

    flow, volume, paw, pmus, insex = generate_noise(noise, flow, volume, paw, pmus, insex)

    # plt.plot(flow)
    # plt.plot(volume)
    # plt.plot(paw)
    # plt.plot(pmus)
    # plt.show()

    return flow, volume, paw, pmus, insex, rins, rexp, c
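For orientation, a hedged sketch of the kind of single-compartment equation of motion that a flow_model called with (paw, pmus, model, c, e2, r) typically integrates. This is an assumption about the general model family, not the project's actual flow_model; sign conventions and the meaning of the model flag may differ.

def flow_model_sketch(volume, t, paw, pmus, model, c, e2, r):
    # Elastic recoil pressure: linear term V/C plus an optional quadratic term e2*V**2.
    p_el = volume / c + e2 * volume ** 2
    # Equation of motion: paw + pmus = p_el + r * dV/dt, solved here for dV/dt (the flow).
    return (paw + pmus - p_el) / r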
Example #6
File: sbn.py Project: ysmiraak/lgm
        return sess.run(self.v, feed_dict= {self.a: a})

    def gen(self, sess):
        while True: yield sess.run(self.v)


if False:
    from utils import mnist
    batchit = mnist(batch_size= 100, ds= 'train', with_labels= False, binary= True)

    sbn = Sbn((784, 210, 56, 15, 4), samples= 100)
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # rm -r log/sbn
    # tf.summary.FileWriter("log/sbn", sess.graph).close()
    # tf.reset_default_graph()
    # sess.close()

    with tf.summary.FileWriter("log/sbn") as wtr:
        sbn.fit(sess, wtr, batchit, lr= 0.01, steps= 600000, step_plot= 60000)

    plot = plot_fn('gen')
    b = 0, 1
    q = np.array(list(product(b, b, b, b)), dtype= np.bool)
    for n, a in enumerate((np.tile(q, (100, 1)) for q in q)):
        with tf.summary.FileWriter("log/sbn/res{:02d}".format(n)) as wtr:
            plot(sess, wtr, sbn.ans(sess, a), sbn.step)

    tf.train.Saver().save(sess, "./models/sbn")
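For clarity, the product/tile idiom near the end enumerates every binary code of the 4-unit top layer and repeats each code 100 times, so each code yields a batch of generated samples. A standalone restatement (using plain bool, since np.bool is deprecated in recent NumPy):

from itertools import product
import numpy as np

b = 0, 1
codes = np.array(list(product(b, b, b, b)), dtype=bool)  # (16, 4): all top-layer codes
batches = [np.tile(code, (100, 1)) for code in codes]    # one (100, 4) batch per code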
Example #7
    aux = [
        elem[0] * 60 / 1000 for index, elem in enumerate(annots['flow'])
        if index % offset == 0
    ]
else:
    aux = [
        elem[0] * 60 / 1000 for index, elem in enumerate(annots['flowLmin'])
        if index % offset == 0
    ]
flow = []
for index in range(len(aux) // 901):
    flow.append(
        aux[index * 901:(index + 1) *
            901])  #- np.percentile(np.abs(aux[index*901:(index+1)*901]),15))
# print(len(annots['volint'])) 489998
flow = np.array(flow)

# get_peep(annots)

# print(min(flow[0,:]),max(flow[0,:]))

#print(annots['exp_mark']) #nan

#print(annots['fs']) #[[512]]

#print(annots['ins']) #Line 1 -> 352

#print(annots['ins_mark']) #nan

# print(annots['paw']) # Good
Example #8
print('RGB image')
# print(img_train)
print(img_train.shape)
# img_train = img_train.transpose((1, 2, 0))
print(img_train.shape)
# print(img_train)
# from skimage.transform import resize
print('HSV image')
hsv_patch = cv2.cvtColor(img_train, cv2.COLOR_RGB2HSV)
print(hsv_patch.shape)
plt.imshow(hsv_patch)
plt.show()
# hsv_patch = hsv_patch[:, :, 2]
for i in range(3):
    print(f'Img train max hsv {i}: {hsv_patch[:, :, i].max()}, Img train min hsv: {hsv_patch[:, :, i].min()}')
hsv_patch = hsv_patch / np.array([179, 255, 255])
for i in range(3):
    print(f'Img norm max hsv {i}: {hsv_patch[:, :, i].max()}, Img norm min hsv: {hsv_patch[:, :, i].min()}')
# img_train = cv2.resize(img_train, (500, 500), cv2.INTER_AREA)
# hsv_patch = resize(hsv_patch, (500, 500))
# #cv2.imshow('test', img_train)
# print('image shown')
#cv2.waitKey(0)
# print(img_train_normalized.shape)

# Load reference
img_train_ref_path = 'Reference_Train.tif'
# img_train_ref_path = 'Image_Train_ref.jpeg'
print(os.path.join(root_path, img_train_ref_path))
img_train_ref = load_tiff_image(os.path.join(root_path, img_train_ref_path))
img_train_ref = img_train_ref.transpose((1, 2, 0))
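A note on the normalization above: with 8-bit input, OpenCV's cvtColor stores hue in 0-179 and saturation/value in 0-255, which is why the patch is divided by [179, 255, 255]. A minimal self-contained check:

import cv2
import numpy as np

rgb = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
assert hsv[..., 0].max() <= 179             # uint8 hue channel never exceeds 179
hsv_norm = hsv / np.array([179, 255, 255])  # all three channels now lie in [0, 1]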
Example #9
    #print('test case %d'% (i+1))
    err_nmsre.append(
        np.sqrt(np.sum((pmus - pmus_hat)**2)) /
        np.sqrt(np.sum((pmus - np.average(pmus))**2)))

# r_squared_error = np.average([(r[0]-r[1])**2 for r in err_r])
# r_error = np.average([r[1] for r in err_r])
# r_hat_error = np.average([r[0] for r in err_r])
# nmse_r = r_squared_error/r_error/r_hat_error

# c_squared_error = np.average([(c[0]-c[1])**2 for c in err_c])
# c_error = np.average([c[1] for c in err_c])
# c_hat_error = np.average([c[0] for c in err_c])
# nmse_c = c_squared_error/c_error/c_hat_error

err_pmus = np.array(err_pmus)
err_pmus_hat = np.array(err_pmus_hat)

nrmse = np.sqrt(np.sum((err_pmus - err_pmus_hat)**2)) / np.sqrt(
    np.sum((err_pmus - np.average(err_pmus))**2))

print(nrmse)
print(np.average(err_nmsre))
print(np.std(err_nmsre))
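The values printed above are normalized root-mean-square errors; a minimal standalone version of the metric as computed here:

import numpy as np

def nrmse(p, p_hat):
    # Residual norm divided by the norm of p about its own mean, as in the sums above.
    return np.sqrt(np.sum((p - p_hat) ** 2)) / np.sqrt(np.sum((p - np.average(p)) ** 2))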

# rc_squared_error = np.average([(rc[0]-rc[1])**2 for rc in err_rc])
# rc_error = np.average([rc[1] for rc in err_rc])
# rc_hat_error = np.average([rc[0] for rc in err_rc])
# nmse_rc = rc_squared_error/rc_error/rc_hat_error

# plt.figure()
plt.imsave(os.path.join(args.output_path, 'pred_seg_reconstructed.jpeg'),
           img_reconstructed_rgb)

# Visualize inference per class
if args.use_multitasking:

    for i in range(len(patches_test)):
        print(f'Patch: {i}')
        # Plot predictions for each class and each task; each row corresponds to a
        # class and shows its predictions for each task
        fig1, axes = plt.subplots(nrows=args.num_classes,
                                  ncols=7,
                                  figsize=(15, 10))
        img = patches_test[i]
        img = (img * np.array([255, 255, 255])).astype(np.uint8)
        img_ref = patches_test_ref[i]
        img_ref_h = tf.keras.utils.to_categorical(img_ref, args.num_classes)
        bound_ref_h = get_boundary_label(img_ref_h)
        dist_ref_h = get_distance_label(img_ref_h)
        # Put the first plot as the patch to be observed on each row
        for n_class in range(args.num_classes):
            axes[n_class, 0].imshow(img)
            # Loop over the columns to display each task's prediction and reference
            # Remember we are not displaying color preds here, since that task
            # does not use classes
            # Multiply by 2 because it's always pred and ref side by side
            for task in range(len(patches_pred) - 1):
                task_pred = patches_pred[task]
                col_ref = (task + 1) * 2
                print(task_pred.shape)