Example #1
def load_data(params, dataset_pickle_file='notMNIST.pickle'):

    with open(dataset_pickle_file, 'rb') as f:
        save = pickle.load(f)
        train_dataset = save['train_dataset']
        train_labels = save['train_labels']
        valid_dataset = save['valid_dataset']
        valid_labels = save['valid_labels']
        test_dataset = save['test_dataset']
        test_labels = save['test_labels']
        del save  # hint to help gc free up memory

        train_dataset, train_labels = utils.reformat(train_dataset,
                                                     train_labels, params)
        valid_dataset, valid_labels = utils.reformat(valid_dataset,
                                                     valid_labels, params)
        test_dataset, test_labels = utils.reformat(test_dataset, test_labels,
                                                   params)

        print('---- Input data shapes: -----')
        print('Training set', train_dataset.shape, train_labels.shape)
        print('Validation set', valid_dataset.shape, valid_labels.shape)
        print('Test set', test_dataset.shape, test_labels.shape)

        return [
            train_dataset, train_labels, valid_dataset, valid_labels,
            test_dataset, test_labels
        ]
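The utils.reformat helper is not shown in this snippet. A minimal sketch of what it typically does for the notMNIST pickle (flatten each image and one-hot encode the labels), assuming params exposes image_size and num_labels attributes (both names are guesses, not from the original):

import numpy as np

def reformat(dataset, labels, params):
    # Hypothetical helper: flatten each image into a vector and one-hot encode the labels.
    dataset = dataset.reshape(
        (-1, params.image_size * params.image_size)).astype(np.float32)
    labels = (np.arange(params.num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels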
Example #2
def showresults(request):
    elapsedtime = time.time()  # record the start time; elapsed time is computed below
    context = setConstants(request, im)
    try:
        status = request.GET['status']
    except KeyError:
        status = 'showfile'
    filename = request.GET['filename']
    context['filename'] = filename
    context['jobstatus'] = status
    with open(getJobfile(filename), "rb") as f:
        filecontent = f.read()
    if status == 'showmedia':
        context['derivativegrid'] = 'Medium'
        context['sizegrid'] = '240px'
        context['imageserver'] = prmz.IMAGESERVER
        context['items'] = rendermedia(filecontent)
    elif status == 'showinportal':
        pass
    else:
        context['filecontent'] = reformat(filecontent)
    elapsedtime = time.time() - elapsedtime
    context = setContext(context, elapsedtime)

    return render(request, 'uploadmedia.html', context)
Example #3
def warp_flow(img, flow):
    # Warp img backwards along the given optical-flow field.
    img = np.float32(reformat(img))
    h, w = flow.shape[:2]
    # Turn the relative flow vectors into absolute sampling coordinates for cv2.remap.
    flow = -flow
    flow[:, :, 0] += np.arange(w)
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    # res = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
    return res
Example #4
def opticalflow(img1, img2):
    b, c, h, w = img1.shape
    # examine(img1, 'img1 before reformat')

    img1 = np.float32(reformat(img1))
    img2 = np.float32(reformat(img2))
    # examine(img1, 'img1 after reformat')

    prev = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    nxt = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
    # examine(prev, 'prev = img1 in grayscale')

    flow = cv2.calcOpticalFlowFarneback(
        prev,
        nxt,
        flow=None,
        pyr_scale=0.5,
        levels=3,  # wb 1?
        winsize=15,
        iterations=3,  # wb 2?
        poly_n=5,
        poly_sigma=1.2,  # wb 1.1?
        flags=0)
    # examine(flow, 'flow:')

    hsv = np.zeros_like(img1, np.uint8)
    hsv[:, :, 1] = 255
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # examine(hsv, 'final hsv:')

    rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    rgb = np.float32(rgb)
    rgb = array_to_torch(rgb)
    # examine(rgb, 'rgb final')
    return rgb, flow
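Neither reformat nor array_to_torch is defined in these optical-flow snippets. Judging from how they are used (a CxHxW torch tensor in [0, 1] goes in, an HxWxC NumPy image comes out, and array_to_torch goes back the other way), a plausible sketch, offered only as an assumption, is:

import numpy as np
import torch

def reformat(img):
    # Hypothetical helper: 1xCxHxW (or CxHxW) torch tensor in [0, 1] -> HxWxC NumPy array in [0, 255].
    if isinstance(img, torch.Tensor):
        img = img.detach().cpu().numpy()
    img = np.squeeze(img)                # drop a leading batch dimension, if any
    img = np.transpose(img, (1, 2, 0))   # CHW -> HWC
    return img * 255.0

def array_to_torch(arr):
    # Hypothetical inverse: HxWxC NumPy image -> 1xCxHxW float tensor.
    arr = np.transpose(arr, (2, 0, 1))
    return torch.from_numpy(arr).float().unsqueeze(0)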
Example #5
def recognize(im):
    width, height = im.size
    change_point_y = get_change_point_y(im)
    if len(change_point_y) < 1:
        bottom = height
    else:
        bottom = change_point_y[-1]

    t_im = im.crop((0.05 * width, 0.18 * height, 0.95 * width, bottom))  # x0, y0, x1, y1
    # t_im.show()
    t_str = pytesseract.image_to_string(t_im, lang="chi_sim", config="-psm 6")
    t_str = reformat(t_str)[2:]

    if t_str.count("\n") > 3:
        t_str = t_str.replace("\n", "", 1).replace("\n", " ")
    else:
        t_str = t_str.replace("\n", " ")

    return t_str
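A minimal usage sketch for the recognize helper above, assuming Pillow is installed and the screenshot lives in a hypothetical screenshot.png:

from PIL import Image

im = Image.open('screenshot.png')  # hypothetical input file
print(recognize(im))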
Example #6
def main():
    args = parse_args()
    try:
        LOCAL_FLAG = True
        with open(args.input, 'r', encoding='utf8') as fp:
            result = reformat(fp, ABP_ALLOW_FMT)
    except IOError:
        LOCAL_FLAG = False
        print('Unable to open rule file, trying to get online list...')
        result = get_online_list(ABP_ALLOW_FMT)

    if LOCAL_FLAG:
        last_modified = datetime.utcfromtimestamp(os.path.getmtime(args.input))
    else:
        last_modified = datetime.utcnow()
    with open(args.output, mode='w', encoding='utf8', newline='\n') as fp:
        fp.write(f'! Last Modified: {last_modified.isoformat()}\n\n')
        fp.writelines(result)
    print('Done!')
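parse_args is not shown in this example. A minimal sketch of what it might look like, assuming plain --input/--output path options (the option names and defaults are assumptions, not taken from the original):

import argparse

def parse_args():
    # Hypothetical argument parser for the allow-list formatter above.
    parser = argparse.ArgumentParser(description='Reformat an ABP allow list.')
    parser.add_argument('--input', default='rules.txt', help='local rule file to read')
    parser.add_argument('--output', default='allowlist.txt', help='file to write the result to')
    return parser.parse_args()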
Example #7
def recognize(im):
    width, height = im.size
    change_point_y = get_change_point_y(im)
    if len(change_point_y) < 1:
        bottom = height
    else:
        bottom = change_point_y[-1]

    t_im = im.crop(
        (0.05 * width, 0.18 * height, 0.95 * width, bottom))  # x0, y0, x1, y1
    # t_im.show()
    t_str = pytesseract.image_to_string(t_im, lang="chi_sim", config="-psm 6")
    t_str = reformat(t_str)[2:]

    if t_str.count("\n") > 3:
        t_str = t_str.replace("\n", "", 1).replace("\n", " ")
    else:
        t_str = t_str.replace("\n", " ")

    return t_str
Example #8
assert current_folder == '/Users/futianfan/Downloads/Gatech_Courses/mimic_text/'
assert data_folder == '/Users/futianfan/Downloads/Gatech_Courses/mimic_text/data'
assert mimic3_folder == '/Users/futianfan/Downloads/Gatech_Courses/mimic_text/data/mimic3'
'''
STEPS 1-5 build notes_labeled.csv
'''

#### 1. combine procedure.csv and diagnosis.csv => ALL_CODES.csv

t1 = time()
proc_file = pd.read_csv('{}/PROCEDURES_ICD.csv'.format(mimic3_folder))
diag_file = pd.read_csv('{}/DIAGNOSES_ICD.csv'.format(mimic3_folder))
proc_file['absolute_code'] = proc_file.apply(
    lambda row: str(utils.reformat(str(row[4]), False)), axis=1)
diag_file['absolute_code'] = diag_file.apply(
    lambda row: str(utils.reformat(str(row[4]), True)), axis=1)
allcodes = pd.concat([diag_file, proc_file])
allcodes.to_csv(
    '%s/ALL_CODES.csv' % mimic3_folder,
    index=False,
    columns=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'SEQ_NUM', 'absolute_code'],
    header=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'SEQ_NUM', 'ICD9_CODE'])

df = pd.read_csv('%s/ALL_CODES.csv' % mimic3_folder, dtype={"ICD9_CODE": str})
leng = len(df['ICD9_CODE'].unique())  ### 8994
print('ALL_CODES has {} different ICD codes'.format(leng))
print('STEP 1. combining procedure.csv and diagnosis.csv takes {} seconds'.
      format(int(time() - t1)))
### 68 seconds
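utils.reformat itself is not shown here. In MIMIC-III preprocessing pipelines of this kind, the helper usually re-inserts the decimal point that the raw PROCEDURES_ICD/DIAGNOSES_ICD tables strip from ICD-9 codes. A sketch of that common implementation, offered as an assumption rather than the exact code behind Examples #8, #11, and #12:

def reformat(code, is_diag):
    # Put the period back into a raw ICD-9 code (assumed behaviour):
    # diagnosis codes get it after the third character (fourth for E-codes),
    # procedure codes after the second.
    code = ''.join(code.split('.'))
    if is_diag:
        if code.startswith('E'):
            if len(code) > 4:
                code = code[:4] + '.' + code[4:]
        elif len(code) > 3:
            code = code[:3] + '.' + code[3:]
    elif len(code) > 2:
        code = code[:2] + '.' + code[2:]
    return code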
Example #9
    img_shape = (640, 360)
    videonames = ['output1.mp4', '9_17_s.mp4', '22_26_s.mp4']
    transform = transforms.ToTensor()
    loader = get_loader(1,
                        data_path,
                        img_shape,
                        transform,
                        video_list=videonames,
                        frame_nb=10,
                        shuffle=False)

    for idx, frames in enumerate(loader):
        print(idx, len(frames))
        f1, f2 = frames[2], frames[3]
        examine(f1, 'f1 raw')
        f1 = reformat(f1)
        f2 = reformat(f2)
        examine(f1, 'f1 reformatted')

        f1 = np.float32(f1)  # / 255)
        f2 = np.float32(f2)  # / 255)
        examine(f1, 'im1 intermediate')
        examine(f2, 'im2 intermediate')
        im1 = cv2.cvtColor(f1, cv2.COLOR_RGB2GRAY)
        im2 = cv2.cvtColor(f2, cv2.COLOR_RGB2GRAY)
        examine(im1, 'im1 ready')
        examine(im2, 'im2 ready')

        flow = cv2.calcOpticalFlowFarneback(im1, im2, None, 0.5, 3, 15, 3, 5,
                                            1.2, 0)
        print('OF done!')
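The examine helper used in these snippets is also not defined; it appears to be a debugging aid that prints a label together with basic array information. A minimal sketch under that assumption:

def examine(x, label):
    # Hypothetical debug helper: print a label plus the shape and dtype of x, when available.
    print(label, getattr(x, 'shape', None), getattr(x, 'dtype', None))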
Example #10
        out = cv2.VideoWriter('output_test{}.mp4'.format(count+8), fourcc, 20.0, (2*width, height))
        print(len(frames))
        for i in range(10):
            print('{}/{}'.format(i, len(frames)))
            t1 = time.time()

            # get frame
            frame = frames[i]
            if t.gpu:
                frame = frame.cuda()

            # get processed image
            output = style_transfer(frame, t)

            # convert image types
            frame = reformat(frame)
            output = reformat(output)

            # concatenate images
            img = np.concatenate((frame, output), axis=1)

            # save img
            out.write(np.uint8(img))

            t2 = time.time()
            #print('{0:.3f} s'.format(t2-t1))
        
        # cleaning things
        video.release()
        out.release()
        count += 1
Example #11
import csv
import operator
import pandas as pd
from options import args
from utils import build_vocab, word_embeddings, fasttext_embeddings, gensim_to_fasttext_embeddings, gensim_to_embeddings, \
    reformat, write_discharge_summaries, concat_data, split_data



Y = 'full'
notes_file = '%s/NOTEEVENTS.csv' % args.MIMIC_3_DIR

# step 1: process code-related files
dfproc = pd.read_csv('%s/PROCEDURES_ICD.csv' % args.MIMIC_3_DIR)
dfdiag = pd.read_csv('%s/DIAGNOSES_ICD.csv' % args.MIMIC_3_DIR)

dfdiag['absolute_code'] = dfdiag.apply(lambda row: str(reformat(str(row[4]), True)), axis=1)
dfproc['absolute_code'] = dfproc.apply(lambda row: str(reformat(str(row[4]), False)), axis=1)

dfcodes = pd.concat([dfdiag, dfproc])


dfcodes.to_csv('%s/ALL_CODES.csv' % args.MIMIC_3_DIR, index=False,
           columns=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'SEQ_NUM', 'absolute_code'],
           header=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'SEQ_NUM', 'ICD9_CODE'])

df = pd.read_csv('%s/ALL_CODES.csv' % args.MIMIC_3_DIR, dtype={"ICD9_CODE": str})
print("unique ICD9 code: {}".format(len(df['ICD9_CODE'].unique())))

# step 2: process notes
min_sentence_len = 3
disch_full_file = write_discharge_summaries("%s/disch_full.csv" % args.MIMIC_3_DIR, min_sentence_len, '%s/NOTEEVENTS.csv' % (args.MIMIC_3_DIR))
Example #12
from collections import Counter
import csv
import operator
import pandas as pd
from utils.options import args
from utils import build_vocab, word_embeddings, fasttext_embeddings, gensim_to_fasttext_embeddings, gensim_to_embeddings, \
    reformat, write_discharge_summaries, concat_data, split_data

Y = 'full'
notes_file = '%s/NOTEEVENTS.csv' % args.MIMIC_3_DIR

# step 1: process code-related files
dfproc = pd.read_csv('%s/PROCEDURES_ICD.csv' % args.MIMIC_3_DIR)
dfdiag = pd.read_csv('%s/DIAGNOSES_ICD.csv' % args.MIMIC_3_DIR)

dfdiag['absolute_code'] = dfdiag.apply(
    lambda row: str(reformat(str(row[4]), True)), axis=1)
dfproc['absolute_code'] = dfproc.apply(
    lambda row: str(reformat(str(row[4]), False)), axis=1)

dfcodes = pd.concat([dfdiag, dfproc])

dfcodes.to_csv(
    '%s/ALL_CODES.csv' % args.MIMIC_3_DIR,
    index=False,
    columns=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'SEQ_NUM', 'absolute_code'],
    header=['ROW_ID', 'SUBJECT_ID', 'HADM_ID', 'SEQ_NUM', 'ICD9_CODE'])

df = pd.read_csv('%s/ALL_CODES.csv' % args.MIMIC_3_DIR,
                 dtype={"ICD9_CODE": str})
print("unique ICD9 code: {}".format(len(df['ICD9_CODE'].unique())))