Example #1
import numpy as np
import SimpleITK as sitk
from tqdm import tqdm
from skimage.transform import resize  # used by the second data_gen below
# normalize and label_converter are project-local helpers assumed to be in scope

def data_gen(data_paths, mask_paths):
    """
    Load all training PET images and their corresponding mask images.
    :param data_paths: paths to the PET images
    :param mask_paths: paths to the mask images
    :return: batched PET images and batched mask images as NumPy arrays
    """
    no_samples = len(data_paths)
    pet_imgs = np.zeros(shape=(no_samples, 1, 96, 96, 96), dtype=np.float32)   # change patch shape if necessary
    mask_imgs = np.zeros(shape=(no_samples, 1, 96, 96, 96), dtype=np.float32)
    for i, (pet_path, mask_path) in tqdm(enumerate(zip(data_paths, mask_paths)), total=no_samples):
        pet = sitk.GetArrayFromImage(sitk.ReadImage(pet_path))
        mask = sitk.GetArrayFromImage(sitk.ReadImage(mask_path))
        # add a channel dimension so each array has shape (1, 96, 96, 96)
        pet = np.expand_dims(pet, axis=0)
        mask = np.expand_dims(mask, axis=0)

        # store the image in the batch array
        pet_imgs[i] = pet
        mask_imgs[i] = mask

    # Normalize data and convert label value to either 1 or 0
    pet_imgs = normalize(pet_imgs)
    mask_imgs = label_converter(mask_imgs)

    print("Loading and Process Complete!")
    return pet_imgs, mask_imgs
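A minimal usage sketch, assuming the PET and mask path lists are already paired index-by-index (the file names are illustrative, not from the original):

# Illustrative file names; any SimpleITK-readable format works
pet_paths = ["pet_001.nii.gz", "pet_002.nii.gz"]
mask_paths = ["mask_001.nii.gz", "mask_002.nii.gz"]
pet_imgs, mask_imgs = data_gen(pet_paths, mask_paths)
print(pet_imgs.shape)   # (2, 1, 96, 96, 96)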
def get_data(data_path):
    """
    Load the image arrays for a (pet_path, ct_path, mask_path) triple.
    :param data_path: tuple of (pet_path, ct_path, mask_path)
    :return: a list [pet_img, ct_img, mask] of the corresponding image arrays
    """
    pet_path, ct_path, mask_path = data_path
    pet_img = sitk.GetArrayFromImage(sitk.ReadImage(pet_path))
    ct_img = sitk.GetArrayFromImage(sitk.ReadImage(ct_path))
    mask = sitk.GetArrayFromImage(sitk.ReadImage(mask_path))
    mask = label_converter(mask)
    return [pet_img, ct_img, mask]
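A usage sketch for get_data, assuming the three paths are bundled as one tuple (the paths are illustrative):

triple = ("pet_001.nii.gz", "ct_001.nii.gz", "mask_001.nii.gz")  # illustrative paths
pet_img, ct_img, mask = get_data(triple)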
def data_gen(data_paths, mask_paths):
    """
    Load all training images and mask images, resizing each to the patch shape.
    :param data_paths: paths to the input images
    :param mask_paths: paths to the mask images
    :return: batched images and batched mask images as NumPy arrays
    """

    no_samples = len(data_paths)
    imgs = np.zeros(shape=(no_samples, 1, 30, 30, 30),
                    dtype=np.float32)  # change patch shape if necessary
    mask_imgs = np.zeros(shape=(no_samples, 1, 30, 30, 30), dtype=np.float32)
    for i, (img_path, mask_path) in tqdm(enumerate(zip(data_paths,
                                                       mask_paths)),
                                         total=no_samples):
        img = sitk.GetArrayFromImage(sitk.ReadImage(img_path))
        mask = sitk.GetArrayFromImage(sitk.ReadImage(mask_path))
        # no expand_dims needed here; the channel axis is filled explicitly below

        # resize to the patch shape and store in the batch arrays
        img = resize(img, (30, 30, 30), mode='constant', preserve_range=True)
        mask = resize(mask, (30, 30, 30), mode='constant', preserve_range=True)

        imgs[i, 0, :, :, :] = img
        mask_imgs[i, 0, :, :, :] = mask

    # Normalize data and convert label values to either 1 or 0
    imgs = imgs / 255.
    mask_imgs = label_converter(mask_imgs)

    ## THE BRAIN-SECTION CROP SHOULD GO HERE

    print("Loading and Process Complete!")
    return imgs, mask_imgs
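The comment above marks a still-missing brain-section crop. One possible sketch, assuming the region of interest is the bounding box of voxels above an intensity threshold; the helper name crop_brain_region and the threshold are illustrative, not part of the original code:

def crop_brain_region(volume, threshold=0.0):
    # Crop a 3D volume to the bounding box of voxels above the threshold
    coords = np.argwhere(volume > threshold)
    if coords.size == 0:
        return volume  # nothing above threshold; leave the volume unchanged
    z0, y0, x0 = coords.min(axis=0)
    z1, y1, x1 = coords.max(axis=0) + 1
    return volume[z0:z1, y0:y1, x0:x1]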
Example #4
import os
import json
import torch

from test import test
from dataset import dataset, data_prefetcher
from utils import label_converter, OneHot_encoder

os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
os.environ['CUDA_VISIBLE_DEVICES'] = "4"


opt = {"seed": 44, "img_size": (64, 84), "max_len": 94, "num_epoch": 100,
       "batch_size": 80, "lr": 0.1, "grad_clip": 5, "model_name": "TransformerBase",
       "train_stage": "1.0", "previous_stage": None, "split_ratio": 0.9,
       "optimizer": "Adam"}

with open("./vocab.json", "r") as f:
    CH_char = json.load(f)
class_num = len(CH_char) + 2
converter = label_converter("".join(CH_char))
OneHot = OneHot_encoder(class_num)

def time_interval(second):
    """Format a duration in seconds as hh:mm:ss:ms."""
    s, ms = divmod(int(1000 * second), 1000)
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)

    return "%02d:%02d:%02d:%03d" % (h, m, s, ms)

def make_model(class_num=class_num,
               SeqLen=int(opt["img_size"][0] * opt["img_size"][1] / 4 ** 2),
               d_model=512, d_ff=2048, h=8,
               EncodeLayer_num=2, DecodeLayer_num=1, dropout=0.1):
    # feature extractor (ResNet_FeatureExtractor is defined elsewhere in the project)
    features_extractor = ResNet_FeatureExtractor(1, d_model)
    # position encoder
    pe = torch.zeros(SeqLen, d_model)
    position = torch.arange(0, SeqLen).float().unsqueeze(dim=1)
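The snippet ends mid-function; the last two lines begin the standard sinusoidal positional encoding from "Attention Is All You Need". A self-contained sketch of that encoding, assuming this is what the truncated code goes on to compute:

import math
import torch

def sinusoidal_pe(seq_len, d_model):
    # PE(pos, 2i) = sin(pos / 10000^(2i/d_model)), PE(pos, 2i+1) = cos(...)
    pe = torch.zeros(seq_len, d_model)
    position = torch.arange(0, seq_len).float().unsqueeze(dim=1)
    div_term = torch.exp(torch.arange(0, d_model, 2).float()
                         * -(math.log(10000.0) / d_model))
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    return pe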