def setup_software(download_dataset=True):
    """Enable the software demo: install deps, fetch utility modules,
    the train/test dataset and the pretrained weights.

    Args:
        download_dataset: when True (the default, preserving the original
            unconditional behavior), download the train/test zips from
            Google Drive. The original had a leftover commented-out
            ``#if download_dataset:`` guard; this parameter restores it.
    """
    setup_general.setup_general()
    os.system("pip install -q albumentations==0.5.0")
    print("Libraries Installed!")
    # Project utility modules on GitHub -> local destination paths.
    utility_files = (
        ("project/torch_utils.py", "utils/torch_utils.py"),
        ("project/layers.py", "utils/layers.py"),
        ("project/train_utils.py", "utils/train_utils.py"),
        ("project/visualization_utils.py", "utils/visualization_utils.py"),
        ("project/lung_segment.py", "utils/lung_segment.py"),
    )
    for remote, local in utility_files:
        setup_general.download_github_content(remote, local)
    # Imported here because utils/general.py only exists after the
    # downloads above.
    from utils import general as gen
    if download_dataset:
        train_id = "1zaucizp_3iy_Tlk4NNfNqEtP25qcSKLl"
        test_id = "1uqMqdxDmBeQNu-Zziaa02Yoa6IBsMCDk"
        gen.download_file_from_google_drive(train_id, "train_data.zip", size=1.96e6)
        gen.download_file_from_google_drive(test_id, "test_data.zip", size=805.2e3)
        print("Dataset Downloaded Successfully")
    software_model_id = "1ypjPbKDGfVtnJdgJnxhkpZecS7dyl2fr"
    print("gonna get me some models!")
    gen.download_file_from_google_drive(
        software_model_id, "weights_software.pt", dst="./models", size=120e3
    )
    print("Util Functions Downloaded Successfully")
    print("Software Demo Enabled Successfully")
def setup_autoencoder(architecture="autoencoder"):
    """Download autoencoder layers and training utilities from GitHub.

    Args:
        architecture: an architecture name (or collection of names); the
            autoencoder module is fetched when "autoencoder" appears in it.
    """
    os.makedirs("train_utils", exist_ok=True)
    # Create an empty __init__.py so "train_utils" is importable as a
    # package. (The original also called f.close() inside the with-block,
    # which is redundant — the context manager closes the file.)
    with open("train_utils/__init__.py", "wb"):
        pass
    setup_general.setup_general()
    # Shared layers / metrics / training loop, fetched for every architecture.
    shared_files = (
        ("pytorch_utils/general_layers.py", "utils/general_layers.py"),
        ("pytorch_utils/train_utils/metrics.py", "train_utils/metrics.py"),
        ("pytorch_utils/train_utils/seg_train_loops.py", "train_utils/train_loop.py"),
        ("pytorch_utils/autoencoder_architectures/layers.py", "utils/layers.py"),
    )
    for remote, local in shared_files:
        setup_general.download_github_content(remote, local)
    print("Layers and utils enabled")
    # The original tested `architecture == "autoencoder" or "autoencoder" in
    # architecture`; the membership test alone subsumes equality (substring
    # hit for strings, element hit for collections).
    if "autoencoder" in architecture:
        setup_general.download_github_content(
            "pytorch_utils/autoencoder_architectures/autoencoder.py",
            "utils/autoencoder.py",
        )
        print("Autoencoder Enabled")
def setup_extra_workshop(download_dataset=True):
    """Enable the extra workshop: fetch its utility module and,
    optionally, the KiTS dataset.

    Args:
        download_dataset: when True (default), download kits.zip from
            Google Drive.
    """
    setup_general.setup_general()
    setup_general.download_github_content(
        "ISIS_4825/ML/Taller_Extra/extra_utils.py", "utils/extra_utils.py"
    )
    print("Util Functions Downloaded Successfully")
    if download_dataset:
        # Imported lazily: utils/general.py exists only after setup_general.
        from utils import general as gen
        gen.download_file_from_google_drive(
            "0B0vscETPGI1-TE5KWFgxaURubFE", "kits.zip", size=4.27e6
        )
        print("Dataset Downloaded")
    print("Extra Workshop Enabled Successfully")
def setup_workshop_12(pretrained=True, download_dataset=True):
    """Enable workshop 12: TF utilities, the DeepWeeds dataset and a
    pretrained Inception network.

    Args:
        pretrained: when True (default), download inception.h5 into models/.
        download_dataset: when True (default), download the images and
            labels of the DeepWeeds dataset.
    """
    setup_general.setup_general()
    setup_general.download_github_content(
        "ISIS_4825/ML/Taller_12/tf_utils.py", "utils/tf_utils.py"
    )
    print("Util Functions Downloaded Successfully")
    # Imported after setup_general so utils/general.py is present.
    from utils import general as gen
    if download_dataset:
        gen.download_file_from_google_drive(
            "1xnK3B6K6KekDI55vwJ0vnc2IGoDga9cj", "images.zip", size=491e3
        )
        gen.download_content(
            "https://github.com/AlexOlsen/DeepWeeds/raw/master/labels/labels.csv",
            "labels.csv",
            "data",
        )
        print("Dataset Downloaded Successfully")
    if pretrained:
        gen.download_file_from_google_drive(
            "1Us6XwnpyWYCCY2wRzBTr9ep6kRXcL-jz",
            "inception.h5",
            dst="models",
            size=89e3,
        )
        print("Pretrained Networks Downloaded Successfully")
    print("Workshop 12 Enabled Successfully")
def setup_workshop_13(download_dataset=True, pretrained=True, brats=False):
    """Enable workshop 13: torch utilities, the workshop dataset,
    pretrained weights and (optionally) the BraTS dataset.

    Args:
        download_dataset: when True (default), fetch train/test zips.
        pretrained: when True (default), fetch autoencoder and U-Net weights.
        brats: when True, additionally fetch the two BraTS dataset parts.
    """
    setup_general.setup_general()
    os.system("pip install -q albumentations==0.5.0")
    print("Libraries Installed!")
    # Workshop utility modules on GitHub -> local destinations.
    module_files = (
        ("ISIS_4825/ML/Taller_13/torch_utils.py", "utils/torch_utils.py"),
        ("ISIS_4825/ML/Taller_13/layers.py", "utils/layers.py"),
        ("ISIS_4825/ML/Taller_13/train_utils.py", "utils/train_utils.py"),
        ("ISIS_4825/ML/Taller_13/visualization_utils.py", "utils/visualization_utils.py"),
    )
    for remote, local in module_files:
        setup_general.download_github_content(remote, local)
    print("Util Functions Downloaded Successfully")
    # Imported after setup_general so utils/general.py is present.
    from utils import general as gen
    if download_dataset:
        gen.download_file_from_google_drive(
            "192V5FfehmbpN2wkl1apiygxSqW6EUmyP", "train_data.zip", size=212e3
        )
        gen.download_file_from_google_drive(
            "1--hE7Ucvlsjf-fwET4-JMuS-VAva7Vxq", "test_data.zip", size=22e3
        )
        print("Dataset Downloaded Successfully")
    if pretrained:
        gen.download_file_from_google_drive(
            "1ZGDKYCHZFpW1D-rx8xmxxjJiFoc5N1Au",
            "autoencoder.pt",
            dst="./models",
            size=49e3,
        )
        gen.download_file_from_google_drive(
            "1kfd4_30DIdp3Di8fU5_h4I-EdFKEY7Uq",
            "unet.pt",
            dst="./models",
            size=22e3,
        )
        print("Pretrained Networks Downloaded Successfully")
    if brats:
        gen.download_file_from_google_drive(
            "1NRAFehzp6WtpoQduMJ5PGt1ivC8NhpJ1", "brats_part_1.zip", size=426.8e3
        )
        gen.download_file_from_google_drive(
            "1-6vfu9nBu1PIabcZPMAmX685M1mOkceI", "brats_part_2.zip", size=370.3e3
        )
        print("Dataset BraTS Downloaded Successfully")
    print("Workshop 13 Enabled Successfully")
def setup_journal(download_dataset=True):
    """Enable the interactive paper: fetch utility modules, the curated
    datasets and the pretrained weights for each preprocessing variant.

    Args:
        download_dataset: when True (the default, preserving the original
            unconditional behavior), download the curated dataset zips.
            The original had a leftover commented-out
            ``#if download_dataset:`` guard; this parameter restores it.
    """
    setup_general.setup_general()
    # Project utility modules on GitHub -> local destination paths.
    utility_files = (
        ("project/torch_utils.py", "utils/torch_utils.py"),
        ("project/layers.py", "utils/layers.py"),
        ("project/train_utils.py", "utils/train_utils.py"),
        ("project/visualization_utils.py", "utils/visualization_utils.py"),
        ("project/lung_segment.py", "utils/lung_segment.py"),
    )
    for remote, local in utility_files:
        setup_general.download_github_content(remote, local)
    # Imported after setup_general so utils/general.py is present.
    from utils import general as gen
    if download_dataset:
        set_thresh = "1yN2dzVPjz-5yTMZfeTrMRTDQVCgkmwaU"
        set_segment = "1Ar7ww1ZNjsYs9SpQWmXdVDeDbbdIzV6H"
        set_original = "1QLuziDEys8G9tp5d-6-X9hnlusy5MwgK"
        gen.download_file_from_google_drive(set_thresh, "selectDataClip.zip", size=216e3)
        gen.download_file_from_google_drive(set_segment, "selectDataSegment.zip", size=55.7e3)
        gen.download_file_from_google_drive(set_original, "selectData.zip", size=549.7e3)
    # Pretrained weights per preprocessing variant (renamed the original
    # misspelled "threhsoldData_id" locals).
    original_id = "1CXyrOF1KEeZUWtKNM3GWaI3Evqwg48-n"
    threshold_id = "1l4BlO2APgdMNyuIyQ0dm3sDIB9zgASzR"
    threshold_id2 = "1tBeYO0GjtcIYXhGpJU-VmQjE5G-9ApuH"
    segmented_id = "1-YoJegTc22hoRzbtpVpdtkRKzyGjwSvV"
    gen.download_file_from_google_drive(original_id, "weights_original.pt", dst="./models", size=120e3)
    gen.download_file_from_google_drive(threshold_id, "weights_interval.pt", dst="./models", size=120e3)
    gen.download_file_from_google_drive(threshold_id2, "weights_interval2.pt", dst="./models", size=120e3)
    gen.download_file_from_google_drive(segmented_id, "weights_segmented.pt", dst="./models", size=120e3)
    print("Dataset Downloaded Successfully")
    # Fixed typo: the original printed "Succesfully!".
    print("Interactive Paper Enabled Successfully!")
def setup_unet(architecture="unet"):
    """Download U-Net-family architectures plus shared layers and
    training utilities.

    Args:
        architecture: an architecture name (or collection of names); each
            supported keyword found in it — "unet", "runet", "r2unet",
            "attention" — has its module downloaded.
    """
    os.makedirs("train_utils", exist_ok=True)
    # Create an empty __init__.py so "train_utils" is importable as a package.
    with open("train_utils/__init__.py", "wb"):
        pass
    setup_general.setup_general()
    # Shared layers / metrics / training loop, fetched for every architecture.
    shared_files = (
        ("pytorch_utils/general_layers.py", "utils/general_layers.py"),
        ("pytorch_utils/train_utils/metrics.py", "train_utils/metrics.py"),
        ("pytorch_utils/train_utils/seg_train_loops.py", "train_utils/train_loop.py"),
        ("pytorch_utils/unet_architectures/layers.py", "utils/layers.py"),
    )
    for remote, local in shared_files:
        setup_general.download_github_content(remote, local)
    print("Layers and utils enabled")
    # (keyword, module stem, label). The original's repeated
    # `architecture == X or X in architecture` reduces to `X in architecture`
    # (membership subsumes equality for strings and collections).
    # NOTE: substring semantics are preserved deliberately — e.g. passing
    # "runet" also triggers the plain "unet" download, exactly as before.
    architectures = (
        ("unet", "unet", "U-Net"),
        ("runet", "runet", "RU-Net"),
        ("r2unet", "r2unet", "R2U-Net"),
        ("attention", "attention_unet", "Attention U-Net"),
    )
    for keyword, stem, label in architectures:
        if keyword in architecture:
            setup_general.download_github_content(
                "pytorch_utils/unet_architectures/" + stem + ".py",
                "utils/" + stem + ".py",
            )
            print(label + " Enabled")