Example #1
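# Note: this excerpt starts at main(); the original script's imports (sys, os,
# pandas as pd, mrcfile) and the helpers get_cli, Config and match_spectrum are
# defined earlier and are not shown here.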
def main():
    parser = get_cli()
    args = parser.parse_args()
    pythonpath = args.pythonpath
    sys.path.append(pythonpath)
    # gpu = args.gpu
    # if gpu is None:
    #     print("No CUDA_VISIBLE_DEVICES passed...")
    #     if torch.cuda.is_available():
    #         os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # else:
    #     os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    config_file = args.config_file
    config = Config(user_config_file=config_file)
    df = pd.read_csv(config.dataset_table, dtype={"tomo_name": str})
    df.set_index('tomo_name', inplace=True)
    tomo_name = args.tomo_name
    input_tomo = df[config.processing_tomo][tomo_name]
    # Target spectrum for this tomogram: read below as a tab-separated table
    # with an "intensity" column (file name kept from the original excerpt).
    target_spectrum_file = os.path.join(config.work_dir, tomo_name,
                                        "match_spectrum_filtered.mrc")
    with mrcfile.open(input_tomo, permissive=True) as m:
        tomo = m.data.astype("f4")
        tomo_h = m.header

    target_spectrum = pd.read_csv(target_spectrum_file, sep="\t")["intensity"].values

    filtered_tomo = match_spectrum(tomo, target_spectrum, config.cutoff,
                                   config.smoothen)

    with mrcfile.new(args.output, overwrite=True) as m_out:
        m_out.set_data(filtered_tomo)
        m_out.set_extended_header(tomo_h)
Example #2
def main():
    if len(sys.argv) != 2:
        LOGGER.error('Invalid number of parameters: a .env file is required!')
        return

    env_path = getEnvPath()
    config = Config(env_path)

    # Start bot
    LOGGER.info('Bot started...')
    updater = getUpdater(config)

    LOGGER.info('Listener started...')
    updater.start_polling()
    updater.idle()
Example #3
import ast

import pandas as pd

from constants import h5_internal_paths
from constants.config import Config
from constants.dataset_tables import DatasetTableHeader
from file_actions.writers.h5 import assemble_tomo_from_subtomos
from file_actions.readers.tomograms import load_tomogram
from paths.pipeline_dirs import get_probability_map_path, testing_partition_path
from constants.config import get_model_name

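# args is assumed to come from an argparse parser defined earlier in the
# original script (not shown in this excerpt).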
tomo_name = args.tomo_name
fold = ast.literal_eval(args.fold)
config_file = args.config_file
config = Config(user_config_file=config_file)

model_path, model_name = get_model_name(config, fold)

snakemake_pattern = "{}/predictions/{}/{}/{}/.{}.probability_map.done".format(
    config.output_dir, model_name, tomo_name, config.pred_class, str(fold))
from networks.utils import get_training_testing_lists

if isinstance(fold, int):
    tomo_training_list, tomo_testing_list = get_training_testing_lists(
        config=config, fold=fold)
    if tomo_name in tomo_testing_list:
        run_job = True
    else:
        run_job = False
else:
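    # (truncated in the source) fold is not an integer here, e.g. None for a
    # model trained without cross-validation folds, so the prediction
    # presumably runs for this tomogram:
    run_job = True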
Example #4
# os and ast are used below (os.path.exists, ast.literal_eval) but were not
# imported in this excerpt; args is assumed to come from the script's CLI parser.
import ast
import os

import torch.optim as optim

from networks.io import get_device, to_device
from networks.utils import get_training_testing_lists, \
    generate_data_loaders_data_augmentation
from networks.loss import DiceCoefficientLoss
from networks.routines import train, validate

from networks.unet import UNet3D
from networks.utils import save_unet_model
from networks.visualizers import TensorBoard_multiclass

from constants.config import Config, record_model
from constants.config import get_model_name

config = Config(args.config_file)
gpu = args.gpu
device = get_device()
fold = ast.literal_eval(args.fold)

# Generate relevant dirs
model_path, model_name = get_model_name(config, fold)
print("model_path: ", model_path)
if fold is None:
    snakemake_pattern = ".done_patterns/" + model_path + "_None.pth.done"
else:
    snakemake_pattern = ".done_patterns/" + model_path + "_" + str(
        fold) + ".pth.done"

if os.path.exists(model_path) and not config.force_retrain:
    print("model exists already!")
Example #5
import sys

# args is assumed to come from the script's CLI parser (cut off in this excerpt)
tomo_name = args.tomo_name
config_file = args.config_file
pythonpath = args.pythonpath
sys.path.append(pythonpath)

import os
import ast
import numpy as np
import pandas as pd

from constants.config import Config
from tomogram_utils.volume_actions.actions import \
    generate_strongly_labeled_partition
from paths.pipeline_dirs import training_partition_path

config = Config(config_file)
df = pd.read_csv(config.dataset_table, dtype={"tomo_name": str})
df.set_index('tomo_name', inplace=True)
fold = ast.literal_eval(args.fold)
path_to_raw = df[config.processing_tomo][tomo_name]
labels_dataset_list = list()
for semantic_class in config.semantic_classes:
    mask_name = semantic_class + '_mask'
    path_to_mask = df[mask_name][tomo_name]
    labels_dataset_list.append(path_to_mask)

box_shape = (config.box_size, config.box_size, config.box_size)
output_path_dir, output_path = training_partition_path(
    output_dir=config.work_dir, tomo_name=tomo_name, fold=fold)
print(output_path_dir)
os.makedirs(name=output_path_dir, exist_ok=True)
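# (the excerpt ends here; the original script presumably continues by calling
# the imported generate_strongly_labeled_partition and writing its result to
# output_path)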
Example #6
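# (the beginning of this example is cut off; the lines below appear to be the
# end of the allowedUser helper that the message handler calls further down)
def allowedUser(message):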
    user = str(message.chat.username)
    return (user == config.BOT_NAME)


def bypassMsg():
    LOGGER.warning('Incoming message ignored!')


# --- Script entry point

if len(sys.argv) != 2:
    LOGGER.error('Invalid number of parameters: a .env file is required!')
    sys.exit()

env_path = getEnvPath()
config = Config(env_path)

downloader = DownloaderClient()

pyroClient = EnvLoader.getClient(config)


@pyroClient.on_message(~Filters.private)
def handler(client, msg):
    if (not allowedUser(msg)):
        username = str(msg.chat.username)
        LOGGER.warning('Request from invalid user! Requested username [' +
                       username + ']')
        return

    if (not downloader.addMessage(msg)):