Example #1
def main(args):
    cfg = get_config(args, save=False)

    dataset = NOCSDataset(cfg['obj']['basepath'],
                          cfg['obj_category'],
                          cfg['obj_info'], cfg['num_expr'], mode=args.mode)
    ins_dict = {}
    lim = 5
    for i in range(len(dataset)):
        path = dataset.file_list[i]
        instance = path.split('.')[-2].split('/')[-3]
        if instance not in ins_dict:
            ins_dict[instance] = 0
        ins_dict[instance] += 1
        if ins_dict[instance] <= lim:
            print(path)
            visualize_data(dataset[i])
    for i in tqdm(range(len(dataset))):
        cur_pose = dataset.get_pose(i)
        path = dataset.file_list[i]
        for key, value in cur_pose.items():
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                print('inf or nan in', key)
                print(path)
        if np.any(cur_pose['scale'] < 1e-3):
            print('scale too small!', cur_pose['scale'])
            print(path)
Example #2
def center_rov(move,
               xPos=0,
               yPos=0,
               Bbox=None,
               depth_sensor=None,
               logger=None):
    global previous_position
    global error_integral
    config = get_config("tasks")['path_task']
    Kp = config['centering']['kp']
    Ki = config['centering']['ki']
    Kd = config['centering']['kd']

    if Bbox:
        xPos = Bbox.xc
        yPos = Bbox.yc

    error_integral = error_integral + xPos
    derivative = xPos - previous_position
    previous_position = xPos

    velocity = -xPos * Kp + error_integral * Ki + derivative * Kd
    move.set_lin_velocity(right=velocity)
    if logger:
        logger.log("xPos: " + str(xPos) + ' velocity set ' + str(velocity))
    if depth_sensor:
        current_depth = depth_sensor.get_depth()
        move.pid_set_depth(current_depth - Kp * yPos)
    sleep(0.3)
    move.set_lin_velocity(right=0)
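
# For reference: center_rov above is a textbook discrete PID update on the
# horizontal bounding-box offset. A minimal standalone sketch of that control
# law (function and parameter names here are illustrative assumptions, not
# the project's API):
def pid_step(error, prev_error, integral, kp, ki, kd):
    """One PID update; returns (control_output, new_integral)."""
    integral += error                    # I term: accumulated error
    derivative = error - prev_error      # D term: one-step finite difference
    return kp * error + ki * integral + kd * derivative, integral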
Example #3
    def center_on_flare(self):
        """
        rotates in vertical axis so flare is in the middle of an image
        TODO: handle two flares
        """
        config = self.config['centering']
        flare_size = get_config(
            "objects_size")["localization"]["flare"]["height"]

        MAX_CENTER_ANGLE_DEG = config['max_center_angle_deg']
        MAX_TIME_SEC = config['max_time_sec']

        stopwatch = Stopwatch()
        stopwatch.start()

        while stopwatch <= MAX_TIME_SEC:
            bbox = self.darknet_client.predict()[0].normalize(480, 480)
            self.flare_position = location_calculator(bbox, flare_size,
                                                      "height")
            angle = -m.degrees(
                m.atan2(self.flare_position['x'],
                        self.flare_position['distance']))
            if abs(angle) <= MAX_CENTER_ANGLE_DEG:
                self._logger.log("centered on flare successfully")
                return True
            self._control.rotate_angle(0, 0, angle)
        self._logger.log("couldn't center on flare")
        return False
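
# The bearing computed above is the arctangent of lateral offset over
# distance. A quick worked check (the numbers are made up for illustration):
import math as m
angle = -m.degrees(m.atan2(0.5, 2.0))  # flare 0.5 m to the side, 2 m away
print(round(angle, 1))                  # -14.0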
Example #4
    def __init__(self, control_dict: Movements, sensors_dict, main_logger):
        self._control = control_dict['movements']
        self._hydrophones = sensors_dict['hydrophones']
        self.darknet_client = DarknetClient(DARKNET_PORT, IP_ADDRESS)
        self._bounding_box = BoundingBox(0, 0, 0, 0)
        self._logger = main_logger
        self.config = get_config('tasks')['localization']
        self.confidence = 0
        self.flare_position = None
Example #5
    def __init__(self, control_dict, sensors_dict, main_logger):
        self._control = control_dict
        self.depth_sensor = sensors_dict['depth']
        self._logger = main_logger
        self.movements = control_dict['movements']
        self.darknet_client = DarknetClient()
        self.config = get_config('tasks')['qualification_task']
        self.confidence = 0

        self.movements.pid_turn_on()
        self._logger.log("Qualification task executor init done")
Example #6
    def is_this_gate(self, bounding_box, img):
        confidence = 0
        MOVING_AVERAGE_DISCOUNT = get_config('moving_avg_discount')
        CONFIDENCE_THRESHOLD = get_config('confidence_threshold')

        bounding_box = YoloGateLocator().get_gate_bounding_box(img)

        if bounding_box is not None:
            confidence = mvg_avg(1, confidence, MOVING_AVERAGE_DISCOUNT)  # what does the mvg_avg function do?
            self._bounding_box.mvg_avg(bounding_box, 0.5, True)
            self._logger.log("is_this_gate: something detected")
        else:
            confidence = mvg_avg(0, confidence, MOVING_AVERAGE_DISCOUNT)

        # Stop and report success if we are sure we found a path!
        if confidence > CONFIDENCE_THRESHOLD:
            self._logger.log("is_this_gate: gate found")
            self._control.set_ang_velocity(0, 0, 0)
            return True
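
# The inline comment above asks what mvg_avg does. From its call sites it
# behaves like an exponential moving average; a plausible sketch (an
# assumption -- the project's actual helper may differ):
def mvg_avg(new_value, running_avg, discount):
    # Blend the new sample into the running average, keeping `discount`
    # weight on history.
    return discount * running_avg + (1 - discount) * new_value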
Example #7
    def __init__(self, control_dict: Movements, sensors_dict, main_logger):
        self._control = control_dict
        self._bounding_box = BoundingBox(0, 0, 0, 0)
        self._logger = main_logger
        self.config = get_config("tasks")['navigation_task']
        # For which path we are taking the angle. For each path, the
        # rotation angle might be set differently in config.json
        self.number = 0
        self.path = []
        self.darknet_client = DarknetClient()  # TODO - change in client to paste https
        is_model_loaded = self.darknet_client.load_model('coke')
        self._logger.log("Loading model: " + str(is_model_loaded))
Example #8
def main(args):
    cfg = get_config(args, save=False)

    bla = SAPIENDataset(args.root_dset,
                        args.obj_category,
                        cfg['obj_info'],
                        cfg['num_expr'],
                        mode=args.mode,
                        is_debug=args.is_debug)
    for j in range(10):
        i = np.random.randint(0, len(bla))
        print(bla.file_list[i])
        visualize_data(bla[i])
Example #9
    def __init__(self):
        self.config = get_config()
        self.log_writer = LogWriter(self.config.log_dir)
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.is_available():
            print("Using CUDA, benchmarking implementations", file=sys.stderr)
            torch.backends.cudnn.benchmark = True
        self.model = AutoLSTM(self.config.input_size,
                              self.config.hidden_size,
                              self.config.num_layers,
                              bidirectional=False)
        self.model.to(self.device)
        self.model = nn.DataParallel(self.model)
Example #10
    def authenticate(self) -> AuthenticationResponse:
        """
        Expects 4 values in env
        1. REDDIT_APP_CLIENT_ID: <your_app_client_id>
        2. REDDIT_APP_CLIENT_SECRET: <your_app_client_secret>
        3. REDDIT_USER_NAME: <your_user_name_for_reddit>
        4. REDDIT_PASSWORD: <your_reddit_password>
        Returns:
        AuthenticationResponse
        """

        reddit_app_client_id: str = os.getenv("REDDIT_APP_CLIENT_ID")
        reddit_app_client_secret: str = os.getenv("REDDIT_APP_CLIENT_SECRET")

        if not reddit_app_client_id:
            raise ValueError("REDDIT_APP_CLIENT_ID not present in environment")
        if not reddit_app_client_secret:
            raise ValueError("REDDIT_APP_CLIENT_SECRET not present in environment")

        config: dict = get_config()
        auth_url: str = config["reddit_auth_url"]
        endpoint: str = config["reddit_authentication_endpoint"]
        client_auth: requests.auth.HTTPBasicAuth = requests.auth.HTTPBasicAuth(
            reddit_app_client_id, reddit_app_client_secret
        )
        post_data = {
            "grant_type": "password",
            "username": self.reddit_user_name,
            "password": self.reddit_password,
        }
        headers = {"User-Agent": f"PersonalClient/0.1 by {self.reddit_user_name}"}
        response: requests.models.Response = requests.post(
            auth_url + endpoint, auth=client_auth, data=post_data, headers=headers
        )
        response.raise_for_status()
        response: dict = response.json()
        authentication_expiry_time = datetime.datetime.now() + datetime.timedelta(
            seconds=response["expires_in"]
        )
        return AuthenticationResponse(
            authentication_token=response["access_token"],
            authentication_expiry_time=authentication_expiry_time,
            reddit_user_name=self.reddit_user_name,
        )
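
# Hedged usage sketch: the enclosing class name below is an assumption; only
# the four environment variables come from the docstring contract above.
import os

os.environ["REDDIT_APP_CLIENT_ID"] = "<your_app_client_id>"
os.environ["REDDIT_APP_CLIENT_SECRET"] = "<your_app_client_secret>"
os.environ["REDDIT_USER_NAME"] = "<your_user_name_for_reddit>"
os.environ["REDDIT_PASSWORD"] = "<your_reddit_password>"

client = RedditAuthClient()  # hypothetical enclosing class
auth = client.authenticate()
print(auth.authentication_token, auth.authentication_expiry_time)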
Example #11
def create_app():
    application = Flask(__name__)
    CORS(application)
    application.config.update(get_config())

    # Do not redirect URLs without a trailing slash to the version with one
    application.url_map.strict_slashes = False

    application.indexes = Indexer(open_data_file(application.config['INDEX_FILE']))
    print("Loaded indexes")
    application.genome_store = GenomeStore(open_data_file(application.config['GENOME_STORE_FILE']))
    print("Loaded Genome store")

    application.region_info_store = open_data_file(application.config['REGION_INFO_FILE'])
    print("Loaded Region info Store")

    with application.app_context():
        from blueprints import genome_search

        application.register_blueprint(genome_search.search_bp, url_prefix='/api/genome_search')

        from blueprints import alternative_assemblies
        application.register_blueprint(alternative_assemblies.alt_assemblies_bp, url_prefix='/api/alternative_assemblies')

        from blueprints import popular_genomes
        application.register_blueprint(popular_genomes.popular_genomes_bp, url_prefix='/api/popular_genomes')

        from blueprints import genomes
        application.register_blueprint(genomes.genomes_bp, url_prefix='/api/genome/')

        from blueprints import objects
        application.register_blueprint(objects.objects_bp, url_prefix='/api/object/')

        from blueprints import region_info
        application.register_blueprint(region_info.region_info_bp, url_prefix='/api/genome/karyotype/')

        from blueprints import region_validate
        application.register_blueprint(region_validate.region_validate_bp, url_prefix='/api/genome/region')

        application.register_blueprint(Blueprint('temp_static_blueprint', __name__, static_folder='static', static_url_path='/static/genome_images'))

        # print(application.url_map.iter_rules)

    # TODO: error handlers listen only for 404 errors at the moment. Needs investigating.
    register_generic_error_handlers(application)

    return application
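
# A minimal hedged entry point for the factory above (host and port are
# assumptions, not taken from the project):
if __name__ == '__main__':
    app = create_app()
    app.run(host='0.0.0.0', port=5000)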
Example #12
  def __init__(self):
    self.config = get_config()
    with open(self.config.index_path, 'rb') as fp:
      self.data = pickle.load(fp)
    self.file_names = []
    self.matrix = []
    for k, v in self.data.items():
      self.file_names.append(k)
      self.matrix.append(v)
    self.matrix = np.array(self.matrix)
    self.file_names = np.array(self.file_names)
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.weights = torch.load(self.config.model_path)
    self.model = AutoLSTM(self.config.input_size, self.config.hidden_size,
                          self.config.num_layers, False)
    self.model.load_state_dict(self.weights)
    self.model.eval()
    self.model.to(self.device)
Example #13
    def __init__(self):
        self.config = get_config()
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.weights = torch.load(self.config.model_path)
        self.model = AutoLSTM(self.config.input_size,
                              self.config.hidden_size,
                              self.config.num_layers,
                              bidirectional=False)
        self.model.to(self.device)
        self.model.load_state_dict(self.weights)
        self.model.eval()
        self.data_parser = DatasetParser(self.config.features_list)
        self.dl = DataLoader(self.data_parser,
                             batch_size=self.config.batch_size,
                             shuffle=False,
                             num_workers=12,
                             collate_fn=my_collate)
        self.pbar = tqdm(self.dl)
Example #14
    def __init__(self, control_dict: Movements, sensors_dict, main_logger,
                 bucket):  #bucket = 'blue' or 'red' or 'pinger'
        self._control = control_dict['movements']
        self._dropper = control_dict['dropper']
        self._hydrophones = sensors_dict['hydrophones']
        self._logger = main_logger
        self.config = get_config("tasks")['buckets_task']
        self.MAX_TIME_SEC = self.config['search']['max_time_sec']
        self.PINGER_LOOP_COUNTER = self.config['search']['pinger_loop_counter']
        self.BLUE_LOOP_COUNTER = self.config['search']['blue_loop_counter']
        self.ANY_BUCKET_COUNTER = self.config['search']['any_bucket_counter']
        self.SEARCHING_BUCKETS_FORWARD_TIME = self.config['search'][
            'SEARCHING_BUCKETS_FORWARD_TIME']
        self.PINGER_FREQ = self.config['search']['PINGER_FREQ']
        self.POSITION_THRESHOLD = self.config['search']['POSITION_THRESHOLD']
        self._control.pid_turn_on()
        self._control.pid_set_depth(self.config['search']['max_depth'])

        self.darknet_client = DarknetClient(DARKNET_PORT, IP_ADDRESS)

        self._logger.log('Buckets: diving')
Example #15
    def __init__(self, features_list):

        try:
            self.config = get_config(mode='train')
            self.features_list = features_list
            self.batch_size = self.config.nb_samples
            self.min_ts = self.config.min_timesteps
            self.max_ts = self.config.max_timesteps
            self.input_features_dim = self.config.input_size

            self.batcher = None
            self.feature_files = read_filelist(self.features_list)

            assert len(self.feature_files) != 0, \
                'Empty feature file list at {}'.format(self.features_list)

            self.batcher = self.batch_gen()

            logger.info('Setting DatasetParser from {}'.format(
                self.features_list))

        except Exception as err:
            logger.warning('Error setting up DatasetParser {}'.format(err))
Example #16
  def __init__(self, input_size, hidden_size, num_layers=2,
               bidirectional=False):
    super().__init__()
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    self.bidirectional = bidirectional
    self.config = get_config()
    self.encoder_weights = torch.load(self.config.encoder_model_path)
    self.decoder_weights = torch.load(self.config.decoder_model_path)

    self.e_wts = {k[6:]: v for k, v in self.encoder_weights.items() if 'elstm' in k}
    self.d_wts = {'.'.join(k.split('.')[1:]): v for k, v in self.decoder_weights.items()
                  if 'dlstm' in k}

    self.elstm = eLSTM(input_size, hidden_size, num_layers, False)
    # the last hidden state of eLSTM is (num_layers * 1, 1, hidden_size)
    # = (2, 1, 512), change it to the shape (1, 1, 512 * 2), and then
    # repeat it seq_len times, as the input of dLSTM
    self.dlstm = dLSTM(2 * hidden_size, (input_size // hidden_size) * hidden_size,
                       num_layers, False)
    self.elstm.load_state_dict(self.e_wts)
    self.dlstm.load_state_dict(self.d_wts)
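
# The two dict comprehensions above strip module prefixes from checkpoint
# keys so the sub-module state dicts load cleanly. The same remapping on
# made-up key names:
ckpt = {'elstm.lstm.weight_ih_l0': 1, 'decoder.bias': 2}
e_wts = {k[6:]: v for k, v in ckpt.items() if 'elstm' in k}
print(e_wts)  # {'lstm.weight_ih_l0': 1} -- the 6-char 'elstm.' prefix removed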
Example #17
from os.path import join
from business import utils
from configs.config import get_config

credential = get_config("configs/credentials.yaml")


class ZomatoAPIWrapper:
    def __init__(self,
                 url="https://developers.zomato.com/api/",
                 version="v2.1",
                 api_key=credential["ZOMATO_API_KEY"],
                 file_format="json"):

        self.url = join(url, version)
        self.user_key = api_key
        self.file_format = file_format

    def __repr__(self):

        return self.url

    def headers(self):

        headers = {"user-key": self.user_key, "Accept": self.file_format}

        return headers

    def query(self, search_key, **kwargs):
        """
        Helper function to return String url of query against Zomato
Example #18
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    cfg = get_config(args)

    '''LOG'''
    log_dir = pjoin(cfg['experiment_dir'], 'log')
    ensure_dirs(log_dir)

    logger = logging.getLogger("TrainModel")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log_finetune.txt' % (log_dir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(cfg)

    '''DATA'''
    test_dataloader = get_dataloader(cfg, args.use_val, downsampling=args.downsample)

    train_real_dataloader = get_dataloader(cfg, 'real_train', shuffle=True)
    syn_train_len = len(train_real_dataloader) * args.syn_n

    train_syn_dataloader = get_dataloader(cfg, 'train', shuffle=True)
    syn_train_cycle = iter(train_syn_dataloader)
    num_div = len(train_syn_dataloader) // syn_train_len

    '''TRAINER'''
    trainer = Trainer(cfg, logger)
    start_epoch = trainer.resume()

    def test_all():
        '''testing'''
        test_loss = {}
        for i, data in enumerate(test_dataloader):
            pred_dict, loss_dict = trainer.test(data)
            loss_dict['cnt'] = 1
            add_dict(test_loss, loss_dict)

        cnt = test_loss.pop('cnt')
        log_loss_summary(test_loss, cnt, lambda x, y: log_string('real_test {} is {}'.format(x, y)))

    test_all()

    for epoch in range(start_epoch, cfg['total_epoch']):
        trainer.step_epoch()

        '''training'''
        if not args.real_only:
            train_loss = {}
            for i in range(syn_train_len):
                data = next(syn_train_cycle)
                loss_dict = trainer.update(data)
                loss_dict['cnt'] = 1
                add_dict(train_loss, loss_dict)

            cnt = train_loss.pop('cnt')
            log_loss_summary(train_loss, cnt, lambda x, y: log_string('Syn_Train {} is {}'.format(x, y)))

        train_loss = {}
        for i, data in enumerate(train_real_dataloader):
            loss_dict = trainer.update(data)
            loss_dict['cnt'] = 1
            add_dict(train_loss, loss_dict)

        cnt = train_loss.pop('cnt')
        log_loss_summary(train_loss, cnt, lambda x, y: log_string('Real_Train {} is {}'.format(x, y)))

        if (epoch + 1) % cfg['freq']['save'] == 0:
            trainer.save()

        test_all()
        if (epoch + 1) % num_div == 0:
            syn_train_cycle = iter(train_syn_dataloader)
Example #19
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    cfg = get_config(args)
    '''LOG'''
    log_dir = pjoin(cfg['experiment_dir'], 'log')
    ensure_dirs(log_dir)

    logger = logging.getLogger("TrainModel")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log.txt' % (log_dir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(cfg)
    '''DATA'''
    train_dataloader = get_dataloader(cfg, 'train', shuffle=True)
    test_dataloader = get_dataloader(cfg, 'test')

    if args.use_val is not None:
        val_dataloader = get_dataloader(cfg, args.use_val)
    else:
        val_dataloader = None
    '''TRAINER'''
    trainer = Trainer(cfg, logger)
    start_epoch = trainer.resume()

    def test_all():
        '''testing'''
        test_loss = {}
        for i, data in enumerate(test_dataloader):
            pred_dict, loss_dict = trainer.test(data)
            loss_dict['cnt'] = 1
            add_dict(test_loss, loss_dict)

        cnt = test_loss.pop('cnt')
        log_loss_summary(test_loss, cnt,
                         lambda x, y: log_string('Test {} is {}'.format(x, y)))

        if val_dataloader is not None:
            val_loss = {}
            for i, data in enumerate(val_dataloader):
                pred_dict, loss_dict = trainer.test(data)
                loss_dict['cnt'] = 1
                add_dict(val_loss, loss_dict)

            cnt = val_loss.pop('cnt')
            log_loss_summary(
                val_loss, cnt, lambda x, y: log_string('{} {} is {}'.format(
                    args.use_val, x, y)))

    for epoch in range(start_epoch, cfg['total_epoch']):
        trainer.step_epoch()
        train_loss = {}
        '''training'''
        for i, data in enumerate(train_dataloader):
            loss_dict = trainer.update(data)
            loss_dict['cnt'] = 1
            add_dict(train_loss, loss_dict)

        cnt = train_loss.pop('cnt')
        log_loss_summary(
            train_loss, cnt,
            lambda x, y: log_string('Train {} is {}'.format(x, y)))

        if (epoch + 1) % cfg['freq']['save'] == 0:
            trainer.save()

        test_all()
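
# add_dict accumulates per-batch loss dicts, and log_loss_summary averages
# them by the popped 'cnt'. Plausible sketches of both helpers (assumptions;
# the project's versions may differ):
def add_dict(total, new):
    for k, v in new.items():
        total[k] = total.get(k, 0) + v  # key-wise running sum

def log_loss_summary(loss_dict, cnt, log_fn):
    for k, v in loss_dict.items():
        log_fn(k, v / cnt)              # report the per-batch mean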
Example #20
            p_rot = pred_pose['rotation'][p]
            p_trans = pred_pose['translation'][p]
            c_trans = pred_pose['translation'][c]
            relative_trans = np.matmul(p_rot.transpose(-1, -2),
                                       c_trans - p_trans)
            axis_index = info['main_axis'][len(joint_states)]
            axis = np.zeros((3, ))
            axis[axis_index] = 1
            state = np.dot(relative_trans.reshape(-1), axis)
        joint_states.append(state)
    return np.array(joint_states)


if __name__ == "__main__":
    args = parse_args()
    cfg = get_config(args, save=False)
    base_path = cfg['obj']['basepath']
    obj_category = cfg['obj_category']

    obj_info = cfg['obj_info']

    data_path = pjoin(cfg['experiment_dir'], 'results', 'data')

    all_raw = os.listdir(data_path)
    all_raw = sorted(all_raw)

    error_dict = {}

    for i, raw in enumerate(all_raw):
        name = raw.split('.')[-2]
        with open(pjoin(data_path, raw), 'rb') as f:
Example #21
def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    cfg = get_config(args, save=False)

    '''LOG'''
    log_dir = pjoin(cfg['experiment_dir'], 'log')
    ensure_dirs(log_dir)

    logger = logging.getLogger("TestModel")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/log_test.txt' % (log_dir))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(cfg)

    '''TRAINER'''
    trainer = Trainer(cfg, logger)
    trainer.resume()

    '''testing'''
    save = cfg['save']
    no_eval = cfg['no_eval']

    dataset_name = args.mode_name

    test_dataloader = get_dataloader(cfg, dataset_name)
    test_loss = {'cnt': 0}

    zero_time = time.time()
    time_dict = {'data_proc': 0.0, 'network': 0.0}
    total_frames = 0

    for i, data in tqdm(enumerate(test_dataloader), total=len(test_dataloader), smoothing=0.9):
        num_frames = len(data)
        total_frames += num_frames
        print(f'Trajectory {i}, {num_frames:8} frames****************************')

        start_time = time.time()
        elapse = start_time - zero_time
        time_dict['data_proc'] += elapse
        print(f'Data Preprocessing: {elapse:8.2f}s {num_frames / elapse:8.2f}FPS')

        pred_dict, loss_dict = trainer.test(data, save=save, no_eval=no_eval)

        elapse = time.time() - start_time
        time_dict['network'] += elapse
        print(f'Network Forwarding: {elapse:8.2f}s {num_frames / elapse:8.2f}FPS')

        loss_dict['cnt'] = 1
        add_dict(test_loss, loss_dict)

        zero_time = time.time()

    print(f'Overall, {total_frames:8} frames****************************')
    print(f'Data Preprocessing: {time_dict["data_proc"]:8.2f}s {total_frames / time_dict["data_proc"]:8.2f}FPS')
    print(f'Network Forwarding: {time_dict["network"]:8.2f}s {total_frames / time_dict["network"]:8.2f}FPS')
    if cfg['batch_size'] > 1:
        print(f'PLEASE SET batch_size = 1 TO TEST THE SPEED. CURRENT BATCH_SIZE: {cfg["batch_size"]}')

    cnt = test_loss.pop('cnt')
    log_loss_summary(test_loss, cnt, lambda x, y: log_string('Test {} is {}'.format(x, y)))
    if save and not no_eval:
        trainer.model.save_per_diff()
Example #22
class Genome(object):
    config = get_config()

    # Allow only alpha numeric in genome_id
    genome_id_regex = re.compile('[^{}+]'.format(
        config['GENOME_ID_VALID_CHARS']))

    def __init__(self, genome_info):
        self.genome_info = genome_info

    def create_genome_from_mr_format(self):

        # Use dict get method so that we get None value instead of KeyError when a key is not found
        self.common_name = self.genome_info.get('organism',
                                                {}).get('display_name')
        self.scientific_name = self.genome_info.get('organism',
                                                    {}).get('scientific_name')
        self.production_name = self.genome_info.get('organism', {}).get('name')

        self.assembly_name = self.genome_info.get('assembly',
                                                  {}).get('assembly_name')
        self.assembly_accession = self.genome_info.get(
            'assembly', {}).get('assembly_accession')

        self.division = [self.genome_info.get('division', {}).get('name')]

        self.genome_id = self.__assign_genome_id()
        self.alternative_assemblies = self.__find_alternative_assemblies()

        self.is_popular = self.__check_if_is_popular()

        if self.is_popular:
            self.popular_order = self.__get_popular_order()

        self.is_available = self.__check_if_is_available()

        if self.is_available:
            self.example_objects = self.__get_example_objects()

        self.__process_strains_info()

    def create_genome_from_gs_format(self):

        self.__dict__.update(self.genome_info)
        self.sanitize()

    def create_genome_from_something_else(self):
        pass

    def __process_strains_info(self):

        # TODO: How do I get reference species genome_id?

        if self.genome_info.get('organism', {}).get('strain') is not None:
            self.is_strain = True
            self.reference_genome_id = self.genome_info.get('organism',
                                                            {}).get('name')
        else:
            self.is_strain = False
            self.reference_genome_id = None

    def __assign_genome_id(self):

        # Tmp hack until GCA value is loaded into Metadata registry
        if self.production_name == 'plasmodium_falciparum':
            return 'plasmodium_falciparum_GCA_000002765_2'

        if self.assembly_accession is None and \
                self.assembly_name is None:
            raise Exception(
                'Problem with species {}. Either Assembly name or Assembly accession doesn\'t exist. \n'
                'Assembly name: {}, \n'
                'Assembly accession: {}'.format(self.common_name,
                                                self.assembly_name,
                                                self.assembly_accession))
        else:
            genome_id = '{}_{}'.format(
                Genome.genome_id_regex.sub('_', self.production_name),
                Genome.genome_id_regex.sub(
                    '_', self.assembly_accession
                    if self.assembly_accession else self.assembly_name))

            return genome_id

    def __find_alternative_assemblies(self):

        if 'ASSOCIATED_ASSEMBLIES' in self.config:
            for associated_assemblies in self.config[
                    'ASSOCIATED_ASSEMBLIES'].values():
                if self.genome_id in associated_assemblies:
                    associated_assemblies.remove(self.genome_id)
                    return associated_assemblies
        return None

    def __check_if_is_popular(self):

        if 'POPULAR_GENOMES' in self.config and self.genome_id in self.config[
                'POPULAR_GENOMES']:
            return True
        else:
            return False

    def __get_popular_order(self):

        return self.config['POPULAR_GENOMES'].index(self.genome_id)

    def __check_if_is_available(self):

        if 'AVAILABLE_GENOMES' in self.config and self.genome_id in self.config[
                'AVAILABLE_GENOMES']:
            return True
        else:
            return False

    def __get_example_objects(self):

        return self.config['AVAILABLE_GENOMES'].get(self.genome_id)

    def sanitize(self):
        """Removes unnecessary genome object data before creating json file for the genome"""
        self.__dict__.pop('genome_info')

    def convert_to_dict(self):
        if 'genome_info' in self.__dict__:
            self.__dict__.pop('genome_info')
        return self.__dict__
Example #23
    def __init__(self, **kwargs):

        config = get_config()

        self.stop_char_mapping = config['STOP_CHARS_MAPPING']
        self.min_token_length = config['MINIMUM_TOKEN_LENGTH']
Example #24
from sanic import Sanic
from sanic.response import text
import multiprocessing

from auth.auth import protected
from auth.login import login
from configs.config import get_config

app = Sanic("{{cookiecutter.app_name}}")

app.config = get_config()

app.blueprint(login)


@app.get("/")
async def hello_world(request):
    return text("Hello, world.")


@app.get("/secret")
@protected
async def secret(request):
    return text("To go fast, you must be fast.")


if __name__ == "__main__":
    workers = multiprocessing.cpu_count()
    app.run(host="0.0.0.0", port="{{cookiecutter.port}}", workers=workers)
Example #25

import os
import requests, urllib.parse as urlparse
import json
from configs.config import get_config
from resources.rest_client import EnsemblREST, EnsemblGRCH37REST

if __name__ == "__main__":
    config = get_config()
    rest_client = EnsemblREST()
    grch37_rest_client = EnsemblGRCH37REST()
    with open(config['REGION_INFO_FILE'], 'w') as kf:
        regions_info = {}
        if os.path.exists(config['GENOME_STORE_FILE']):
            with open(config['GENOME_STORE_FILE'], "r") as genome_store_file:
                genome_store_data = json.load(genome_store_file)
                for gid, genome in genome_store_data.items():
                    print("Processing Species {} with assembly {} ".format(
                        genome['production_name'], genome['assembly_name']))
                    species_karyotypes = {}
                    species_name = genome['production_name']
                    if genome['assembly_name'] in ['GRCh37.p13']:
                        assembly_info = grch37_rest_client.get_assembly_info(
                            species_name)
Example #26
                        model.module.dlstm.state_dict(),
                        '{}/test-decoder_lstm.pth'.format(model_save_dir))
                    torch.save(
                        model.module.state_dict(),
                        '{}/test-autoencoder_decoder.pth'.format(
                            model_save_dir))
                    au_loss = torch.stack(losses).mean()
                    print('Epoch: {} \tTraining Loss: {:.6f}'.format(
                        epoch, au_loss))
                    self.log_writer.update_loss(au_loss, epoch, 'decoder loss')
                    step += 1
            except TypeError:
                continue
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))

        model.load_state_dict(best_model_wts)
        return model, losses


if __name__ == '__main__':
    config = get_config(mode='train')
    t = trainer(config)
    model = AutoLSTM(config.input_size,
                     config.hidden_size,
                     config.num_layers,
                     bidirectional=True)
    model.to(t.device)
    t.train_model(model, config.n_epochs, config.model_save_dir)
Example #27
        writer.add_scalar('loss', loss.detach().cpu().item(), total_step.val)
        if config.method != 'byol':
            writer.add_scalar('top1',
                              acc1.detach().cpu().item(), total_step.val)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


if __name__ == "__main__":
    args = parser.parse_args()

    if args.method == 'simclr':
        args.lr = 0.1
        args.temperature = 0.5
        args.weight_decay = 1e-6
    if args.method == 'byol':
        args.lr = 0.2
        args.weight_decay = 1.5e-6

    config = get_config(method=args.method)
    match_config_with_args(config, args)
    inspect_config(config)
    config.freeze()

    # save experiment config
    save_experiment_config(config)
    # run training
    main(config, args)