Example 1
 def save(self, step):
     print(color_text('Saving models ...', 'green'))
     torch.save(
         self.model.module.state_dict(),
         os.path.join(self.args.save_path, 'model',
                      'pred_model_%09d.pt' % step))
     torch.save(
         self.optim.state_dict(),
         os.path.join(self.args.save_path, 'optimizer', 'optimizer.pt'))
     with open(os.path.join(self.args.save_path, 'epoch.pkl'), 'wb') as f:
         pkl.dump(self.epoch, f)
     self.bmanager.save_spc_buffer()
     print(color_text('Model saved successfully!', 'green'))
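
Every example on this page calls a color_text helper that the excerpts themselves do not define. Implementations vary by project (terminal tools usually emit ANSI escape codes, while the Qt dialog in Example 5 expects rich-text color names such as 'limegreen'); a minimal terminal-oriented sketch, with assumed color names and codes, could be:

def color_text(text, color='white', style='normal'):
    # Wrap text in ANSI escape codes; the code tables are assumptions.
    colors = {'red': 31, 'green': 32, 'yellow': 33, 'cyan': 36, 'white': 37}
    styles = {'normal': 0, 'bright': 1}
    return '\033[{};{}m{}\033[0m'.format(
        styles.get(style, 0), colors.get(color, 37), text)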
Example 2
 def __str__(self):
     """c
     """
     print_var = ' '
     if self.is_terminal:
         print_var = self.char + print_var
         if self.char == self.PLUS:
             print_var = color_text(print_var, "green")
         else:
             print_var = color_text(print_var, "red")
     elif self.is_invalid():
         print_var = color_text(self.char + print_var, "yellow")
     else:
         action = self.utility[1]
         print_var = ACTION_CHARS[action] + print_var
     return print_var
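
The method above relies on names defined elsewhere in the class and module; illustrative stand-ins (assumptions, not the original definitions) would be:

PLUS = '+'  # assumed terminal character marking a positive outcome
ACTION_CHARS = {0: '<', 1: '>', 2: '^'}  # assumed action-index-to-glyph map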
Example 3
def write_job(analydir, jobname, jobs, orders):

    job_dir = os.path.join(analydir, 'job')
    log_dir = os.path.join(analydir, 'log', jobname)
    create_dir(job_dir)
    create_dir(log_dir)
    job_path = os.path.join(job_dir, jobname)
    with open(job_path, 'w') as job_file:
        # write jobs
        for job in jobs:
            job_part = '''\
                job_begin
                  name {name}
                  memory {memory}
                  status {status}
                  sched_options {sched}
                  cmd_begin
                    {cmd}
                  cmd_end
                job_end\n
            '''.format(**job)

            job_file.write(textwrap.dedent(job_part))
        # write orders
        for order in orders:
            job_file.write(order + '\n')
        # write log_dir
        job_file.write('\nlog_dir %s\n' % log_dir)

    print('successfully generated job; you can start it with: %s' % utils.color_text(
        'sjm job/' + jobname, 'green', style='bright'))
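
A hypothetical call, assuming each job dict supplies the keys the template consumes (name, memory, status, sched, cmd) and that orders follow SJM's 'order A after B' syntax:

jobs = [{
    'name': 'qc_sampleA',
    'memory': '2G',
    'status': 'waiting',
    'sched': '-V -cwd',
    'cmd': 'fastqc sampleA.fq.gz',
}]
orders = ['order mapping_sampleA after qc_sampleA']
write_job('/path/to/analydir', 'batch1', jobs, orders)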
Example 4
 def train_guide_action(self, step):
     if self.bmanager.spc_buffer.can_sample_guide(self.bsize):
         obs, guide_action = self.bmanager.spc_buffer.sample_guide(
             self.bsize)
         q = self.model(obs, action_only=True)
         loss = self.guide_loss_func()(q, guide_action)
         print('Guidance loss  %0.4f' % loss.data.cpu().numpy())
         return loss
     else:
         print(
             color_text('Insufficient expert data for imitation learning.',
                        'red'))
         return 0.0
Example 5
    def __init__(self, parent=None, name: str = '', description: str = ''):
        super(Dialog, self).__init__(parent)
        self.option = QSpinBox()

        self.option.setMinimum(5)
        self.option.setMaximum(10000000)
        self.option.setValue(300)
        self.setWindowTitle('Get giles')

        self.label = QLabel(color_text('Insert option:', 'limegreen'))
        self.name_label = QLabel(color_text(name + ':', 'limegreen'))
        self.tooltip = QLabel(description)
        self.ok_button = QPushButton('Ok', self)
        self.ok_button.setFixedSize(self.ok_button.sizeHint())
        self.ok_button.clicked.connect(self.accept)

        self.cancel_button = QPushButton('Cancel', self)
        self.cancel_button.setFixedSize(self.cancel_button.sizeHint())
        self.cancel_button.clicked.connect(self.reject)

        layout = QGridLayout(self)
        layout.addWidget(self.name_label, 0, 0, 1, 3)
        layout.addWidget(self.tooltip, 1, 0, 1, 3)

        layout.addWidget(self.label, 2, 0, 1, 3)
        layout.addWidget(self.option, 3, 0, 1, 3)

        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 0)
        layout.setColumnStretch(2, 0)
        layout.addWidget(self.ok_button, 4, 1)
        layout.addWidget(self.cancel_button, 4, 2)

        self.setFixedHeight(self.sizeHint().height())
        self.setFixedWidth(self.sizeHint().width())
        self.option.setFocus()
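
A minimal launch sketch, assuming the widgets above come from PyQt5 and using placeholder name/description strings:

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
dialog = Dialog(name='Timeout', description='Seconds to wait before giving up')
if dialog.exec_():
    print('Chosen value:', dialog.option.value())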
Example 6
    def __init__(self, head_channels, classes, snapshot_file='model.pth.tar'):
        super(SegmentationModule, self).__init__()

        norm_act = partial(ABN, activation="leaky_relu", slope=.01)
        self.body = models.__dict__["net_wider_resnet38_a2"](norm_act=norm_act, dilation=(1, 2, 4, 4))
        self.head = DeeplabV3(4096, 256, 256, norm_act=norm_act, pooling_size=(84, 84))
        self.cls = nn.Conv2d(head_channels, classes, 1)
        self.transform = SegmentationTransform(
            2048,
            (0.41738699, 0.45732192, 0.46886091),
            (0.25685097, 0.26509955, 0.29067996),
        )

        dir_path = os.path.dirname(os.path.realpath(__file__))
        snapshot_file = os.path.join(dir_path, snapshot_file)
        if not os.path.exists(snapshot_file):
            print(color_text('No local model found at {}'.format(snapshot_file), 'red'))
            print(color_text('Please download pretrained model from https://drive.google.com/file/d/1SJJx5-LFG3J3M99TrPMU-z6ZmgWynxo-/view', 'red'))
            raise FileNotFoundError(snapshot_file)
        print('Loading segmentation model from %s' % snapshot_file)
        data = torch.load(snapshot_file)
        self.body.load_state_dict(data["state_dict"]["body"])
        self.head.load_state_dict(data["state_dict"]["head"])
        self.cls.load_state_dict(data["state_dict"]["cls"])
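
A hypothetical construction call: head_channels must match the 256 output channels of the DeeplabV3 head above, while classes depends on the label set (65 here is only an example):

model = SegmentationModule(head_channels=256, classes=65)
model.eval()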
Example 7
    def train_model(self, args, step):
        target = self.bmanager.spc_buffer.sample(self.bsize)
        target = encode_target(target)
        target['seg_batch'] = target['seg_batch'].long()

        output = self.model(target['obs_batch'],
                            target['act_batch'],
                            action_var=target['prev_action'])
        loss = 0.0

        batch_thr = self.args.thr
        threshold = batch_thr * self.pstep

        if self.args.use_depth:
            depth_pred = output["depth_pred"].view(-1, self.img_h, self.img_w)
            depth_target = target["depth_batch"].view(-1, self.img_h,
                                                      self.img_w)
            depth_loss = self.depth_loss_func(depth_pred, depth_target)
            loss += depth_loss
            print("depth loss: {}".format(depth_loss.data.cpu().numpy()))

        if self.args.use_detection:
            original_bboxes = target['original_bboxes']
            bboxes_nums = [[
                original_bboxes[i][j].size / 5 for j in range(self.pstep + 1)
            ] for i in range(self.bsize)]
            bboxes_ind = [
                np.array(np.where(np.array(bboxes_nums[i]) > 0))
                for i in range(self.bsize)
            ]

            nonempty_batches = []
            empty_batches = []
            for i in range(self.bsize):
                if bboxes_ind[i].size > 0 and 0 in bboxes_ind[i]:
                    # ensure that the first frame in the episode contains at least one vehicle GT
                    nonempty_batches.append(i)
                else:
                    empty_batches.append(i)

            frame_idx = []
            for batch_ind in nonempty_batches:
                for frame_ind in bboxes_ind[batch_ind][0]:
                    frame_idx.append(batch_ind * (self.pstep + 1) + frame_ind)

            if len(frame_idx) < threshold:
                print(
                    color_text(
                        'Not enough positive samples to train detector ...',
                        'green'))
                instance_loss = 0
            else:
                instance_loss = ins_loss(
                    step,
                    target,
                    output,
                    self.detect_loss_func,
                    self.coll_with_loss_func,
                    self.logger,
                    use_coll_with=self.args.use_colls_with)
            loss += instance_loss
            print("detector loss: {}".format(instance_loss))

        # Loss Part #2: loss from future event happening prediction
        loss += event_losses(step, target, output, get_accuracy,
                             self.event_loss_func, self.eventloss_weights,
                             self.logger)

        # Loss Part #3: loss from future speed prediction
        if args.use_speed:
            speed_pred = output['speed']
            speed_target = target['sp_batch'][:, 1:].unsqueeze(dim=2)
            speedloss = one_loss(step, speed_target, speed_pred,
                                 self.speed_loss_func, "speed", self.logger)
            loss += self.speedloss_weight * speedloss

        # Loss Part #4: loss from future pixelwise semantic label prediction
        seg_pred = output['seg_pred'].view(-1, self.classes, self.img_h,
                                           self.img_w)
        seg_target = target['seg_batch'].view(-1, self.img_h, self.img_w)
        segloss = one_loss(step, seg_target, seg_pred, self.seg_loss_func,
                           "seg", self.logger)
        loss += self.segloss_weight * segloss

        self.logger.write(step, "total_loss", loss.item())
        gc.collect()
        return loss
Example 8
        refer = link.get('href')
        if refer.startswith('/'):
            links.append(purl.scheme + '://' + purl.netloc + refer)
        elif refer.startswith('#'):
            links.append(url + refer)
        else:
            p = urlparse(refer)
            if p.netloc and p.scheme:
                links.append(refer)

links = list(set(links))
# print(links)
# exit(1)
spaces = '{: <100} {:>20} {:>40}'
print(
    spaces.format(color_text('link', 'cyan'), color_text('filesize', 'cyan'),
                  color_text('filename', 'cyan')))
for link in links:
    try:
        head = session.head(link, allow_redirects=True, timeout=10).headers
    except requests.ConnectionError as e:
        print('Connection error for', link, '\n', str(e))
        continue
    if is_downloadable(head.get('content-type')):
        # print(head)
        filename = get_filename_from_cd(head.get('content-disposition'))
        if not filename:
            filename = link.split('/')[-1]

        file_size = int(head['content-length'])
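
The loop above leans on two helpers the excerpt omits; plausible sketches (assumptions, not the project's actual code) are:

import re

def is_downloadable(content_type):
    # Treat anything that is not HTML or plain text as a downloadable file.
    if not content_type:
        return False
    content_type = content_type.lower()
    return 'text' not in content_type and 'html' not in content_type

def get_filename_from_cd(content_disposition):
    # Pull a filename out of a Content-Disposition header, if present.
    if not content_disposition:
        return None
    match = re.search(r'filename="?([^";]+)"?', content_disposition)
    return match.group(1) if match else None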
Example 9
def print_devices_comparison(with_color=True, with_locations=False):
    """
    Prints a table showing when each directory was last backed up on each
    device.
    """
    nominal_table_width = 70

    no_columns = len(config.DEVICES) + 1
    column_width = nominal_table_width // no_columns  # integer width for formatting
    table_width = (column_width * no_columns) + 1

    horizontal_border = (('+' + ('-' * (column_width - 1))) * (no_columns)) + \
        '+'

    # Print the table heading row.
    print('+{:-<{width}}+'.format('', width=(table_width - 2)))
    print('|{:^{width}}|'.format('<<< All devices >>>', width=(table_width - 2)))

    # Print the subheading row.
    print('+{:-<{width1}}+{:-<{width2}}+'.format(
        '',
        '',
        width1=(column_width - 1),
        width2=((column_width * (no_columns - 1)) - 1)
    ))
    print('|{:^{width1}}|{:^{width2}}|'.format(
        '~ Directory ~',
        '~ Approximate backup age ~',
        width1=(column_width - 1),
        width2=((column_width * (no_columns - 1)) - 1)
    ))
    print(horizontal_border)

    # Print the column headings row.
    column_heading_row = '|{:^{width}}'.format('', width=(column_width - 1))
    for device in config.DEVICES:
        max_text_width = column_width - 1
        column_heading_row += '|{:^{width}}'.format(
            device['name'][:max_text_width],
            width=max_text_width
        )
    column_heading_row += '|'
    print(column_heading_row)
    print(horizontal_border)

    # Print each directory's row.
    for directory in config.DIRECTORIES:
        directory_row = '| {:<{width}}'.format(
            directory['name'],
            width=(column_width - 2)
        )

        for d in config.DEVICES:
            device = Device(d)
            elapsed_seconds = device.directory_age(directory)

            if elapsed_seconds:
                raw_msg = readable_duration(elapsed_seconds)
                msg = '{:^{width}}'.format(raw_msg, width=(column_width - 2))
            else:
                msg = '{:^{width}}'.format(
                    '-',
                    width=(column_width - 2)
                )

            # Guard against elapsed_seconds being None before comparing.
            if with_color and elapsed_seconds and elapsed_seconds > (60 * 60 * 24 * 3):
                msg = color_text(msg, 'red')
            elif with_color and elapsed_seconds and elapsed_seconds > (60 * 60 * 24):
                msg = color_text(msg, 'amber')
            elif with_color and elapsed_seconds:
                msg = color_text(msg, 'green')

            directory_row += '| {}'.format(msg)
        directory_row += '|'

        print(directory_row)
        print(horizontal_border)

    # Print each device's location.
    if with_locations:
        print('')
        print(horizontal_border)
        location_row = '| {:<{width}}'.format(
            'Location',
            width=(column_width - 2)
        )
        for d in config.DEVICES:
            device = Device(d)
            max_text_width = column_width - 1
            location_row += '|{:^{width}}'.format(
                device.location[:max_text_width],
                width=max_text_width
            )
        location_row += '|'
        print(location_row)
        print(horizontal_border)

    # Add two blank lines at the end for visual clarity.
    print('\n')
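
readable_duration is defined elsewhere in that project; a rough stand-in consistent with the '~ Approximate backup age ~' column could be:

def readable_duration(seconds):
    # Render an elapsed time as a coarse, human-friendly age string.
    if seconds < 60 * 60:
        return '{} min'.format(int(seconds // 60))
    if seconds < 60 * 60 * 24:
        return '{} hours'.format(int(seconds // (60 * 60)))
    return '{} days'.format(int(seconds // (60 * 60 * 24)))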
Example 10
def train_policy(args, env, max_steps=40000000):
    guides = generate_guide_grid(args.bin_divide)
    train_net, net, optimizer, epoch, exploration, num_steps = init_models(args)

    buffer_manager = BufferManager(args)
    action_manager = ActionSampleManager(args, guides)
    action_var = Variable(torch.from_numpy(np.array([-1.0, 0.0])).repeat(1, args.frame_history_len - 1, 1), requires_grad=False).float()

    # prepare video recording
    if args.recording:
        video_folder = os.path.join(args.video_folder, "%d" % num_steps)
        os.makedirs(video_folder, exist_ok=True)
        if args.sync:
            video = cv2.VideoWriter(os.path.join(video_folder, 'video.avi'),
                                    cv2.VideoWriter_fourcc(*'MJPG'),
                                    24.0, (args.frame_width, args.frame_height), True)
        else:
            video = None
            signal = mp.Value('i', 1)
            p = mp.Process(target=record_screen,
                           args=(signal,
                                 os.path.join(video_folder, 'video.avi'),
                                 1280, 800, 24))
            p.start()

    # initialize environment
    obs, info = env.reset()
    if args.recording:
        log_frame(obs, buffer_manager.prev_act, video_folder, video)

    num_episode = 1
    print('Start training...')

    for step in range(num_steps, max_steps):
        obs_var = buffer_manager.store_frame(obs, info)
        action, guide_action = action_manager.sample_action(net=net,
                                                            obs=obs,
                                                            obs_var=obs_var,
                                                            action_var=action_var,
                                                            exploration=exploration,
                                                            step=step,
                                                            explore=num_episode % 2)
        obs, reward, done, info = env.step(action)
        print("action [{0:.2f}, {1:.2f}]".format(action[0], action[1]) + " " +
              "collision {}".format(str(bool(info['collision']))) + " " +
              "off-road {}".format(str(bool(info['offroad']))) + " " +
              "speed {0:.2f}".format(info['speed']) + " " +
              "reward {0:.2f}".format(reward) + " " +
              "explore {0:.2f}".format(exploration.value(step))
              )

        action_var = buffer_manager.store_effect(guide_action=guide_action,
                                                 action=action,
                                                 reward=reward,
                                                 done=done,
                                                 collision=info['collision'],
                                                 offroad=info['offroad'])
        if args.recording:
            log_frame(obs, action, video_folder, video)

        if done:
            print('Episode {} finished'.format(num_episode))
            if not args.sync and args.recording:
                signal.value = 0
                p.join()
                del p

        # train SPN
        if buffer_manager.spc_buffer.can_sample(args.batch_size) and ((not args.sync and done) or (args.sync and step % args.learning_freq == 0)):
            # train model
            for ep in range(args.num_train_steps):
                optimizer.zero_grad()
                loss = train_model(args=args,
                                   net=train_net,
                                   spc_buffer=buffer_manager.spc_buffer)
                if args.use_guidance:
                    loss += train_guide_action(args=args,
                                               net=train_net,
                                               spc_buffer=buffer_manager.spc_buffer,
                                               guides=guides)
                print('loss = %0.4f\n' % loss.data.cpu().numpy())
                loss.backward()
                optimizer.step()
                epoch += 1
            net.load_state_dict(train_net.state_dict())

            # save model
            if epoch % args.save_freq == 0:
                print(color_text('Saving models ...', 'green'))
                torch.save(train_net.module.state_dict(),
                           os.path.join(args.save_path, 'model', 'pred_model_%09d.pt' % step))
                torch.save(optimizer.state_dict(),
                           os.path.join(args.save_path, 'optimizer', 'optimizer.pt'))
                with open(os.path.join(args.save_path, 'epoch.pkl'), 'wb') as f:
                    pkl.dump(epoch, f)
                buffer_manager.save_spc_buffer()
                print(color_text('Model saved successfully!', 'green'))

        if done:
            # reset video recording
            if args.recording:
                if args.sync:
                    video.release()
                    if sys.platform == 'linux':  # convert to mp4 to save disk space
                        os.system('ffmpeg -y -i {0} {1}'.format(
                            os.path.join(video_folder, 'video.avi'),
                            os.path.join(video_folder, 'video.mp4')
                        ))
                        if os.path.exists(os.path.join(video_folder, 'video.mp4')):
                            os.remove(os.path.join(video_folder, 'video.avi'))

                    video_folder = os.path.join(args.video_folder, "%d" % step)
                    os.makedirs(video_folder, exist_ok=True)
                    video = cv2.VideoWriter(os.path.join(video_folder, 'video.avi'),
                                            cv2.VideoWriter_fourcc(*'MJPG'),
                                            24.0, (args.frame_width, args.frame_height), True)
                else:
                    video_folder = os.path.join(args.video_folder, "%d" % step)
                    os.makedirs(video_folder, exist_ok=True)

                    signal.value = 1
                    p = mp.Process(target=record_screen,
                                   args=(signal, os.path.join(video_folder, 'obs.avi'), 1280, 800, 24))
                    p.start()

            num_episode += 1
            obs, info = env.reset()
            buffer_manager.reset(step)
            action_manager.reset()
            if args.recording:
                log_frame(obs, buffer_manager.prev_act, video_folder, video)
Example 11
    def start(self):

        # temp
        advance_dirs = {
            'Merged_vcf': '{analydir}/Advance/{newjob}/Merged_vcf',
            'ACMG': '{analydir}/Advance/{newjob}/ACMG',
            'FilterSV': '{analydir}/Advance/{newjob}/FilterSV',
            'FilterCNV': '{analydir}/Advance/{newjob}/FilterCNV',
            'Noncoding': '{analydir}/Advance/{newjob}/Noncoding',
            'ModelF': '{analydir}/Advance/{newjob}/ModelF',
            'Share': '{analydir}/Advance/{newjob}/Share',
            'Denovo': '{analydir}/Advance/{newjob}/Denovo',
            'Linkage': '{analydir}/Advance/{newjob}/Linkage',
            'ROH': '{analydir}/Advance/{newjob}/ROH',
            'Network': '{analydir}/Advance/{newjob}/Network',
            'Pathway': '{analydir}/Advance/{newjob}/Pathway',
            'PPI': '{analydir}/Advance/{newjob}/PPI',
            'HLA': '{analydir}/Advance/{newjob}/HLA',
            'SiteAS': '{analydir}/Advance/{newjob}/SiteAS',
            'GeneAS': '{analydir}/Advance/{newjob}/GeneAS',
            'IntegrateResult': '{analydir}/Advance/{newjob}/IntegrateResult',
            'Disease': '{analydir}/Advance/{newjob}/Disease',
            'BriefResults': '{analydir}/Advance/{newjob}/BriefResults',
        }

        for k, v in advance_dirs.items():
            self.args.update({k: v.format(**self.args)})

        # print self.args['SiteAS']
        # exit()

        # print self.analy_array
        print('hello, {}'.format(self.username))

        # Require rawdata or not
        qc_status = utils.get_status('qc', self.startpoint,
                                     config.ANALYSIS_POINTS)
        mapping_status = utils.get_status('bwa_mem', self.startpoint,
                                          config.ANALYSIS_POINTS)

        print('qc status:', qc_status)
        print('mapping status:', mapping_status)

        ANALY_DICT = utils.get_analysis_dict(self.analy_array,
                                             config.ANALYSIS_CODE)
        self.args.update({'ANALY_DICT': ANALY_DICT})
        # print ANALY_DICT.keys();exit()

        softwares = utils.get_softwares(self.analy_array,
                                        self.args['ANALY_DICT'], self.args,
                                        self.seqstrag)
        # pprint(softwares);exit()
        self.args.update({'softwares': softwares})

        # check inputs
        self.queues = utils.check_queues(self.queues, self.username)
        self.args.update({'queues': self.queues})

        # use sentieon specific queues if needed
        if 'sentieon' in softwares.values():
            print('add sentieon_queues')
            sentieon_queues = self.queues
            if config.CONFIG.has_option('resource', 'sentieon_queues'):
                sentieon_queues = config.CONFIG.get(
                    'resource', 'sentieon_queues').split(',')
                sentieon_queues = utils.check_queues(sentieon_queues,
                                                     self.username)
                if not sentieon_queues:
                    sentieon_queues = self.queues
            self.args.update({'sentieon_queues': sentieon_queues})

        # print self.args['sentieon_queues'];exit()
        # print sentieon_queues;exit()

        utils.check_analy_array(self.seqstrag, self.analy_array,
                                config.ANALYSIS_CODE)
        utils.check_files(self.pn, self.samp_info, self.samp_list)
        newTR = utils.check_target_region(config.CONFIG, self.seqstrag,
                                          self.refgenome, self.rawTR)
        self.args.update({'TR': newTR})

        print('analysis items:')
        for analysis_code in self.analy_array:
            print(utils.color_text(
                '{:4}  {}'.format(analysis_code,
                                  config.ANALYSIS_CODE[analysis_code][0]),
                'yellow'))

        # Analysis start point
        if self.startpoint:
            if self.startpoint in config.ANALYSIS_POINTS:
                print('start point: {}'.format(
                    utils.color_text(self.startpoint)))
            else:
                print('[error] invalid startpoint: {}'.format(
                    utils.color_text(self.startpoint)))

                print('maybe you want to choose: {}'.format(
                    utils.color_text(
                        process.extractOne(self.startpoint,
                                           config.ANALYSIS_POINTS.keys())[0],
                        'cyan')))

                print('available startpoints are as follows:\n  {}'.format(
                    '  '.join(config.ANALYSIS_POINTS.keys())))
                exit(1)

        is_advance = max(self.analy_array) > 6.1
        project = utils.Project(self.analydir, self.samp_info,
                                self.samp_info_done, self.samp_list,
                                self.qc_list, qc_status, mapping_status,
                                is_advance)

        # Extract sample_info
        print('extracting sample information...')

        fenqi, tissue, disease_name, sample_infos, sample_infos_all, sample_done = project.get_sample_infos(
            self.samp_list, self.samp_info, self.samp_info_done, is_advance)

        database = '{}/project/DisGeNet.json'.format(
            config.CONFIG.get('software', 'soft_dir'))
        disease_ids = utils.get_disease_id(disease_name, database)
        self.args.update({
            'disease_name': disease_name,
            'disease_ids': disease_ids,
        })

        sample_infos_waiting = {
            sampleid: infos
            for sampleid, infos in sample_infos.items()
            if sampleid not in sample_done
        }
        self.args.update({'sample_infos_waiting': sample_infos_waiting})
        # print sample_infos_waiting
        # exit()

        # print 'fenqi:', fenqi
        # print 'tissue:', tissue
        # exit()

        sample_lists = project.get_sample_lists
        # print sample_lists
        # print sample_infos.keys()
        # print sample_infos_all.keys()
        # for sample in sample_infos:
        #     print sample, sample_infos[sample]['familyid']
        # exit()

        if mapping_status == 'waiting':
            sample_lists = project.update_qc_list()

        print('  report number: {}'.format(utils.color_text(fenqi)))
        if disease_name:
            print('  disease name: {}'.format(utils.color_text(disease_name)))
            print('  disease id: {}'.format(utils.color_text(disease_ids)))
        if tissue:
            print('  tissue: {}'.format(utils.color_text(tissue)))
        print('  samples ({}): {}'.format(
            len(sample_infos), utils.color_text(list(sample_infos.keys()))))

        if sample_done:
            print('  samples done ({}): {}'.format(
                len(sample_done), utils.color_text(sample_done)))

        # Update qc_list and extract sample_list
        # print 'update qc_list...'
        # print json.dumps(sample_lists, indent=2)

        # set memory according seqstrag
        print('set analysis memory...')
        if self.seqstrag == 'WGS':
            print('update memory for WGS...')
            for analysis, memory in config.ANALYSIS_MEM_WGS.items():
                if analysis in config.ANALYSIS_POINTS:
                    config.ANALYSIS_POINTS[analysis][0] = memory
        # exit()

        # ===========================================================
        # ===========================================================
        print('>>> pipeline start...')

        mutation_soft, sv_soft, cnv_soft, denovo_soft = [
            softwares[each] for each in ('mutation', 'sv', 'cnv', 'denovo')
        ]

        print('  mutation_soft:{}, sv_soft:{}, cnv_soft:{}, denovo_soft:{}'.format(
            mutation_soft, sv_soft, cnv_soft, denovo_soft))

        # QC
        if ANALY_DICT['quality_control'] and qc_status == 'waiting':
            utils.print_color('> QC', 'white')
            QC(self.args, self.jobs, self.orders, sample_lists, config).start()

        # Mapping
        if ANALY_DICT['mapping']:
            utils.print_color('> Mapping', 'white')
            Mapping(self.args, self.jobs, self.orders, sample_lists,
                    sample_infos, config, qc_status, mapping_status).start()

        # Mutation
        if ANALY_DICT['snpindel_call']:
            utils.print_color('> Mutation', 'white')
            Mutation(self.args, self.jobs, self.orders, sample_lists,
                     sample_infos, config).start()

        # SV
        if ANALY_DICT['sv_call']:
            utils.print_color('> SV', 'white')
            SV(self.args, self.jobs, self.orders, sample_infos, config).start()

        # CNV
        if ANALY_DICT['cnv_call']:
            utils.print_color('> CNV', 'white')
            CNV(self.args, self.jobs, self.orders, sample_infos,
                config).start()

        # FilterDB
        if ANALY_DICT['filter']:
            utils.print_color('> FilterDB', 'white')
            FilterDB(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                     cnv_soft, sample_infos, config, disease_name, tissue,
                     ANALY_DICT).start()

        # ModelF
        if ANALY_DICT['filter_model']:
            utils.print_color('> Model', 'white')
            FilterModel(self.args, self.jobs, self.orders, mutation_soft,
                        sv_soft, cnv_soft, sample_infos, config).start()

        # Denovo
        if ANALY_DICT['denovo']:
            utils.print_color('> Denovo', 'white')
            Denovo(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                   cnv_soft, denovo_soft, sample_infos, config,
                   ANALY_DICT).start()

        # Linkage
        if ANALY_DICT['linkage']:
            utils.print_color('> Linkage', 'white')
            Linkage(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                    cnv_soft, denovo_soft, sample_infos_all, config,
                    ANALY_DICT).start()

        # IntegrateResult
        if any(ANALY_DICT[analysis] for analysis in
               ['filter', 'filter_model', 'denovo', 'phenolyzer']):
            utils.print_color('> IntegrateResult', 'white')
            IntegrateResult(self.args, self.jobs, self.orders, config).start()

        # ROH
        if ANALY_DICT['roh']:
            utils.print_color('> ROH', 'white')
            ROH(self.args, self.jobs, self.orders, sample_infos, mutation_soft,
                config).start()

        # OTHER
        other = Other(self.args, self.jobs, self.orders, config, disease_name)

        # IBD
        if any(ANALY_DICT[each]
               for each in ['filter_model', 'linkage', 'denovo'
                            ]) and len(sample_infos_waiting) > 1:
            utils.print_color('> IBD', 'white')
            other.ibd()

        # Network
        if ANALY_DICT['phenolyzer']:
            utils.print_color('> Phenolyzer', 'white')
            other.phenolyzer()

        # Pathway
        if ANALY_DICT['pathway']:
            utils.print_color('> Pathway', 'white')
            other.pathway()

        # PPI
        if ANALY_DICT['ppi']:
            utils.print_color('> PPI', 'white')
            other.ppi()

        # SiteAS
        if ANALY_DICT['site_association']:
            utils.print_color('> SiteAS', 'white')
            Association(self.args, self.jobs, self.orders,
                        config).site_association()

        # GeneAS
        if ANALY_DICT['gene_association']:
            utils.print_color('> GeneAS', 'white')
            Association(self.args, self.jobs, self.orders,
                        config).gene_association()

        # HLA
        if ANALY_DICT['hla']:
            utils.print_color('> HLA', 'white')
            HLA(self.args, self.jobs, self.orders, sample_lists, sample_infos,
                config, qc_status).start()

        # result and report
        utils.print_color('> Result', 'white')
        Result(self.args, self.jobs, self.orders, config).start()

        utils.print_color('> Report', 'white')
        Report(self.args, self.jobs, self.orders, config).start()

        # job summary
        print('length of jobs waiting/total: {}/{}'.format(
            len([job for job in self.jobs if job.get('status') == 'waiting']),
            len(self.jobs)))

        utils.write_job(self.analydir, self.newjob, self.jobs, self.orders)

        print('{:-^80}'.format(' all done '))