Ejemplo n.º 1
0
def selected(auth_info, **kwargs):
    """Mark sub-table rows as selected based on a CSV id field of a main-table row.

    Reads m_table/field/where/s_table from the request params; rows of
    s_table whose id appears in the main row's comma-separated field get a
    'selected' attribute for rendering.
    """
    if auth_info['code'] == 1:
        return json.dumps(auth_info)
    username = auth_info['username']
    try:
        params = request.get_json()['params']
        where = params.get('where', None)
        m_table = params.get('m_table', None)
        field = params.get('field', None)
        s_table = params.get('s_table', None)
        row = app.config['db'].get_one_result(m_table, [field], where)
        # The field holds a CSV id list, e.g. "1,2".
        chosen_ids = set(int(i) for i in row[field].split(','))
        result = app.config['db'].get_results(s_table,
                                              ['id', 'name', 'name_cn'])
        # Tag every sub-table row whose id appears in the CSV list.
        for item in result:
            if int(item['id']) in chosen_ids:
                item['selected'] = 'selected="selected"'
        utils.write_log('api').info('%s selected %s successfully' %
                                    (username, s_table))
        return json.dumps({'code': 0, 'result': result})
    except:
        utils.write_log('api').error('selected error %s' %
                                     traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': 'selected.get faild'})
Ejemplo n.º 2
0
def apply_create(auth_info, **kwargs):
    """Create (or re-submit) a project_apply record and mirror it into project_deploy."""
    username = auth_info['username']
    r_id = auth_info['r_id']
    columns = [
        'project_id', 'info', 'applicant', 'commit', 'apply_date', 'status',
        'detail'
    ]
    try:
        # params carry: project_id, project_name, applicant, info, detail
        payload = request.get_json()['params']
        payload['commit'] = '11111'  # placeholder; a script fetches the latest commit
        payload['apply_date'] = time.strftime('%Y-%m-%d %H:%M')
        payload['status'] = 1  # status becomes 1 once applied
        payload['applicant'] = username
        where = {"project_id": int(payload['project_id'])}
        # The form submits a project name but the table stores the id, so drop it.
        payload.pop('project_username')
        existing = app.config['db'].get_one_result('project_apply', columns, where)
        if existing and existing['status'] in (1, 2):
            return json.dumps({'code': 1, 'errmsg': '目前项目状态不可申请'})
        if existing:
            app.config['db'].execute_update_sql('project_apply', payload, where)
        else:
            app.config['db'].execute_insert_sql('project_apply', payload)
        app.config['db'].execute_insert_sql('project_deploy', payload)
        utils.write_log('api').info('%s:项目申请成功' % username)
        return json.dumps({'code': 0, 'result': '项目申请成功'})
    except:
        utils.write_log('api').error('project apply error: %s' %
                                     traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': '项目申请失败'})
Ejemplo n.º 3
0
def userselfinfo(**kwargs):
    """Return the calling user's profile with role names and a power map.

    Fixes: ``username`` was never defined (a NameError sent every call into
    the except branch); it is now taken from kwargs, matching how sibling
    handlers receive it. The success log also passed ``username`` as the
    logger's first argument instead of formatting it into the message.
    """
    username = kwargs.get('username')  # TODO confirm callers pass username in kwargs
    fields = ['id', 'username', 'name', 'email', 'mobile', 'is_lock', 'r_id']
    try:
        user = app.config['cursor'].get_one_result(
            'user', fields, where={'username': username})
        if user.get('r_id', None):
            r_id = user['r_id'].split(',')
            rids = app.config['cursor'].get_results('role',
                                                    ['id', 'name', 'p_id'],
                                                    where={'id': r_id})
        else:
            rids = []
        # Collect the distinct power ids granted through the user's roles.
        pids = []
        for role in rids:
            pids += role['p_id'].split(',')
        pids = list(set(pids))
        user['r_id'] = [role['name'] for role in rids]  # role ids -> role names

        if pids:  # map power ids to {'name': {'name_cn':..., 'url':...}}
            mypids = app.config['cursor'].get_results(
                'power', ['id', 'name', 'name_cn', 'url'], where={'id': pids})
            user['p_id'] = dict([
                (str(x['name']), dict([(k, x[k]) for k in ('name_cn', 'url')]))
                for x in mypids
            ])
        else:
            user['p_id'] = {}

        utils.write_log('api').info('%s get_user_info' % username)
        return json.dumps({'code': 0, 'user': user})
    except:
        utils.write_log('api').error("Get users list error: %s" %
                                     traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': 'get userinfo failed'})
Ejemplo n.º 4
0
def test(model, test_loader, criterion, to_log=None):
    """Evaluate ``model`` on ``test_loader``; returns (average loss, acc).

    ``acc`` is hard-coded to 0 (accuracy is not meaningful for this
    regression task); the reported MSE is the last batch's value only.
    Fixes: removed dead locals (``correct``, ``output_array``,
    ``target_array``) and the per-batch GPU->CPU numpy copies whose results
    were never used; ``mse`` is now initialised so an empty loader no longer
    raises NameError.
    """
    model.eval()
    test_loss = 0
    mse = 0  # stays 0 if the loader yields no batches

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target)  # sum up batch loss
            mse = rmse(output, target)  # NOTE: only the last batch is reported

    # Turn the summed loss into a per-sample average scaled by batch size.
    test_loss /= len(test_loader.sampler)
    test_loss *= test_loader.batch_size
    acc = 0  # accuracy not applicable here
    format_str = 'Test set: Average loss: {:.4f}, MSE: ({:3.5f})\n'.format(test_loss, mse)
    print(format_str)
    if to_log is not None:
        write_log(format_str, to_log)
    return test_loss.item(), acc
Ejemplo n.º 5
0
def update_log(args):
    """Record newly watched episodes in every tracking store."""
    anime, eps = read_args(args)
    eps = utils.compress_range(eps)
    # Persist to the log, tracklist, cache and watch-later list, in order.
    utils.write_log(anime, eps)
    utils.update_tracklist(anime, eps)
    utils.write_cache(anime)
    utils.update_watchlater(anime, eps)
Ejemplo n.º 6
0
	def __init__(self, 
		filter_id,
		group_id,
		req,
		out_dir=".",
		order = None,
		min_identity = 30,
		min_blast_evalue = 0.01,
		save_to_ITOL = False,
		sep = ",",
		report_unfit = False):
		"""Set up the GROUP step: store options, resolve the domain order,
		probe BLAST tool versions, and create/log the output directory.

		Parameters:
			filter_id: id of the preceding FILTER step.
			group_id: id for this step; output goes to <out_dir>/<group_id>_GROUP.
			req: requirement spec; passed to utils.get_order when order is None.
			out_dir: base output directory (stored as an absolute path).
			order: explicit domain order; when None it is derived from `req`
				using the package's bundled data/ directory.
			min_identity: minimum BLAST percent identity accepted.
			min_blast_evalue: maximum BLAST e-value accepted.
			save_to_ITOL: whether ITOL-compatible output is produced.
			sep: field separator for tabular output.
			report_unfit: whether unfit ORFs are reported separately.
		"""
		self.req = req
		self.order = order
		self.filter_id = filter_id
		self.group_id = group_id
		self.out_dir = os.path.abspath(out_dir)
		self.min_identity = min_identity
		self.min_blast_evalue = min_blast_evalue
		self.save_to_ITOL = save_to_ITOL
		self.sep = sep
		self.report_unfit = report_unfit

		if self.order == None: ## user didn't override the order argument
			d = os.path.abspath(os.path.dirname(__file__))
			data_env = os.path.join(d, 'data/')
			self.order = utils.get_order(self.req,data_env)

		## get blast versions
		self.blastp = self._get_blast_version("blastp")
		self.makeblastdb = self._get_blast_version("makeblastdb")

		# Create the step's output directory and record all settings in its LOG.
		utils.assure_path_exists(os.path.join(self.out_dir,group_id+ "_GROUP"))
		utils.write_log(os.path.join(self.out_dir,group_id + "_GROUP", "LOG"), "STEP 4 : GROUP ORFs", vars(self), "")
Ejemplo n.º 7
0
def test(model, test_loader, criterion, to_log=None):
    """Evaluate a VAE-style model.

    Returns (average loss, precision, recall); precision/recall reflect the
    last batch only. ``criterion`` is unused (loss comes from loss_fn).
    """
    model.eval()
    total_loss = 0
    with torch.no_grad():
        for batch, labels in test_loader:
            batch, labels = batch.to(device), labels.to(device)

            recon, mu, logvar = model(batch)
            batch_loss, bce, kld = loss_fn(recon, labels, mu, logvar)
            total_loss += batch_loss

            # Binarise the reconstruction at 0.5 before scoring.
            threshold = torch.tensor([0.5]).to(device)
            binarised = (recon > threshold).float() * 1
            pre = precision(labels, binarised)
            rec = recall(labels, binarised)
    test_loss = total_loss / len(test_loader.sampler)
    test_loss = test_loss * test_loader.batch_size
    format_str = 'Test set: Average loss: {:.4f}, precision: {:.2%}, recall: {:.2%}\n'.format(
        test_loss, pre, rec)
    print(format_str)
    if to_log is not None:
        write_log(format_str, to_log)
    return test_loss.item(), pre, rec
Ejemplo n.º 8
0
 def _delete_sql(self, table_name, where):
     """Build a DELETE statement from an AND-joined ``where`` dict.

     Returns "" when ``where`` is missing or not a dict, refusing to build
     an unconditional DELETE. NOTE(review): values are interpolated directly
     into the SQL text — injection-prone for untrusted input.
     """
     if not isinstance(where, dict) or not where:
         return ""
     clause = ' AND '.join("%s='%s'" % (col, val) for col, val in where.items())
     sql = "DELETE FROM %s WHERE %s" % (table_name, clause)
     utils.write_log('api').info("Delete sql: %s" % sql)
     return sql
Ejemplo n.º 9
0
def validation(val_dataLoader, criterion, args):
    """Run one validation pass of the global model; logs metrics, returns FWIoU."""
    print("*******  begin validation  *******")
    model.eval()
    loss_sum = 0.0

    evaluator = Eval(args.n_classes)
    evaluator.reset()
    with torch.no_grad():
        with tqdm(total=len(val_dataLoader), unit='batch') as pbar:
            for step, (images, masks) in enumerate(val_dataLoader):
                images, masks = images.to(device), masks.to(device)
                logits = model(images)
                loss_sum += criterion(logits, masks).data.item()

                # Accumulate confusion statistics on the CPU.
                evaluator.add_batch(masks.data.cpu().numpy(),
                                    logits.data.cpu().numpy().argmax(axis=1))

                pbar.update(1)
    print('[ validation ] [average loss:{}]'.format(loss_sum/len(val_dataLoader)))
    PA = evaluator.Pixel_Accuracy()
    MPA = evaluator.Mean_Pixel_Accuracy()
    MIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
    print('[ validation ] [PA1: {:.8f}], [MPA1: {:.8f}], [MIoU1: {:.8f}], [FWIoU1: {:.8f}]'.format(PA, MPA,MIoU, FWIoU))

    log_message ='**validation**   [average loss: {:.8f} ],[PA1: {:.8f}], [MPA1: {:.8f}], [MIoU1: {:.8f}], [FWIoU1: {:.8f}]'\
        .format(loss_sum/len(val_dataLoader),PA, MPA,MIoU, FWIoU)
    write_log(args.log_file_name,log_message)
    return FWIoU
Ejemplo n.º 10
0
def train(epoch, train_dataLoader, optimizer, criterion, args, best_pred):
    """Train the global model for one epoch; returns the epoch's average loss."""
    print("*******  begin train  *******")
    model.train()
    loss_total = 0.0
    with tqdm(total=len(train_dataLoader), unit='batch') as pbar:
        for step, (images, labels) in enumerate(train_dataLoader):
            # Per-batch learning-rate schedule.
            scheduler(optimizer, step, epoch, best_pred)

            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            predictions = model(images)

            loss = criterion(predictions, labels)

            batch_loss = loss.item()
            loss_total += batch_loss
            avg_loss = loss_total / (step + 1)

            loss.backward()
            optimizer.step()

            pbar.update(1)
            pbar.set_postfix(log={
                "cur_loss": "%.6f" % batch_loss,
                "avg_loss ": "%.6f" % avg_loss,
                "lr": "%.8f" % optimizer.param_groups[0]['lr'],
            })

    write_log(args.log_file_name, "[ epoch  {} ]".format(epoch))
    write_log(args.log_file_name,
              "**train** [ avg_loss  {:.8f} ] , [ lr  {:.8f} ]".format(
                  avg_loss, optimizer.param_groups[0]['lr']))
    return avg_loss
Ejemplo n.º 11
0
def test(model, test_loader, criterion, to_log=None):
    """Evaluate a classifier on ``test_loader``; returns (avg loss, accuracy %)."""
    model.eval()

    loss_sum = 0
    n_correct = 0
    with torch.no_grad():
        for batch, labels in test_loader:
            labels = labels.long()
            # Swap dims 1 and 2 before the forward pass — presumably
            # time/channel order; confirm against the model's input layout.
            batch = torch.permute(batch, (0, 2, 1, 3, 4))
            batch, labels = batch.to(device), labels.to(device)
            logits = model(batch)
            loss_sum += criterion(logits, labels)
            # Count samples whose top-scoring class matches the label.
            predicted = logits.argmax(dim=1, keepdim=True)
            n_correct += predicted.eq(labels.view_as(predicted)).sum().item()
        avg_loss = (loss_sum / len(test_loader.sampler)) * test_loader.batch_size
        acc = 100. * n_correct / len(test_loader.sampler)
        format_str = 'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            avg_loss, n_correct, len(test_loader.sampler), acc)
        print(format_str)
        if to_log is not None:
            write_log(format_str, to_log)
        return avg_loss.item(), acc
Ejemplo n.º 12
0
def gitolite():
    """Regenerate the gitolite config file from project/role membership data."""
    git_confile = app.config['git_confile']
    api_dir = os.path.dirname(os.path.realpath(__file__))
    script_dir = os.path.join(api_dir.rstrip('api'), 'script')
    projects, pro_pri = utils.project_members()
    group = utils.role_members()
    try:
        # Write group definitions, then per-repo permissions.
        with open(git_confile, 'w') as f:
            group_lines = ""
            for name, members in group.items():
                group_lines += "@%s = %s\n" % (name, " ".join(members))
            f.write(group_lines)
            f.write("\n")

            repo_lines = ""
            for project, members in projects.items():
                # `projects` lists every member including owners; strip the
                # owners so they only match the RW+ rule below.
                members = list(set(members) - set(pro_pri[project]))
                repo_lines += "repo %s \n" % project
                repo_lines += " RW+ = %s \n" % ' '.join(pro_pri[project])  # owners: full access
                if members:  # plain members: master locked, RW elsewhere
                    repo_lines += " -  master = %s \n" % (' '.join(members))
                    repo_lines += " RW = %s \n" % (' '.join(members))
            f.write(repo_lines)
        # git add/commit/push hook is currently disabled (hard-coded path):
        # stdout=utils.run_script_with_timeout("sh %s/git.sh" % script_dir)
        return {'code': 0, 'result': "git操作成功"}
    except:
        utils.write_log('api').error("get config error: %s" %
                                     traceback.format_exc())
        return {'code': 1, 'errmsg': "get config error"}
Ejemplo n.º 13
0
 def execute_insert_sql(self, table_name, data):
     """Build and run an INSERT for ``data``; returns the execution result, or None when no SQL was built."""
     sql = self._insert_sql(table_name, data)
     if not sql:
         utils.write_log('api').error('execute_insert_sql missing sql')
         return None
     return self._execute(sql)
Ejemplo n.º 14
0
    def __init__(self, flags):
        """Build the MLP network on the GPU, set up paths/logging, and restore state."""
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

        fix_seed()  # seed handling as configured by the project helper

        self.setup_path(flags)

        # The network lives on the GPU.
        self.network = mlp.MLPNet(num_classes=flags.num_classes).cuda()

        print(self.network)
        print('flags:', flags)

        if not os.path.exists(flags.logs):
            os.mkdir(flags.logs)

        # Record the full flag set next to the logs.
        write_log(flags, os.path.join(flags.logs, 'flags_log.txt'))

        self.load_state_dict(flags.state_dict)

        self.configure(flags)
Ejemplo n.º 15
0
def test(model, test_loader, criterion, to_log=None):
    """Evaluate the two-stream (azi/ele) classifier; returns (avg loss, accuracy %)."""
    model.eval()
    loss_sum = 0
    hits = 0
    with torch.no_grad():
        for azi, ele, target in test_loader:
            # Move both input streams and the labels onto the device.
            azi = azi.to(device, dtype=torch.float)
            ele = ele.to(device, dtype=torch.float)
            target = target.to(device, dtype=torch.long)

            logits = model(azi, ele)
            loss_sum += criterion(logits, target)

            # Top-1 prediction per sample.
            pred = logits.argmax(dim=1, keepdim=True)
            hits += pred.eq(target.view_as(pred)).sum().item()

        avg_loss = (loss_sum / len(test_loader.sampler)) * test_loader.batch_size
        acc = 100. * hits / len(test_loader.sampler)
        format_str = 'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            avg_loss, hits, len(test_loader.sampler), acc)
        print(format_str)
        if to_log is not None:
            write_log(format_str, to_log)
        return avg_loss.item(), acc
Ejemplo n.º 16
0
def create_job_list(results_dir, prep_dir, scan_dir, args):
    """Build one summarise-job dict per hmmer ``.result`` file, logging the inputs used."""
    genome_log = "###   INPUT  ### \ngenome\thmmer_result\n"
    jobs = []
    for fname in os.listdir(scan_dir):
        if not fname.endswith(".result"):
            continue
        stem = os.path.basename(fname).replace(".result", "")

        # Each job inherits the shared args plus per-genome paths.
        job = copy.copy(args)
        job["basename"] = stem
        job["out_file"] = os.path.join(results_dir, stem + ".csv")
        job["results_file"] = os.path.join(scan_dir, stem + ".result")
        job["orf_locs"] = os.path.join(prep_dir, stem + ".bed")
        job["orfs"] = {}
        job["unfit_orfs"] = {}

        # When unfit reporting is enabled, point it at a per-genome csv.
        if job["report_unfit"]:
            job["report_unfit"] = os.path.join(job["report_unfit"],
                                               stem + ".csv")

        jobs.append(job)
        genome_log += stem + "\t" + fname + "\n"
    utils.write_log(os.path.join(results_dir, "LOG"),
                    "STEP 3 : FILTER HMMER HITS", args, genome_log)

    return jobs
Ejemplo n.º 17
0
def role_select(auth_info, **kwargs):
    """Admin-only: list roles with the p_id column translated to power names."""
    username = auth_info['username']
    if '1' not in auth_info['r_id']:
        return json.dumps({'code': 1,'errmsg':'you not admin,no power' })
    try:
        default_fields = ['id','name','name_cn','p_id','info']
        params = request.get_json()['params']
        fields = params.get('output', default_fields)

        # Build an id -> name lookup from the power table.
        powers = app.config['db'].get_results('power', ['id', 'name'])
        power_name = dict((str(row['id']), row['name']) for row in powers)

        # Replace each role's p_id id-list with the matching power names.
        result = []
        for role in app.config['db'].get_results('role', fields):
            names = [power_name[pid] for pid in role['p_id'].split(',') if pid in power_name]
            role['p_id'] = ','.join(names)
            result.append(role)

        utils.write_log('api').info('%s select role list success' % username)
        return json.dumps({'code':0,'result':result,'count':len(result)})
    except:
        utils.write_log('api').error("select role list error: %s"  %  traceback.format_exc())
        return json.dumps({'code':1,'errmsg':'get rolelist failed'})
Ejemplo n.º 18
0
def create_jobs_list(args, prep_dir, scan_dir):
    """Create one HMMsearch scan job per prepared fasta file.

    Also writes a LOG file in the SCAN directory recording the hmmer tool
    versions and every genome queued.
    """
    # Tool versions first, then the table of inputs.
    log_other = get_hmmer_version(args.hmmsearch)
    log_other += get_hmmer_version(args.hmmpress)
    log_other += "###   INPUT   ### \ncnt\tgenome\tprep_fasta_file\n"

    jobs = []
    for entry in os.listdir(prep_dir):
        if not entry.endswith(".fasta"):
            continue
        stem = os.path.basename(entry).replace(".fasta", "")
        fasta_path = os.path.join(prep_dir, entry)

        jobs.append({
            "basename": stem,
            "fasta_file": fasta_path,
            "out_dir": scan_dir,
            "hmm_db": args.hmm_db,
            "hmmsearch": args.hmmsearch
        })

        log_other += str(len(jobs)) + "\t" + stem + "\t" + fasta_path + "\n"

    utils.write_log(os.path.join(scan_dir, "LOG"), "STEP 2 : GENOME SCANNING",
                    vars(args), log_other)
    return jobs
Ejemplo n.º 19
0
 def execute_update_sql(self, table_name, data, where, fields=None):
     """Build and run an UPDATE; returns the execution result, or None when no SQL was built."""
     sql = self._update_sql(table_name, data, where, fields)
     if not sql:
         utils.write_log('api').error('execute_update_sql missing sql')
         return None
     return self._execute(sql)
Ejemplo n.º 20
0
def server_getlist(auth_info, **kwargs):
    """Admin-only: list servers joined with their server_group display name.

    Fixes: removed the leftover debug ``print results`` (Python 2 print
    statement), switched the error log channel from 'error' to 'api' for
    consistency with the other handlers, and corrected the copy-pasted
    errmsg that referred to the role list.
    """
    username = auth_info['username']
    if '1' not in auth_info['r_id']:
        return json.dumps({'code': 1, 'errmsg': 'you have no power'})
    try:
        output = [
            'server.id', 'server.host_name', 'server.name_cn', 'server.ip',
            'server.account', 'server.admin_username', 'server.admin_password',
            'server.sg_id', 'server.comment', 'server.outerip',
            'server_group.name_cn'
        ]
        data = request.get_json()['params']
        fields = data.get('output', output)
        where = data.get('where', {})
        # Join condition: server.sg_id references server_group.id.
        where['server.sg_id'] = 'server_group.id'
        join = data.get('join', True)
        results = app.config['db'].get_results('server,server_group',
                                               fields,
                                               where,
                                               join=join)
        if not results:
            return json.dumps({'code': 1, 'errmsg': 'data not exist'})
        utils.write_log('api').info('%s select server list success' % username)
        return json.dumps({
            'code': 0,
            'result': results,
            'count': len(results)
        })
    except:
        utils.write_log('api').error('select server list faild %s' %
                                     traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': 'select server list fail'})
Ejemplo n.º 21
0
def train(model, data_loader, criterion, optimizer, epoch=0, to_log=None, print_freq=25):
    """Train ``model`` for one epoch over the two-stream (azi/ele) loader.

    Returns the list of per-batch loss values; prints (and optionally logs)
    progress every ``print_freq`` batches.
    Fix: the progress message was assigned to a local named ``str``, which
    shadowed the builtin — renamed to ``msg``.
    """
    # running statistics
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    train_loss = []

    # switch to train mode
    model.train()
    # record start time
    start = time.time()

    for i, (azi, ele, target) in enumerate(data_loader):
        # prepare input and target on the device
        azi = azi.to(device, dtype=torch.float)
        ele = ele.to(device, dtype=torch.float)
        target = target.to(device, dtype=torch.long)

        # measure data loading time
        data_time.update(time.time() - start)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward, loss, backward, SGD step
        output = model(azi, ele)
        loss = criterion(output, target)

        train_loss.append(loss.item())
        loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.item(), data_loader.batch_size)
        top1.update(prec1.item(), data_loader.batch_size)
        top5.update(prec5.item(), data_loader.batch_size)

        # measure elapsed time
        batch_time.update(time.time() - start)
        start = time.time()

        # print training info
        if i % print_freq == 0:
            msg = ('Epoch: [{0}][{1}/{2}]\t'
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                   'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                   'Prec@1 {top1.val:3.3f} ({top1.avg:3.3f})\t'
                   'Prec@5 {top5.val:3.3f} ({top5.avg:3.3f})'.format(
                epoch, i, len(data_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5))
            print(msg)

            if to_log is not None:
                write_log(msg + '\n', to_log)

    return train_loss
Ejemplo n.º 22
0
def role_getone(auth_info, **kwargs):
    """Fetch a single role and translate its p_id ids into power names."""
    username = auth_info['username']
    try:
        default_output = ['id', 'name', 'name_cn', 'p_id', 'info']
        params = request.get_json()['params']
        fields = params.get('output', default_output)
        where = params.get('where', None)
        result = app.config['db'].get_one_result('role', fields, where)
        if not result:
            return json.dumps({'code': 1, 'errmsg': 'data not exist'})

        # Map power ids to names via the power table.
        power_rows = app.config['db'].get_results('power', ['id', 'name'])
        power_dict = dict((str(row['id']), row['name']) for row in power_rows)

        names = [
            power_dict[pid] for pid in result['p_id'].split(',')
            if pid in power_dict
        ]
        result['p_id'] = ','.join(names)
        utils.write_log('api').info('%s select role success' % username)
        return json.dumps({'code': 0, 'result': result})
    except:
        utils.write_log('api').error('select role by name or id faild %s' %
                                     traceback.format_exc())
        return json.dumps({
            'code': 1,
            'errmsg': 'select role by name or id faild'
        })
Ejemplo n.º 23
0
def login():
    """Authenticate a user by username/password and return an auth token."""
    try:
        username = request.args.get('username', None)
        passwd = request.args.get('passwd', None)
        # NOTE(review): unsalted MD5 is a weak password hash; migrating to
        # bcrypt/PBKDF2 would require a stored-password migration.
        passwd = hashlib.md5(passwd).hexdigest()
        if not (username and passwd):
            return json.dumps({'code': 1, 'errmsg': "need username or passwd"})
        result = app.config['db'].get_one_result(
            'user', ['id', 'username', 'password', 'r_id', 'is_lock'],
            {'username': username})
        if not result:
            return json.dumps({'code': 1, 'errmsg': "user is not exist"})
        if result['is_lock'] == 1:
            return json.dumps({'code': 1, 'errmsg': "user is lock"})
        if passwd != result['password']:
            return json.dumps({'code': 1, 'errmsg': "passwd is wrong"})
        # Record the login time, then issue a signed token.
        app.config['db'].execute_update_sql(
            'user', {'last_login': time.strftime('%Y-%m-%d %H:%M:%S')},
            {'username': username})
        token = utils.get_validate(result['username'], result['id'],
                                   result['r_id'],
                                   app.config['passport_key'])
        utils.write_log('api').info("%s login sucess" % username)
        return json.dumps({'code': 0, 'authorization': token})
    except:
        utils.write_log('api').error("login error: %s" %
                                     traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': "login fail"})
Ejemplo n.º 24
0
def login():
    """Authenticate a user and return an auth token (Chinese error messages).

    Fix: the missing-credentials branch called ``josin.dumps`` (typo), which
    raised NameError and collapsed that case into the generic login-fail
    response; it now uses ``json.dumps``.
    """
    try:
        username = request.args.get('username', None)
        passwd = request.args.get('passwd', None)
        # NOTE(review): unsalted MD5 is a weak password hash.
        passwd = hashlib.md5(passwd).hexdigest()
        if not (username and passwd):
            return json.dumps({'code': 1, 'errmsg': '需要输入用户名和密码'})
        result = app.config['db'].get_one_result(
            'user', ['id', 'username', 'password', 'r_id', 'is_lock'],
            {'username': username})
        if not result:
            return json.dumps({"code": 1, "errmsg": "用户不存在"})
        if result['is_lock'] == 1:
            return json.dumps({"code": 1, "errmsg": "用户已锁定"})
        if passwd == result['password']:
            # Record login time, then issue a signed token.
            data = {'last_login': time.strftime('%Y-%m-%d %H:%M:%S')}
            app.config['db'].execute_update_sql('user', data,
                                                {'username': username})
            token = utils.get_validate(result['username'], result['id'],
                                       result['r_id'],
                                       app.config['passport_key'])
            utils.write_log('api').info('%s login success' % username)
            return json.dumps({'code': 0, 'authorization': token})
        else:
            return json.dumps({'code': 1, "errmsg": "输入密码有误"})
    except:
        utils.write_log('api').error('login error: %s', traceback.format_exc())
        return json.dumps({"code": 1, "errmsg": "login fail"})
Ejemplo n.º 25
0
def role_getlist(auth_info, **kwargs):
    """Admin-only: list roles with their p_id translated to power names.

    Fix: the except branch called ``.errot`` (typo) on the logger, which
    raised AttributeError inside the error handler itself; now ``.error``.
    """
    username = auth_info['username']
    if '1' not in auth_info['r_id']:
        return json.dumps({'code': 1, 'errmsg': 'you have no power'})
    try:
        output = ['id', 'name', 'name_cn', 'p_id', 'info']
        data = request.get_json()['params']
        fields = data.get('output', output)
        where = data.get('where', None)
        results = app.config['db'].get_results('role', fields, where)
        if not results:
            return json.dumps({'code': 1, 'errmsg': 'data not exist'})
        # id -> name lookup from the power table.
        power_results = app.config['db'].get_results('power', ['id', 'name'])
        power_dict = dict((str(x['id']), x['name']) for x in power_results)

        for result in results:
            p_name = [
                power_dict[p_id] for p_id in result['p_id'].split(',')
                if p_id in power_dict
            ]
            result['p_id'] = ','.join(p_name)
        utils.write_log('api').info('%s select role list success' % username)
        return json.dumps({
            'code': 0,
            'result': results,
            'count': len(results)
        })
    except:
        utils.write_log('error').error('select role list faild %s' %
                                       traceback.format_exc())
        return json.dumps({'code': 1, 'errmsg': 'select role list faild'})
Ejemplo n.º 26
0
	def run(self):
		"""Summarise six-frame HMMER scan output for every genome.

		Walks the SCAN directory for ``*.sixframe.result`` files, builds one
		summarise job per genome from ``self.args``, runs them, and writes a
		LOG in the results directory listing every input processed.
		"""
		## get domains' lengths and domains to ignore
		self._parse_domains()
		self._parse_domains_to_ignore()

		
		log_other = "###   INPUT  ### \ngenome\tfasta_hmmer_result\tgff_hmmer_result\n" # keeping a text file of all the genomes used

		jobs = []
		# One summarise job per <genome>.sixframe.result in the SCAN directory.
		for file in os.listdir(os.path.join(self.args["out_dir"],self.args["scan_id"] + "_SCAN")):
			if file.endswith(".sixframe.result"):
				basename = os.path.basename(file)
				basename = basename.replace(".sixframe.result","")
				
				scan_summary = dict(self.args)
				scan_summary["basename"] = basename
				scan_summary["out_file"] = os.path.join(self.args["results_dir"], basename + ".csv")
				scan_summary["orf_locs"] = None  # no ORF location file in six-frame mode — TODO confirm
				scan_summary["orfs"] = {}
				scan_summary["unfit_orfs"] = {}
				if scan_summary["report_unfit"]:
					scan_summary["report_unfit"] = os.path.join(self.args["results_dir"],"UNFIT",basename + ".csv")

				jobs.append(scan_summary)
		
		# Run each job and record its output files in the LOG text.
		for j in jobs:
			sixframe_file, annotation_file = run_summarise(j)
			log_other = log_other + j["basename"] + "\t" + sixframe_file + "\t" + annotation_file + "\n"

		utils.write_log(os.path.join(self.args["results_dir"], "LOG"), "STEP 3 : FILTER HMMER HITS", self.args, log_other)
Ejemplo n.º 27
0
 def _select_sql(self,
                 table_name,
                 fields,
                 where=None,
                 order=None,
                 asc_order=True,
                 limit=None,
                 join=False):
     """Build a SELECT statement string (Python 2: accepts str/unicode).

     Parameters:
         table_name: table name, or comma-joined tables when joining.
         fields: list of column names to select.
         where: dict of conditions ANDed together. List values become
             ``IN (...)``, str/unicode values are quoted, ints are not;
             when ``join`` is true and both key and value contain '.', the
             pair is emitted unquoted as a join condition (col_a=col_b).
         order: column for ORDER BY (used only if str/unicode).
         asc_order: ASC when True, DESC otherwise.
         limit: (offset, count) tuple for LIMIT.
         join: enables the join-condition handling described above.

     Returns the SQL string, or '' when ``where`` is present but not a
     usable dict. NOTE(review): values are interpolated directly into the
     SQL text — injection-prone for untrusted input.
     """
     if isinstance(where, dict) and where:
         conditions = []
         for k, v in where.items():
             if join and '.' in k and '.' in v:
                 conditions.append("%s=%s" % (k, v))  # join condition, unquoted
             elif isinstance(v, list):
                 conditions.append('%s IN (%s)' % (k, ','.join(v)))
             elif isinstance(v, str) or isinstance(v, unicode):
                 conditions.append("%s='%s'" % (k, v))
             elif isinstance(v, int):
                 conditions.append('%s=%s' % (k, v))
         sql = 'SELECT %s FROM %s WHERE %s' % (','.join(fields), table_name,
                                               ' AND '.join(conditions))
     elif not where:
         sql = 'SELECT %s FROM %s' % (','.join(fields), table_name)
     else:
         sql = ''
     if order and (isinstance(order, str) or isinstance(order, unicode)):
         sql = '%s ORDER BY %s %s' % (sql, order,
                                      'ASC' if asc_order else 'DESC')
     if limit and isinstance(limit, tuple) and len(limit) == 2:
         sql = '%s LIMIT %s,%s' % (sql, limit[0], limit[1])
     utils.write_log('api').info('Select sql: %s' % sql)
     return sql
Ejemplo n.º 28
0
def server_getone(auth_info, **kwargs):
    """Return one server row, with sg_id resolved to the server group's
    human-readable name, for users holding role id '1'.

    auth_info -- dict from the auth layer; must contain 'username' and
                 'r_id' (container of role ids)
    Returns a JSON string: {'code': 0, 'result': row} on success, or
    {'code': 1, 'errmsg': ...} on permission failure or lookup error.
    """
    username = auth_info['username']
    if '1' not in auth_info['r_id']:
        return json.dumps({'code': 1, 'errmsg': 'you have no power'})
    try:
        # default column set used when the caller does not narrow 'output'
        output = [
            'id', 'host_name', 'name_cn', 'ip', 'account', 'admin_username',
            'admin_password', 'sg_id', 'comment', 'outerip'
        ]
        data = request.get_json()['params']
        fields = data.get('output', output)
        where = data.get('where', None)
        result = app.config['db'].get_one_result('server', fields, where)
        if not result:
            return json.dumps({'code': 1, 'errmsg': 'data not exist'})
        sg_results = app.config['db'].get_results('server_group',
                                                  ['id', 'name_cn'])
        sg_dict = dict((str(x['id']), x['name_cn']) for x in sg_results)
        # sg_dict keys are str(id); stringify sg_id as well so an integer
        # id still resolves instead of silently mapping to None
        result['sg_id'] = sg_dict.get(str(result['sg_id']))
        utils.write_log('api').info('%s select server success' % username)
        return json.dumps({'code': 0, 'result': result})
    except Exception:
        utils.write_log('api').error('select server by name or id faild %s' %
                                     traceback.format_exc())
        return json.dumps({
            'code': 1,
            'errmsg': 'select server by name or id faild'
        })
Ejemplo n.º 29
0
 def get_results(self,
                 table_name,
                 fields,
                 where=None,
                 order=None,
                 asc_order=True,
                 limit=None,
                 join=False):
     """Run a SELECT built by ``_select_sql`` and fetch every row.

     Returns a list of dicts mapping each field name to its column value
     (NULL columns are normalised to ''), ``{}`` when the query matched
     no rows, and ``None`` when no SQL statement could be built.
     """
     sql = self._select_sql(table_name, fields, where, order, asc_order,
                            limit, join)
     if not sql:
         # _select_sql could not produce a statement from the arguments
         utils.write_log('api').error('get_results missing sql')
         return None
     self._execute(sql)
     rows = self._fetchall()
     if not rows:
         return {}
     # one dict per row, keyed by field name in SELECT order
     return [
         dict((name, '' if row[idx] is None else row[idx])
              for idx, name in enumerate(fields))
         for row in rows
     ]
Ejemplo n.º 30
0
    def on_epoch_end(self, epoch, logs=None):
        """Evaluate validation metrics at the end of every epoch.

        Computes AUC, AUPR, accuracy and F1 on the held-out validation
        split, injects them into `logs` (so Keras history picks them up),
        and appends the enriched record to the training-history file.

        epoch -- zero-based epoch index supplied by Keras
        logs  -- mutable metrics dict supplied by Keras; may be None
        """
        # Keras declares logs=None as the default; guard so the item
        # assignments below cannot crash on a missing dict
        if logs is None:
            logs = {}
        y_pred = self.model.predict(self.x_valid).flatten()
        y_true = self.y_valid.flatten()
        auc = roc_auc_score(y_true=y_true, y_score=y_pred)  # area under ROC curve
        precision, recall, _thresholds = precision_recall_curve(
            y_true=y_true, probas_pred=y_pred)
        aupr = m.auc(recall, precision)  # area under precision-recall curve
        # binarise probabilities at the configured decision threshold
        y_pred = [1 if prob >= self.threshold else 0 for prob in y_pred]
        acc = accuracy_score(y_true=y_true, y_pred=y_pred)
        f1 = f1_score(y_true=y_true, y_pred=y_pred)

        # cast numpy scalars to plain floats so the record serialises cleanly
        logs['val_aupr'] = float(aupr)
        logs['val_auc'] = float(auc)
        logs['val_acc'] = float(acc)
        logs['val_f1'] = float(f1)

        # record the experiment context alongside the metric values
        logs['dataset'] = self.dataset
        logs['aggregator_type'] = self.aggregator_type
        logs['kfold'] = self.k
        logs['epoch_count'] = epoch + 1
        print(
            f'Logging Info - epoch: {epoch+1}, val_auc: {auc}, val_aupr: {aupr}, val_acc: {acc}, val_f1: {f1}'
        )
        write_log('log/train_history.txt', logs, mode='a')
	def train(self):
		"""Run the training loop with gradient accumulation.

		Each step zeroes the gradient buffers, accumulates gradients over
		`grad_update_every` mini-batches, then applies them once; every
		`save_interval` steps it additionally writes summaries and a
		checkpoint. Per-step loss is printed and appended to the log file.
		"""
		self.train_setup()

		self.sess.run(tf.global_variables_initializer())

		# Load the pre-trained model if provided
		if self.conf.pretrain_file is not None:
			self.load(self.loader, self.conf.pretrain_file)

		# Start queue threads (TF1 queue-based input pipeline).
		threads = tf.train.start_queue_runners(coord=self.coord, sess=self.sess)

		# Train!
		for step in range(self.conf.num_steps+1):
			start_time = time.time()
			feed_dict = { self.curr_step : step }
			loss_value = 0

			# Clear the accumulated gradients.
			self.sess.run(self.zero_op, feed_dict=feed_dict)

			# Accumulate gradients over grad_update_every mini-batches.
			for i in range(self.conf.grad_update_every):
				_, l_val = self.sess.run([self.accum_grads_op, self.reduced_loss], feed_dict=feed_dict)
				loss_value += l_val

			# Normalise the loss over the accumulation window.
			loss_value /= self.conf.grad_update_every

			# Apply the accumulated gradients; on save_interval steps also
			# fetch batches and summaries so a checkpoint can be written.
			if step % self.conf.save_interval == 0:
				images, labels, summary, _ = self.sess.run(
					[self.image_batch,
					self.label_batch,
					self.total_summary,
					self.train_op],
					feed_dict=feed_dict)
				self.summary_writer.add_summary(summary, step)
				self.save(self.saver, step)
			else:
				self.sess.run(self.train_op, feed_dict=feed_dict)

			duration = time.time() - start_time
			print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
			write_log('{:d}, {:.3f}'.format(step, loss_value), self.conf.logfile)

		# finish: stop and join the input queue threads
		self.coord.request_stop()
		self.coord.join(threads)
	def train(self):
		"""Run the plain training loop (one mini-batch per optimiser step).

		Every `save_interval` steps it also fetches batches, predictions
		and summaries so a checkpoint and summary record can be written.

		NOTE(review): this is a second definition of `train`; if both
		versions live in the same class, this one shadows the
		gradient-accumulation variant above — confirm which is intended.
		"""
		self.train_setup()

		self.sess.run(tf.global_variables_initializer())

		# Load the pre-trained model if provided
		if self.conf.pretrain_file is not None:
			self.load(self.loader, self.conf.pretrain_file)

		# Start queue threads (TF1 queue-based input pipeline).
		threads = tf.train.start_queue_runners(coord=self.coord, sess=self.sess)

		# Train!
		for step in range(self.conf.num_steps+1):
			start_time = time.time()
			feed_dict = { self.curr_step : step }

			# On save_interval steps fetch extra tensors for the summary
			# writer and checkpoint; otherwise just step the optimiser.
			if step % self.conf.save_interval == 0:
				loss_value, images, labels, preds, summary, _ = self.sess.run(
					[self.reduced_loss,
					self.image_batch,
					self.label_batch,
					self.pred,
					self.total_summary,
					self.train_op],
					feed_dict=feed_dict)
				self.summary_writer.add_summary(summary, step)
				self.save(self.saver, step)
			else:
				loss_value, _ = self.sess.run([self.reduced_loss, self.train_op],
					feed_dict=feed_dict)

			duration = time.time() - start_time
			print('step {:d} \t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
			write_log('{:d}, {:.3f}'.format(step, loss_value), self.conf.logfile)

		# finish: stop and join the input queue threads
		self.coord.request_stop()
		self.coord.join(threads)
Ejemplo n.º 33
0
    def __parse_prompt(self, prompt, our_team):
        """
        Determine piece to be moved and move required from prompt.

        Handles the special commands 'debug', 'redraw', 'list' and 'log'
        first; otherwise treats the first two characters of `prompt` as
        the current cell reference and the last two as the destination.

        Returns a 5-tuple (piece, up, right, hold_move, user_feedback);
        hold_move stays True whenever no actionable move was resolved.
        """
        # setup return defaults
        piece, up, right, hold_move, user_feedback = None, None, None, True, None

        # first check for special commands
        if prompt.lower()[:5] == 'debug':
            level_text = prompt[5:].strip()
            user_feedback = set_debugging_level(level_text, feedback_required=True)
            return piece, up, right, hold_move, user_feedback
        elif prompt.lower() == 'redraw':
            print(self.board.draw_board())
            return piece, up, right, hold_move, user_feedback
        elif prompt.lower()[:4] == 'list':
            # optional piece reference after 'list' narrows the listing
            piece_ref_text = prompt[4:].strip()
            piece = self.get_piece(piece_ref_text)
            if piece:
                self.get_all_possible_moves(list_moves=True, pieces={piece.ref: piece})
            else:
                self.get_all_possible_moves(list_moves=True)
            return piece, up, right, hold_move, user_feedback
        elif prompt.lower() == 'log':
            if self.logging:
                write_log(LOG)
                user_feedback = "Log written to current working directory"
            else:
                user_feedback = "Logging not currently enabled, change in literals.py"
            return piece, up, right, hold_move, user_feedback

        # attempt to get details of piece to be moved (first two chars as current cell_ref)
        try:
            current_cell_ref = prompt[:2]
            [cur_row, cur_col_no] = cell_ref_to_pos(current_cell_ref)
        except (IndexError, ValueError):
            user_feedback = ('A valid cell for your current position could not be found\n' +
                '(using the first two characters from your entry: "{0}")').format(prompt)
            return piece, up, right, hold_move, user_feedback

        piece_ref = self.board.get_piece_ref(cur_row, cur_col_no)

        if piece_ref:
            piece = self.get_piece(piece_ref)

        try:
            # todo consider more efficient ways to achieve the same
            assert ([cur_row, cur_col_no] in [our_team[obj].pos for obj in our_team])
        except AssertionError:
            user_feedback = ('A piece in your team could not be found in cell: {0}\n' +
                '(using the first two characters from your entry)').format(current_cell_ref)
            return piece, up, right, hold_move, user_feedback

        # use last two characters as new cell_ref
        try:
            [new_row, new_col_no] = cell_ref_to_pos(prompt[-2:])
        except ValueError:
            user_feedback = (
                'A valid new cell could not be identified from your input: {0}'.format(prompt))
            return piece, up, right, hold_move, user_feedback

        up, right = new_row - cur_row, new_col_no - cur_col_no

        # NOTE(review): `piece` could still be None here if the cell had no
        # piece ref; presumed unreachable because the team-membership assert
        # above would already have returned — confirm
        debug('piece_ref: {0} | up: {1} | right: {2}'.format(piece.ref, up, right),
              DebugLevel.mid)

        # attempt to get destination...
        try:
            assert up in range(-8, 9) and right in range(-8, 9)
        except AssertionError:
            user_feedback = (
                'A valid new cell could not be identified from your input: {0}'.format(prompt))
            return piece, up, right, hold_move, user_feedback

        hold_move = False
        return piece, up, right, hold_move, user_feedback
Ejemplo n.º 34
0
def message_handler(messages, bus_interface, upgrade_page, uninstall_page, install_page, home_page, inhibit_obj):
    """Dispatch backend signal messages to the matching UI page.

    Each message is a (signal_type, action_content) tuple from the
    package-manager backend; the handler routes download / action /
    update-list progress and error signals to the install, upgrade or
    uninstall page and toggles the session inhibit lock around running
    actions. Each message is processed in its own try block so one bad
    message cannot abort the rest of the batch.
    """
    for message in messages:
        try:
            (signal_type, action_content) = message

            # --- download lifecycle signals -------------------------------
            if signal_type == "ready-download-start":
                (pkg_name, action_type) = action_content
                if action_type == ACTION_INSTALL:
                    install_page.download_ready(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_ready(pkg_name)

            elif signal_type == 'ready-download-finish':
                (pkg_name, action_type) = action_content
                if action_type == ACTION_INSTALL:
                    install_page.download_wait(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_wait(pkg_name)

            # --- pre-download error signals (logged, shown on upgrade view)
            elif signal_type == 'pkgs-not-in-cache':
                (not_in_cache, action_type) = action_content
                utils.write_log("pkgs-not-in-cache:%s, action_type:%s" % (not_in_cache, action_type))
                if action_type == ACTION_INSTALL:
                    pass
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.upgrading_view.show_error("pkgs_not_in_cache", json.loads(not_in_cache))

            elif signal_type == 'pkgs-mark-failed':
                (pkg_list, action_type) = action_content
                utils.write_log("pkgs-mark-failed:%s, action_type:%s" % (pkg_list, action_type))
                if action_type == ACTION_INSTALL:
                    pass
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.upgrading_view.show_error("pkgs_mark_failed", pkg_list)

            elif signal_type == 'marked-delete-system-pkgs':
                (pkgs, action_type) = action_content
                utils.write_log("marked-delete-system-pkgs:%s, action_type:%s" % (pkgs, action_type))
                if action_type == ACTION_INSTALL:
                    pass
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.upgrading_view.show_error("marked_delete_system_pkgs", json.loads(pkgs))

            elif signal_type == 'pkgs-parse-download-error':
                (error, action_type) = action_content
                utils.write_log("pkgs-parse-download-error:%s, action_type:%s" % (error, action_type))
                if action_type == ACTION_INSTALL:
                    pass
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.upgrading_view.show_error("pkgs_parse_download_error", error)

            elif signal_type == "download-start":
                (pkg_name, action_type) = action_content
                if action_type == ACTION_INSTALL:
                    install_page.download_start(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_start(pkg_name)

            elif signal_type == "download-update":
                (pkg_name, action_type, data) = action_content
                (percent, speed, finish_number, total, downloaded_size, total_size) = data
                if action_type == ACTION_INSTALL:
                    install_page.download_update(pkg_name, percent, speed)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_update(pkg_name, percent, speed, finish_number, total, downloaded_size, total_size)

            elif signal_type == "download-finish":
                (pkg_name, action_type) = action_content
                if action_type == ACTION_INSTALL:
                    install_page.download_finish(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_finish(pkg_name)

            elif signal_type == "download-stop":
                (pkg_name, action_type) = action_content
                if action_type == ACTION_INSTALL:
                    install_page.download_stop(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_stop(pkg_name)

            elif signal_type == "download-failed":
                (pkg_name, action_type, error) = action_content
                utils.write_log("download-failed:%s, action_type:%s" % (error, action_type))
                if action_type == ACTION_INSTALL:
                    install_page.download_failed(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_failed(pkg_name, error)

            # --- install/upgrade/uninstall action lifecycle ---------------
            elif signal_type == "action-start":
                (pkg_name, action_type) = action_content
                if action_type == ACTION_UNINSTALL:
                    uninstall_page.action_start(pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.action_start(pkg_name)
                elif action_type == ACTION_INSTALL:
                    install_page.action_start(pkg_name)
                # block shutdown/suspend while a package action runs
                inhibit_obj.set_inhibit()

            elif signal_type == "action-update":
                (pkg_name, action_type, percent, status) = action_content
                if action_type == ACTION_UNINSTALL:
                    uninstall_page.action_update(pkg_name, percent)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.action_update(pkg_name, percent, utils.l18n_status_info(status))
                elif action_type == ACTION_INSTALL:
                    install_page.action_update(pkg_name, percent)

            elif signal_type == "action-finish":
                (pkg_name, action_type, pkg_info_list) = action_content
                if action_type == ACTION_UNINSTALL:
                    uninstall_page.action_finish(pkg_name, pkg_info_list)
                    utils.show_notify(_("Uninstall %s Successfully.") % pkg_name, _("Uninstall"))
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.action_finish(pkg_name, pkg_info_list)
                    global_event.emit("upgrade-finish-action", pkg_info_list)
                    utils.set_last_upgrade_time()
                    upgrade_page.refresh_status(pkg_info_list)
                    utils.show_notify(_("%s packages upgrade successfully.") % len(pkg_info_list), _("Upgrade"))
                elif action_type == ACTION_INSTALL:
                    install_page.action_finish(pkg_name, pkg_info_list)
                    utils.show_notify(_("Install %s Successfully.") % pkg_name, _("Install"))
                inhibit_obj.unset_inhibit()

                refresh_current_page_status(pkg_name, pkg_info_list, bus_interface)
                if action_type != ACTION_UPGRADE:
                    bus_interface.request_status(
                            reply_handler=lambda reply: request_status_reply_hander(
                                reply, install_page, upgrade_page, uninstall_page, pkg_info_list),
                            error_handler=lambda e: action_finish_handle_dbus_error(pkg_info_list),
                            )

            elif signal_type == 'action-failed':
                # FIXME: change failed action dealing
                (pkg_name, action_type, pkg_info_list, errormsg) = action_content
                pkg_list = [str(info[0]) for info in pkg_info_list]
                utils.write_log("action-failed:%s, action_type:%s, pkg_list: %s" % (errormsg, action_type, pkg_list))
                if action_type == ACTION_UNINSTALL:
                    uninstall_page.action_finish(pkg_name, pkg_info_list)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.upgrading_view.show_error("upgrade_failed", errormsg)
                    utils.set_last_upgrade_time()
                elif action_type == ACTION_INSTALL:
                    install_page.action_finish(pkg_name, pkg_info_list)
                inhibit_obj.unset_inhibit()

                refresh_current_page_status(pkg_name, pkg_info_list, bus_interface)
                bus_interface.request_status(
                        reply_handler=lambda reply: request_status_reply_hander(
                            reply, install_page, upgrade_page, uninstall_page),
                        error_handler=lambda e:handle_dbus_error("request_status", e),
                        )

            # --- package-list refresh signals -----------------------------
            elif signal_type == "update-list-update":
                percent = "[%i%%] " % float(action_content[0])
                status = str(action_content[1])
                speed_str = str(action_content[2])
                status = utils.update_l18n_status_info(status)

                list_message = []
                list_message.append(percent + status)
                list_message.append(speed_str)
                global_event.emit("show-message", list_message)

            elif signal_type == "update-list-merge":
                global_event.emit("show-message", [_("Generating package list database..."), ''], 0)

            elif signal_type == "update-list-finish":
                upgrade_page.fetch_upgrade_info()
                bus_interface.request_status(
                        reply_handler=lambda reply: request_status_reply_hander(reply, install_page, upgrade_page, uninstall_page),
                        error_handler=lambda e:handle_dbus_error("request_status", e),
                        )
                global_event.emit("show-message", [_("Package lists refreshed completely."), ''], 5000)
                global_event.emit('update-list-finish')
                global_event.emit("hide-update-list-dialog")

            elif signal_type == 'update-list-failed':
                # FIXME: change failed action dealing
                upgrade_page.fetch_upgrade_info()
                bus_interface.request_status(
                        reply_handler=lambda reply: request_status_reply_hander(reply, install_page, upgrade_page, uninstall_page),
                        error_handler=lambda e:handle_dbus_error("request_status", e),
                        )
                list_message = []
                list_message.append(_("Failed to refresh package lists."))
                list_message.append(_('Try again'))
                list_message.append(lambda:global_event.emit('start-update-list'))
                global_event.emit("show-message", list_message, 0)
                global_event.emit('update-list-finish')
                global_event.emit("hide-update-list-dialog")

            elif signal_type == "parse-download-error":
                (pkg_name, action_type) = action_content
                if action_type == ACTION_INSTALL:
                    install_page.download_parse_failed(pkg_name)
                    global_event.emit("show-message", _("Problem occurred when analyzing dependencies for %s. Installation aborted.") % pkg_name)
                elif action_type == ACTION_UPGRADE:
                    upgrade_page.download_parse_failed(pkg_name)
                    global_event.emit("show-message", _("Problem occurred when analyzing dependencies for %s. Upgrade aborted.") % pkg_name)

            elif signal_type == "pkg-not-in-cache":
                pkg_name = action_content
                list_message = []
                list_message.append(_('The requested package \"%s\" was not found in the package list.') % pkg_name)
                list_message.append(_('Refresh package lists again and try again.'))
                list_message.append(lambda:global_event.emit('start-update-list'))
                global_event.emit("show-message", list_message, 0)
        except Exception, e:
            # Python 2 except syntax; log and keep processing remaining messages
            traceback.print_exc(file=sys.stdout)
            print "Message Handler Error:", e
Ejemplo n.º 35
0
    def take_turn(self, team, prompt=None, move=None):
        """
        Interact with player to facilitate moves, capture data and
        identify/store information common to all potential moves.
        Also includes optional param to specify a prompt to run
        automatically or a move object (for interface from external
        scripts).
        """
        global LOG

        self.turns += 1
        self.current_team = team
        occupied, our_team, their_team = self.get_occupied()
        validated, found_issue = False, False
        if self.check:
            user_feedback = shout(team + ' team in check',
                                  print_output=False, return_output=True)
        else:
            user_feedback = None

        # repeat prompt until a valid move is given...
        while not validated:
            # skip set up if a move object is passed in...
            if move:
                piece, up, right = move.piece, move.up, move.right
            else:
                if not prompt:
                    print(self.board.draw_board())
                    if user_feedback:
                        print(user_feedback + '\n')
                    prompt = input("[" + team + " move] >> ")

                piece, up, right, hold_move, user_feedback = \
                    self.__parse_prompt(prompt, our_team)

                if not hold_move:
                    # create object for move, this evaluates potential issues etc.
                    move = Move(piece, up, right, occupied, our_team, their_team)

            if move:
                if move.possible:
                    validated = True
                else:
                    user_feedback = move.invalid_reason
                    move = None  # clear ready for next loop

            prompt = None  # clear ready for next loop

        # noinspection PyUnboundLocalVariable
        self.__process_move(piece, move, up, right, occupied, our_team, their_team)

        # log state of game
        if self.logging:
            LOG = '\n'.join([LOG, self.__to_json()])

        # wrap up if done...
        if self.checkmate or self.turns >= 200:
            if self.turns >= self.turn_limit:
                shout('{0} moves, lets call it a draw'.format(self.turns))
            elif self.checkmate:
                move.checkmate = True
                shout('game over, {0} team wins'.format(self.current_team))
            if self.logging:
                write_log(LOG)