Example #1
def service():
    # Descriptive analysis: fetch the task list from the DB
    list_ask_queue = getDescribeAnalysiscList()
    print "DescribeAnalysis, ", len(list_ask_queue)
    if len(list_ask_queue) <= 0:
        return

    for ask_parm in list_ask_queue:
        print ask_parm

        dict_id_str = ask_parm["dict_list"]
        dict_id_Arrs = dict_id_str.split(',')
        filterList = []
        for dict_id in dict_id_Arrs:
            filterList.append("_" + dict_id)
        # Descriptive analysis: fetch the groups from the DB
        list_group_queue = getDescribeAnalysisDBList(ask_parm['groupID'])
        if len(list_group_queue) < 1:
            print "warning:", len(list_group_queue)
            continue

        # Descriptive analysis: run the calculation
        result_dict = calc_python_spark(ask_parm, filterList,
                                        list_group_queue[0])
        print " result_dict", result_dict
        uuId = ask_parm["uuId"]
        crowID = ask_parm["crowID"]
        utype = ask_parm["type"]
        print result_dict
        if result_dict is None:
            print crowID, uuId
            updateDescribeAnalysisStatus(uuId)
            continue

        print "utype--", utype
        for dict_id in dict_id_Arrs:
            try:
                if utype == "0":
                    saveDescribeAnalysisResultToDB(
                        utils.create_id(uuId + crowID + dict_id), uuId,
                        dict_id, result_dict["_" + dict_id])
                    updateDescribeAnalysisStatus(uuId)
                else:
                    saveDescribeAnalysisResultToDB(
                        utils.create_id(uuId + crowID + dict_id + "0"), uuId,
                        dict_id, result_dict["_" + dict_id][0])
                    saveDescribeAnalysisResultToDB(
                        utils.create_id(uuId + crowID + dict_id + "1"), uuId,
                        dict_id, result_dict["_" + dict_id][1])
                    updateDescribeAnalysisStatus(uuId)
            except ZeroDivisionError as e:
                print e.message

            print "done", uuId, crowID, dict_id
Example #2
    def search(self, results, media, lang, manual):
        PlexLog.debug("=================== Search Start ===================")

        PlexLog.debug("%s (%s)" % (self.name, self.ver))
        PlexLog.debug("Plex version: %s" % Platform.ServerVersion)

        server = Prefs["Server"]
        if not server:
            PlexLog.error("Missing server!")
            return

        movie = request_json(urljoin(server, "movie"), as_movie(media))
        if movie is None:
            return

        title = movie.get("title")
        if title is None:
            PlexLog.error("Missing or invalid title: %s" % str(movie))
            return

        aired = convert_date(movie.get("aired"))
        year = aired.year if aired is not None else 0

        # Plex throws an exception when the ID contains "/"
        mid = create_id(title, year)
        result = MetadataSearchResult(id=mid,
                                      name=title,
                                      year=year,
                                      lang=lang,
                                      score=100)
        results.Append(result)
        PlexLog.debug("===================  Search end  ===================")
Example #3
    def search(self, results, media, lang, manual):
        PlexLog.debug("=================== Search Start ===================")

        PlexLog.debug("%s (%s)" % (self.name, self.ver))
        PlexLog.debug("Plex version: %s" % Platform.ServerVersion)

        show = get_show(media)
        if show is None:
            return

        title = show.get("title")
        if title is None:
            PlexLog.error("Missing or invalid title: %s" % str(show))
            return

        aired = convert_date(show.get("aired"))
        year = aired.year if aired is not None else 0

        # Plex throws an exception when the ID contains "/"
        mid = create_id(title, year)
        result = MetadataSearchResult(id=mid,
                                      name=title,
                                      year=year,
                                      lang=lang,
                                      score=100)
        results.Append(result)
        PlexLog.debug("===================  Search end  ===================")
Example #4
def service():
    list_ask_queue = getSampleDBAskCalcList()
    print "SampleTest, ", len(list_ask_queue)
    if len(list_ask_queue) <= 0:
        return
    for ask_parm in list_ask_queue:
        print ask_parm
        ask_parm['popmean'] = float(ask_parm['popmean'])
        ask_parm['confidence'] = float(ask_parm['confidence'])
        dic_id_str = ask_parm["dic_id"]
        dic_id_Arrs = dic_id_str.split(',')
        filterList = []
        for dic_id in dic_id_Arrs:
            filterList.append("_" + dic_id)
    
        print ask_parm
        result_dict = calc_python_spark(ask_parm, filterList)
        uuId = ask_parm["uuId"]
        crowID = ask_parm["crowID"]
        
        print result_dict

        for dic_id in dic_id_Arrs:
            if "_" + dic_id in result_dict:
                result = result_dict["_" + dic_id]
            else:
                result = None
            saveSampleResultToDB(utils.create_id(uuId+crowID+dic_id), uuId, dic_id, result)
            updateSampleStatus(uuId)

    return
Example #5
def serverHandler():

    while True:
  
        list_ask_queue = getStudentTTestDBAskCalcList()
        if len(list_ask_queue) <= 0:
            continue
       
        for ask_parm in list_ask_queue:
            print ask_parm
          
            ask_parm['confidence'] = float(ask_parm['confidence'])

            dict_id_str = ask_parm["dict_list"]
            dict_id_Arrs = dict_id_str.split(',')
            filterList = []
            for dict_id in dict_id_Arrs:
                filterList.append("_" + dict_id)
            list_group_queue = getStudentTTestGroupDBList(ask_parm['s_group_id1'], ask_parm['s_group_id2'])
            if len(list_group_queue) < 2:
                print "warning:", len(list_group_queue)
                continue

            result_dict = calc_python_spark(ask_parm, filterList, list_group_queue)
            print result_dict   
            uuId = ask_parm["uuId"]
            crowID = ask_parm["crowID"]
            for dict_id in dict_id_Arrs:
                saveStudentTtestResultToDB(utils.create_id(uuId+crowID+dict_id), uuId, dict_id, result_dict["_"+dict_id])
                updateStudentTtestStatus(uuId)
         
        time.sleep(10000)
    return
Example #6
def get_entities(persons):
    entities = []
    for person in persons:
        name = person.pop(PER_NAME)
        values = person.values()
        unique_id = create_id([_.encode('utf-8') for _ in values])

        fields = [
            {'tag': t, 'value': v} for t, v in person.items()
        ]

        entities.append(create_entity(unique_id, 'person', name, fields))
    return entities
Example #7
def get_entities(persons):
    entities = []
    for person in persons:
        name = person.pop('person_name')
        values = person.values()
        unique_id = create_id([_.encode('utf-8') for _ in values])

        fields = [
            {'tag': t, 'value': WHITESPACE_PATTERN.sub('', v)} for t, v in person.items()
        ]

        entities.append(create_entity(unique_id, 'person', name, fields))

    return entities
Example #8
def get_entities(persons):
    entities = []
    for person in persons:
        name = person[PER_NAME]
        values = person.values()
        unique_id = create_id([_.encode('utf-8') for _ in values])

        fields = [
            {'tag': t.strip('!'), 'value': v} for t, v in person.items()
        ]

        entities.append(create_entity(unique_id, 'person', name, fields))

    return entities
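For context, a small illustrative call to one of the get_entities variants above, with invented input; PER_NAME and create_entity come from the surrounding module, which is not shown here:

# Hypothetical usage: the field names and values are made up for illustration.
persons = [{PER_NAME: u'Ada Lovelace', u'born': u'1815', u'city': u'London'}]
entities = get_entities(persons)
# Each entity carries a unique_id derived deterministically from the person's field values.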
Example #9
    def __init__(self, variable_list: list):
        """
        :complexity: O(n) where n is the number of variables in the clause
        :param variable_list: list of variables
        """
        if isinstance(variable_list, set):
            variable_list = [Variable(var) for var in variable_list]

        self.variable_list = variable_list
        self.__size = len(self.variable_list)
        self.id = utils.create_id()

        self.literals_set = set()
        for var in variable_list:
            self.literals_set.add(var.variable_value)

        self.__tautology = self.__setup_tautology()
Example #10
    def search(self, results, media, lang, manual):
        PlexLog.debug("=================== Search Start ===================")

        PlexLog.debug("%s (%s)" % (self.name, self.ver))
        PlexLog.debug("Plex version: %s" % Platform.ServerVersion)

        server = Prefs["Server"]
        authKey = Prefs["AuthKey"]
        if not server:
            PlexLog.error("Missing server!")
            return

        requestUrl = urljoin(server, "show")
        if authKey:
            requestUrl = requestUrl + "?AuthKey=" + authKey

        PlexLog.debug("Requesting URL: %s" % requestUrl)

        show = request_json(requestUrl, as_show(media))
        if show is None:
            return

        title = show.get("title")
        if title is None:
            PlexLog.error("Missing or invalid title: %s" % str(show))
            return

        aired = convert_date(show.get("aired"))
        year = aired.year if aired is not None else 0

        # Plex throws an exception when the ID contains "/"
        mid = create_id(title, year)
        result = MetadataSearchResult(id=mid,
                                      name=title,
                                      year=year,
                                      lang=lang,
                                      score=100)
        results.Append(result)
        PlexLog.debug("===================  Search end  ===================")
Example #11
    def post(self, *args, **kwargs):
        if 'user' not in kwargs or args:
            self.raise401()

        grant_type = self.get_argument('grant_type', None)
        response_type = self.get_argument('response_type', None)
        redirect_uris = self.get_argument('redirect_uris', None)
        app_name = self.get_argument('app_name', None)
        description = self.get_argument('description', None)
        website = self.get_argument('website', None)

        try:
            user = kwargs['user']
            client_id = create_id()
            client_secret = create_secret()
            grant_type = grant_type or 'authorization_code'
            response_type = response_type or 'code'
            # todo scopes
            default_scopes = ['tasks', 'projects', 'repos', 'users', 'teams']
            scopes = default_scopes
            redirect_uris = parse_listed_strs(redirect_uris)
            # todo default
            default_redirect_uri = redirect_uris[0] if redirect_uris else ''

            client = Client(
                client_id=client_id, client_secret=client_secret,
                user=user, grant_type=grant_type,
                response_type=response_type, scopes=scopes,
                default_scopes=default_scopes, redirect_uris=redirect_uris,
                default_redirect_uri=default_redirect_uri, website=website,
                app_name=app_name, description=description)
            client.save()
            client_data = document_to_json(client, filter_set=_FILTER)
            self.set_status(201)
            self.write(client_data)
        except Exception as e:
            reason = e.message
            self.raise400(reason=reason)
Example #12
def serverHandler():

    list_ask_queue = getSampleDBAskCalcList()
    for ask_parm in list_ask_queue:
        print ask_parm
        ask_parm['popmean'] = float(ask_parm['popmean'])
        ask_parm['confidence'] = float(ask_parm['confidence'])
        dic_id_str = ask_parm["dic_id"]
        dic_id_Arrs = dic_id_str.split(',')
        filterList = []
        for dic_id in dic_id_Arrs:
            filterList.append("_" + dic_id)

        print ask_parm
        result_dict = calc_python_spark(ask_parm, filterList)
        uuId = ask_parm["uuId"]
        crowID = ask_parm["crowID"]

        print result_dict
        for dic_id in dic_id_Arrs:
            saveSampleResultToDB(utils.create_id(uuId + crowID + dic_id), uuId,
                                 dic_id, result_dict["_" + dic_id])
            updateSampleStatus(uuId)
Example #13
def service():
    # Fetch the task queue from the DB
    list_ask_queue = getStudentTTestDBAskCalcList()
    print "StudentTtest, ", len(list_ask_queue)
    if len(list_ask_queue) <= 0:
        return

    for ask_parm in list_ask_queue:
        print ask_parm

        ask_parm['confidence'] = float(ask_parm['confidence'])

        dict_id_str = ask_parm["dict_list"]
        dict_id_Arrs = dict_id_str.split(',')
        filterList = []
        for dict_id in dict_id_Arrs:
            filterList.append("_" + dict_id)
        # Fetch the groups
        list_group_queue = getStudentTTestGroupDBList(ask_parm['s_group_id1'],
                                                      ask_parm['s_group_id2'])
        if len(list_group_queue) < 2:
            print "warning:", len(list_group_queue)
            continue

        # Run the Spark calculation over the groups
        result_dict = calc_python_spark(ask_parm, filterList, list_group_queue)
        print result_dict
        uuId = ask_parm["uuId"]
        crowID = ask_parm["crowID"]
        for dict_id in dict_id_Arrs:
            # Save the result and update the status
            saveStudentTtestResultToDB(
                utils.create_id(uuId + crowID + dict_id), uuId, dict_id,
                result_dict["_" + dict_id])
            updateStudentTtestStatus(uuId)

    return
Example #14
def _get_token(request):
    token = request.COOKIES.get("token", None)
    if not token:
        token = utils.create_id()
        print 'created token', token
    return token
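_get_token above reads the token cookie and falls back to a freshly created ID. A sketch of how a Django-style view might use it, with the response handling assumed rather than taken from the source:

from django.http import HttpResponse

def index(request):
    token = _get_token(request)
    response = HttpResponse("ok")
    # Persist the token so later requests reuse the same ID.
    response.set_cookie("token", token)
    return response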
Example #15
    opt = parser.parse_args()

    DATA_NAME, RECALLS, BATCH_SIZE, NUM_EPOCHS = opt.data_name, opt.recalls, opt.batch_size, opt.num_epochs
    ENSEMBLE_SIZE, META_CLASS_SIZE = opt.ensemble_size, opt.meta_class_size
    recall_ids = [int(k) for k in RECALLS.split(',')]
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    data_dicts = torch.load('data/{}/data_dicts.pth'.format(DATA_NAME))
    train_data, test_data = data_dicts['train'], data_dicts['test']
    # sort classes and fix the class order
    all_class = sorted(train_data)
    idx_to_class = {i: all_class[i] for i in range(len(all_class))}

    for i in range(1, ENSEMBLE_SIZE + 1):
        print('Training ensemble #{}'.format(i))
        meta_id = create_id(META_CLASS_SIZE, len(data_dicts['train']))
        meta_data_dict = load_data(meta_id, idx_to_class, train_data)
        model = Model(META_CLASS_SIZE).to(DEVICE)
        optimizer = Adam(model.parameters(), lr=1e-4)
        lr_scheduler = MultiStepLR(
            optimizer,
            milestones=[int(NUM_EPOCHS * 0.5),
                        int(NUM_EPOCHS * 0.7)],
            gamma=0.1)
        criterion = CrossEntropyLoss()

        best_acc, best_model = 0, None
        for epoch in range(1, NUM_EPOCHS + 1):
            lr_scheduler.step(epoch)
            train_loss, train_acc = train(model, meta_data_dict, optimizer)
            print('Epoch {}/{} - Loss:{:.4f} - Acc:{:.4f}'.format(