def get_event_geo_location(global_config, event_key=None):
    
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    
    DataModel.setEventsGeoLocation(session, event_key)
               
    session.remove()
Example n. 2
def process_files(global_config, attr_definitions, input_dir, recursive=True):
    start_time = datetime.datetime.now()
    
    # Initialize the database session connection
    db_name  = global_config['db_name']
    session  = DbSession.open_db_session(db_name)
 
    some_files_processed = False
    
    # The following regular expression selects all files that conform to the
    # file naming format Team*.txt (e.g. Team1234_Match5.txt). Build a list of
    # all datafiles that match the naming format within the directory passed
    # in via command line arguments.
    file_regex = re.compile(r'Team[a-zA-Z0-9_]+\.txt')
    files = get_files(global_config, session, db_name, input_dir, file_regex, recursive)
    
    print('files retrieved, elapsed time - %s' % str(datetime.datetime.now() - start_time))

    # Process data files
    for data_filename in files:
        try:
            process_file( global_config, session, attr_definitions, data_filename)
        except Exception as e:
            # log the exception but continue processing other files
            log_exception(global_config['logger'], e)

        # add the file to the set of processed files so that we don't process it again. Do it outside the
        # try/except block so that we don't try to process a bogus file over and over again.       
        DataModel.addProcessedFile(session, data_filename)
        some_files_processed = True
        
        # Commit all updates to the database
        session.commit()
def set_team_geo_location(global_config, team_key=None):
    
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    
    DataModel.setTeamGeoLocation(session, team_key)

    session.remove()
def load_event_info(global_config, year_str):
    
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    
    if year_str.lower() == 'all':
        # get all events since the beginning of time
        year = 1992
        done = False
        while not done: 
            url_str = '/api/v2/events/%d' % year
            events_data = TbaIntf.get_from_tba_parsed(url_str)
            
            if len(events_data) == 0:
                done = True
            else:
                for event_data in events_data:
                    #print 'Event: %s' % event_data['key']
                    DataModel.addOrUpdateEventInfo(session, event_data)
                year += 1
    else:
        url_str = '/api/v2/events/%s' % year_str
        events_data = TbaIntf.get_from_tba_parsed(url_str)
        
        for event_data in events_data:
            print('Event: %s' % event_data['key'])
            DataModel.addOrUpdateEventInfo(session, event_data)
            
    session.commit()
    session.remove()
Example n. 5
def get_team_list_json(global_config, comp):
    global team_info_dict
    
    global_config['logger'].debug( 'GET Team List For Competition %s', comp )
    session = DbSession.open_db_session(global_config['db_name'])

    web.header('Content-Type', 'application/json')
    result = []

    result.append('{ "teams" : [\n')

    team_list = DataModel.getTeamsInNumericOrder(session, comp)
    for team in team_list:
        team_info = None
        # TODO - Remove this hardcoded number for the valid team number. This check prevents
        # requesting information for invalid team numbers, which has been known to happen when
        # tablet operators enter bogus team numbers by mistake
        if team.team < 10000:
            team_info = DataModel.getTeamInfo(session, int(team.team))
            
        if team_info:
            result.append('   { "team_number": "%s", "nickname": "%s" }' % (team.team,team_info.nickname))
            result.append(',\n')
        
    session.remove()

    if len(team_list) > 0:
        result = result[:-1]

        result.append(' ] }\n')
        return ''.join(result)
    else:
        return get_team_list_json_from_tba(global_config, comp)
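# Note: building JSON by hand, as above, produces invalid output if a nickname
# contains a quote or backslash. A minimal alternative sketch using the standard
# json module (the helper name get_team_list_json_safe is hypothetical; it assumes
# the same DataModel accessors and keeps the bogus-team-number guard):
import json

def get_team_list_json_safe(global_config, comp):
    session = DbSession.open_db_session(global_config['db_name'])
    teams = []
    for team in DataModel.getTeamsInNumericOrder(session, comp):
        if team.team >= 10000:      # same guard against bogus team numbers
            continue
        team_info = DataModel.getTeamInfo(session, int(team.team))
        if team_info:
            teams.append({'team_number': str(team.team), 'nickname': team_info.nickname})
    session.remove()
    if not teams:
        return get_team_list_json_from_tba(global_config, comp)
    return json.dumps({'teams': teams})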
Example n. 6
    def returnCheckerBoardFunction(self, Ndata: int, nx: int, ny: int, delta: float):
        """initCheckerBoardFunction

	Generates two classes similar to a chekerBoard table. One classe would be in the same position 
	as the black part of the board and the second in the white one

	Parameters:
	INPUT:
		Ndata: number of instance of each quadrant
		nx: number of quadrants in x
		ny: number of quadrants in y
		delta: scramble factor, a lower delta means more entangled classes
	OUTPUT:
		cls1, cls2 : objects from classData with encapsulates all data from a given sub-class.

	Example:
		import LoadData
		import matplotlib.pyplot as plt
		myData = LoadData.clsWorkingData()
		cls1, cls2 = myData.initCheckerBoardFunction(50,2,2,0.5)
		plt.plot(cls1.data[:,0],cls1.data[:,1],'g*')
		plt.plot(cls2.data[:,0],cls2.data[:,1],'rd'),
		plt.show()

	Modified:
		(LEO) : 17/01/2016

	Version:
		v0.1

"""

        cls1 = DataModel.clsData(1, 1, 0)
        cls2 = DataModel.clsData(0, 1, 0)
        data1 = []
        data2 = []
        i1 = 0
        i2 = 0

        for k in range(0, Ndata):
            for i in range(0, nx):
                for j in range(0, ny):
                    if divmod(i + j, 2)[1] == 1:
                        dx = -delta + 2.0 * delta * random.random()
                        dy = -delta + 2 * delta * random.random()
                        data1.append([i + dx, j + dy])
                        i1 += 1
                    else:
                        dx = -delta + 2.0 * delta * random.random()
                        dy = -delta + 2.0 * delta * random.random()
                        data2.append([i + dx, j + dy])
                        i2 += 1

        data1 = np.matrix(data1)
        data2 = np.matrix(data2)

        cls1.setData(data1)
        cls2.setData(data2)

        return cls1, cls2
def compareImplementations2():
    (x, y) = DataModel.loadData("..\\train.csv")

    y = y.astype(int)

    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)

    x_sub = x_train[:500,:]
    y_sub = y_train[:500]

    s_my = SimpleNN2.NeuralNetConfig(784, 70, 10)
    s_t = NN_1HL.NN_1HL(reg_lambda = 10, opti_method = 'CG')

    np.random.seed(123)

    thetas = [s_t.rand_init(784,70), s_t.rand_init(70, 10)]
    
    # Check costs
    cost_t = s_t.function(s_t.pack_thetas(thetas[0].copy(), thetas[1].copy()), 784, 70, 10, x_sub, y_sub, 10)
    print("Cost test: ", cost_t)

    cost_my = SimpleNN2.computeCost(s_my, thetas[0], thetas[1], x_sub, y_sub, 10)
    print("Cost my: ", cost_my)

    # Check gradients
    grad_t = s_t.function_prime(s_t.pack_thetas(thetas[0].copy(), thetas[1].copy()), 784, 70, 10, x_sub, y_sub, 10)
    print("Grad sum test: ", np.sum(grad_t))

    grad_my1, grad_my2 = SimpleNN2.computeGrad(s_my, thetas[0], thetas[1], x_sub, y_sub, 10)
    print("Grad sum my: ", np.sum(grad_my1) + np.sum(grad_my2))
def load_team_info( global_config, name=None):
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    
    if name is None:
        global_config['logger'].debug( 'Loading Team Info For All FRC Teams' )
        page = 0
        done = False
        while not done: 
            url_str = '/api/v2/teams/%d' % (page)
            teams_data = TbaIntf.get_from_tba_parsed(url_str)
            if len(teams_data) == 0:
                done = True
            else:
                for team_data in teams_data:
                    DataModel.setTeamInfoFromTba(session, team_data)
                page += 1
    else:
        global_config['logger'].debug( 'Loading Team Info For FRC Team %s' % name )
        '''
        url_str = '/api/v2/team/frc%s/%s' % (team_str,query_str)
        for page in range(0,14): 
            teams_data = TbaIntf.get_from_tba_parsed(url_str)
            for team_data in teams_data:
                setTeamInfoFromTba(session, team_data)
        '''
    session.remove()
Example n. 9
def init_main_table():
    university_list, college_list, company_list, facility_list, normal_list = [], [], [], [], []
    model_list: list = DBConnector.cnki_query_all()
    for model in model_list:
        model: DataModel.CNKIContent
        if '大学' in model.Organ:  # '大学' means "university"
            university_index = model.Organ.index('大学')
            organ = model.Organ[0:university_index + 2]
            # if '实验室' in organ:
            #    organ = organ[0:organ.index('实验室') + 3]
            university_model = DataModel.CNKILocationContent()
            university_model.Organ = organ
            university_model.ori_sid = model.sid
            university_list.append(university_model)
            continue

        if '学院' in model.Organ:  # '学院' means "college"
            organ = model.Organ[0:model.Organ.index('学院') + 2]
            college_model = DataModel.CNKILocationContent()
            college_model.ori_sid = model.sid
            college_model.Organ = organ
            college_list.append(college_model)
            continue

        normal_model = DataModel.CNKILocationContent()
        normal_model.ori_sid = model.sid
        normal_model.Organ = model.Organ
        normal_list.append(normal_model)
    DBConnector.db_list_writer(university_list)
    DBConnector.db_list_writer(college_list)
    DBConnector.db_list_writer(normal_list)
def process_form(global_config, form):
    global_config['logger'].debug( 'Process Attribute Modify Form' )

    season = form[attr_modify_season_label].value
    comp = form[attr_modify_comp_label].value
    team = form[attr_modify_team_number_label].value
    attr_name = form[attr_modify_attribute_name_label].value
    old_value = form[attr_modify_old_value_label].value
    new_value = form[attr_modify_new_value_label].value
    
    # Initialize the database session connection
    db_name  = global_config['db_name'] + global_config['this_season']
    session  = DbSession.open_db_session(db_name)
    
    result = ''
    attrdef_filename = WebCommonUtils.get_attrdef_filename(short_comp=comp)
    if attrdef_filename is not None:
        attr_definitions = AttributeDefinitions.AttrDefinitions(global_config)
        attr_definitions.parse(attrdef_filename)
        attr_def = attr_definitions.get_definition(attr_name)

        try:
            DataModel.modifyAttributeValue(session, team, comp+season, attr_name, old_value, new_value, attr_def)
            result = 'Attribute %s Modified From %s to %s For Team %s' % (attr_name,old_value,new_value,team)
            session.commit()
        except ValueError as reason:   
            result = 'Error Modifying Scouting Attribute %s For Team %s: %s' % (attr_name,team,reason)
    
    session.remove()
    return result
def process_files(global_config, attr_definitions, input_dir, recursive=True):
    start_time = datetime.datetime.now()

    # Initialize the database session connection
    db_name = global_config["db_name"] + global_config["this_season"]
    session = DbSession.open_db_session(db_name)

    some_files_processed = False

    # read the ignore file list config each time through the loop. Any files
    # in the ignore list will be skipped
    ignore_filelist = read_ignore_filelist_cfg(input_dir + "IgnoreFiles.txt")

    # The following regular expression selects all files that conform to the
    # file naming format Team*.txt (e.g. Team1234_Match5.txt). Build a list of
    # all datafiles that match the naming format within the directory passed
    # in via command line arguments.
    file_regex = re.compile(r"Team[a-zA-Z0-9_]+\.txt")
    files = get_files(global_config, session, db_name, input_dir, file_regex, recursive)

    if len(files) > 0:
        log_msg = "files retrieved, elapsed time - %s" % (str(datetime.datetime.now() - start_time))
        print(log_msg)
        global_config["logger"].debug("%s - %s" % (process_files.__name__, log_msg))

        global_config["logger"].debug("%s - %d Files to be processed" % (process_files.__name__, len(files)))

    # Process data files
    for data_filename in files:
        # If the file is on the ignore list (quarantined), then skip it
        if data_filename.split("/")[-1] in ignore_filelist:
            global_config["logger"].debug("%s - Ignoring file: %s" % (process_files.__name__, data_filename))
            continue

        # Make sure that the data file has not already been processed. We have seen cases
        # where the data file gets inserted into the list of files to be processed more than
        # once.
        file_processed = isFileProcessed(global_config, session, db_name, data_filename)
        if not file_processed:
            try:
                global_config["logger"].debug("%s - Processing file: %s" % (process_files.__name__, data_filename))
                process_file(global_config, session, attr_definitions, data_filename)
            except Exception as e:
                global_config["logger"].debug(
                    "%s - Error processing file: %s" % (process_files.__name__, data_filename)
                )
                # log the exception but continue processing other files
                log_exception(global_config["logger"], e)

            # add the file to the set of processed files so that we don't process it again. Do it outside the
            # try/except block so that we don't try to process a bogus file over and over again.
            DataModel.addProcessedFile(session, data_filename)
            some_files_processed = True
        else:
            global_config["logger"].debug(
                "%s - Skipping file: %s, already processed" % (process_files.__name__, data_filename)
            )

        # Commit all updates to the database
        session.commit()
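# read_ignore_filelist_cfg is not shown in this listing. A minimal sketch that is
# consistent with how it is called above (one quarantined filename per line, with
# a missing IgnoreFiles.txt meaning nothing is ignored) might look like:
def read_ignore_filelist_cfg(cfg_path):
    try:
        with open(cfg_path) as cfg:
            return set(line.strip() for line in cfg if line.strip())
    except IOError:
        return set()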
Example n. 12
def readDataModel(path):
    with open(path, 'rb') as file:
        versionNumber = struct.unpack('!H',
                                      file.read(struct.calcsize('!H')))[0]
        if versionNumber != 1:
            raise ValueError(
                'This file was not created with the correct version of the hydrology script.'
            )

        coordinateType = struct.unpack('!B',
                                       file.read(struct.calcsize('!B')))[0]
        rasterResolution = struct.unpack('!f',
                                         file.read(struct.calcsize('!f')))[0]
        edgeLength = struct.unpack('!f', file.read(struct.calcsize('!f')))[0]

        shore = DataModel.ShoreModel(rasterResolution, binaryFile=file)
        hydrology = DataModel.HydrologyNetwork(binaryFile=file)
        cells = DataModel.TerrainHoneycomb(resolution=rasterResolution,
                                           edgeLength=edgeLength,
                                           shore=shore,
                                           hydrology=hydrology,
                                           binaryFile=file)
        terrain = DataModel.Terrain(binaryFile=file)


        return (rasterResolution, edgeLength, shore, hydrology, cells, terrain)
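# The struct format codes above define the binary header readDataModel expects:
# '!H' is a big-endian unsigned 16-bit version number, '!B' an unsigned byte for
# the coordinate type, and '!f' a big-endian 32-bit float. A sketch of the
# matching header writer (hypothetical; the DataModel objects that follow the
# header are assumed to serialize themselves):
import struct

def writeDataModelHeader(out_file, coordinate_type, raster_resolution, edge_length):
    out_file.write(struct.pack('!H', 1))                  # version number
    out_file.write(struct.pack('!B', coordinate_type))
    out_file.write(struct.pack('!f', raster_resolution))
    out_file.write(struct.pack('!f', edge_length))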
def compareImplementations():
    (x, y) = DataModel.loadData("..\\train.csv")

    y = y.astype(int)

    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)

    x_sub = x_train[:500,:]
    y_sub = y_train[:500]

    s_my = SimpleNN.SimpleNN([784, 70, 10])
    s_t = NN_1HL.NN_1HL(reg_lambda = 1, opti_method = 'CG')

    np.random.seed(123)

    thetas = [s_t.rand_init(784,70), s_t.rand_init(70, 10)]

    cost_t = s_t.function(s_t.pack_thetas(thetas[0].copy(), thetas[1].copy()), 784, 70, 10, x_sub, y_sub, 10)
    grad_t = s_t.function_prime(s_t.pack_thetas(thetas[0], thetas[1]), 784, 70, 10, x_sub, y_sub, 10)
    print(cost_t, np.sum(grad_t))

    cost_my = s_my.computeCost(s_my.combineTheta(thetas.copy()), x_sub, y_sub, 10)
    grad_my = s_my.computeGrad(s_my.combineTheta(thetas), x_sub, y_sub, 10)

    print(cost_my, np.sum(grad_my))
def process_delete_attr_form(global_config, form):
    global_config['logger'].debug( 'Process Attribute Delete Form' )

    season = form[attr_delete_season_label].value
    comp = form[attr_delete_comp_label].value
    team = form[attr_delete_team_number_label].value
    attr_name = form[attr_delete_attribute_name_label].value
    old_value = form[attr_delete_old_value_label].value
    
    # Initialize the database session connection
    db_name  = global_config['db_name'] + global_config['this_season']
    session  = DbSession.open_db_session(db_name)
    
    result = ''
    attrdef_filename = WebCommonUtils.get_attrdef_filename(short_comp=comp)
    if attrdef_filename is not None:
        attr_definitions = AttributeDefinitions.AttrDefinitions(global_config)
        attr_definitions.parse(attrdef_filename)
        attr_def = attr_definitions.get_definition(attr_name)

        try:
            DataModel.deleteAttributeValue(session, team, comp+season, attr_name, old_value, attr_def)
            result = 'Scouting Data Attribute Value %s Successfully Removed From %s' % (old_value,attr_name)
            session.commit()
        except ValueError as reason:   
            result = 'Error Removing Scouting Data Attribute Value %s From %s: %s' % (old_value,attr_name,reason)
                
    session.remove()
    return result
Example n. 15
def makeTeamFromDict(d):
    if type(d) != dict: print(d)
    team = DataModel.Team(**d)
    if 'calculatedData' in d.keys():
        team.calculatedData = DataModel.CalculatedTeamData(
            **d['calculatedData'])
    return team
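# A usage sketch for makeTeamFromDict; the dict below is hypothetical and the
# field names are illustrative, since DataModel.Team simply accepts whatever
# keyword fields the dict carries:
d = {'number': 1678, 'name': 'Citrus Circuits',
     'calculatedData': {'avgHighShotsTele': 12.5}}
team = makeTeamFromDict(d)
print(team.number, team.calculatedData.avgHighShotsTele)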
Example n. 16
 def test_create(self):
     table_name = 'password_db'
     table_model = DataModel(table_name, PasswordData)
     table_model.new_table()
     vo = PasswordData("your", "your_password")
     res = table_model.create(vo)
     self.assertEqual(res, 1)
     os.remove("./password_db.db")
Example n. 17
 def test_new_table(self):
     table_name = 'password_db'
     table_model = DataModel(table_name, PasswordData)
     res = table_model.new_table()
     self.assertTrue(res)
     res = table_model.new_table()
     self.assertFalse(res)
     os.remove("./password_db.db")
def test1():
    (x, y) = DataModel.loadData("..\\train.csv")

    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)

    x_sub = x_train[:500,:]
    y_sub = y_train[:500]

    s = SimpleNN.SimpleNN([784, 70, 10])

    #s = Train.trainGradientDescent(s, x_sub, y_sub, 5)
    s = Train.trainSciPy(s, x_sub, y_sub, 5)
    acc_cv = accuracy_score(y_cv, [s.predictClass(w) for w in x_cv])
    print("Accuracy on CV set: {0}", acc_cv)
def test2():
    (x, y) = DataModel.loadData("..\\train.csv")

    y = y.astype(int)

    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)

    x_sub = x_train[:500,:]
    y_sub = y_train[:500]

    s = NN_1HL.NN_1HL(reg_lambda = 1, opti_method = 'CG')
    s.fit(x_sub, y_sub)

    acc_cv = accuracy_score(y_cv, [s.predict(w) for w in x_cv])
    print("Accuracy on CV set: {0}", acc_cv)
Example n. 20
def get_city_and_organ_location_by_google(start_sid):
    models = DBConnector.cnki_location_query_all_min_sid(start_sid)
    api_key = 'AIzaSyBuyQu2l_H3nCgGo84W26VwEVhFTNnm99g'
    for model in models:
        model: DataModel.CNKILocationContent
        if model.City is None:
            continue
        url = 'https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(
            model.City, api_key)
        proxy = {
            'http': 'socks5://127.0.0.1:1086',
            'https': 'socks5://127.0.0.1:1086'
        }
        web = requests.get(url=url, proxies=proxy)
        response = json.loads(web.text)
        if response['status'] != 'OK':
            continue
        print(response['results'][0]['formatted_address'])
        location = response['results'][0]['geometry']['location']
        print(location)
        response_model = DataModel.CNKILocationContent()
        response_model.City_Longitude = location['lng']
        response_model.City_Latitude = location['lat']
        DBConnector.update_cnki_location_city_location(model.sid,
                                                       response_model)
def get_team_comp_list(this_comp, team):

    my_config = ScoutingAppMainWebServer.global_config
    complist = list()

    if this_comp is None:
        this_comp = my_config["this_competition"] + my_config["this_season"]
        season = my_config["this_season"]
    else:
        season = map_comp_to_season(this_comp)

    complist.append(this_comp)

    team_complist = WebTeamData.get_team_event_list_from_tba(my_config, team, season)
    if not team_complist:
        session = DbSession.open_db_session(my_config["db_name"] + my_config["this_season"])
        team_scores = DataModel.getTeamScore(session, team)
        for score in team_scores:
            comp = score.competition.upper()
            # currently, the competition season is stored in the database
            # as part of the competition, so it needs to be added for the
            # comparison, but not when the complist itself is defined
            if comp != this_comp.upper():
                complist.append(comp)
        session.remove()

    else:
        for comp in team_complist:
            if comp.upper() != this_comp.upper():
                complist.append(comp)

    return complist
Example n. 22
 def __init__(self, frame):
     self.model = dm.DataModel()
     self.frame = frame
     self.buttons = list()
     self.spacing = 15
     self.declareButtons()
     self.popUp = list()
Example n. 23
def get_city_and_organ_location_by_tencent(start_sid):
    models = DBConnector.cnki_location_query_all()
    api_key = 'PFSBZ-GTOKP-JWHD3-VWKIL-KI4BQ-4YFQP'
    count = 0
    for model in models:
        model: DataModel.CNKILocationContent
        if model.sid < start_sid:
            continue
        url = 'https://apis.map.qq.com/ws/geocoder/v1/?address={0}&key={1}'.format(
            model.Organ, api_key)
        web = requests.get(url)
        time.sleep(0.3)
        response = json.loads(web.text)
        print(response)
        if response['status'] != 0:
            continue
        response_model = DataModel.CNKILocationContent()
        response_model.Province = response['result']['address_components'][
            'province']
        response_model.City = response['result']['address_components']['city']
        response_model.District = response['result']['address_components'][
            'district']
        response_model.Organ_Longitude = response['result']['location']['lng']
        response_model.Organ_Latitude = response['result']['location']['lat']
        DBConnector.update_cnki_location_city(model.sid, response_model)
        count = count + 1
        if count == 5000:
            break
Example n. 24
def get_team_attr_rankings_page(global_config, comp, attr_name):
        
    global_config['logger'].debug( 'GET Team Attribute Rankings' )
    
    session = DbSession.open_db_session(global_config['db_name'])
        
    attrdef_filename = './config/' + global_config['attr_definitions']
    attr_definitions = AttributeDefinitions.AttrDefinitions()
    attr_definitions.parse(attrdef_filename)
    attr = attr_definitions.get_definition(attr_name)
    try:
        stat_type = attr['Statistic_Type']
    except (KeyError, TypeError):
        stat_type = 'Total'

    web.header('Content-Type', 'application/json')
    result = []
    result.append('{ "rankings": [\n')
            
    team_rankings = DataModel.getTeamAttributesInRankOrder(session, comp, attr_name, False)
    for team in team_rankings:
        if stat_type == 'Average':
            value = int(team.cumulative_value/team.num_occurs)
        else:
            value = int(team.cumulative_value)
        data_str = '{ "team": %d, "value": %d }' % (team.team,value)
        result.append(data_str)
        result.append(',\n')
    if len(team_rankings) > 0:
        result = result[:-1]
        result.append('\n')
    result.append(']}')
    session.remove()
    return ''.join(result)
Example n. 25
 def doFirstCalculationsForTeam(self, team):
     if self.su.getCompletedTIMDsForTeam(team):
         if not self.su.teamCalculatedDataHasValues(team.calculatedData):
             team.calculatedData = DataModel.CalculatedTeamData()
         t = team.calculatedData
         firstCalculationDict(team, self)
         print('> Completed first calcs for ' + str(team.number))
def TIMDCalcDict(timd, calc):
    if (not calc.su.TIMCalculatedDataHasValues(timd.calculatedData)):
        timd.calculatedData = DataModel.CalculatedTeamInMatchData()
    team = calc.su.getTeamForNumber(timd.teamNumber)
    match = calc.su.getMatchForNumber(timd.matchNumber)
    c = timd.calculatedData
    c.numGearsPlacedAuto = calc.getTotalValueForValueDict(
        timd.gearsPlacedByLiftAuto)
    c.numGearsPlacedTele = calc.getTotalValueForValueDict(
        timd.gearsPlacedByLiftTele)
    c.avgKeyShotTime = calc.getAvgKeyShotTimeForTIMD(timd, 'Key')
    c.avgHopperShotTime = calc.getAvgKeyShotTimeForTIMD(timd, 'Hopper')
    c.numHighShotsTele = calc.weightFuelShotsForDataPoint(
        timd, match, 'teleopFuelHigh', timd.highShotTimesForBoilerTele)
    c.numHighShotsAuto = calc.weightFuelShotsForDataPoint(
        timd, match, 'autoFuelHigh', timd.highShotTimesForBoilerAuto)
    c.numLowShotsTele = calc.weightFuelShotsForDataPoint(
        timd, match, 'teleopFuelLow', timd.lowShotTimesForBoilerTele)
    c.numLowShotsAuto = calc.weightFuelShotsForDataPoint(
        timd, match, 'autoFuelLow', timd.lowShotTimesForBoilerAuto)
    c.liftoffAbility = calc.liftoffAbilityForTIMD(timd)
    c.wasDisfunctional = utils.convertFirebaseBoolean(
        timd.didStartDisabled +
        utils.convertFirebaseBoolean(timd.didBecomeIncapacitated))
    c.disfunctionalPercentage = utils.convertFirebaseBoolean(
        timd.didStartDisabled) + 0.5 * utils.convertFirebaseBoolean(
            timd.didBecomeIncapacitated)
    c.numRPs = calc.RPsGainedFromMatchForAlliance(
        team.number in match.redAllianceTeamNumbers, match)
def get_team_scouting_notes_json(global_config, comp, name, store_json_file=False):
    
    global_config['logger'].debug( 'GET Team %s Scouting Notes For Competition %s', name, comp )
    
    season = WebCommonUtils.map_comp_to_season(comp)
    session = DbSession.open_db_session(global_config['db_name'] + season)

    result = []

    result.append('{ "competition" : "%s", "team" : "%s",\n' % (comp,name))
    result.append('  "scouting_notes" : [\n')

    team_notes = DataModel.getTeamNotes(session, name, comp)
    for note in team_notes:
        result.append('   { "tag": "%s", "note": "%s" }' % (note.tag,note.data))
        result.append(',\n')
        
    if len(team_notes) > 0:         
        result = result[:-1]

    result.append(' ] }\n')
    
    json_str = ''.join(result)

    if store_json_file is True:
        FileSync.put( global_config, '%s/EventData/TeamData/team%s_scouting_notes.json' % (comp,name), 'text', json_str)
        
    session.remove()
    return json_str
Example n. 28
    def _fixup_seq_numbers(data, update_seq_array, size_update_seq, update_seq,
                           bytes_per_sector):
        log = Helper.logger()

        size_in_bytes = data.getDataSize()

        ## apply fixup
        k = 0
        i = 0

        fixup_array = DataModel.BufferDataModel(update_seq_array, 'fixup')
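        # NTFS writes the update sequence number into the last two bytes of
        # every sector of the record and saves the bytes it displaced in the
        # fixup array; the loop below checks each sector's tag against
        # update_seq and restores the saved bytes.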

        while k < size_in_bytes:
            if i >= size_update_seq:
                break

            k += bytes_per_sector
            seq = data.getWORD(k - 2)

            fixup = fixup_array.getWORD(i * 2)

            log.debug(
                '\tlast two bytes of sector: {:04x}, fixup {:04x}'.format(
                    seq, fixup))

            if seq != update_seq:
                log.debug(
                    '\tupdate sequence check failed, image may be corrupt, continue anyway'
                )

            fixup_s = fixup_array.getStream(i * 2, i * 2 + 2)
            data.getData()[k - 2:k] = fixup_s
            i += 1
Example n. 29
    def _fetch_vcn(self, vcn, data_run, datamodel):
        log = Helper.logger()
        file_record = self.file_record

        (n, lcn), rel_vcn = data_run

        log.debug('\t\tVCN relative to data-run: {}'.format(rel_vcn))

        bytes_per_cluster = file_record.sectors_per_cluster * file_record.bytes_per_sector
        file_offset = (
            lcn + rel_vcn
        ) * self.file_record.sectors_per_cluster * self.file_record.bytes_per_sector
        #size_in_bytes     = n * self.file_record.sectors_per_cluster * self.file_record.bytes_per_sector

        # only one vcn
        # is it possible to have more than one cluster/entry ? !TODO
        size_in_bytes = 1 * self.file_record.sectors_per_cluster * self.file_record.bytes_per_sector

        clusters = datamodel.getStream(file_offset,
                                       file_offset + size_in_bytes)

        log.debug(
            '\t\tINDX: 0x{:04x} clusters @ LCN 0x{:04x}, @ f_offset 0x{:x}, size_in_bytes {}'
            .format(n, lcn, file_offset, size_in_bytes))

        # buffered data model
        data = DataModel.BufferDataModel(clusters, 'lcn')
        return data
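# As a worked example of the offset arithmetic above: with the common geometry of
# 512-byte sectors and 8 sectors per cluster, a data run at LCN 0x1000 fetched at
# rel_vcn 2 gives file_offset = (0x1000 + 2) * 8 * 512 = 0x1002000 bytes, and each
# call reads exactly one 8 * 512 = 4096-byte cluster.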
Example n. 30
def get_team_info_json(global_config, comp, name, store_json_file=False):   
    global_config['logger'].debug( 'GET Team %s Info', name )
    
    season = WebCommonUtils.map_comp_to_season(comp)
    session = DbSession.open_db_session(global_config['db_name'] + season)
    
    team_info = DataModel.getTeamInfo(session, int(name))
    
    if team_info is None:
        json_str = ''
    else:
        result = []
        result.append('{ "team": "%s", "team_data" : [\n' % name)
        result.append('   { "name": "%s", "value": "%s" }' % ('nickname', team_info.nickname))
        result.append(',\n')
        result.append('   { "name": "%s", "value": "%s" }' % ('affiliation', team_info.fullname))
        result.append(',\n')
        result.append('   { "name": "%s", "value": "%s" }' % ('location', team_info.location))
        result.append(',\n')
        result.append('   { "name": "%s", "value": "%s" }' % ('rookie_season', team_info.rookie_season))
        result.append(',\n')
        result.append('   { "name": "%s", "value": "%s" }' % ('website', team_info.website))
        result.append('\n')
        
        result.append(' ] }\n')
        
        json_str = ''.join(result)
    
        if store_json_file is True:
            FileSync.put( global_config, '%s/EventData/TeamData/team%s_teaminfo.json' % (comp,name), 'text', json_str)

    session.remove()
    return json_str
Example n. 31
def file_db_writer():
    files = os.listdir('new_vec')
    for filename in files:
        reader = open('new_vec/{0}'.format(filename),
                      'r',
                      encoding='gbk',
                      errors='ignore')
        print('Processing file {0}......'.format(filename))
        model_list = []
        list_count = 0
        while True:
            line = reader.readline()
            if not line:
                break
            model = DataModel.WeiboWord2Vec()
            try:
                weibo_sid = int(line.split(':')[0])
                if weibo_sid <= 9808:
                    continue
                model.weibo_sid = weibo_sid
                model.word = line.split(':')[1]
                model.vector = line.split(':')[2]
            except IndexError:
                model.vector = None
                print(line)
                continue
            except ValueError:
                print(line)
                continue
            model_list.append(model)
            list_count = list_count + 1
            if list_count == 10000:
                DBConnector.db_list_writer(model_list)
                model_list.clear()
                list_count = 0
        # write out any models left over after the last full batch
        if model_list:
            DBConnector.db_list_writer(model_list)
def update_team_data_files( global_config, year, event, directory, team=None ):
    
    global_config['logger'].debug( 'Updating Team DataFiles' )

    session = DbSession.open_db_session(global_config['db_name'] + year)
    comp = event+year

    result = False
    team_list = []
    if team is None or team == '':
        team_list = DataModel.getTeamsInNumericOrder(session, comp)
    else:
        team_list.append(team)
        
    # for now, we only support updating files in the TeamData directory, so only continue if that's the 
    # directory that was specified.
    if directory.upper() == 'TEAMDATA' or directory.upper() == 'EVENTDATA':
        for team_entry in team_list:
            
            # TODO: added a special test here to skip teams with a number greater than 10000. Some data
            # was erroneously entered with team numbers really high...
            if team_entry.team < 10000:
                get_team_info_json(global_config, comp, team_entry.team, store_json_file=True)
                get_team_score_json(global_config, team_entry.team, comp, store_json_file=True)
                get_team_score_breakdown_json(global_config, team_entry.team, comp, store_json_file=True)
                get_team_scouting_notes_json(global_config, comp, team_entry.team, store_json_file=True)           
                get_team_scouting_mediafiles_json(global_config, comp, str(team_entry.team), store_json_file=True)
                get_team_scouting_datafiles_json(global_config, comp, str(team_entry.team), store_json_file=True)
                get_team_scouting_data_summary_json(global_config, comp, team_entry.team, attr_filter=[], filter_name=None, store_json_file=True)
        result = True
        
    session.remove()
    return result
def get_team_score_json(global_config, name, comp, store_json_file=False):
        
    global_config['logger'].debug( 'GET Team %s Score For Competition %s', name, comp )
    
    season = WebCommonUtils.map_comp_to_season(comp)
    session = DbSession.open_db_session(global_config['db_name'] + season)
    
    result = []
    result.append('{ "competition" : "%s", "team" : "%s", ' % (comp,name))
    team_scores = DataModel.getTeamScore(session, name, comp)
    if len(team_scores)==1:
        result.append('"score": "%s" }' % team_scores[0].score)
    else:
        result.append('  "score": [')
        for score in team_scores:
            result.append(score.json())
            result.append(',\n')
        if len(team_scores) > 0:
            result = result[:-1]
        result.append(']}')
        
    json_str = ''.join(result)
    
    if store_json_file is True:
        FileSync.put( global_config, '%s/EventData/TeamData/team%s_scouting_score.json' % (comp,name), 'text', json_str)
        
    session.remove()
    return json_str
Example n. 34
    def returnBananaDataset(self, N: (int, int), p: (float, float), r: float, s: float):
        """initCheckerBoardFunction

	Generates two classes similar to two bananas - semi-circle classes. One classe is mirrowed and shifted related to another.

	Parameters:
	INPUT:
		N: (int,int) tuple, indicating the number of instances of each class
		p: (float,float) a tuple responsible for shifting the center of each class
		r: the ray of the "bananas"
		s: scramble factor, a higher s means more entangled classes
	OUTPUT:
		cls1, cls2 : objects from classData with encapsulates all data from a given sub-class.

	Example:
		import LoadData
		import matplotlib.pyplot as plt
		myData = LoadData.clsWorkingData()
		cls1, cls2 = myData.returnBananaDataset((200,200), (1.0,1.0), 1.7, 0.2)
		plt.plot(cls1.data[:,0],cls1.data[:,1],'g*')
		plt.plot(cls2.data[:,0],cls2.data[:,1],'rd'),
		plt.show()

	Modified:
		(LEO) : 17/01/2016

	Version:
		v0.1

"""
        cls1 = DataModel.clsData(1, 1, 0)
        cls2 = DataModel.clsData(0, 1, 0)

        # the scramble factor s scales the uniform noise added to each class
        domaina = np.array(0.125 * np.pi + np.random.rand(N[0], 1) * 1.25 * np.pi)
        data1 = np.matrix(np.column_stack((r * np.sin(domaina), r * np.cos(domaina)))) + s * np.matrix(
            np.random.rand(N[0], 2)
        )
        cls1.setData(data1)

        # the second class draws N[1] instances, mirrored and shifted by p
        domainb = np.array(0.375 * np.pi - np.random.rand(N[1], 1) * 1.25 * np.pi)
        data2 = np.matrix(np.column_stack((r * np.sin(domainb) - p[0], r * np.cos(domainb) - p[1]))) + s * np.matrix(
            np.random.rand(N[1], 2)
        )
        cls2.setData(data2)

        return cls1, cls2
def test3():
    (x, y) = DataModel.loadData("..\\train.csv")

    (x_train, x_cv, y_train, y_cv) = DataModel.splitData(x, y)

    x_sub = x_train[:20000,:]
    y_sub = y_train[:20000]

    s = SimpleNN2.NeuralNetConfig(784, 70, 10)

    regLambda = 6.84
    #s = Train.trainGradientDescent(s, x_sub, y_sub, 5)
    th1, th2 = Train.trainSciPy2(s, x_sub, y_sub, regLambda)
    #th1, th2 = Train.trainGradientDescent2(s, x_sub, y_sub, 5)

    acc_cv = accuracy_score(y_cv, [SimpleNN2.predictClass(s, th1, th2, w) for w in x_cv])
    print("Accuracy on CV set: {0}".format(acc_cv))
def isFileProcessed(global_config, session, db_name, filepath):
    # default to False so an unrecognized db_name doesn't raise UnboundLocalError
    is_processed = False
    if db_name == (global_config["db_name"] + global_config["this_season"]):
        is_processed = DataModel.isFileProcessed(session, filepath)
    elif db_name == (global_config["issues_db_name"] + global_config["this_season"]):
        is_processed = IssueTrackerDataModel.isFileProcessed(session, filepath)
    elif db_name == (global_config["debriefs_db_name"] + global_config["this_season"]):
        is_processed = DebriefDataModel.isFileProcessed(session, filepath)

    return is_processed
Example n. 37
	def __init__(self, set_id = 0, in_collision = 0, breaks = 0, data = None, obj_data = None):

		# avoid shared mutable defaults: build a fresh list / clsData per call
		if data is None:
			data = []
		if obj_data is None:
			obj_data = DataModel.clsData()

		if len(obj_data.data)==0:
			DataModel.clsData.__init__(self,set_id,in_collision,breaks,data)
		else:
			DataModel.clsData.__init__(self,obj_data.set_id,obj_data.in_collision,obj_data.breaks,obj_data.data)

		self.projections = Projections.clsProjectionOverAxis(self.data,self.word_ref_center,self.pi)
def isFileProcessed(session, db_name, filepath):
    # default to False so an unrecognized db_name doesn't raise UnboundLocalError
    is_processed = False
    if db_name == global_config['db_name']:
        is_processed = DataModel.isFileProcessed(session, filepath)
    elif db_name == global_config['issues_db_name']:
        is_processed = IssueTrackerDataModel.isFileProcessed(session, filepath)
    elif db_name == global_config['debriefs_db_name']:
        is_processed = DebriefDataModel.isFileProcessed(session, filepath)

    return is_processed
Example n. 39
    def returnLineSegmentDataset(self, p: (float, float), r: float):
        """initCheckerBoardFunction


		v0.1

"""
        cls1 = DataModel.clsData(1, 1, 0)
        cls2 = DataModel.clsData(0, 1, 0)

        x = np.array(np.arange(p[0], p[1], r)).transpose()
        n = x.shape[0]
        y = np.array(x) + np.random.rand(1, n) * r
        data = np.vstack((x, y))

        cls1.setData(data.transpose())

        return cls1
def process_attr_def_form(global_config, form):
    global_config['logger'].debug( 'Process Attribute Definitions Form' )
    
    attrdef_filename = WebCommonUtils.get_attrdef_filename(global_config['this_competition'])
    if attrdef_filename is not None:
        attr_definitions = AttributeDefinitions.AttrDefinitions(global_config)
        attr_definitions.parse(attrdef_filename)
        attr_dict = attr_definitions.get_definitions()

        for key, attr_def in sorted(attr_dict.items()):
            attr_def['Weight'] = form[key].value
                            
        attr_definitions.write_attr_overrides()
        competition = global_config['this_competition'] + global_config['this_season']
        if not competition:
            raise Exception( 'Competition Not Specified!')
            
        DataModel.recalculate_scoring(global_config, competition, attr_definitions)
Example n. 41
def setDataForMatch(match):
    m = DataModel.Match()
    f = lambda key: [
        match["alliances"]["red"][key], match["alliances"]["blue"][key]
    ]
    m.number, m.redAllianceTeamNumbers, m.blueAllianceTeamNumbers = int(
        match["match_number"]), f("teams")[0], f("teams")[1]
    m.redScore, m.blueScore, m.TIMDs = 0, 0, []
    return m
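# A usage sketch for setDataForMatch with a hypothetical slice of the TBA v2
# match payload it appears to consume:
match_json = {
    'match_number': 42,
    'alliances': {
        'red': {'teams': ['frc1678', 'frc254', 'frc971']},
        'blue': {'teams': ['frc118', 'frc148', 'frc2056']},
    },
}
m = setDataForMatch(match_json)
print(m.number, m.redAllianceTeamNumbers)   # 42 ['frc1678', 'frc254', 'frc971']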
Example n. 42
def cnki_read_from_file(filename, is_test=True):
    reader = open(
        r'C:\Users\macha\iCloudDrive\Documents\DataSource\核心期刊数据\汇总\{0}'.
        format(filename),
        errors='ignore',
        encoding='gbk')
    model_list = []
    while True:
        line = reader.readline()
        if not line:
            break
        items = line.split('\t')
        # SrcDatabase Title Author Organ Source Keyword Summary PubTime FirstDuty Fund Year Period Volume Period
        # PageCount
        if items[2] == '':
            continue
        if items[0] == 'SrcDatabase-来源库':
            continue
        if is_test:
            model = DataModel.CNKIContent()
        else:
            model = DataModel.CNKIMainContent()
        model.SrcDatabase = items[0]
        model.Title = items[1]
        model.Author = process_author_list(items[2])
        organ = process_organ(items[0], items[3])
        organ_list = []
        for item in organ.split(';'):
            if re.search(r'\d{6}', item):
                continue
            organ_list.append(item)
        model.Organ = ';'.join(organ_list)
        model.Source = items[4]
        model.Keyword = process_keyword(items[0], items[5])
        model.Summary = process_summary(items[6])
        model.PubTime = items[7]
        model.FirstDuty = items[8]
        model.Fund = items[9]
        if items[10] != '':
            model.Year = int(items[10])
        model_list.append(model)
        print('Loaded {0} records'.format(str(len(model_list))))
    DBConnector.db_list_writer(model_list)
    print('{0} import finished with {1} records in total!'.format(filename, str(len(model_list))))
def get_team_attributes_page(global_config):
        
    global_config['logger'].debug( 'GET Team Attributes' )
    
    session = DbSession.open_db_session(global_config['db_name'] + global_config['this_season'])
    comp = global_config['this_competition'] + global_config['this_season']

    attrdef_filename = WebCommonUtils.get_attrdef_filename(comp=comp)
    attr_definitions = AttributeDefinitions.AttrDefinitions(global_config)
    attr_definitions.parse(attrdef_filename)
    
    web.header('Content-Type', 'application/json')
    result = []
    result.append('{ "attributes": [\n')
    team_rankings = DataModel.getTeamsInRankOrder(session, comp)
    for team_entry in team_rankings:
        result.append("{ 'Team': " + str(team_entry.team))
        result.append(", 'Score': " + '%.2f' % team_entry.score )
        team_attributes = DataModel.getTeamAttributesInOrder(session, team_entry.team, comp)
        for attribute in team_attributes:
            attr_def = attr_definitions.get_definition( attribute.attr_name )
            if attr_def:
                weight = int(float(attr_def['Weight']))
                if weight != 0:
                    result.append( ", '" + attribute.attr_name + "': ")
                    if ( attr_def['Statistic_Type'] == 'Total'):
                        #result.append( str(attribute.cumulative_value) )
                        result.append( DataModel.mapValueToString(attribute.cumulative_value, attribute.all_values, attr_def, True) )
                    elif ( attr_def['Statistic_Type'] == 'Average'):
                        #result.append( str(attribute.avg_value) )
                        result.append( DataModel.mapValueToString(attribute.avg_value, attribute.all_values, attr_def, True) )
                    else:
                        #result.append( str(attribute.attr_value) )
                        result.append( DataModel.mapValueToString(attribute.attr_value, attribute.all_values, attr_def, True) )
                    
        result.append(' }')
        result.append(',\n')
    if len(team_rankings) > 0:
        result = result[:-1]
        result.append('\n')
    result.append(']}')
    session.remove()
    return ''.join(result)
Example n. 44
 def reset_data(self):
     self.data_model = DataModel.DataModel()
     self.data_model.logger = Logger
     self.img = None
     self.texture = None
     self.texture_stars = None
     self.focal_length = 0
     self.crop_factor = 0
     self.img_list = None
     self.output_name = "aligned.tif"
     gc.collect()
Example n. 45
 def companyList2Db(self):
     if not self.fundCompanyList:
         return
     for k, company in enumerate(self.fundCompanyList):
         item = DataModel.FundCompany(code=company[0], name=company[1])
         self.session.add(item)
         logger.debug("%d company records is saved." % (k + 1))
     try:
         self.session.commit()
     except Exception as e:
         logger.error(e)
         self.session.rollback()
Example n. 46
def init_location_table():
    models: [DataModel.CNKIMainContent] = DBConnector.query_all(DataModel.CNKIMainContent)
    organ_model_list = []
    for model in models:
        if not model.Organ:
            continue
        for organ in model.Organ.split(';'):
            organ_model = DataModel.CNKILocationContent()
            organ_model.organ_full_name = organ
            organ_model.ori_sid = model.sid
            organ_model_list.append(organ_model)
    DBConnector.db_list_writer(organ_model_list)
Example n. 47
 def __init__(self, competition):
     super(Calculator, self).__init__()
     warnings.simplefilter('error', RuntimeWarning)
     self.comp = competition
     self.TBAC = TBACommunicator.TBACommunicator()
     self.TBAC.eventCode = self.comp.code
     self.ourTeamNum = 1678
     self.monteCarloIterations = 100
     self.su = SchemaUtils(self.comp, self)
     self.averageTeam = DataModel.Team()
     self.averageTeam.number = -1
     self.reportedTIMDs = []
     self.averageTeam.name = 'Average Team'
     self.surrogateTIMDs = []
     self.writtenMatches = []
     self.teleGearIncrements = [0, 2, 6, 12]
     self.autoGearIncrements = [1, 3, 7, 13]
     self.gearsPerRotor = [1, 2, 4, 6]
     self.gearRangesAuto = [
         range(1, 3), range(3, 7),
         range(7, 13),
         range(13, 14)
     ]
     self.gearRangesTele = [
         range(2), range(2, 6),
         range(6, 12), range(12, 13)
     ]
     # self.lifts = ['lift1', 'lift2', 'lift3']
     self.lifts = ['allianceWall', 'hpStation', 'boiler']
     self.shotKeys = {
         'autoFuelLow': 'avgLowShotsAuto',
         'autoFuelHigh': 'avgHighShotsAuto',
         'teleopFuelLow': 'avgLowShotsTele',
         'teleopFuelHigh': 'avgHighShotsTele'
     }
     self.boilerKeys = {
         'autoFuelLow': 'lowShotTimesForBoilerAuto',
         'autoFuelHigh': 'highShotTimesForBoilerAuto',
         'teleopFuelLow': 'lowShotTimesForBoilerTele',
         'teleopFuelHigh': 'highShotTimesForBoilerTele'
     }
     self.cachedTeamDatas = {}
     self.cachedComp = cache.CachedCompetitionData()
     self.cachedTeamDatas[self.averageTeam.number] = cache.CachedTeamData(
         **{'teamNumber': self.averageTeam.number})
     for t in self.comp.teams:
         self.cachedTeamDatas[t.number] = cache.CachedTeamData(
             **{'teamNumber': t.number})
Example n. 48
 def fundList2Db(self):
     if not self.fundList:
         return
     for k, fund in enumerate(self.fundList):
         item = DataModel.FundBasic(code=fund[0],
                                    name=fund[2],
                                    type_name=fund[3],
                                    pinyin_name=fund[4],
                                    pinyin_brief=fund[1])
         self.session.add(item)
         logger.debug("%d records is saved." % (k + 1))
     try:
         self.session.commit()
     except Exception:
         self.session.rollback()
Example n. 49
 def insert_data_info(self, file_info, folder_level1, extension, date_str,
                      url, area_result, dict_category, source_result,
                      local_dir):
     """
     封装实体
     :param file_info:
     :param extension:
     :param date_str:
     :param location:
     :param url:
     :return:
     """
     # # 4.1 [to-do] look up the matching sea-area ID (default sea area for now)
     # area_result = self.dao.find_by_name(DataArea, 'China Sea')
     # # 4.2 [to-do] look up the data-source ID (China for now)
     # source_result = self.dao.find_by_name(DataSource, 'China')
     # # 4.3 [to-do] look up the data-category ID (China for now)
     # category_result = self.dao.find_by_name(DataCategory, folder_level1)
     info = DataModel.DataDataInfo()
     info.is_delete = 0
     info.gmt_create = datetime.datetime.now()
     info.gmt_modified = datetime.datetime.now()
     info.name = file_info
     info.extensions = extension
     # [to-do] no remarks field for now
     # store the timestamp in MySQL as a date type
     info.date = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
     info.area_id = area_result.id
     info.category_id = dict_category.get(folder_level1)
     info.source_id = source_result.id
     info.url = url
     # [to-do] the FTP file's last-modified date is not captured yet
     info.size = os.path.getsize(local_dir)
     info.location = extension
     self.dao.insert_one(info)
Example n. 50
def getData():
    '''
    Gets the data from the csv file
    Returns a list of Schools
    '''
    f_open = open(FILENAME, 'r')
    #Ignore labels
    f_open.readline()
    f_open.readline()
    schools = []
    for line in f_open.readlines():
        try:
            schools.append(DataModel.School(line))
        except Exception:
            print("Could not parse", line)
    return schools
Example n. 51
	def __init__(self):
		## Configuration
		self.settings = configparser.ConfigParser()
		self.settings['settings'] = {}
		self.settings['hardware'] = { 'type': 'none' }
		self.settings['view'] = {
									'startFreq': '1000000',
									'stopFreq': '10000000',
									'showMarker': 'True',
									'colorScheme': '1',
									'dBDivIndex': '4',
									'numSamplesIndex': '2',
									'refLevel': '10'
								}

		self.model = DataModel.DataModel()
Example n. 52
def write_zhihu_csv():
    for i in range(1, 2834):
        result_list = get_top_tf_idf_word_zhihu(30, i)
        model = DataModel.ZhihuCut()
        model.sid = i
        word_list = []
        for item in result_list:
            word = item[1]
            word_list.append(word)
        model.top_word_cut = '\t'.join(word_list)
        zhihu_sid = DBConnector.get_zhihu_sid_by_cut_sid(i)
        vector_result = ZhihuProcess.vector_merge(zhihu_sid)
        if vector_result == 'empty':
            continue
        model.top_words_vector = vector_result
        DBConnector.update_zhihu_top_cut(model)
        print('Finished zhihu_cut_sid={0}'.format(str(i)))
Example n. 53
def get_location_by_organ_city(organ, city):
    r = redis.Redis(host='192.168.22.197',
                    port=8173,
                    db=1,
                    encoding='gbk',
                    decode_responses=True)
    if r.exists(organ):
        return True
    params = {
        'address': organ,
        'city': city,
        'key': 'f60f13bfa6f8ebc1952e6d21b72ccd11'
    }
    url = 'https://restapi.amap.com/v3/geocode/geo'
    web = requests.get(url, params=params)
    response = json.loads(web.text)
    print(response)
    response_model = DataModel.UniversityLocation()
    response_model.university_name = organ
    if response['status'] == '0' or response['count'] == '0':
        DBConnector.db_writer(response_model)
        return False
    response_model.province = response['geocodes'][0]['province']
    city = response['geocodes'][0]['city']
    if type(city) is str:
        response_model.city = city
    else:
        response_model.city = None
    district = response['geocodes'][0]['district']
    if type(district) is str:
        response_model.district = district
    else:
        response_model.district = None
    response_model.longitude = float(
        response['geocodes'][0]['location'].split(',')[0])
    response_model.latitude = float(
        response['geocodes'][0]['location'].split(',')[1])
    location_dict = dict(province=response_model.province,
                         city=response_model.city,
                         district=response_model.district,
                         longitude=response_model.longitude,
                         latitude=response_model.latitude)
    r.hmset(organ, location_dict)
    print('Added location info for {0}'.format(organ))
    return True
Example n. 54
def assemble_model():
    model = DataModel.Zhihu()
    model.url = text_list[0]
    model.author = text_list[1]
    model.question = text_list[2]
    model.post_date = datetime.datetime.strptime(date_str, '%Y-%m-%d')
    model.like_num = int(like)
    model.comment_num = int(comment)
    # '编辑于' ("edited on") / '发布于' ("posted on") mark the footer appended to Zhihu answers
    index = answer_ori.rfind('编辑于')
    if index == -1:
        index = answer_ori.rfind('发布于')
    answer = answer_ori[:index]
    result, count = re.subn('<.*?>', '', answer)
    if count == 0:
        model.answer = answer
    else:
        model.answer = result
    return model
Example n. 55
def delete_small_weibo_cut():
    db_session: sessionmaker = create_db_session(DBName.MySQL)
    new_session = db_session()
    models = new_session.query(DataModel.WeiboCut).all()
    new_session.close()
    model_list = []
    for model in models:
        model: DataModel.WeiboCut
        if model.words_vector is None:
            continue
        if len(model.word_cut.split('\t')) < 3:
            continue
        new_model = DataModel.WeiboCutNoShort()
        new_model.weibo_sid = model.weibo_sid
        new_model.weibo_content = model.weibo_content
        new_model.word_cut = model.word_cut
        new_model.words_vector = model.words_vector
        model_list.append(new_model)
    db_list_writer(model_list)
Example n. 56
    def __init__(self):

        super(RaspberryController, self).__init__()

        self.instance = None

        self.setupUi(self)

        self.setup_actions()

        self.setup_openmic_controller()

        self.data_model = DataModel.DataModel(self.agentsComboBox,
                                              self.tasksComboBox,
                                              self.showTaskAgentsComboBox)

        self.initialize_ui()

        self.show()
Example n. 57
def zhihu_word_cut():
    models = DBConnector.query_all(DataModel.ZhihuClean)
    cutter = WordCutter.WordCut(stop_word_list='stoplist_最终使用版.txt')
    cut_model_list = []
    for model in models:
        model: DataModel.ZhihuClean
        result_code, result, result_json = cutter.word_cut_baiduyun(model.answer)
        if result_code != 0:
            print(model.sid)
            print(result)
            continue
        cut_model = DataModel.ZhihuCut()
        cut_line = result
        cut_model.zhihu_sid = model.sid
        cut_model.zhihu_answer = model.answer
        cut_model.answer_word_cut = cut_line
        print(cut_line)
        cut_model_list.append(cut_model)
    DBConnector.db_list_writer(cut_model_list)
    def __init__(
            self, n=100000, border_value=1.5,
            input_parameters_below_border=(1,),
            input_parameters_above_border=(0.5,),
            output_parameters_below_border=(0.5,),
            output_parameters_above_border=(1,),
            input_intensity=(0.5,),
            output_intensity=(1,),
            input_distribution="exponential",
            output_distribution="exponential",
            input_time_distribution="exponential",
            output_time_distribution="exponential",
            ):

        # distribution law of volumes of receipt of the resource
        self._input_distribution = input_distribution

        # distribution law of volumes of resource loss
        self._output_distribution = output_distribution

        # distribution law of lengths of time intervals
        self._input_time_distribution = input_time_distribution
        self._output_time_distribution = output_time_distribution

        # parameters of distribution law of volumes of receipt of the resource (below and above S-border)
        self._input_parameters_below_border = input_parameters_below_border
        self._input_parameters_above_border = input_parameters_above_border

        # parameters of distribution law of volumes of resource loss (below and above S-border)
        self._output_parameters_below_border = output_parameters_below_border
        self._output_parameters_above_border = output_parameters_above_border

        # parameters of distribution law of lengths of time intervals
        self._input_intensity = input_intensity
        self._output_intensity = output_intensity

        # number of iterations
        self._n = n

        self._border_value = border_value

        self._data = DataModel.DataModel(n)
Example n. 59
    def sel_inventory(text_widget):
        inventory = []
        pp = DP.InventoryProcessor('Python210FinalDB.db')
        sql = pp.build_sel_code()
        for row in pp.execute_sql_code(sql):
            inventory.append(DM.Inventory(row[0], row[1]))
        pp.db_con.commit()
        pp.db_con.close()

        text_widget['state'] = 'normal'
        text_widget.delete(1.0, tk.END)

        if not inventory:
            text_widget.insert(tk.END, "No data available")
        else:
            text_widget.insert(tk.END, "InventoryID | InventoryDate\n")
            for row in inventory:
                text_widget.insert(tk.END, str(row) + "\n")
        text_widget['state'] = 'disabled'
Example n. 60
def get_nedd_author():
    models = DBConnector.query_all(DataModel.NEDDContent)
    for model in models:
        model: DataModel.NEDDContent
        author_models = []
        for author in model.author.split(' and'):
            author_model = DataModel.NEDDAuthorContent()
            try:
                if author[0] == ' ':
                    author = author[1:]
                if author[-1] == ' ':
                    author = author[:-1]
            except IndexError:
                continue
            author_model.author = author
            author_model.unique_id = model.unique_id
            author_model.nedd_sid = model.sid
            author_models.append(author_model)
        DBConnector.db_list_writer(author_models)
        print('Finish sid = {0}'.format(model.sid))