Example #1
File: corp.py Project: vanderheyde/ecm
def update():
    """
    Fetch a /corp/CorporationSheet.xml.aspx api response, parse it and store it to
    the database.
    """
    LOG.info("fetching /corp/CorporationSheet.xml.aspx...")
    # connect to eve API
    api_conn = api.connect()
    # retrieve /corp/CorporationSheet.xml.aspx
    corpApi = api_conn.corp.CorporationSheet(characterID=api.get_charID())
    api.check_version(corpApi._meta.version)

    currentTime = timezone.make_aware(corpApi._meta.currentTime, timezone.utc)

    LOG.debug("parsing api response...")
    corp = update_corp_info(corpApi, currentTime)

    LOG.debug("name: %s [%s]", corp.corporationName, corp.ticker)
    if corp.alliance: LOG.debug("alliance: %s <%s>", corp.alliance.name, corp.alliance.shortName)
    else: LOG.debug("alliance: None")
    LOG.debug("CEO: %s", corpApi.ceoName)
    LOG.debug("tax rate: %d%%", corp.taxRate)
    LOG.debug("member limit: %d", corp.memberLimit)

    update_hangar_divisions(corpApi, currentTime)
    update_wallet_divisions(corpApi, currentTime)
    
    LOG.info("corp info updated")
Example #2
def update():
    """
    Fetch a /corp/ContactList.xml.aspx api response, parse it and store it to
    the database.
    """
    LOG.info("fetching /corp/ContactList.xml.aspx...")
    api_conn = api.connect()
    corpApi = api_conn.corp.ContactList(characterID=api.get_charID())
    api.check_version(corpApi._meta.version)
    currentTime = timezone.make_aware(corpApi._meta.currentTime, timezone.utc)
    
    my_corp = Corporation.objects.mine()
    
    # clean existing standings first
    Standing.objects.filter(corp=my_corp).delete()
    
    for contact in corpApi.corporateContactList:
        Standing.objects.create(corp=my_corp,
                                contactID=contact.contactID,
                                is_corp_contact=True,
                                contactName=contact.contactName,
                                value=contact.standing,
                                )
    
    for contact in corpApi.allianceContactList: 
        Standing.objects.create(corp=my_corp,
                                contactID=contact.contactID,
                                is_corp_contact=False,
                                contactName=contact.contactName,
                                value=contact.standing,
                                )
        
    UpdateDate.mark_updated(model=Standing, date=currentTime)
    LOG.info("corp standings updated")
Example #3
File: assets.py Project: vanderheyde/ecm
def update_assets_names():
    LOG.debug('Updating player defined names...')

    assets_to_name = Asset.objects.filter(name=None,
                                          container1=None,
                                          container2=None,
                                          singleton=True,
                                          hasContents=True,
                                          quantity=1).values_list('itemID',
                                                                  flat=True)
    api_conn = api.connect()
    named_assets = []
    # Fetch all the x,y,z positions of the assets from the API
    for sub_list in tools.sublists(assets_to_name,
                                   sub_length=50):  # max 50 items per request
        LOG.debug('fetching /corp/Locations.xml.aspx...')
        ids = ','.join(map(str, sub_list))
        try:
            locations_api = api_conn.corp.Locations(
                characterID=api.get_charID(), ids=ids)
            for loc in locations_api.locations:
                named_assets.append((loc.itemID, loc.itemName))
        except api.Error, err:
            # error can happen if a ship/asset found in a SMA/CHA does not belong to the corp
            LOG.warning(
                '%s (code %s). Item IDs: %s (names will not be retrieved for these items).',
                err.code, str(err), ids)
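The asset examples split the item IDs into batches with tools.sublists because the Locations endpoint only accepts a limited number of IDs per request (50, per the comment). tools.sublists itself is not shown in this listing; a minimal chunking helper with the same calling convention might look like this (only the name and the sub_length keyword are taken from the calls above, the body is an assumption):

def sublists(items, sub_length):
    # Split an iterable into consecutive chunks of at most sub_length items.
    items = list(items)
    return [items[i:i + sub_length] for i in range(0, len(items), sub_length)]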
Example #4
File: standings.py Project: kriberg/ecm
def update():
    """
    Fetch a /corp/ContactList.xml.aspx api response, parse it and store it to
    the database.
    """
    LOG.info("fetching /corp/ContactList.xml.aspx...")
    api_conn = api.connect()
    corpApi = api_conn.corp.ContactList(characterID=api.get_charID())
    api.check_version(corpApi._meta.version)
    currentTime = timezone.make_aware(datetime.utcfromtimestamp( \
            corpApi._meta.currentTime), timezone.utc)
    
    my_corp = Corporation.objects.mine()
    
    # clean existing standings first
    Standing.objects.filter(corp=my_corp).delete()
    
    for contact in corpApi.corporateContactList:
        Standing.objects.create(corp=my_corp,
                                contactID=contact.contactID,
                                is_corp_contact=True,
                                contactName=contact.contactName,
                                value=contact.standing,
                                )
    
    for contact in corpApi.allianceContactList:
        Standing.objects.create(corp=my_corp,
                                contactID=contact.contactID,
                                is_corp_contact=False,
                                contactName=contact.contactName,
                                value=contact.standing,
                                )
        
    UpdateDate.mark_updated(model=Standing, date=currentTime)
    LOG.info("corp standings updated")
Example #5
def update():
    """
    Retrieve all corp members' titles and roles.
    We store all the changes in the database

    If there's an error, nothing is written in the database
    """
    LOG.info("fetching /corp/MemberSecurity.xml.aspx...")
    # connect to eve API
    api_conn = api.connect()
    # retrieve /corp/MemberSecurity.xml.aspx
    memberSecuApi = api_conn.corp.MemberSecurity(characterID=api.get_charID())
    api.check_version(memberSecuApi._meta.version)

    currentTime = timezone.make_aware(memberSecuApi._meta.currentTime, timezone.utc)
    cachedUntil = timezone.make_aware(memberSecuApi._meta.cachedUntil, timezone.utc)
    LOG.debug("current time : %s", str(currentTime))
    LOG.debug("cached util : %s", str(cachedUntil))

    LOG.debug("parsing api response...")
    oldRoles  = {}
    oldTitles = {}

    # we fetch the old data from the database
    for role in RoleMembership.objects.all():
        oldRoles[role] = role
    for title in TitleMembership.objects.all():
        oldTitles[title] = title

    newRoles  = {}
    newTitles = {}

    # for performance, we fetch all the Roles & RoleTypes here
    allRoleTypes = RoleType.objects.all()
    allRoles = {}
    for role in Role.objects.all():
        allRoles[(role.roleID, role.roleType_id)] = role
        
    my_corp = Corporation.objects.mine()
    
    all_members = Member.objects.all()
    
    for member in memberSecuApi.members:
        if all_members.filter(characterID=member.characterID):
            # only get roles/titles for existing members.
            newRoles.update(parseOneMemberRoles(member, allRoleTypes, allRoles))
            newTitles.update(parseOneMemberTitles(member, my_corp))

    # Store role changes
    roleDiffs = storeRoles(oldRoles, newRoles, currentTime)

    # Store title changes
    titleDiffs = storeTitles(oldTitles, newTitles, currentTime)
    LOG.info("%d role changes, %d title changes", roleDiffs, titleDiffs)

    # update members access levels
    for m in Member.objects.all():
        m.accessLvl = m.get_access_lvl()
        m.save()
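The update above never writes roles or titles directly: it loads the existing memberships into dicts, builds new dicts from the API response, and lets storeRoles()/storeTitles() persist only the differences and report how many entries changed. Those helpers are not shown in this listing; the core idea is a plain dict diff, sketched here (hypothetical code, not ECM's actual implementation):

def diff_memberships(old, new):
    # Entries present only in `new` were granted since the last scan,
    # entries present only in `old` were removed.
    added = [v for k, v in new.items() if k not in old]
    removed = [v for k, v in old.items() if k not in new]
    return added, removed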
Example #6
File: titles.py Project: vanderheyde/ecm
def update():
    """
    Retrieve all corp titles, their names and their role composition.
    If there are changes in the composition of the titles,
    the changes are also stored in the database.

    If there's an error, nothing is written in the database
    """
    logger.info("fetching /corp/Titles.xml.aspx...")
    # connect to eve API
    api_conn = api.connect()
    # retrieve /corp/Titles.xml.aspx
    titlesApi = api_conn.corp.Titles(characterID=api.get_charID())
    api.check_version(titlesApi._meta.version)

    currentTime = timezone.make_aware(titlesApi._meta.currentTime, timezone.utc)
    cachedUntil = timezone.make_aware(titlesApi._meta.cachedUntil, timezone.utc)
    logger.debug("current time : %s", str(currentTime))
    logger.debug("cached util : %s", str(cachedUntil))

    logger.debug("parsing api response...")
    
    my_corp = Corporation.objects.mine()
    
    newList = []
    # we get all the old TitleComposition from the database
    oldList = list(TitleComposition.objects.all())

    for title in titlesApi.titles:
        newList.extend(parse_one_title(title, my_corp))

    diffs = []
    if len(oldList) != 0 :
        diffs = getDiffs(newList, oldList, currentTime)
        if diffs :
            for d in diffs: d.save()
            # we store the update time of the table
            UpdateDate.mark_updated(model=TitleCompoDiff, date=currentTime)

            TitleComposition.objects.all().delete()
            for c in newList: c.save()
            # we store the update time of the table
            UpdateDate.mark_updated(model=TitleComposition, date=currentTime)
        # if no diff, we do nothing
    else:
        # 1st import
        for c in newList: c.save()
        # we store the update time of the table
        UpdateDate.mark_updated(model=TitleComposition, date=currentTime)

    # update titles access levels
    for t in Title.objects.all():
        t.accessLvl = t.get_access_lvl()
        t.save()

    logger.info("%d roles in titles parsed, %d changes since last scan", len(newList), len(diffs))
Example #7
File: titles.py Project: evecm/ecm
def update():
    """
    Retrieve all corp titles, their names and their role composition.
    If there are changes in the composition of the titles,
    the changes are also stored in the database.

    If there's an error, nothing is written in the database
    """
    logger.info("fetching /corp/Titles.xml.aspx...")
    # connect to eve API
    api_conn = api.connect()
    # retrieve /corp/Titles.xml.aspx
    titlesApi = api_conn.corp.Titles(characterID=api.get_charID())
    api.check_version(titlesApi._meta.version)

    currentTime = timezone.make_aware(titlesApi._meta.currentTime, timezone.utc)
    cachedUntil = timezone.make_aware(titlesApi._meta.cachedUntil, timezone.utc)
    logger.debug("current time : %s", str(currentTime))
    logger.debug("cached util : %s", str(cachedUntil))

    logger.debug("parsing api response...")
    
    my_corp = Corporation.objects.mine()
    
    newList = []
    # we get all the old TitleComposition from the database
    oldList = list(TitleComposition.objects.all())

    for title in titlesApi.titles:
        newList.extend(parse_one_title(title, my_corp))

    diffs = []
    if len(oldList) != 0 :
        diffs = getDiffs(newList, oldList, currentTime)
        if diffs :
            for d in diffs: d.save()
            # we store the update time of the table
            UpdateDate.mark_updated(model=TitleCompoDiff, date=currentTime)

            TitleComposition.objects.all().delete()
            for c in newList: c.save()
            # we store the update time of the table
            UpdateDate.mark_updated(model=TitleComposition, date=currentTime)
        # if no diff, we do nothing
    else:
        # 1st import
        for c in newList: c.save()
        # we store the update time of the table
        UpdateDate.mark_updated(model=TitleComposition, date=currentTime)

    # update titles access levels
    for t in Title.objects.all():
        t.accessLvl = t.get_access_lvl()
        t.save()

    logger.info("%d roles in titles parsed, %d changes since last scan", len(newList), len(diffs))
Example #8
def fetch_transaction_entries(wallet, lastKnownID):
    api_conn = api.connect()

    LOG.info("fetching /corp/WalletTransactions.xml.aspx "
             "(accountKey=%d)..." % wallet.walletID)
    charID = api.get_charID()

    # In Iceland an empty wallet causes errors....
    try:
        walletsApi = api_conn.corp.WalletTransactions(
            characterID=charID, accountKey=wallet.walletID, rowCount=256)
        api.check_version(walletsApi._meta.version)

        transactions = list(walletsApi.transactions)
        if len(transactions) > 0:
            minID = min([e.transactionID for e in walletsApi.transactions])
        else:
            minID = 0

        # after the first fetch, we perform "journal walking"
        # only if we got 256 transactions in the response (meaning more to come)
        # or if the lastKnownID is in the current 256 transactions
        # (transactions are supposed to be sorted by decreasing refIDs)
        while len(walletsApi.transactions) == 256 and minID > lastKnownID:
            LOG.info("fetching /corp/WalletTransactions.xml.aspx "
                     "(accountKey=%d, fromID=%d)..." %
                     (wallet.walletID, minID))
            walletsApi = api_conn.corp.WalletTransactions(
                characterID=charID,
                accountKey=wallet.walletID,
                fromID=minID,
                rowCount=256)
            api.check_version(walletsApi._meta.version)
            transactions.extend(list(walletsApi.transactions))
            if len(walletsApi.transactions) > 0:
                minID = min([e.transactionID for e in walletsApi.transactions])

        # we sort the transactions by increasing refIDs in order to remove
        # the ones we already have in the database
        transactions.sort(key=lambda e: e.transactionID)

        while len(transactions
                  ) != 0 and transactions[0].transactionID <= lastKnownID:
            # we already have this entry, no need to keep it
            del transactions[0]

        return transactions
    except api.Error, e:
        LOG.error(
            "API returned: %s. WalletTransactions for account key %s might be empty."
            % (str(e), wallet.walletID))
        return ''
Example #9
def fetch_transaction_entries(wallet, lastKnownID):
    api_conn = api.connect()

    LOG.info("fetching /corp/WalletTransactions.xml.aspx "
                "(accountKey=%d)..." % wallet.walletID)
    charID = api.get_charID()

    # In Iceland an empty wallet causes errors....
    try:
        walletsApi = api_conn.corp.WalletTransactions(characterID=charID,
                                                      accountKey=wallet.walletID,
                                                      rowCount=256)
        api.check_version(walletsApi._meta.version)

        transactions = list(walletsApi.transactions)
        if len(transactions) > 0:
            minID = min([e.transactionID for e in walletsApi.transactions])
        else:
            minID = 0

        # after the first fetch, we perform "journal walking"
        # only if we got 256 transactions in the response (meaning more to come)
        # or if the lastKnownID is in the current 256 transactions
        # (transactions are supposed to be sorted by decreasing refIDs)
        while len(walletsApi.transactions) == 256 and minID > lastKnownID:
            LOG.info("fetching /corp/WalletTransactions.xml.aspx "
                        "(accountKey=%d, fromID=%d)..." % (wallet.walletID, minID))
            walletsApi = api_conn.corp.WalletTransactions(characterID=charID,
                                                          accountKey=wallet.walletID,
                                                          fromID=minID,
                                                          rowCount=256)
            api.check_version(walletsApi._meta.version)
            transactions.extend(list(walletsApi.transactions))
            if len(walletsApi.transactions) > 0:
                minID = min([e.transactionID for e in walletsApi.transactions])

        # we sort the transactions by increasing refIDs in order to remove
        # the ones we already have in the database
        transactions.sort(key=lambda e: e.transactionID)

        while len(transactions) != 0 and transactions[0].transactionID <= lastKnownID:
            # we already have this entry, no need to keep it
            del transactions[0]

        return transactions
    except api.Error, e:
        LOG.error("API returned: %s. WalletTransactions for account key %s might be empty."
                  % (str(e), wallet.walletID))
        return ''
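Both wallet examples page backwards through the transaction list ("journal walking"): as long as a full page of 256 rows comes back and the smallest transactionID seen is still newer than lastKnownID, they re-query with fromID set to that smallest ID. The same loop, stripped of the ECM specifics (fetch_page is a hypothetical callable returning row objects with a transactionID attribute):

def walk_back(fetch_page, last_known_id, page_size=256):
    # Fetch the newest page, then keep walking back while full pages arrive
    # and we have not yet reached the newest row already stored.
    rows = list(fetch_page())
    all_rows = list(rows)
    while len(rows) == page_size:
        min_id = min(r.transactionID for r in rows)
        if min_id <= last_known_id:
            break
        rows = list(fetch_page(from_id=min_id))
        all_rows.extend(rows)
    # oldest first, and drop the rows we already have in the database
    all_rows.sort(key=lambda r: r.transactionID)
    return [r for r in all_rows if r.transactionID > last_known_id]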
Example #10
File: pos.py Project: vanderheyde/ecm
def update():
    """
    Retrieve all POS information.
    First: get the POS list using StarbaseList.
    Then: retrieve the details of each POS using StarbaseDetail
    and update the database.

    If there's an error, nothing is written in the database.
    If a POS's cache date did not change, its details are not re-parsed.
    """
    conn = api.connect()
    charID = api.get_charID()

    logger.info("fetching /corp/StarbaseList.xml.aspx...")
    apiPOSList = conn.corp.StarbaseList(characterID=charID)
    api.check_version(apiPOSList._meta.version)

    newPOSes = 0
    updatedPOSes = 0
    sov = get_sov_info(conn.map.Sovereignty().solarSystems)
    oldPOSesIDs = list(POS.objects.all().values_list('item_id', flat=True))
    for row in apiPOSList.starbases:
        pos, created = POS.objects.get_or_create(item_id=row.itemID)
        if created:
            newPOSes += 1
        else:
            oldPOSesIDs.remove(row.itemID)
            updatedPOSes += 1
        get_basic_info(pos, row)

        logger.info("fetching /corp/StarbaseDetail.xml.aspx?itemID=%d..." % row.itemID)
        apiCurPOS = conn.corp.StarbaseDetail(characterID=charID,
                                             itemID=row.itemID)
        
        
        cached_until = timezone.make_aware(apiCurPOS._meta.cachedUntil, timezone.utc)
        if cached_until != pos.cached_until:
            pos.cached_until = cached_until
            get_details(pos, apiCurPOS, sov)
        else:
            logger.info("POS %s is cached until %s: no update required",
                        row.itemID, pos.cached_until)
        pos.save()

    # if this list is not empty, it means that some POSes have disappeared since last scan.
    if len(oldPOSesIDs) > 0:
        POS.objects.filter(item_id__in=oldPOSesIDs).delete()

    logger.info("%d POS updated, %d new, %d removed", updatedPOSes, newPOSes, len(oldPOSesIDs))
Example #11
File: assets.py Project: Betriebsrat/ecm
def update_assets_locations(assets_to_locate):
    LOG.debug('Locating assets to their closest celestial object...')

    api_conn = api.connect()
    located_assets = []
    # Fetch all the x,y,z positions of the assets from the API
    for sub_list in tools.sublists(assets_to_locate, sub_length=50): # max 50 items per request
        LOG.debug('fetching /corp/Locations.xml.aspx...')
        ids = ','.join(map(str, sub_list))
        try:
            locations_api = api_conn.corp.Locations(characterID=api.get_charID(), ids=ids)
            for loc in locations_api.locations:
                located_assets.append((loc.itemID, loc.itemName, loc.x, loc.y, loc.z))
        except api.Error, err:
            # error can happen if a ship/asset found in a SMA/CHA does not belong to the corp
            LOG.warning('%s (code %s). Item IDs: %s (names will not be retrieved for these items).',
                        err.code, str(err), ids)
Example #12
File: assets.py Project: vanderheyde/ecm
def update_assets_locations(assets_to_locate):
    LOG.debug('Locating assets to their closest celestial object...')

    api_conn = api.connect()
    located_assets = []
    # Fetch all the x,y,z positions of the assets from the API
    for sub_list in tools.sublists(assets_to_locate,
                                   sub_length=50):  # max 50 items per request
        LOG.debug('fetching /corp/Locations.xml.aspx...')
        ids = ','.join(map(str, sub_list))
        try:
            locations_api = api_conn.corp.Locations(
                characterID=api.get_charID(), ids=ids)
            for loc in locations_api.locations:
                located_assets.append(
                    (loc.itemID, loc.itemName, loc.x, loc.y, loc.z))
        except api.Error, err:
            # error can happen if a ship/asset found in a SMA/CHA does not belong to the corp
            LOG.warning(
                '%s (code %s). Item IDs: %s (names will not be retrieved for these items).',
                err.code, str(err), ids)
Example #13
File: assets.py Project: Betriebsrat/ecm
def update_assets_names():
    LOG.debug('Updating player defined names...')

    assets_to_name = Asset.objects.filter(name=None,
                                          container1=None,
                                          container2=None,
                                          singleton=True,
                                          hasContents=True,
                                          quantity=1).values_list('itemID', flat=True)
    api_conn = api.connect()
    named_assets = []
    # Fetch all the x,y,z positions of the assets from the API
    for sub_list in tools.sublists(assets_to_name, sub_length=50): # max 50 items per request
        LOG.debug('fetching /corp/Locations.xml.aspx...')
        ids = ','.join(map(str, sub_list))
        try:
            locations_api = api_conn.corp.Locations(characterID=api.get_charID(), ids=ids)
            for loc in locations_api.locations:
                named_assets.append((loc.itemID, loc.itemName))
        except api.Error, err:
            # error can happen if a ship/asset found in a SMA/CHA does not belong to the corp
            LOG.warning('%s (code %s). Item IDs: %s (names will not be retrieved for these items).',
                        err.code, str(err), ids)
Example #14
File: assets.py Project: vanderheyde/ecm
def update():
    """
    Retrieve all corp assets and calculate the changes.

    If there's an error, nothing is written in the database
    """
    global IGNORE_CONTAINERS_VOLUMES

    LOG.info("fetching /corp/AssetList.xml.aspx...")
    api_conn = api.connect()
    apiAssets = api_conn.corp.AssetList(characterID=api.get_charID())
    api.check_version(apiAssets._meta.version)

    currentTime = timezone.make_aware(apiAssets._meta.currentTime,
                                      timezone.utc)
    cachedUntil = timezone.make_aware(apiAssets._meta.cachedUntil,
                                      timezone.utc)
    LOG.debug("current time : %s", str(currentTime))
    LOG.debug("cached util : %s", str(cachedUntil))

    LOG.debug("fetching old assets from the database...")
    old_items = {}
    for asset in Asset.objects.all():
        old_items[asset] = asset

    new_items = {}
    LOG.debug("%d assets fetched", len(old_items.keys()))

    IGNORE_CONTAINERS_VOLUMES = Setting.get('assets_ignore_containers_volumes')

    # we store the itemIDs of all the assets we want to locate
    # then query /corp/Locations.xml with the list
    assets_to_locate = []

    fill_cache()

    LOG.debug("parsing api response...")
    for row in apiAssets.assets:
        if row.typeID == cst.BOOKMARK_TYPEID:
            continue  # we don't give a flying @#!$ about the bookmarks...

        if row.locationID >= cst.STATIONS_IDS:
            # this row contains assets in a station
            if row.typeID == cst.OFFICE_TYPEID:
                row_is_office(office=row, items_dic=new_items)
            else:
                row_is_in_hangar(item=row, items_dic=new_items)
        else:
            # this row contains assets in space
            try:
                if cst.HAS_HANGAR_DIVISIONS[row.typeID]:
                    row_is_pos_corp_hangar(corpArray=row, items_dic=new_items)
                else:
                    row_is_pos_array(array=row, items_dic=new_items)
                assets_to_locate.append(row.itemID)
            except KeyError:
                # unhandled typeID, this may be a reactor array or some other crap
                pass

    LOG.info("%d assets parsed", len(new_items))

    clear_cache()

    # I grouped all the DB writes here so that it doesn't make a too long DB transaction.
    # The assets parsing can last more than 10 minutes on slow servers.
    write_results(new_items, old_items, assets_to_locate, currentTime)

    diffs = []
    if old_items:
        LOG.debug("computing diffs since last asset scan...")
        diffs = calc_assets_diff(old_items=old_items,
                                 new_items=new_items,
                                 date=currentTime)

    if diffs:
        write_diff_results(diffs, currentTime)
Example #15
def update():
    """
    Retrieve all corp members, with all basic information about them.
    If some members have left or have arrived we also store the diff in the database.

    If there's an error, nothing is written in the database
    """
    LOG.info("fetching /corp/MemberTracking.xml.aspx...")
    # connect to eve API
    api_conn = api.connect()
    # retrieve /corp/MemberTracking.xml.aspx
    membersApi = api_conn.corp.MemberTracking(characterID=api.get_charID(), extended=1)
    api.check_version(membersApi._meta.version)

    currentTime = timezone.make_aware(membersApi._meta.currentTime, timezone.utc)
    cachedUntil = timezone.make_aware(membersApi._meta.cachedUntil, timezone.utc)
    LOG.debug("current time : %s", str(currentTime))
    LOG.debug("cached util : %s", str(cachedUntil))

    LOG.debug("parsing api response...")
    newMembers = {}
    oldMembers = {}
    notCorped  = {}
    oldAccessLvls = {}
    oldOwners = {}
    my_corp = Corporation.objects.mine()

    # we get the old member list from the database
    for m in Member.objects.all():
        if m.corp == my_corp:
            oldMembers[m] = m
        else:
            notCorped[m] = m
        oldAccessLvls[m.characterID] = m.accessLvl
        oldOwners[m.characterID] = m.owner

    for member in membersApi.members:
        m = parseOneMember(member, my_corp)
        session = MemberSession(character_id = m.characterID,
                                session_begin = m.lastLogin,
                                session_end = m.lastLogoff,
                                session_seconds = (member.logoffDateTime-member.logonDateTime).seconds)
        
        dbsession = MemberSession.objects.filter(character_id = m.characterID,
                                                 session_begin = m.lastLogin)
        if len(dbsession) == 0:
            session.save()
        newMembers[m] = m

    diffs, leaved = getDiffs(oldMembers, newMembers, currentTime)
    # "leaved" is the list of members that leaved (not a list of MemberDiff but real Character objects)
    # If we delete the old members each time, then all the diffs in roles/titles will not match
    # as the foreign keys will be gone from the members table...
    for L in leaved:
        L.corp = None
        newMembers[L] = L

    LOG.info("%d members parsed, %d changes since last scan", len(newMembers), len(diffs))

    for m in notCorped.values():
        try:
            # if the previously "not corped" members can now be found in the "new members"
            # we do nothing
            newMembers[m]
        except KeyError:
            # if the previously "not corped" members still cannot be found in the "new members"
            # we add them again to the members list
            newMembers[m] = m

    for m in newMembers.values():
        try:
            # we restore the old access levels from the database
            m.accessLvl = oldAccessLvls[m.characterID]
            m.owner = oldOwners[m.characterID]
        except KeyError:
            # 'm' is a brand new member, his/her access level didn't exist before
            # we leave it to the default value '0'
            continue
    
    for m in newMembers.values():
        # to be sure to store the nicknames change, etc.
        # even if there are no diff, we always overwrite the members
        m.save()

    if len(oldMembers) > 0 and len(diffs) > 0 :
        for d in diffs:
            d.save()
        # we store the update time of the table
        UpdateDate.mark_updated(model=MemberDiff, date=currentTime)

    # we store the update time of the table
    UpdateDate.mark_updated(model=Member, date=currentTime)
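One detail worth flagging in the session bookkeeping above: timedelta.seconds only carries the seconds-within-a-day component (0 to 86399) and silently drops whole days, so a session spanning more than 24 hours would be recorded short. If the full duration is wanted, total_seconds() is the safer call (a sketch using the same member row fields as above):

duration = member.logoffDateTime - member.logonDateTime
session_seconds = int(duration.total_seconds())  # keeps the days, unlike .seconds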
Example #16
File: assets.py Project: Betriebsrat/ecm
def update():
    """
    Retrieve all corp assets and calculate the changes.

    If there's an error, nothing is written in the database
    """
    global IGNORE_CONTAINERS_VOLUMES

    LOG.info("fetching /corp/AssetList.xml.aspx...")
    api_conn = api.connect()
    apiAssets = api_conn.corp.AssetList(characterID=api.get_charID())
    api.check_version(apiAssets._meta.version)

    currentTime = timezone.make_aware(apiAssets._meta.currentTime, timezone.utc)
    cachedUntil = timezone.make_aware(apiAssets._meta.cachedUntil, timezone.utc)
    LOG.debug("current time : %s", str(currentTime))
    LOG.debug("cached util : %s", str(cachedUntil))

    LOG.debug("fetching old assets from the database...")
    old_items = {}
    for asset in Asset.objects.all():
        old_items[asset] = asset

    new_items = {}
    LOG.debug("%d assets fetched", len(old_items.keys()))

    IGNORE_CONTAINERS_VOLUMES = Setting.get('assets_ignore_containers_volumes')

    # we store the itemIDs of all the assets we want to locate
    # then query /corp/Locations.xml with the list
    assets_to_locate = []

    fill_cache()

    LOG.debug("parsing api response...")
    for row in apiAssets.assets:
        if row.typeID == cst.BOOKMARK_TYPEID:
            continue # we don't give a flying @#!$ about the bookmarks...

        if row.locationID >= cst.STATIONS_IDS:
            # this row contains assets in a station
            if row.typeID == cst.OFFICE_TYPEID:
                row_is_office(office=row, items_dic=new_items)
            else:
                row_is_in_hangar(item=row, items_dic=new_items)
        else:
            # this row contains assets in space
            try:
                if cst.HAS_HANGAR_DIVISIONS[row.typeID]:
                    row_is_pos_corp_hangar(corpArray=row, items_dic=new_items)
                else:
                    row_is_pos_array(array=row, items_dic=new_items)
                assets_to_locate.append(row.itemID)
            except KeyError:
                # unhandled typeID, this may be a reactor array or some other crap
                pass

    LOG.info("%d assets parsed", len(new_items))

    clear_cache()

    # I grouped all the DB writes here so that it doesn't make a too long DB transaction.
    # The assets parsing can last more than 10 minutes on slow servers.
    write_results(new_items, old_items, assets_to_locate, currentTime)

    diffs = []
    if old_items:
        LOG.debug("computing diffs since last asset scan...")
        diffs = calc_assets_diff(old_items=old_items, new_items=new_items, date=currentTime)

    if diffs:
        write_diff_results(diffs, currentTime)
Example #17
def update():
    """
    Retrieve all corp members, with all basic information about them.
    If some members have left or have arrived we also store the diff in the database.

    If there's an error, nothing is written in the database
    """
    LOG.info("fetching /corp/MemberTracking.xml.aspx...")
    # connect to eve API
    api_conn = api.connect()
    # retrieve /corp/MemberTracking.xml.aspx
    membersApi = api_conn.corp.MemberTracking(characterID=api.get_charID(),
                                              extended=1)
    api.check_version(membersApi._meta.version)

    currentTime = timezone.make_aware(membersApi._meta.currentTime,
                                      timezone.utc)
    cachedUntil = timezone.make_aware(membersApi._meta.cachedUntil,
                                      timezone.utc)
    LOG.debug("current time : %s", str(currentTime))
    LOG.debug("cached util : %s", str(cachedUntil))

    LOG.debug("parsing api response...")
    newMembers = {}
    oldMembers = {}
    notCorped = {}
    oldAccessLvls = {}
    oldOwners = {}
    my_corp = Corporation.objects.mine()

    # we get the old member list from the database
    for m in Member.objects.all():
        if m.corp == my_corp:
            oldMembers[m] = m
        else:
            notCorped[m] = m
        oldAccessLvls[m.characterID] = m.accessLvl
        oldOwners[m.characterID] = m.owner

    for member in membersApi.members:
        m = parseOneMember(member, my_corp)
        session = MemberSession(character_id=m.characterID,
                                session_begin=m.lastLogin,
                                session_end=m.lastLogoff,
                                session_seconds=(member.logoffDateTime -
                                                 member.logonDateTime).seconds)

        dbsession = MemberSession.objects.filter(character_id=m.characterID,
                                                 session_begin=m.lastLogin)
        if len(dbsession) == 0:
            session.save()
        newMembers[m] = m

    diffs, leaved = getDiffs(oldMembers, newMembers, currentTime)
    # "leaved" is the list of members that leaved (not a list of MemberDiff but real Character objects)
    # If we delete the old members each time, then all the diffs in roles/titles will not match
    # as the foreign keys will be gone from the members table...
    for L in leaved:
        L.corp = None
        newMembers[L] = L

    LOG.info("%d members parsed, %d changes since last scan", len(newMembers),
             len(diffs))

    for m in notCorped.values():
        try:
            # if the previously "not corped" members can now be found in the "new members"
            # we do nothing
            newMembers[m]
        except KeyError:
            # if the previously "not corped" members still cannot be found in the "new members"
            # we add them again to the members list
            newMembers[m] = m

    for m in newMembers.values():
        try:
            # we restore the old access levels from the database
            m.accessLvl = oldAccessLvls[m.characterID]
            m.owner = oldOwners[m.characterID]
        except KeyError:
            # 'm' is a brand new member, his/her access level didn't exist before
            # we leave it to the default value '0'
            continue

    for m in newMembers.values():
        # to be sure to store the nicknames change, etc.
        # even if there are no diff, we always overwrite the members
        m.save()

    if len(oldMembers) > 0 and len(diffs) > 0:
        for d in diffs:
            d.save()
        # we store the update time of the table
        UpdateDate.mark_updated(model=MemberDiff, date=currentTime)

    # we store the update time of the table
    UpdateDate.mark_updated(model=Member, date=currentTime)