Example #1
# Imports required to run this snippet; common is the project's own helper module.
import time
from datetime import datetime

import requests
from bs4 import BeautifulSoup

import common


def crawl():
    try:
        customer_list = common.get_customer_list()
        for customer in customer_list:
            today = datetime.today().date()
            today = str(today.year) + '-' + str(today.month) + '-' + str(
                today.day)
            proxies = {'http': 'http://172.30.4.18:8080'}
            req = requests.get("http://steel.today.kr/ct_list.php?stxt=" +
                               customer[0],
                               proxies=proxies)
            html = req.text
            soup = BeautifulSoup(html, 'html.parser')

            for link in soup.select('span > h3 > a'):
                session = requests.Session()
                session.headers[
                    'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
                req = session.get("http://steel.today.kr/" + link.get('href'),
                                  proxies=proxies)
                html = req.content
                soup = BeautifulSoup(html, 'html.parser')
                body = soup.select('span.Reporter_time > p')
                if body[2].get_text().find(today) != -1:
                    body = soup.select('span.news_view_text')
                    common.count(body, customer, '철강신문',
                                 "http://steel.today.kr/" + link.get('href'))
            time.sleep(0.5)
    except Exception as e:
        print(e)
Example #2
 def loadNetwork(self, network, cost=None, cutoff=None, numToFind=None, searchDist=None, cutoffFld=None, numToFindFld=None, mappings=[]):
   common.progress('creating routing layer')
   if not numToFind:
     numToFind = common.count(self.places)
   self.naLayer = self.makeNALayer(common.checkFile(network), self.NA_LAY, cost, cutoff, numToFind)
   self.cost = cost
   self.placeMapper.addSilentField(cost, float)
   common.progress('calculating places\' network locations')
   self.calculateLocations(network, self.places, searchDist)
   common.progress('loading places to network')
   # create mappings
   toMappingList = common.NET_FIELDS + [(self.NAME_MAP, self.placesIDField, None)]
   for item in mappings:
     toMappingList.append(item + [None])
   fromMappingList = toMappingList[:]
   if cutoffFld:
     fromMappingList.append((self.CUTOFF_PREFIX + cost, cutoffFld, None))
   if numToFindFld:
     fromMappingList.append((self.NUM_TO_FIND_HEADER, numToFindFld, None))
   # load locations
   arcpy.AddLocations_na(self.NA_LAY, self.OD_SUBLAYERS[0], self.places, self.networkMappings(fromMappingList), '', append='clear')
   arcpy.AddLocations_na(self.NA_LAY, self.OD_SUBLAYERS[1], self.places, self.networkMappings(toMappingList), '', append='clear')
   self.routeSublayer = common.sublayer(self.naLayer, self.OUTPUT_SUBLAYER)
   self.linkMapper = conversion.LinkFieldMapper(self.routeSublayer)
   try:
     self.linkMapper.addMapField(self.OUT_COST_PREFIX + cost, cost)
   except conversion.FieldError:
     raise conversion.FieldError('cost attribute %s not found in network dataset' % cost)
Example #3
def bools(cursor, request, response):
  params = request.params
  keyword = params.get("query","")
  limit = int(params.get("limit","25"))
  offset = int(params.get("offset","0"))

  query = """\
    SELECT id, json FROM bools
  """

  args = []

  keyword = keyword.strip()
  if keyword != "":
    keywords, condition = like_condition(keyword, 'json::TEXT')
    args.extend(keywords)
    query += " WHERE {0}".format(condition)
  query += " ORDER BY id"

  all_count = count(cursor, query, args)

  query += " LIMIT %s OFFSET %s"
  args.extend([limit, offset])
  cursor.execute(query, args)
  items = []
  for row in cursor:
    id, json = row
    json["id"] = id
    items.append(json)

  data = {
    "all": all_count,
    "items": items,
  }
  return data
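Example #3 above and Examples #4, #9, #10, #12 and #13 below all call the same count(cursor, query, args) helper to get the total number of matching rows before the LIMIT/OFFSET clause is appended for pagination. The helper itself is not part of this listing; a minimal sketch of what it could look like, assuming a psycopg2-style cursor (an illustration, not the project's actual code):

def count(cursor, query, args):
    # Wrap the unpaginated query in a subquery and count its rows.
    cursor.execute("SELECT COUNT(*) FROM ({0}) AS q".format(query), args)
    return cursor.fetchone()[0]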
Example #4
def rules(cursor, request, response):
    params = request.params
    limit = int(params.get("limit", "25"))
    offset = int(params.get("offset", "0"))

    query = """\
    SELECT id, json FROM rules
  """

    args, condition = rule_condition(params)
    if len(condition) > 0:
        query += " WHERE " + condition

    query += " ORDER BY id"
    all_count = count(cursor, query, args)

    query += " LIMIT %s OFFSET %s"
    args.extend([limit, offset])
    cursor.execute(query, args)
    items = []
    for row in cursor:
        id, json = row
        json["id"] = id
        items.append(json)

    data = {
        "all": all_count,
        "items": items,
    }
    return data
Example #5
 def mapData(self, table, mappers=[]):
   prog = common.progressor('mapping attributes', common.count(table))
   cur = arcpy.UpdateCursor(table)
   for row in cur:
     for mapper in mappers:
       row = mapper.remap(row, row)
     cur.updateRow(row)
     prog.move()
   del cur, row
   prog.end()
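The arcpy examples pair common.count with common.progress and common.progressor for status reporting. Their implementations are not shown in this listing; a minimal sketch of the interface they appear to expose, assuming arcpy's progressor API (an illustration only, not the project's actual code):

import arcpy

def progress(message):
    # One-shot status label.
    arcpy.SetProgressorLabel(message)

class progressor:
    # Step progressor with the move()/end() interface used in the examples.
    def __init__(self, label, total):
        arcpy.SetProgressor('step', label, 0, total, 1)
        self.position = 0
    def move(self):
        self.position += 1
        arcpy.SetProgressorPosition(self.position)
    def end(self):
        arcpy.ResetProgressor()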
Example #6
 def process(self):
   # check if ID is integer - if not, create an integer field
   count = common.count(self.zones)
   idFld = self.zoneMapper.getIntIDField(setID=True, unmapPrevious=False, progressor=common.progressor('creating temporary IDs', count))
   self.zoneMapper.loadData(common.progressor('loading zone data', count))
   # generate SWM file
   common.progress('generating spatial matrix')    
   arcpy.GenerateSpatialWeightsMatrix_stats(self.zones, idFld, self.swmFile, self.method)
   common.progress('converting matrix to table')
   arcpy.ConvertSpatialWeightsMatrixtoTable_stats(self.swmFile, self.tmpTable)
   self.zoneMapper.setJoinIDFields([idFld, self.NEIGH_ID_FLD])
Example #7
def miss_dict_count_before(miss_dict, score_key, predicate, last_set_only):
    cnt = 0
    for (key, mpoints) in miss_dict.items():
        if key == score_key or len(key) > len(score_key):
            break
        if last_set_only and len(score_key) != len(key):
            continue
        if len(key) < len(score_key) or (len(key) == len(score_key)
                                         and key < score_key):
            cnt += co.count(mpoints, predicate)
    return cnt
Example #8
 def getGeometryDict(self, attributes={}):
   prog = common.progressor('caching geometries', common.count(self.routeSublayer))
   geomDict = {}
   inCur = arcpy.SearchCursor(self.routeSublayer)
   for inRow in inCur:
     ids = self.placeMapper.getIDs(inRow)
     geomDict[ids] = {'shape' : inRow.shape}
     for attrTo in attributes:
       geomDict[ids][attrTo] = inRow.getValue(attributes[attrTo])
     prog.move()
   prog.end()
   del inCur, inRow
   return geomDict
Example #9
def processes(cursor, request, response):
    params = request.params
    limit = int(params.get("limit", "25"))
    offset = int(params.get("offset", "0"))

    query = """\
    SELECT id, json FROM processes
  """

    name = params.get("name", "")
    domain = params.get("domain", "")

    args = []
    conditions = []
    if name != "":
        conditions.append("""
      json->>'name' = %s
    """)
        args.append(name)

    if domain != "":
        conditions.append("""
      EXISTS (
        SELECT * FROM domain_crews 
        WHERE domain = %s
        AND json->'label'->>'domain' = crew
      )
    """)
        args.append(domain)

    condition = " AND ".join(conditions)
    if len(condition) > 0:
        query += " WHERE " + condition

    query += " ORDER BY id"
    all_count = count(cursor, query, args)

    query += " LIMIT %s OFFSET %s"
    args.extend([limit, offset])
    cursor.execute(query, args)
    items = []
    for row in cursor:
        id, json = row
        json["id"] = id
        items.append(json)

    data = {
        "all": all_count,
        "items": items,
    }
    return data
Example #10
def filecontexts_accesable(cursor, request, response):
  params = request.params
  source = params.get("source", "")
  
  keyword = params.get("filter", "")

  offset = int(params.get("offset", "0"))
  limit = int(params.get("limit", "25"))
  
  args = [source]

  keywords, keyword_condition = like_condition(keyword, 'contexts.json::TEXT')
  args.extend(keywords)

  query = """\
    SELECT 
      DISTINCT(contexts.id), 
      contexts.json,
      (EXISTS (SELECT * FROM context_file_refs AS refs WHERE refs.context_id = contexts.id)) AS has_files
    FROM contexts
    JOIN rule_context_refs AS refs ON refs.context_id = contexts.id
    JOIN rules ON rules.id = refs.rule_id AND rules.json->>'type'='allow'
    JOIN domain_crews AS sources ON sources.crew = rules.json->>'source' AND sources.domain = %s
    WHERE {0}
    ORDER BY contexts.id
  """.format(keyword_condition)
 
  all_count = count(cursor, query, args)
  query = query + " LIMIT %s OFFSET %s "
  args.extend([limit, offset])
  
  items = []
  cursor.execute(query, args)
  for row in cursor:
    id, json, has_files = row
    json["id"] = id
    json["has_files"] = has_files
    items.append(json)

  data = {
    "all": all_count,
    "items": items,
  }
  return data
Example #11
 def remapData(self, source, output, mappers=[], processor=None):
   count = common.count(source)
   if count == 0:
     common.warning('remapping source is empty, empty output generated')
     return
   prog = common.progressor('remapping records', count)
   inCur = arcpy.SearchCursor(source)
   outCur = arcpy.InsertCursor(output)
   # common.debug('remapping')
   for inRow in inCur:
     outRow = outCur.newRow()
     for mapper in mappers:
       outRow = mapper.remap(inRow, outRow, processor)
       # common.debug(mapper, outRow)
       if outRow is None:
         break
     if outRow is not None:
       outCur.insertRow(outRow)
     prog.move()
   prog.end()
   del inCur, inRow, outCur, outRow
Example #12
def files(cursor, request, response):
    params = request.params
    path = params.get("path", "")
    limit = int(params.get("limit", "25"))
    offset = int(params.get("offset", "0"))
    query = """\
    WITH refs AS (
      SELECT file_id, max(context_id) AS context_id 
      FROM context_file_refs
      GROUP BY file_id
    )
    SELECT files.*, contexts.id AS context_id, contexts.json AS context 
    FROM files 
    LEFT OUTER JOIN refs ON refs.file_id = files.id
    LEFT OUTER JOIN contexts ON refs.context_id = contexts.id
  """
    args = []

    paths, condition = path_condition(path)
    args.extend(paths)
    query += " WHERE {0}".format(condition)
    query += " ORDER BY files.id"

    all_count = count(cursor, query, args)

    query += " LIMIT %s OFFSET %s"
    args.extend([limit, offset])
    cursor.execute(query, args)
    files = []
    for row in cursor:
        id, json, context_id, context = row
        json["id"] = id
        if context_id is not None:
            context["id"] = context_id

        json["context"] = context
        files.append(json)

    data = {"all": all_count, "files": files}
    return data
Example #13
def files(cursor, request, response):
  params = request.params
  fcontext_id = int(params.get("fcontext", "-1"))
  path = params.get("path","")
  keyword = params.get("keyword", "")
  limit = int(params.get("limit","25"))
  offset = int(params.get("offset","0"))

  path = "{0}%".format(like_escape(path))

  query = """\
    SELECT files.* FROM files 
    JOIN context_file_refs AS refs ON refs.file_id = files.id AND refs.context_id = %s
    WHERE files.json->>'path' LIKE %s
  """
  args = [fcontext_id, path]

  keywords, condition = like_condition(keyword, 'json::TEXT')
  args.extend(keywords)
  query += " AND {0}".format(condition)
  query += " ORDER BY files.id"
  all_count = count(cursor, query, args)

  query += " LIMIT %s OFFSET %s"
  args.extend([limit, offset])
  cursor.execute(query, args)

  items = []
  for row in cursor:
    id, json = row
    json["id"] = id
    items.append(json)

  data = {
    "all": all_count,
    "items": items
  }
  return data
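Examples #3, #10 and #13 build their WHERE clauses with like_condition (and like_escape). These helpers are not included in the listing; a sketch of behavior consistent with the calls above, where an empty keyword must yield a condition that matches everything (an assumption, not the actual implementation):

def like_escape(s):
    # Escape LIKE/ILIKE wildcards in user input.
    return s.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")

def like_condition(keyword, column):
    # One ILIKE test per whitespace-separated word, ANDed together;
    # returns the parameter list and the SQL condition.
    words = ["%{0}%".format(like_escape(w)) for w in keyword.split()]
    if not words:
        return [], "TRUE"
    return words, " AND ".join(["{0} ILIKE %s".format(column)] * len(words))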
Example #14
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import common

try:
    today = datetime.today().date()
    today = str(today.year)+'-'+str(today.month)+'-'+str(today.day)
    today = '2017-11-28'  # hardcoded test date; overrides the value computed above
    req = requests.get("http://www.ihalla.com/search_old.php?mode=Y&searchword=%EB%8F%99%EB%B6%80&search_type=0&s_category=T§ion=&s_day="+today+"&e_day="+today+"&x=139&y=12")
    html = req.text
    soup = BeautifulSoup(html,'html.parser')

    for link in soup.select('td.title > a'):
        session = requests.Session()
        session.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'
        req = session.get("http://www.ihalla.com"+link.get('href'))
        html = req.content
        soup = BeautifulSoup(html, 'html.parser')
        body = soup.select('div.cont_gisa')
        common.count(body)
except Exception as e:
    print(e)
Example #15
def miss_dict_count_at(miss_dict, score_key, predicate):
    if score_key in miss_dict:
        return co.count(miss_dict[score_key], predicate)
    return 0
Example #16
def miss_dict_count(miss_dict, key_predicate, point_predicate):
    cnt = 0
    for (key, mpoints) in miss_dict.items():
        if key_predicate(key):
            cnt += co.count(mpoints, point_predicate)
    return cnt
Example #17
Calculated parameters calibrated on real interactions
B parameter value: %g
G parameter value: %g

STATISTICAL ANALYSIS

"""

with common.runtool(9) as parameters:
    interactions, selQuery, massFromFld, massToFld, interactFld, lengthFld, optimizationMethod, outputFld, reportFileName = (
        parameters
    )

    ## ASSEMBLE INPUT
    common.progress("counting interactions")
    count = common.count(interactions)
    if count == 0:
        raise ValueError("no interactions found")
    common.message("Found " + str(count) + " interactions.")

    common.progress("loading interactions")
    modelInters = loaders.BasicReader(
        interactions,
        {"strength": interactFld, "distance": lengthFld, "massFrom": massFromFld, "massTo": massToFld},
        targetClass=modeling.GravityInteraction,
        where=selQuery,
    ).read()
    # rows = arcpy.SearchCursor(interactions, selQuery)
    # modelInters = []
    # for row in rows:
    # try:
Example #18
def max_overlapping_lines(lines: list[Line]):
    return count(
        Counter(itertools.chain(*map(Line.points_between, lines))).items(),
        lambda each: each[1] > 1)
Example #19
def find_increasing_triples(data: Dataset) -> Solution:
    # Sums of consecutive triples; comparing sums is equivalent to comparing averages.
    window_sums = [first + second + third for first, second, third in triples(data)]
    return count(is_increasing, pairwise(window_sums))
Example #20
def find_increasing(data: Dataset) -> Solution:
    return count(is_increasing, pairwise(data))
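Examples #7, #15, #16 and #18 call a predicate counter with the iterable first (e.g. co.count(mpoints, predicate)), while Examples #19 and #20 pass the predicate first. A minimal sketch of the predicate-first form used directly above (the iterable-first variant simply swaps the arguments); this is an assumption, not the projects' actual code:

def count(predicate, iterable):
    # Count the items for which the predicate holds.
    return sum(1 for item in iterable if predicate(item))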
Example #21
strLayer = 'tmp_i095'
relLayer = 'tmp_i043'

with common.runtool(7) as parameters:
  interLayer, strengthFld, lengthFld, minStrengthStr, minRelStrengthStr, maxLengthStr, output = parameters
  if minStrengthStr or maxLengthStr:
    queries = []
    if minStrengthStr:
      common.progress('assembling absolute strength exclusion')
      minStrength = common.toFloat(minStrengthStr, 'minimum absolute interaction strength')
      queries.append(common.query(interLayer, '[%s] >= %g', strengthFld, minStrength))
    if maxLengthStr:
      common.progress('assembling absolute length exclusion')
      maxLength = common.toFloat(maxLengthStr, 'maximum absolute interaction length')
      queries.append(common.query(interLayer, '[%s] <= %g', lengthFld, maxLength))
    common.selection(interLayer, strLayer, ' OR '.join(queries))
  else:
    strLayer = interLayer
  if minRelStrengthStr:
    common.progress('performing relative strength exclusion')
    minRelStrength = common.toFloat(minRelStrengthStr, 'minimum relative interaction strength')
    relQuery = common.query(interLayer, '[%s] > 0 AND ([%s] / [%s] * 1000) >= %g', lengthFld, strengthFld,
      lengthFld, minRelStrength)
    common.select(strLayer, relLayer, relQuery)
  else:
    relLayer = strLayer
  common.progress('counting selected interactions')
  common.message('%i interactions selected.' % common.count(relLayer))
  common.progress('writing output')
  common.copy(relLayer, output)
Example #22
import arcpy, common, randomize

with common.runtool(3) as parameters:
  conns, sdStr, target = parameters
  sd = common.toFloat(sdStr, 'standard deviation of position change')
  common.progress('copying connections')
  arcpy.CopyFeatures_management(conns, target)
  shpFld = arcpy.Describe(target).ShapeFieldName

  prog = common.progressor('randomizing', common.count(target))
  rows = arcpy.UpdateCursor(target)
  for row in rows:
    newPt = randomize.randomizePoint(row.getValue(shpFld), sd)
    row.setValue(shpFld, newPt)
    rows.updateRow(row)
    prog.move()

  prog.end()
  del row, rows
Example #23
 ## GET AND PREPARE THE ATTRIBUTES
 # obtained from the tool input
 points, ptsIDFld, weightFld, normStr, transferFldsStr, tolerStr, outPath = parameters
 location, outName = os.path.split(outPath)
 normalization = math.sqrt(common.toFloat(normStr, 'normalization value') / math.pi)
 tolerance = common.toFloat(tolerStr, 'positional tolerance value')
 transferFlds = common.parseFields(transferFldsStr)
 
 common.progress('creating weighting layer')
 common.overwrite(True)
 circLayer = common.createFeatureClass(os.path.join(location, TMP_CIRCPTS), crs=points)
 inShpFld = arcpy.Describe(points).ShapeFieldName
 circShapeFld = arcpy.Describe(circLayer).ShapeFieldName
 arcpy.AddField_management(circLayer, ptsIDFld, common.outTypeOfField(points, ptsIDFld))
 
 inCount = common.count(points)
 common.progress('opening weighting layer')
 inCur = arcpy.SearchCursor(points)
 outCur = arcpy.InsertCursor(circLayer)
 prog = common.progressor('weighting points', inCount)
 pi2 = 2 * math.pi
 for inRow in inCur:
   # load input geometry
   pt = inRow.getValue(inShpFld).getPart(0)
   id = inRow.getValue(ptsIDFld)
   coor = (pt.X, pt.Y)
   # load radius
   radius = math.sqrt(inRow.getValue(weightFld)) * normalization
    print inRow.getValue(weightFld), radius, normalization  # debug trace
   ptCount = max(int(pi2 * radius / tolerance), 3)
   delta = pi2 / ptCount
Example #24
 def loadPlaces(self):
   self.placeMapper.loadData(common.progressor('loading place data', common.count(self.places)))
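Throughout the arcpy examples, common.count(dataset) acts as a plain row/feature counter, typically used to size a progressor or to detect empty inputs. A minimal sketch under that assumption (the real common module is not shown in this listing):

import arcpy

def count(dataset):
    # Return the number of rows or features in a table or feature class.
    return int(arcpy.GetCount_management(dataset).getOutput(0))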