Code Example #1
def writePricing(csv, resort, date, stayPrice, ticketPrice, bookingURL):
    length = len(resort)
    i = 0
    while i < length:
        fields = (resort[i], date, stayPrice[i], ticketPrice[i], bookingURL[i])
        csv.writerow(fields)
        i = i + 1
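
A minimal driver for a helper like this, assuming parallel lists of equal length (the resort names, prices, and URLs below are hypothetical):

import csv

with open('pricing.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writePricing(writer,
                 ['Vail', 'Aspen'],   # resort names (hypothetical)
                 '2024-01-15',        # one shared date
                 [299, 349],          # stayPrice per resort
                 [189, 205],          # ticketPrice per resort
                 ['https://example.com/vail', 'https://example.com/aspen'])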
Code Example #2
def writeWeather(csv, resort, updateDate, snowDepth, status):
    length = len(resort)
    i = 0
    while i < length:
        fields = (resort[i], updateDate[i], snowDepth[i], status[i])
        csv.writerow(fields)
        i = i + 1
Code Example #3
File: file_exchange.py, Project: Sk1f161/ERP
    def create_exchange_file(self, cr, uid, id, context=None):
        if isinstance(id,list):
            id = id[0]
        output_file = TemporaryFile('w+b')
        fieldnames = ['id', 'name','referential_id:id', 'type','do_not_update', 'mapping_template_id:id', 'encoding', 'format', 'search_filter','delimiter', 'folder_path', 'archive_folder_path', 'filename', 'action_before_all', 'action_after_all', 'action_before_each', 'action_after_each', 'check_if_import', 'pre_processing']
        csv = FileCsvWriter(output_file, fieldnames, encoding="utf-8", writeheader=True, delimiter=',', quotechar='"')
        current_file = self.browse(cr, uid, id, context=context)
        row = {
            'id': current_file.get_absolute_id(context=context),
            'name': current_file.name,
            'referential_id:id':current_file.referential_id.get_absolute_id(context=context),
            'type': current_file.type,
            'do_not_update': str(current_file.do_not_update),
            'mapping_template_id:id': current_file.mapping_id.get_absolute_id(context=context),
            'encoding': current_file.encoding,
            'format': current_file.format,
            'search_filter': current_file.search_filter or '',
#            'lang'
            'delimiter': current_file.delimiter,
            'folder_path': current_file.folder_path or '',
            'archive_folder_path': current_file.archive_folder_path or '',
            'filename': current_file.filename,
            'action_before_all': current_file.action_before_all or '',
            'action_after_all': current_file.action_after_all or '',
            'action_before_each': current_file.action_before_each or '',
            'action_after_each': current_file.action_after_each or '',
            'check_if_import': current_file.check_if_import or '',
            'pre_processing': current_file.pre_processing or '',
        }
        csv.writerow(row)
        return self.pool.get('pop.up.file').open_output_file(cr, uid, 'file.exchange.csv', output_file, 'File Exchange Export', context=context)
Code Example #4
def writeOutput(rows, ofile):
    """ Function that will write the output file for Cybersource """
    csv = open_csv(ofile, "w",
                   config.get('OrderGroove', 'outputColumnNames').split(','))
    csv.writeheader()
    for k in rows.keys():
        csv.writerow(rows[k])
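
open_csv is a project-specific helper that isn't shown here; judging by the writeheader() and dict-per-row writerow() calls, a plausible stand-in built on csv.DictWriter could look like this (an assumption, the real helper may differ):

import csv

def open_csv(path, mode, fieldnames):
    # DictWriter matches the writeheader()/writerow(dict) usage above
    f = open(path, mode, newline='')
    return csv.DictWriter(f, fieldnames=fieldnames)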
Code Example #5
def writeFlight(csv, city, resort, date, trip):
    length = len(resort)
    i = 0
    while i < length:
        fields = (city, resort[i], date, trip[i][0], trip[i][1])
        csv.writerow(fields)
        i = i + 1
Code Example #6
def volcar_info(pdp, csv):
    producto = pdp.productoVenta
    nombre_producto = producto and producto.descripcion or ""
    prod_estandar_parte = pdp.prodestandar
    prod_estandar_producto = producto and producto.prodestandar or ""
    fecha = utils.str_fecha(pdp.fecha)
    #horaini = utils.str_hora_corta(pdp.horainicio)
    #horafin = utils.str_hora_corta(pdp.horafin)
    horaini = utils.str_fechahora(pdp.fechahorainicio)
    horafin = utils.str_fechahora(pdp.fechahorafin)
    produccion_m2 = pdp.get_produccion()[0]
    produccion_kg = sum([a.peso_sin for a in pdp.articulos])
    productividad = pdp.calcular_productividad()
    rendimiento = pdp.calcular_rendimiento()
    observaciones = pdp.observaciones
    observaciones_paradas = "; ".join(
            [parada.observaciones for parada in pdp.incidencias])
    tiempo_produccion, tiempo_paradas = calcular_tiempo_trabajado(pdp)
    csv.writerow((nombre_producto, 
                  prod_estandar_parte, 
                  prod_estandar_producto, 
                  fecha, 
                  horaini, 
                  horafin, 
                  produccion_m2, 
                  produccion_kg, 
                  productividad, 
                  rendimiento, 
                  observaciones, 
                  observaciones_paradas, 
                  tiempo_produccion, 
                  tiempo_paradas))
Code Example #7
def ecrit(liste, a, b, com, csv):
    if liste[a] == 0:
        fileLog.write("	=>%s\n" % (com))
        lstLine = list(liste[b])
        csv.writerow(lstLine)
Code Example #8
def compute_and_log_miou(current_frame, current_timestamp, csv, deadline=210):
    """ Computes the mIOU for the given frame relative to the previous frames
    and logs it to the given csv file.

    Args:
        current_frame: The frame to compute the mIOU for.
        current_timestamp: The timestamp associated with the frame.
        csv: The csv file to write the results to.
        deadline: Drop saved frames older than this many milliseconds.
    """
    SAVED_FRAMES.append((current_timestamp, current_frame))

    # Remove data older than the deadline that we don't need anymore.
    while (current_timestamp - SAVED_FRAMES[0][0]) * 1000 > deadline:
        SAVED_FRAMES.popleft()

    # Go over each of the saved frames, compute the difference in the
    # timestamp, and the mIOU and log both of them.
    for old_timestamp, old_frame in SAVED_FRAMES:
        (mean_iou, class_iou) = \
            current_frame.compute_semantic_iou_using_masks(old_frame)
        time_diff = current_timestamp - old_timestamp

        # Format of the CSV file: (latency_in_ms, class, mean IOU)
        csv.writerow([time_diff * 1000, "Scene", mean_iou])

        # Insert the results for the person.
        person_key = 4
        if person_key in class_iou:
            csv.writerow([time_diff * 1000, "Person", class_iou[person_key]])
Code Example #9
def spracuj_vyjazd(url, csv):
    ret = {
        'Podtyp': '',
        'Nahlasenie': '',
        'Lokalizacia': '',
        'Uhasenie': '',
        'Sprava': ''
    }
    r = requests.get('http://www.firebrno.cz/modules/incidents/' + str(url))
    soup = BeautifulSoup(r.text, "html.parser")
    incident = soup.find('div', 'inc-detail')
    ret['Nahlasenie'] = re.findall('[0-9]+:[0-9]+', (incident.p).get_text())[0]
    for tag in incident('p'):

        i = tag.get_text().find(":")
        part = [(tag.get_text())[:i], (tag.get_text())[i + 1:]]
        if 'Podtyp' in part[0]:
            ret['Podtyp'] = part[1]
        elif 'Popis' in part[0]:
            ret['Sprava'] = part[1]
            casi = re.findall('[0-9]+:[0-9]+', part[1])
            if (len(casi)) == 1:
                ret['Lokalizacia'] = casi[0]
                ret['Uhasenie'] = casi[0]
            elif (len(casi)) == 2:
                ret['Lokalizacia'] = casi[0]
                ret['Uhasenie'] = casi[1]
    csv.writerow(ret)
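
Because spracuj_vyjazd passes a dict to writerow, the csv argument must behave like a csv.DictWriter keyed on the same field names; a minimal caller might look like this (the URL fragment is hypothetical):

import csv

with open('vyjazdy.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=[
        'Podtyp', 'Nahlasenie', 'Lokalizacia', 'Uhasenie', 'Sprava'])
    writer.writeheader()
    spracuj_vyjazd('detail.php?id=123', writer)  # hypothetical incident URL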
Code Example #10
def csvWrite(filename, bookmarks, header):
    import csv
    csvfile = open(filename, "wb")
    csv = csv.writer(csvfile, delimiter=";")
    csv.writerow(header)
    for bookmark in bookmarks:
        csv.writerow(values(bookmark, header))
    csvfile.close()
Code Example #11
def save_csv_entry(csv, article, status):
    title = article['ti'][0] if 'ti' in article else ''
    source = article['fo'][0] if 'fo' in article else ''
    authors = ', '.join(article['au']) if 'au' in article else ''
    collection = ', '.join(article['in']) if 'in' in article else ''

    csv.writerow( [article['id'], title.encode('utf-8'), authors.encode('utf-8'),
        source.encode('utf-8'), collection, status] )
Code Example #12
def create_numbers():
    numbers = np.arange(1, 200001).reshape(2000, 100)
    import csv
    # newline='' keeps the csv module from inserting blank rows on Windows
    with open('file.csv', 'w', newline='') as csv_file:
        csv = csv.writer(csv_file)

        for num in numbers:
            csv.writerow(num)
Code Example #13
def process_user(csv, user):
    cell = user.getLastProfile().contact_user.phone_cell
    if cell and not re.match(r"\d{3}-\d{3}-\d{4}", cell):
        raise ValueError("Cell for %s not formatted properly!" % user.name())
    csv.writerow([
            user.name(),
            user.email,
            cell])
Code Example #14
def writeResort(csv, resort, price, difficulty, startSeason, endSeason, image, rating, city, state):
    length = len(resort)
    i = 0
    while i < length:
#        fields = (resort[i],location[i][0], location[i][1], startSeason[i], endSeason[i], image[i], rating[i], difficulty[i])
        fields = (resort[i],city[i], state[i], startSeason[i], endSeason[i], image[i], rating[i], difficulty[i])
        csv.writerow(fields)
        i = i + 1            
Code Example #15
    def _writeGroupI(self, csv, elementModels):
        group = "I"
        for element in elementModels:
            csvRow = [group, element.elementNumber]

            for node in element.getIncidences():
                csvRow.append(node)

            csv.writerow(self._csvPad(csvRow))
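
Several of the group writers in these excerpts call self._csvPad, which is not shown; from its use it plausibly pads each row out to the file's fixed column count, something like the following (a guess, not the project's actual code):

    def _csvPad(self, row, width=10):
        # pad short rows with blanks so every line has the same column count
        return row + [''] * (width - len(row))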
Code Example #16
def writePrints(pole, closest, poles, csv, wallCsv):
    if int(pole[-1]) >= 2: # walls and lines are in wkt format
        wkt = pole[1]
        startx = float(wkt[wkt.find('(')+1:wkt.find(' ')])
        starty = float(wkt[wkt.find(' ')+1:wkt.find(',')])
        endx = float(wkt[wkt.find(',')+1:wkt.rfind(' ')])
        endy = float(wkt[wkt.rfind(' ')+1:wkt.find(')')])
        if startx > endx: # switch
            tmpx = startx
            tmpy = starty
            startx = endx
            starty = endy
            endx = tmpx
            endy = tmpy
    else:
        x = float(pole[1])
        y = float(pole[2])

    for other in poles:        
        if int(other[-1]) >= 2: # walls and lines are in wkt format
            wkt = other[1]
            oStartx = float(wkt[wkt.find('(')+1:wkt.find(' ')])
            oStarty = float(wkt[wkt.find(' ')+1:wkt.find(',')])
            oEndx = float(wkt[wkt.find(',')+1:wkt.rfind(' ')])
            oEndy = float(wkt[wkt.rfind(' ')+1:wkt.find(')')])
            oX = 0
            oY = 0
            if oStartx > oEndx: # switch
                tmpx = oStartx
                tmpy = oStarty
                oStartx = oEndx
                oStarty = oEndy
                oEndx = tmpx
                oEndy = tmpy
        else:
            oX= float(other[1])
            oY= float(other[2])
        if int(pole[-1]) < 2:
            if int(other[-1]) >= 2:
                (angle, distance) = calcLineDistAngle([(oStartx, oStarty), (oEndx, oEndy)], (x, y))
            else:
                direction = (oX - x, oY - y)
                if direction[0] == 0 and direction[1] == 0: continue
                angle = math.acos(direction[1]/math.sqrt(direction[0]**2 + direction[1]**2))
                if direction[0] < 0: angle *= -1
                distance = dist(pole, other)
            csv.writerow([pole[0], x, y, 0.0, other[0], distance, angle, pole[-1]])
        else:
            if int(other[-1]) >= 2:
                distance = calcLineAngle([(startx, starty), (endx, endy)], [(oStartx, oStarty), (oEndx, oEndy)]) # if we're fingerprinting two walls against each other, we use their respective angle as dist
                angle = calcLineAngle([(startx, starty), (endx, endy)], [(startx, starty), (startx, starty+100.0)])
            else:
                (angle, distance) = calcLineDistAngle([(startx, starty), (endx, endy)], (oX, oY))              
                angle -= math.pi
                if angle < -math.pi: angle += 2*math.pi  
            wallCsv.writerow([pole[0], startx, starty, endx, endy, other[0], distance, angle, oX, oY, pole[-1]])
Code Example #17
    def _writeGroupD(self, csv, simulation):
        group = "D"
        csvRow = [group]

        # order in simulation KOD array is the same as
        # whats expected by GS2

        for kod in GS2KOD:
            csvRow.append(simulation.getOutputModifier(kod))

        csv.writerow(self._csvPad(csvRow))
Code Example #18
def log_data(csv, position, time, acc, speed, distance, cd, battery, energy):
    csv.writerow({
        "nodeId": position,
        "time": time,
        "acceleration": acc,
        "speed": speed,
        "distance": distance,
        "cd": cd,
        "battery": battery,
        "energy": energy
    })
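
log_data also hands writerow a dict, so csv here must be a csv.DictWriter; a minimal setup with the field names inferred from the keys above (the sample values are hypothetical):

import csv

with open('telemetry.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=[
        'nodeId', 'time', 'acceleration', 'speed',
        'distance', 'cd', 'battery', 'energy'])
    writer.writeheader()
    log_data(writer, 1, 0.0, 0.2, 5.0, 12.5, 0.3, 98.0, 1.1)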
Code Example #19
def vis_detections_video(im,
                         class_name,
                         dets,
                         csv_file,
                         csv,
                         frame_id,
                         encoder,
                         tracker,
                         thresh=0.6):
    """Draw detected bounding boxes."""
    detections = []
    scores = []
    h, w, _ = im.shape
    thick = int((h + w) // 300)
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return im
    for i in inds:
        scores.append(dets[i, -1])

    for i in inds:
        bbox = dets[i, :4]
        boxResults = process_box(bbox, scores, h, w, thresh)
        if boxResults is None:
            continue
        left, right, top, bot, mess, max_indx, confidence = boxResults
        detections.append(np.array([left, top, right, bot]).astype(np.float64))

    detections = np.array(detections)
    trackers = tracker.update(detections)
    for track in trackers:
        bbox = [int(track[0]), int(track[1]), int(track[2]), int(track[3])]
        id_num = str(int(track[4]))
        csv.writerow([
            frame_id, id_num,
            int(bbox[0]),
            int(bbox[1]),
            int(bbox[2]) - int(bbox[0]),
            int(bbox[3]) - int(bbox[1])
        ])
        csv_file.flush()
        cv2.rectangle(im, (int(bbox[0]), int(bbox[1])),
                      (int(bbox[2]), int(bbox[3])), (0, 255, 255), thick // 3)
        if ((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) > 30000):
            cv2.putText(im, "teacher" + id_num,
                        (int(bbox[0]), int(bbox[1]) - 12), 0, 1e-3 * h,
                        (0, 255, 255), thick // 6)
        else:
            cv2.putText(im, id_num, (int(bbox[0]), int(bbox[1]) - 12), 0,
                        1e-3 * h, (255, 255, 255), thick // 6)
        # cv2.rectangle(im,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),2)
        # cv2.rectangle(im,(int(bbox[0]),int(bbox[1])-10),(int(bbox[0]+200),int(bbox[1])+10),(10,10,10),-1)
        # cv2.putText(im, id_num,(int(bbox[0]),int(bbox[1]-2)),cv2.FONT_HERSHEY_SIMPLEX,.45,(255,255,255))#,cv2.CV_AA)
    return im
Code Example #20
 def parse_page(self, response):
     print("parsing URL")
     print(response.url)
     csv_file = 'github_stars.csv'
     list_github_starts = response.css(
         'div.d-inline-block > h3 > a::text').extract()
     if len(list_github_starts) > 0:
         for s in list_github_starts:
             with open(csv_file, 'a+', newline='') as f:
                 fwriter = csv.writer(f)
                 fwriter.writerow([s])  # writerow expects a sequence; a bare string would be split into characters
Code Example #21
def main():
    with open('nsa.txt','r') as schools:
        for school in schools:
            # Remove extraneous chars
            school = school.strip()

            # Check to see if school supports SSL
            if checkConnectivity(school) == False:
                log.write(school + " doesn't support SSL, skipping\n")
            
            # Looks like we do, so start the test
            else:
                # Start the test and sleep for 10 seconds to check for mismatch or cached result
                params = buildParams({'d':school})
                # Make sure the first time a school is tested that the clear cache link is used
                test = runTest(clear, params)
                sleep(10)
                test = runTest(url, params)
                mismatch = isCertificateMismatch(test)
                
                # Is there a mismatch?
                if mismatch == True:
                    # There is, so restart the test, wait 90 seconds, and get the results again
                    params = buildParams({'d':school, 'ignoreMismatch':'on'})
                    test = runTest(clear, params)
                    sleep(90)
                    test = runTest(url, params)
                    rating = getRating(test)
                    scores = getScores(test)
                    try:
                        csv.writerow([school, 'Y', rating, scores['Certificate'], scores['Protocol Support'], scores['Key Exchange'], scores['Cipher Strength']])
                    except:
                        csv.writerow([school, 'unknown error'])
                
                elif mismatch == False:
                    # Is there a cached page present or did the test kick off?
                    rating = getRating(test)
                    if rating != '':
                        csv.writerow([school, 'N', rating])
                    else:
                        # Start another test, wait 90 seconds, and check again
                        test = runTest(url, params)
                        sleep(90)
                        test = runTest(url, params)
                        rating = getRating(test)
                        scores = getScores(test)
                        try:
                            csv.writerow([school, 'N', rating, scores['Certificate'], scores['Protocol Support'], scores['Key Exchange'], scores['Cipher Strength']])
                        except:
                            csv.writerow([school, 'unknown error'])
    log.close()
Code Example #22
    def _writeGroupF(self, csv, ssModels):
        group = "F-1"
        csvRow = [group]

        for node in ssModels:
            csvRow.append(node.nodeID)
            csvRow.append(node.FQ)
            if len(csvRow) == 9:
                csv.writerow(self._csvPad(csvRow))
                csvRow = [group]

        if len(csvRow) > 1:
            csv.writerow(self._csvPad(csvRow))

        group = "F-2"
        csvRow = [group]

        for node in ssModels:
            csvRow.append(node.nodeID)
            csvRow.append(node.CFQ)
            if len(csvRow) == 9:
                csv.writerow(self._csvPad(csvRow))
                csvRow = [group]

        if len(csvRow) > 1:
            csv.writerow(self._csvPad(csvRow))
Code Example #23
    def _writeGroupH(self, csv, nodeModels):
        group = "H-1"
        csvRow = [group, 0.0]

        csv.writerow(self._csvPad(csvRow))

        # we will support the other case for group H-2 later
        group = "H-2"
        csvRow = [group, 9999.0]
        csv.writerow(self._csvPad(csvRow))

        group = "H-3"
        csvRow = [group]

        for node in nodeModels:
            csvRow.append(node.I)
            csvRow.append(node.PHII)

            # group + 4 pairs
            if len(csvRow) == 9:
                csv.writerow(self._csvPad(csvRow))
                csvRow = [group]

        if len(csvRow) > 1:
            csv.writerow(self._csvPad(csvRow))
Code Example #24
def getdata(driver, csv):
    #get time table
    classtimetable = driver.find_elements_by_xpath(
        "//table[@class='unitList']/tbody/tr")
    #classtimetable = driver.find_element_by_class_name("unitList")
    #print(classtimetable.text)
    for i in classtimetable:
        #print(i.text)
        #for j in i.find_elements_by_tag_name("td"):
        #    print(j.text)
        comp_list = [j.text for j in i.find_elements_by_tag_name("td")]
        print(comp_list)
        print(comp_list, file=sys.stderr)
        csv.writerow(comp_list)
Code Example #25
    def _writeGroupQ(self, csv, materials):

        # sub group Q-1
        group = "Q-1"
        csvRow = [group]
        for mat in materials:
            csvRow.append(mat.getInterpolationPointCount())

            # allows 20 values in this group
            if len(csvRow) == 21:
                csv.writerow(csvRow)
                csvRow = [group]

        # make sure the last row actually has meaningful data
        if len(csvRow) > 1:
            csv.writerow(self._csvPad(csvRow))

        groups = ["Q-2", "Q-3", "Q-4"]
        for mat in materials:
            matData = [
                mat.pressureHead, mat.moistureContent,
                mat.hydraulicConductivity
            ]
            for i in range(3):
                csvRow = [groups[i]]
                for elem in matData[i]:
                    csvRow.append(elem)
                    # these groups contain at most 8 data points plus the group
                    if len(csvRow) == 9:
                        csv.writerow(self._csvPad(csvRow))
                        csvRow = [groups[i]]

                if len(csvRow) > 1:
                    csv.writerow(self._csvPad(csvRow))
Code Example #26
    def _writeGroupL(self, csv, nodeModels):
        group = "L"
        csvRow = [group]

        for node in nodeModels:
            if node.boundary.getData() == "Constant Concentration (Dirichlet)":
                csvRow.append(node.I)

            if len(csvRow) == 21:
                csv.writerow(self._csvPad(csvRow))
                csvRow = [group]

        if len(csvRow) > 1:
            csv.writerow(self._csvPad(csvRow))
Code Example #27
    def _writeGroupC(self, csv, model):
        group = "C"

        csvRow1 = [
            group, model.AFMOBX, model.AFMOBY, model.APOR, model.AELONG,
            model.AETRANS, model.APHII, model.ACONCI, model.XFACT
        ]

        csvRow2 = [
            group, model.YFACT, model.ATETA, model.AAL, model.AKD, model.ALAM,
            model.ARHO
        ]

        csv.writerow(self._csvPad(csvRow1))
        csv.writerow(self._csvPad(csvRow2))
Code Example #28
File: jydoop.py, Project: lonnen/jydoop
def outputWithoutKey(path, results):
    """
    Output values into a reasonable text file. If the values are simple,
    they are printed directly. If they are complex tuples/lists, they are
    printed as csv.
    """
    f = open(path, 'w')
    w = csv.writer(f)
    for k, v in results:
        l = []
        unwrap(l, v)
        if len(l) == 1:
            print >>f, v
        else:
            w.writerow(l)
Code Example #29
def outputWithoutKey(path, results):
    """
    Output values into a reasonable text file. If the values are simple,
    they are printed directly. If they are complex tuples/lists, they are
    printed as csv.
    """
    f = open(path, 'w')
    w = csv.writer(f)
    for k, v in results:
        l = []
        unwrap(l, v)
        if len(l) == 1:
            print >> f, v
        else:
            w.writerow(l)
Code Example #30
File: test.py, Project: SB233/GaussianMixtureModel
def main(trainingData, testData, csv):
    nbSamples = getFileLength(testData)
    # methods = ["Guess", "Random", "K-Means"]
    methods = ["Guess"]
    for m in methods:
        print "\n#", m
        for i in range(4):
            print ".",
            res = executeCmd(trainingData, testData, method=m)
            if res[2] is not None:
                print res[2]
            nbDiff = diffFile(testData, outputFile)
            accuracy = (float(nbDiff) / nbSamples) * 100
            row = [m, accuracy, res[0]]
            csv.writerow(row)
Code Example #31
File: test.py, Project: pmdartus/GaussianMixtureModel
def main(trainingData, testData, csv):
    nbSamples = getFileLength(testData)
    # methods = ["Guess", "Random", "K-Means"]
    methods = ["Guess"]
    for m in methods:
        print "\n#", m
        for i in range(4):
            print".",
            res = executeCmd(trainingData, testData, method=m)
            if res[2] is not None:
                print res[2]
            nbDiff = diffFile(testData, outputFile)
            accuracy = (float(nbDiff) / nbSamples)*100
            row = [m, accuracy, res[0]]
            csv.writerow(row)
Code Example #32
File: tests.py, Project: kimchouard/sjtu-ist-gmm
def main(trainingData, testData, runs, csv):
    # Write csv header
    csv.writerow(["method", "errorRate", "time"])
    nbSamples = getFileLength(testData)
    methods = ["random"]
    for m in methods:
        print "\n#", m
        for i in range(runs):
            print".",
            res = executeCmd(trainingData, testData, method=m)
            if res[2] is not None:
                print res[2]
            nbDiff = diffFile(testData, outputFile)
            errorRate = (float(nbDiff) / nbSamples)*100
            row = [m, errorRate, res[0]]
            csv.writerow(row)
Code Example #33
File: tests.py, Project: Zeus0428/sjtu-ist-gmm
def main(trainingData, testData, runs, csv):
    # Write csv header
    csv.writerow(["method", "errorRate", "time"])
    nbSamples = getFileLength(testData)
    methods = ["random"]
    for m in methods:
        print "\n#", m
        for i in range(runs):
            print ".",
            res = executeCmd(trainingData, testData, method=m)
            if res[2] is not None:
                print res[2]
            nbDiff = diffFile(testData, outputFile)
            errorRate = (float(nbDiff) / nbSamples) * 100
            row = [m, errorRate, res[0]]
            csv.writerow(row)
Code Example #34
File: admin.py, Project: webcommittee/cfwebsite
 def entries_view(self, request, form_id):
     """
     Displays the form entries in a HTML table with option to
     export as CSV file.
     """
     if request.POST.get("back"):
         change_url = admin_url(RegistrationPage, "change", form_id)
         return HttpResponseRedirect(change_url)
     form = get_object_or_404(RegistrationPage, id=form_id)
     entries_form = EntriesForm(form, request, request.POST or None)
     #delete_entries_perm = "%s.delete_formentry" % FormEntry._meta.app_label
     #can_delete_entries = request.user.has_perm(delete_entries_perm)
     submitted = entries_form.is_valid()
     if submitted:
         if request.POST.get("export"):
             response = HttpResponse(content_type="text/csv")
             timestamp = slugify(datetime.now().ctime())
             fname = "%s-%s.csv" % (form.slug, timestamp)
             header = "attachment; filename=%s" % fname
             response["Content-Disposition"] = header
             queue = StringIO()
             delimiter = settings.FORMS_CSV_DELIMITER
             try:
                 csv = writer(queue, delimiter=delimiter)
                 writerow = csv.writerow
             except TypeError:
                 queue = BytesIO()
                 delimiter = bytes(delimiter, encoding="utf-8")
                 csv = writer(queue, delimiter=delimiter)
                 writerow = lambda row: csv.writerow([
                     c.encode("utf-8") if hasattr(c, "encode") else c
                     for c in row
                 ])
             writerow(entries_form.columns())
             for row in entries_form.rows(csv=True):
                 writerow(row)
             data = queue.getvalue()
             response.write(data)
             return response
         elif request.POST.get("delete"):
             selected = request.POST.getlist("selected")
             if selected:
                 entries = CompanyProfile.objects.filter(id__in=selected)
                 count = entries.count()
                 if count > 0:
                     entries.delete()
                     message = ungettext("1 entry deleted",
                                         "%(count)s entries deleted", count)
                     info(request, message % {"count": count})
     template = "admin/forms/entries.html"
     context = {
         "title": _("View Entries"),
         "entries_form": entries_form,
         "opts": self.model._meta,
         "original": form,
         "can_delete_entries": True,
         "submitted": submitted
     }
     return render_to_response(template, context, RequestContext(request))
Code Example #35
def statsExtractor(pageId, page, boostTemplates, csv):
    "Export raw stats"
    boost = 1
    for key, value in boostTemplates.iteritems():
        if key in page['template']:
            boost *= value

    csv.writerow([page['title'].replace("\"", "\"\""),
                 pageId,
                 page['incoming_links'],
                 len(page['external_link']),
                 page['text_bytes'],
                 len(page['heading']),
                 len(page['redirect']),
                 len(page['outgoing_link']),
                 page.get('popularity_score', 0),
                 boost])
Code Example #36
def statsExtractor(pageId, page, boostTemplates, csv):
    "Export raw stats"
    boost = 1
    for key, value in boostTemplates.iteritems():
        if key in page['template']:
            boost *= value

    csv.writerow([page['title'].replace("\"", "\"\""),
                 pageId,
                 page['incoming_links'],
                 len(page['external_link']),
                 page['text_bytes'],
                 len(page['heading']),
                 len(page['redirect']),
                 len(page['outgoing_link']),
                 page.get('popularity_score', 0),
                 boost])
Code Example #37
File: file_exchange.py, Project: Sk1f161/ERP
 def create_file_default_fields(self, cr, uid, id, context=None):
     if isinstance(id,list):
         id = id[0]
     output_file = TemporaryFile('w+b')
     fieldnames = ['id', 'import_default_field:id', 'import_default_value', 'file_id:id', 'mapping_template_id:id', 'type']
     csv = FileCsvWriter(output_file, fieldnames, encoding="utf-8", writeheader=True, delimiter=',', quotechar='"')
     current_file = self.browse(cr, uid, id, context=context)
     for field in current_file.import_default_fields:
         row = {
             'id': field.get_absolute_id(context=context),
             'import_default_field:id': field.import_default_field.get_external_id(context=context)[field.import_default_field.id],
             'import_default_value': field.import_default_value,
             'file_id:id': current_file.get_absolute_id(context=context),
             'mapping_template_id:id': field.mapping_id.get_absolute_id(context=context),
             'type': field.type,
         }
         csv.writerow(row)
     return self.pool.get('pop.up.file').open_output_file(cr, uid, 'file.default.import.values.csv', output_file, 'File Exchange Fields Export', context=context)
Code Example #38
def dump_info(vms, csv):
    """Dump information about VMs of a certain hypervisor into a CSV file"""
    for vm in vms:
        ips = vm['interfaces']
        if len(ips) > 0:
            ip = ips[0].get('ipv4_address', ips[0]['mac'])
        else:
            ip = 'missing'
        hn = execute('hostname')
        # uptime in h
        uptime_period = 60 * 60.0
        name = vm['name']
        mem = vm['memory'] / 1024.0
        disk = vm['diskspace']['/'] / 1024.0
        vcpus = vm['vcpu']
        uptime = vm['uptime'] if vm['uptime'] else 0
        uptime /= uptime_period
        print name, mem, disk, vcpus, uptime, ip, hn
        csv.writerow([name, mem, disk, vcpus, uptime, ip, hn])
Code Example #39
File: on-dump-csv.py, Project: AsherBond/opennode-tui
def dump_info(vms, csv):
        """Dump information about VMs of a certain hypervisor into a CSV file"""
        for vm in vms:
                ips = vm['interfaces']
                if len(ips) > 0:
                    ip = ips[0].get('ipv4_address', ips[0]['mac'])
                else:
                    ip = 'missing'
                hn = execute('hostname')
                # uptime in h
                uptime_period = 60 * 60.0
                name = vm['name']
                mem = vm['memory'] / 1024.0
                disk = vm['diskspace']['/'] / 1024.0
                vcpus = vm['vcpu']
                uptime = vm['uptime'] if vm['uptime'] else 0
                uptime /= uptime_period
                print name, mem, disk, vcpus, uptime, ip, hn
                csv.writerow([name, mem, disk, vcpus, uptime, ip, hn])
Code Example #40
def writeResort(csv, resort, price, difficulty, startSeason, endSeason, image,
                imageMap, imageMapXL, rating, city, state):
    length = len(resort)
    i = 0
    while i < length:
        #        fields = (resort[i],location[i][0], location[i][1], startSeason[i], endSeason[i], image[i], rating[i], difficulty[i])
        # Check size of image file and if below a threshold, use imageMap
        imageFileName = 'images/Ski resort ' + resort[i] + '.jpg'
        imageStat = os.stat(imageFileName)
        if imageStat.st_size > 12000:
            fields = (resort[i], city[i], state[i], startSeason[i],
                      endSeason[i], image[i], imageMapXL[i], rating[i],
                      difficulty[i])
        else:
            fields = (resort[i], city[i], state[i], startSeason[i],
                      endSeason[i], imageMap[i], imageMapXL[i], rating[i],
                      difficulty[i])
        csv.writerow(fields)
        i = i + 1
Code Example #41
def output_collections(coll_json, csv, rest_url, comm_name, comm_id, verbose,
                       inc_items):
    collection_id = 0

    if verbose:
        print("Collections:")

    for collection in coll_json:
        collection_id += 1
        if verbose:
            print("\t{}".format(collection['name']))
        item_list_url = rest_url + '/collections/' + collection[
            'uuid'] + '/items'

        resp = requests.get(item_list_url)
        if resp.status_code != 200:
            print("Status {}: unable to get item list for '{}'".format(
                resp.status_code, collection['name']))
            continue
        if verbose:
            print("    Items ({}):".format(collection['numberItems']))

        if not inc_items:
            csv.writerow([
                comm_id,
                comm_name,
                collection_id,
                collection['name'] + '(' + collection['uuid'] + ')',
                collection['numberItems'],
            ])

        else:
            i = 0
            for item in resp.json():
                i += 1
                if verbose:
                    print("\t\t{}".format(item['name']))
                csv.writerow([
                    comm_id, comm_name, collection_id,
                    collection['name'] + '(' + collection['uuid'] + ')',
                    collection['numberItems'], i, item['name'],
                    jscholarship + item['handle'], item['uuid']
                ])
Code Example #42
def modify_row(csv, row, i):
	woman = 0
	aux = []
	for item in row:
		if(item == 'Woman'): 
			woman = 1
			item = 0
		if(item == 'Man'):
			item = 1
		if(item == '?'):
			if(woman == 1): 
				item = 1
			else:
				item = 0
		
		aux = np.append(aux, [item])
	
	csv.writerow(aux)
Code Example #43
def statsExtractor(pageId, page, boostTemplates, csv):
    "Export raw stats"
    boost = 1
    for key, value in boostTemplates.iteritems():
        if key in page["template"]:
            boost *= value

    csv.writerow(
        [
            page["title"].replace('"', '""'),
            pageId,
            page["incoming_links"],
            len(page["external_link"]),
            page["text_bytes"],
            len(page["heading"]),
            len(page["redirect"]),
            len(page["outgoing_link"]),
            page.get("popularity_score", 0),
            boost,
        ]
    )
Code Example #44
File: admin.py, Project: Roastmaster/careerfair
 def entries_view(self, request, form_id):
     """
     Displays the form entries in a HTML table with option to
     export as CSV file.
     """
     if request.POST.get("back"):
         change_url = admin_url(RegistrationPage, "change", form_id)
         return HttpResponseRedirect(change_url)
     form = get_object_or_404(RegistrationPage, id=form_id)
     entries_form = EntriesForm(form, request, request.POST or None)
     #delete_entries_perm = "%s.delete_formentry" % FormEntry._meta.app_label
     #can_delete_entries = request.user.has_perm(delete_entries_perm)
     submitted = entries_form.is_valid()
     if submitted:
         if request.POST.get("export"):
             response = HttpResponse(content_type="text/csv")
             timestamp = slugify(datetime.now().ctime())
             fname = "%s-%s.csv" % (form.slug, timestamp)
             header = "attachment; filename=%s" % fname
             response["Content-Disposition"] = header
             queue = StringIO()
             delimiter = settings.FORMS_CSV_DELIMITER
             try:
                 csv = writer(queue, delimiter=delimiter)
                 writerow = csv.writerow
             except TypeError:
                 queue = BytesIO()
                 delimiter = bytes(delimiter, encoding="utf-8")
                 csv = writer(queue, delimiter=delimiter)
                 writerow = lambda row: csv.writerow([c.encode("utf-8")
                     if hasattr(c, "encode") else c for c in row])
             writerow(entries_form.columns())
             for row in entries_form.rows(csv=True):
                 writerow(row)
             data = queue.getvalue()
             response.write(data)
             return response
         elif request.POST.get("delete"):
             selected = request.POST.getlist("selected")
             if selected:
                 entries = CompanyProfile.objects.filter(id__in=selected)
                 count = entries.count()
                 if count > 0:
                     entries.delete()
                     message = ungettext("1 entry deleted",
                                         "%(count)s entries deleted", count)
                     info(request, message % {"count": count})
     template = "admin/forms/entries.html"
     context = {"title": _("View Entries"), "entries_form": entries_form,
                "opts": self.model._meta, "original": form,
                "can_delete_entries": True,
                "submitted": submitted}
     return render_to_response(template, context, RequestContext(request))
Code Example #45
File: file_exchange.py, Project: Sk1f161/ERP
 def create_file_fields(self, cr, uid, id, context=None):
     if isinstance(id,list):
         id = id[0]
     output_file = TemporaryFile('w+b')
     fieldnames = ['id', 'is_required', 'name', 'custom_name', 'sequence', 'mappinglines_template_id:id', 'file_id:id', 'default_value', 'advanced_default_value', 'merge_key']
     csv = FileCsvWriter(output_file, fieldnames, encoding="utf-8", writeheader=True, delimiter=',', quotechar='"')
     current_file = self.browse(cr, uid, id, context=context)
     for field in current_file.field_ids:
         row = {
             'file_id:id': field.file_id.get_absolute_id(context=context),
             'name': field.name,
             'id': field.get_absolute_id(context=context),
             'sequence': str(field.sequence),
             'mappinglines_template_id:id': field.mapping_line_id and field.mapping_line_id.get_absolute_id(context=context) or '',
             'custom_name': field.custom_name or '',
             'default_value': field.default_value or '',
             'is_required': str(field.is_required),
             'advanced_default_value': field.advanced_default_value or '',
             'merge_key': str(field.merge_key),
         }
         csv.writerow(row)
     return self.pool.get('pop.up.file').open_output_file(cr, uid, 'file.fields.csv', output_file, 'File Exchange Fields Export', context=context)
Code Example #46
File: bulk.py, Project: edgemaster/labpro
def setupDevice(lp, incid, csv):
    numsensors = 0
    rv = []
    deviceid = incid.increment()

    description = prompt("Enter device description, eg room, location etc.")

    if lp.status["battery"] > 0:
        print "WARNING: Battery may be low! Currently rated at level %s" % lp.status["battery"]
    lp.doCommand(0)
    lp.doCommand(6, 3)  # Turn sound off - hope for battery boost
    lp.doCommand(6, 5, deviceid)  # Set ID
    rv.append(deviceid)

    for sensor in (1, 2, 3, 4):
        lp.doCommand(1, sensor, 1)
        sensortype = lp.getSensorStatus(sensor)
        if sensortype not in (10, 34):
            lp.doCommand(1, sensor, 0)
            rv.append(0)
        else:
            sensorname = lp.doCommand(116, sensor)
            print "Found channel %s, detected type %s as a %s" % (sensor, sensortype, sensorname)
            rv.append(sensortype)
            numsensors += 1

    if numsensors:
        samples = 12287 / numsensors
        sampletime = (4 * 24 * 60 * 60) / samples
        # sampletime = 5
        lp.doCommand(3, sampletime, samples, 0, 0, 0, 0, 0, 0, 0, 0)
        rv.append(samples)
        rv.append(sampletime)
        rv.append(description)
        rv.append(time.time())
        csv.writerow(rv)
    else:
        print "No sensors found!"
Code Example #47
File: helpers.py, Project: Heldroe/django-fobi
    def export_to_csv(self):
        """
        Export data to CSV.
        """
        #response = HttpResponse(mimetype="text/csv")
        response = self._get_initial_response(mimetype="text/csv")
        response['Content-Disposition'] = \
            'attachment; filename=db_store_export_data.csv'

        data_headers = self._get_data_headers()
        data_keys = data_headers.keys()
        data_values = data_headers.values()

        queue = StringIO()
        try:
            csv_obj = csv.writer(
                queue, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR
                )
            writerow = csv_obj.writerow
        except TypeError:
            queue = BytesIO()
            delimiter = bytes(CSV_DELIMITER, encoding="utf-8")
            quotechar = bytes(CSV_QUOTECHAR, encoding="utf-8")
            csv_obj = csv.writer(
                queue, delimiter=delimiter, quotechar=quotechar
                )
            writerow = lambda row: csv_obj.writerow(
                [safe_text(cell) for cell in row]
                )

        data_values = [safe_text(value) for value in data_values]

        writerow(data_values)

        for obj in self.queryset:
            data = json.loads(obj.saved_data)
            row_data = []
            for cell, key in enumerate(data_keys):
                row_data.append(data.get(key, ''))

            writerow(row_data)

        data = queue.getvalue()
        response.write(data)
        return response
Code Example #48
def malc0deCsv():
    """Build current http://malc0de.com/database/ CSV file.
    
    This is the IRS UNIX support position pre-interview
    programming assignment for candidate David L. Craig.

    This is his first ever Python project, although he
    looked at Python 3 on April 4, 2012 to help his
    oldest son debug a problem, and scanned the Linux Mint
    live installer for debugging an install much further
    back in time.  There is surely a more elegant solution
    than this.

    The development included about 12 hours absorbing the
    Python 2.7.3 tutorial, another 11 obtaining and
    learning bs4, and another 2 for using csv; all while
    trying to squeeze in previously planned happy holidays
    with the family.

    Project requirements:
    
    Hints
    ===
    
    import csv, datetime, requests, time
    
    from bs4 import BeautifulSoup
    
    
    Requirements
    ====
    
    * Utilize Python 2.7.3
    
    * Source URL => http://malc0de.com/database/
    
    * Scrape the following values from the page
    
    Date, File Name{If there is a file name in the URL},IP Address, ASN, ASN
    Name,MD5 hash
    
    * Note IP Addresses need to be written in the following format to the CSV
    file ->
    
    10{.}10{.}10{.}10
    
    * Write the product to a CSV file with file format
    
    Discovery Date,File Name,IP Address,ASN,ASN Name,MD5
    
    * CSV file format needs to be as follows
    
    "Malecode-Culled-Product-" + the_current_date_time + '.csv'
    
    * Post your code and output product to www.github.com and email back URL
    links to the code and the CSV output product to this email.
    """
    #-----------------------------------------------------------------------
    import csv, datetime, urllib2, sys, re
    from bs4 import BeautifulSoup
    from datetime import datetime, date, time
    the_current_date_time = datetime.utcnow().strftime("%Y_%m_%d-%H_%M_%S_UT")
    of = "Malecode-Culled-Product-" + the_current_date_time + '.csv'
    print "Creating file", of

    # The current column headers (Domain is assumed to be File Name)
    hl    = ['<th>Date</th>'                           ,
             '<th>Domain</th>'                         ,
             '<th>IP</th>'                             ,
             '<th>CC</th>'                             ,
             '<th>ASN</th>'                            ,
             '<th>Autonomous System Name</th>'         ,
             '<th>Click Md5 for VirusTotal Report</th>']
    ch = ('Discovery Date', # CSV headers tuple
          'File Name',
          'IP Address',
          'ASN',
          'ASN Name',
          'MD5')
    
    nr = 0 # accumulate number of data records processed
    with open(of, 'w') as f:
        try:
            csv = csv.writer(f)
        except IOError as e:
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
            raise
        except:
            print "Unexpected error:", sys.exc_info()[0]
            raise
        csv.writerow(ch)
        for pg in range(1, 99999): # for each page (break on empty table)
            murl = 'http://malc0de.com/database/?&page=' + str(pg)
            try:
                malhtml = BeautifulSoup(urllib2.urlopen(murl), "lxml")
            except IOError as e:
                print "I/O error({0}): {1}".format(e.errno, e.strerror)
                raise
            except:
                print "Unexpected error:", sys.exc_info()[0]
                raise
            if pg == 1: # first page only, verify the column headers
                hc = malhtml.table.tr.contents
                if len(hc) != 7:
                    print 'Table does not contain seven columns--aborting'
                    sys.exit()
                for hx in range(6):
                    if str(hc[hx]) != hl[hx]:
                        print 'Column', hx + 1, 'has an unexpected heading',
                        print '(' + str(hc[hx]) + ')--aborting'
                        sys.exit()
            td = malhtml.table.find_all("td") # Extract all td entries in the page
            for dx in range(0, len(td), 7): # For every row of seven td entries:
                # print 'dx:', str(dx)
                rdt = BeautifulSoup(str(td[dx + 0]))
                vdt = rdt.td.string.encode('utf-8')
                # print 'vdt:', str(vdt)
                rfn = BeautifulSoup(str(td[dx + 1]))
                # strip '<td> ' and '</td>' (some rfn.td values don't string (?) )
                # vfn = rfn.td.prettify()[5:-6]
                try:
                    vfn = rfn.td.string.encode('utf-8')
                except:
                    vfn = rfn.td.string
                # print 'vfn:', str(vfn)
                rip = BeautifulSoup(str(td[dx + 2]))
                vip = re.sub('[.]', '{.}', rip.td.a.string, count=3)
                # print 'vip:', str(vip)
                rcc = BeautifulSoup(str(td[dx + 3]))
                vcc = rcc.td.a.string.encode('utf-8')
                # print 'vcc:', str(vcc)
                rnu = BeautifulSoup(str(td[dx + 4]))
                vnu = rnu.td.a.string.encode('utf-8')
                # print 'vnu:', str(vnu)
                rna = BeautifulSoup(str(td[dx + 5]))
                try:
                    vna = rna.td.string.encode('utf-8')
                except:
                    vna = rna.td.string
                # print 'vna:', str(vna)
                rmd = BeautifulSoup(str(td[dx + 6]))
                vmd = rmd.td.a.string.encode('utf-8')
                # print 'vmd:', str(vmd)
                # print '-' * 50
                csv.writerow((vdt, vfn, vip, vcc, vnu, vna, vmd))
            print 'Data rows for page', str(pg) + ':', len(td) / 7
            nr = nr + len(td) / 7
            if len(td) == 0:
                break
    print 'Total data rows:', str(nr)
Code Example #49
File: generate_data.py, Project: atyndall/cits3230
    sys.stdout.flush()
    end_time = time.time()
    return [seed, rate, corrupt, loss, 'failure', e.output, (end_time - start_time)]
    
  end_time = time.time()
  print "COMPLETE: cnet %d with s=%d, r=%s, c=%d, l=%d" % (i, seed, rate, corrupt, loss)
  sys.stdout.flush()
  
  csvline = [seed, rate, corrupt, loss, 'success', res, (end_time - start_time)]
  
  for line in res.split('\n')[1:16]:
    r = resreg.search(line)
    csvline.append(r.groups()[1])
    
  return csvline
  
with open('out.csv', 'wb') as csvf:
  csv = csv.writer(csvf)
  csv.writerow(headers)
  csvf.flush()
  
  pool = multiprocessing.Pool(PROCESSES)
  
  products = itertools.product(seeds, rates, corruptions, losses)

  for result in pool.imap_unordered(compute, enumerate(products)):
    csv.writerow(result)
    csvf.flush()
    
  pool.terminate()
Code Example #50
File: dump_csv.py, Project: gkralik/lightspeed
#!/usr/bin/env python
import os
import sys
import csv
import sqlite3

base_dir = os.path.dirname(os.path.realpath(__file__))
db_path = os.path.join(base_dir, 'db/lightspeed.db')

if len(sys.argv) == 2:
    db_path = os.path.realpath(sys.argv[1])

conn = None
try:
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    fieldnames = ['ID', 'Ping (ms)', 'Download (Mbit/s)',
                  'Upload (Mbit/s)', 'Timestamp', 'Duration (s)', 'Error']

    csv = csv.writer(sys.stdout, delimiter=';', quoting=csv.QUOTE_MINIMAL)

    result = c.execute('SELECT * FROM measurements')

    csv.writerow(fieldnames)
    csv.writerows(result)
except sqlite3.Error as e:
    print('Error:', e.args[0])
finally:
    if conn:
        conn.close()
Code Example #51
File: item_state.py, Project: SerpentCS/ebay
 def action_export(self, cr, uid, ids, context=None):
     def rss(element=None):
         if element is not None:
             return ET.tostring(element, 'utf-8')
         return ET.Element('rss', version='2.0')
     
     def rss_channel(rss):
         rss_channel = ET.SubElement(rss, 'channel')
         title = ET.SubElement(rss_channel, 'title')
         title.text = 'title'
         link = ET.SubElement(rss_channel, 'link')
         link.text = 'link'
         description = ET.SubElement(rss_channel, 'description')
         description.text = 'description'
         return rss_channel
     
     def rss_channel_item(channel, item):
         channel_item = ET.SubElement(channel, 'item')
         title = ET.SubElement(channel_item, 'title')
         title.text = item.name
         link = ET.SubElement(channel_item, 'link')
         if item.ebay_user_id.sandbox:
             link.text = "http://cgi.sandbox.ebay.com/ws/eBayISAPI.dll?ViewItem&item=%s" % item.item_id
         else:
             link.text = "http://cgi.ebay.com/ws/eBayISAPI.dll?ViewItem&item=%s" % item.item_id
         description = ET.SubElement(channel_item, 'description')
         description.text = 'description'
         item_id = ET.SubElement(channel_item, 'itemID')
         item_id.text = item.item_id
         currency = ET.SubElement(channel_item, 'currency')
         currency.text = item.currency
         price = ET.SubElement(channel_item, 'price')
         price.text = str(item.start_price)
         return channel_item
     
     if context is None:
         context = {}
     ebay_item_obj = self.pool.get('ebay.item')
     this = self.browse(cr, uid, ids)[0]
     user = this.ebay_user_id
     domain = [('ebay_user_id', '=', user.id)]
     
     fp = cStringIO.StringIO()
     csv = UnicodeWriter(fp)
     csv.writerow(['sku', 'rss'])
     
     for id in ebay_item_obj.search(cr, uid, domain, context=context):
         item = ebay_item_obj.browse(cr, uid, id, context=context)
         id_occupy = [item.id]
         item_rss = rss()
         channel = rss_channel(item_rss)
         ebay_items = []
         for category in item.ebay_item_category_id:
             ebay_items.extend(category.ebay_item_ids)
         for itm in ebay_items:
             if itm.id not in id_occupy and itm.state == 'Active' \
                 and itm.listing_type == 'FixedPriceItem' \
                 and itm.ebay_user_id.id == user.id:
                 id_occupy.append(itm.id)
                 rss_channel_item(channel, itm)
                 # 4 x 3 table
                 if len(id_occupy) == 12 + 1:
                     break
         if len(id_occupy) > 1:
             csv.writerow([str(item.id), rss(item_rss)])
             
     gz_data = cStringIO.StringIO()
     gz = gzip.GzipFile(filename='dandelion-rss', mode='wb', fileobj=gz_data)
     gz.write(fp.getvalue())
     gz.close()
     
     out = base64.encodestring(gz_data.getvalue())
     gz_data.close()
     fp.close()
     
     this.name = "dandelion-rss-%s.gz" % (datetime.now().strftime('%Y%m%d-%H%M%S'))
     self.write(cr, uid, this.id, {'state': 'download',
                               'data': out,
                               'name': this.name}, context=context)
     
     return {
         'name': "Export Inventory RSS",
         'type': 'ir.actions.act_window',
         'res_model': 'ebay.item.rss',
         'view_mode': 'form',
         'view_type': 'form',
         'res_id': this.id,
         'views': [(False, 'form')],
         'target': 'new',
     }
Code Example #52
File: Random_Benchmark.py, Project: emhuff/DarkWorlds
if __name__ == "__main__":


  n_skies=file_len('../Data/Test_haloCounts.csv')-1 # Test set only, doesn't train
     
  position_halo=np.zeros([n_skies,2,3],float) #Set up the array in which I will
                                                #assign my estimated positions
    
  nhalo=np.loadtxt('../Data/Test_haloCounts.csv',\
                   usecols=(1,),delimiter=',',skiprows=1) #Load in the number
                                                          #of halos for each sky



  c = csv.writer(open("../Data/Random_Benchmark_test.csv", "wb")) #Now write the array to a csv file
  c.writerow([str('SkyId'),str('pred_x1'),str( 'pred_y1'),str( 'pred_x2'),str( 'pred_y2'),str( 'pred_x3'),str(' pred_y3')])
  for k in xrange(n_skies):

    for n in xrange(int(nhalo[k])):
        position_halo[k,0,n]=rd.random()*4200.
        position_halo[k,1,n]=rd.random()*4200.
        
    halostr=['Sky'+str(k+1)] #Create a string that will write to the file
                             #and give the first element the sky_id
    for n in xrange(3):
      halostr.append(position_halo[k,0,n]) #Assign each of the
                                           #halo x and y positions to the string
      halostr.append(position_halo[k,1,n])
    c.writerow(halostr) #Write the string to a csv
                        #file with the sky_id and the estimated positions
    
Code Example #53
File: bubble2-all.py, Project: NicoletteS/django101
        post = False
        texts.append(text)
    if line.find("<h4 class='small'>") != -1:
        date_found = True
    elif date_found:
        create_date(line)
        date_found = False

#here we create a csv file with the output of sentiment analysis
import csv
import subprocess
import json

c = open("C:/Users/Nicolette/djangogirls/django101/social/static/social/data-all.csv", "wb")
csv = csv.writer(c)
csv.writerow(["Hashtag", "Positive", "Negative", "Neutral", "Day", "Month", "Year"])

#extract hashtags and texts
counter = 0
for text in texts:
    hashtags = []
    message = ""
    for word in text.split():
        if word[0] == "*" or word[0] == "#":
            str = word.lower()
            if str.endswith(",") or str.endswith("!") or str.endswith("."):
                str = str[:-1]
            hashtags.append(str)
        else:
            message += word + " "
    
Code Example #54
    return i + 1


if __name__ == "__main__":

    n_skies = file_len("../data/Test_haloCounts.csv") - 1  # Test set only, doesn't train

    position_halo = np.zeros([n_skies, 2, 3], float)  # Set up the array in which I will
    # assign my estimated positions

    nhalo = np.loadtxt("../data/Test_haloCounts.csv", usecols=(1,), delimiter=",", skiprows=1)  # Load in the number
    # of halos for each sky

    c = csv.writer(open("Random_Benchmark.csv", "wb"))  # Now write the array to a csv file
    c.writerow(
        [str("SkyId"), str("pred_x1"), str("pred_y1"), str("pred_x2"), str("pred_y2"), str("pred_x3"), str(" pred_y3")]
    )
    for k in xrange(n_skies):

        for n in xrange(int(nhalo[k])):
            position_halo[k, 0, n] = rd.random() * 4200.0
            position_halo[k, 1, n] = rd.random() * 4200.0

        halostr = ["Sky" + str(k + 1)]  # Create a string that will write to the file
        # and give the first element the sky_id
        for n in xrange(3):
            halostr.append(position_halo[k, 0, n])  # Assign each of the
            # halo x and y positions to the string
            halostr.append(position_halo[k, 1, n])
        c.writerow(halostr)  # Write the string to a csv
        # file with the sky_id and the estimated positions
Code Example #55
File: sensor_logging.py, Project: gregjan/weather-pi
      #Wind Dir:
      # These need to be mapped to a direction. Right now record raw, we'll convert later or in the graphs.
      winddir = readadc(winddir_adc, SPICLK, SPIMOSI, SPIMISO, SPICS)
      winddir = direction(winddir)

      #Wind Speed
      #interrupt: 1 = 180deg, 2 interrupts = 1 full rotation.
      #Like Rain, this is just recording "hits".
      windspeed = windspeed_count
      windspeed_count = 0

      #Record to CSV
      #todo provide current time as first element in data
      timenow = 0
      data = [ timenow, bmpaltitude, bmptempF, bmppressure, bmpsealevelpressure, dhthumidity, dewpoint, heatindex, winddir, windspeed, rain ]
      print "Data: ",
      print (data)
      csv.writerow(data)

      #Sleep
      time.sleep(10) #set to whatever

  except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
    print "\nKilling Thread..."
    runner = False
  print "Almost done."
  fp.close()
  GPIO.cleanup()
  print "Done.\nExiting."
  exit()
Code Example #56
File: bingoloader.py, Project: kbuzsaki/bingoloader
 def writeToCsv(self, csv):
     csv.writerow(["race id: ", self.raceid, self.raceUrl])
     csv.writerow(["bingo seed: ", self.board.seed, self.bingoUrl])
     csv.writerow(["bingo version: ", self.board.version])
     csv.writerow(["date: ", self.date])
     csv.writerow(["goal: ", self.goal])
     csv.writerow([])
     csv.writerow(["goals"])
     for goalsRow in self.board.goalsGrid:
         csv.writerow(goalsRow)
     csv.writerow([])
     csv.writerow(["results"])
     for rank, result in enumerate(self.results):
         csv.writerow([rank + 1] + result.getInfo())
Code Example #57
File: sired.py, Project: valem06/pyafipws
            for dif in difs:
                print dif
                
            sired.EstablecerParametro("cae", "61123022925855")
            sired.EstablecerParametro("fch_venc_cae", "20110320")
            sired.EstablecerParametro("motivo_obs", "")
            ok = sired.ActualizarFactura(id_factura)
            ok = sired.ObtenerFactura(id_factura)
            assert sired.factura["cae"] == "61123022925855"

            sys.exit(0)

        if '--leer' in sys.argv:
            claves = [clave for clave, pos, leng in VENTAS_TIPO1 if clave not in ('tipo','info_adic')]
            csv = csv.DictWriter(open("ventas.csv","wb"), claves, extrasaction='ignore')
            csv.writerow(dict([(k, k) for k in claves]))
            f = open("VENTAS.txt")
            for linea in f:
                if str(linea[0])=='1':
                    datos = leer(linea, VENTAS_TIPO1)
                    csv.writerow(datos)
            f.close()
        else:
            # cargar datos desde planillas CSV separadas o JSON:
            if entrada['encabezados'].lower().endswith("csv"):
                facturas = items = leer_planilla(entrada['encabezados'], ";")
                if 'detalles' in entrada:
                    detalles = leer_planilla(entrada['detalles'], ";")

                # pre-procesar:
                for factura in facturas:
Code Example #58
    print hmtl
    return parseTableDistritos(hmtl)

#number, street, city, countr
def geolocate(zona=''):
    if zona in geocache:
        gps = geocache.get(zona).split(',')
        print 'geolocate %s from cache' % zona
        return "%s,%s" % (gps[0] or '9.9280694', gps[1] or '-84.0907246')
    g = geocoder.google("%s, Costa Rica" % zona)
    geocache[zona] = "%s,%s" % (g.lat or '9.9280694', g.lng or '-84.0907246')
    return  geocache[zona]


datos = {}
datos['asalto'] = query(Key='asalto',Year=_YEAR)
datos['roboVeh']= query(Key='roboVeh',Year=_YEAR)
datos['robo']= query(Key='robo',Year=_YEAR)
datos['hurto']= query(Key='hurto',Year=_YEAR)
datos['tachaVeh']= query(Key='tachaVeh',Year=_YEAR)
datos['homicidios']= query(Key='homicidios',Year=_YEAR)



with open('exports.csv', 'wb') as csvfile:
    csv = csv.writer(csvfile, delimiter='|', quoting=csv.QUOTE_MINIMAL)
    for col1 in datos:
        for k,v in datos[col1].items():
            # tipo,zona1,cantidad,ano,gps
            csv.writerow([col1,k,v,_YEAR,geolocate(zona=k)])
Code Example #59
File: bubble2-star2.py, Project: NicoletteS/django101
        post = True
        text = ""
    if post:
        stripped_line = line.strip().replace("<li class='list-group-item'>", "").replace("</li>", "")
        text += stripped_line
    if line.find("</li>") != -1 and post:
        post = False
        texts.append(text)
#here we create a csv file with the output of sentiment analysis
import csv
import subprocess
import json

c = open("C:/Users/Nicolette/djangogirls/django101/social/static/social/data-star.csv", "wb")
csv = csv.writer(c)
csv.writerow(["Hashtag", "Positive", "Negative", "Neutral", "Day", "Month", "Year"])

#extract hashtags and texts
counter = 0
for text in texts:
    hashtags = []
    message = ""
    for word in text.split():
        if word[0] == "*":
            str = word.lower()
            if str.endswith(",") or str.endswith("!") or str.endswith("."):
                str = str[:-1]
            hashtags.append(str)
        else:
            message += word + " "