Example 1
def nepalonline(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("napa.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        # wr.writerow(["Input Part #","OE (Competitor Brand)","Competitor Part No.","Output – (WVE Part No.)","Part Type (Description)"])
        wr.writerow([
            "Input Part", "Output – Part#", "Mfg and Part Type",
            "OE(Product Line)", "Online Price"
        ])

        total = len(duration)
        for i, row in enumerate(duration):
            a = webscraper_nepalonline(row)
            print(a)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                wr.writerow(b)
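            # Poll a database flag so an operator can stop the scrape mid-run;
            # the same pattern repeats in the scraper tasks below.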
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break
    a = File.objects.all().count() + 1
    filename = "napaonline" + str(a) + ".xlsx"
    folder = "NapaOnline"
    url = store_s3(filecsv="napa.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)
    return url
Example 2
def go_to_sleep(self, duration):
    progress_recorder = ProgressRecorder(self)

    for i in range(5):
        sleep(duration)
        progress_recorder.set_progress(i + 1, 5, f'On {i}')
    return 'Done'
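
Every snippet in this listing assumes the function is registered as a bound Celery task so that ProgressRecorder(self) can attach to it. A minimal sketch of that wiring, following celery-progress's documented usage (the .delay() call is one assumed way a caller might enqueue it):

from celery import shared_task
from celery_progress.backend import ProgressRecorder

@shared_task(bind=True)  # bind=True exposes the task instance as `self`
def go_to_sleep(self, duration):
    ...

# Enqueue from a view and hand the id to the frontend progress bar:
# result = go_to_sleep.delay(1)
# task_id = result.task_id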
Example 3
def wve(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("wve.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        # wr.writerow(["Input Part #","OE (Competitor Brand)","Competitor Part No.","Output – (WVE Part No.)","Part Type (Description)"])
        wr.writerow([
            "Input Part # (Mfg. Part Number)", "OE (Comp/OE)",
            "Output – Part#", "Part Type"
        ])

        total = len(duration)
        for i, row in enumerate(duration):
            a = scraper_WVE(row)
            print(a)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                try:
                    b.pop(2)
                except IndexError:  # some rows are shorter than expected
                    pass
                print(b)
                wr.writerow(b)
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break
    a = File.objects.all().count() + 1
    filename = "wve" + str(a) + ".xlsx"
    folder = "WVE"
    url = store_s3(filecsv="wve.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)
    return url
Example 4
def scraping_routine(self, start_id, end_id):
    """Iterates through a given range of numbers and checks the recreation.gov API to see if there
    are any matching campground ID numbers. If a campground ID has already been saved, then it is
    updated. If not then a new campground object is created.
    :param start_id:    lower bound of the search range
    :type start_id:     str
    :param end_id:      upper bound of the search range
    :type end_id:       str
    """
    # Instantiate progress recorder to report progress to users
    progress_recorder = ProgressRecorder(self)
    # Check what is already saved in database
    status_quo = Campground.objects.values_list('camp_id', flat=True)
    # Prepare variables
    start_id = int(start_id)
    end_id = int(end_id) + 1
    job_count = end_id - start_id
    DATA = []
    i = 1
    for camp_id in range(start_id, end_id):
        data = scrape_camp_info(camp_id, status_quo)
        DATA.append(data)
        progress_recorder.set_progress(
            i,
            job_count,
            description=f"{i} out of {job_count} potential campground IDs checked")
        i += 1
    count = len(DATA)
    return {"count": count, "data": DATA}
Example 5
    def run(self, contact_info_id, *args, **kwargs):
        '''
        Returns task process details.
        Appends each contact row to the contact table.

        Parameters:
        contact_info_id (int): contact info pk param.
        '''
        progress_recorder = ProgressRecorder(self)
        contact_info = self.get_contact_info(contact_info_id)
        try:
            dataframe = DataframeUtil.get_validated_dataframe(contact_info.document.path)
            total_record = dataframe.shape[0]
            for index, row in dataframe.iterrows():
                contact = self.insert_into_row(row)
                if contact:
                    contact_info.contacts.add(contact)
                
                # Set progress status in the result backend to notify the client.
                progress_recorder.set_progress(
                    index + 1, total=total_record, description="Inserting row into table"
                )
                print("Inserting row %s into table" % index)

            return {
                "detail": "Successfully imported users"
            }
        except Exception as e:
            contact_info.is_success = False
            contact_info.reason = str(e)
            contact_info.save()
Example 6
def update_progress(self, proc):
	# Create progress recorder instance
	progress_recorder = ProgressRecorder(self)

	while True:
		# Read wget process output line-by-line
		line = proc.stdout.readline()

		# If line is empty: break loop (wget process completed)
		if line == b'':
			break

		linestr = line.decode('utf-8')
		if '%' in linestr:
			# Find percentage value using regex (require at least one digit)
			matches = re.findall('[0-9]{1,3}(?=%)', linestr)
			if matches:
				percentage = matches[0]
				# Print percentage value (celery worker output)
				print('TASK' + str(percentage))
				# Build description
				progress_description = 'Downloading (' + str(percentage) + '%)'
				# Update progress recorder
				progress_recorder.set_progress(int(percentage), 100, description=progress_description)
		else:
			# Print line
			print('TASK' + str(linestr))
			
		# Sleep for 100ms
		sleep(0.1)
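
The wget process handle proc is created outside this snippet. A minimal sketch of how it might be started, assuming wget is on PATH; wget reports progress on stderr, so that stream has to be merged into stdout (the URL is hypothetical):

import subprocess

proc = subprocess.Popen(
    ['wget', '--progress=dot', 'https://example.com/file.bin'],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # wget writes progress to stderr
)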
Example 7
def northvilletask(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    total = len(duration)
    whole_data = []
    for i, row in enumerate(duration):
        a = scrape_northville(row)
        if a:
            whole_data = whole_data + a
        print("++++++++++++++++++++++++++++++++++++++++++++++++++")
        progress_recorder.set_progress(i + 1, total, row)
        check = Switch_Scrap.objects.all()[0]
        stop = check.stop
        if stop:
            break
    a = File.objects.all().count() + 1
    merged_df = pd.concat(whole_data)
    merged_df.to_csv('northville.csv')
    folder = "northville"
    filename = "northville{0}.csv".format(str(a))
    url = northville_s3(filecsv="northville.csv",
                        folder=folder,
                        filename=filename,
                        FileName=fileName)
    return url
Example 8
def import_zip_codes(self, fileid):
    log.info("ZipImporter started. Upload-PK: {}".format(fileid))
    progress_recorder = ProgressRecorder(self)
    current = 0
    csvf = Upload.objects.get(pk=fileid)
    with open(csvf.record.path, 'r', encoding='utf-8') as f:
        total = sum(1 for line in f)
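        # Note: this count includes the skipped header row, so progress
        # peaks at total - 1 out of total.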
        log.info("Total Rows to import: {}".format(total))
        f.seek(0)
        next(f, None)  #Skip Header
        reader = csv.DictReader(f,
                                fieldnames=('osm_id', 'ort', 'plz',
                                            'bundesland'),
                                delimiter=';')
        Zip_Code.objects.all().delete()
        for counter, row in enumerate(reader):
            content = dict(row)
            try:
                new = Zip_Code.objects.create(zip_code=content['plz'],
                                              city=content['ort'],
                                              state=content['bundesland'])
                new.save()
            except Exception as e:
                log.debug("Error occured")
                log.debug(content)
                log.debug(e)
                pass
            current = counter + 1
            progress_recorder.set_progress(current, total)
    csvf.finished = True
    csvf.save()
    log.info("Task completed")
    print("")
    print("Fertig")
    return 'Done'
Example 9
def enviarCodigoTask(self, IPAddress, codigo, filename, autor, kp, ki, kd, ref, rep):

    progress_recorder = ProgressRecorder(self)
    progress_recorder.set_progress(0, 3)

    esp = MicropythonESP(IPAddress, progress_recorder)
    esp.runexperiment(codigo, filename)

    dfilename = filename + ".csv"
    vfilename = filename + ".mp4"
    gfilename = filename + ".png"

    os.rename("temp/"+filename+".csv", "media/gangorra/csv/"+filename+".csv")

    dados = pd.read_csv("media/gangorra/csv/" + filename + ".csv")
    grafico = dados.plot.line()
    grafico.figure.savefig("media/gangorra/graficos/" + filename + ".png")

    user = User.objects.get(username=autor)
    r = ExperimentoGangorra(title=filename,
                            author=user,
                            modelo_kp=kp,
                            modelo_ki=ki,
                            modelo_kd=kd,
                            modelo_referencia=ref,
                            modelo_repeticoes=rep,
                            csvArquivo=dfilename,
                            videoArquivo=vfilename,
                            graficoArquivo=gfilename)
    r.publish()
Example 10
def summary_summarize_task(self, pk):
    ''' Retrieve csv file from S3,
        read it into a dataframe,
        get a summarization of each document,
        add each summary to a list,
        and add the lists to a dictionary.
    '''
    progress_recorder = ProgressRecorder(self)  # create progress recorder object
    doc = Summary_Documents.objects.get(pk=pk)  # get the document ref from the database
    documentName = str(doc.document)  # get the name of the doc
    aws_id = os.environ.get('AWS_ACCESS_KEY_ID')  # aws access
    aws_secret = os.environ.get('AWS_SECRET_ACCESS_KEY')  #aws access
    REGION = 'eu-west-1'
    client = boto3.client(
        's3',
        region_name=REGION,
        aws_access_key_id=aws_id,
        aws_secret_access_key=aws_secret
    )  # create the client to retrieve the file from storage
    bucket_name = "doc-sort-file-upload"
    object_key = documentName
    csv_obj = client.get_object(Bucket=bucket_name, Key=object_key)
    body = csv_obj['Body']
    csv_string = body.read().decode('utf-8')
    data = pd.read_csv(StringIO(csv_string))  # read csv into dataframe
    documents = data['content']
    docs_summarized = 0
    docs_not_summarized = 0
    total_docs = 0
    documents_len = []
    summary_len = []
    result = []  # new column to hold result integer (0,1,2)value
    count = 0
    for doc in documents:  # iterate through filtered list
        documents_len.append(len(doc))
        summary = summarize(doc, ratio=0.03)  # get summary
        summary_len.append(len(summary))
        total_docs += 1
        if not summary:  # the summarizer returns an empty summary for very short documents
            result.append("Document too short")
            docs_not_summarized += 1
        else:
            result.append(summary)
            docs_summarized += 1
        progress_recorder.set_progress(count + 1,
                                       len(documents))  # update progress
        count += 1  # update count

    summary_dict = {}

    # Adding list as value
    summary_dict["Result"] = result
    summary_dict["Total_docs"] = total_docs
    summary_dict["Docs_summarized"] = docs_summarized
    summary_dict["Docs_not_summarized"] = docs_not_summarized
    summary_dict["Documents_len"] = documents_len
    summary_dict["Summary_len"] = summary_len

    return summary_dict
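
The summarize call above matches gensim's extractive summarizer, which shipped through gensim 3.x and was removed in 4.0. If that is the library in use (an assumption; the snippet's imports are not shown), the import would be:

# Assumption: gensim < 4.0, whose summarization module was later removed.
from gensim.summarization import summarize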
Example 11
def brute_buckets(self, keyword):
    progress_recorder = ProgressRecorder(self)

    total = len(buckets_bruteforce)
    k = []
    for c, i in enumerate(buckets_bruteforce):
        req = requests.get("https://" + keyword + i.rstrip() + ".s3.amazonaws.com", verify=False)

        if req.status_code in (200, 403):
            am = AmazonBuckets(bucket=keyword + i.rstrip() + ".s3.amazonaws.com", confirmed=False,for_later=False)
            am.save()

        progress_recorder.set_progress(c+1, total=total)

        self.update_state(state="PROGRESS",
                          meta={"results": "https://" + keyword + i.rstrip() + ".s3.amazonaws.com",
                                "code": req.status_code, "percentage": c / total * 100})

    self.update_state(state="SUCCESS",
                      meta={"type": 'amazon', "total": total})

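    # Raising Ignore (from celery.exceptions) tells the worker not to
    # overwrite the manually-set SUCCESS state with the task's return value.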
    raise Ignore()
Example 12
def sairtex(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("air.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(["Input Part #", "Output Part#", "OE", "Part Name"])
        total = len(duration)
        for i, row in enumerate(duration):
            a = airtex(row)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                wr.writerow(b)
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break

    a = File.objects.all().count() + 1
    filename = "Airtex" + str(a) + ".xlsx"
    folder = "Airtex"
    url = store_s3(filecsv="air.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)

    return url
Example 13
def opticat(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("opticat.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow([
            "Input Part # (Mfg. Part Number)", "Output - Part Number#",
            "Manufacturer", "	OE (Item/part Description)",
            "OE Number (Item/Part Description)"
        ])
        total = len(duration)
        for i, row in enumerate(duration):
            a = scraper_opticat(row)
            print(a)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                wr.writerow(b)
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break

    a = File.objects.all().count() + 1
    filename = "opticat" + str(a) + ".xlsx"
    folder = "Opticat"
    url = store_s3(filecsv="opticat.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)
    return url
Example 14
def my_task(self, seconds):
    progress_recorder = ProgressRecorder(self)
    for i in range(seconds):
        print('123123')
        time.sleep(1)
        progress_recorder.set_progress(i + 1, seconds)
    return 'done'
Example 15
def bwd(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("bwd.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow([
            "Input Part # (Mfg. Part Number)", "Output - Part Number#",
            "Part Type (Product Mfg. Name)",
            "OE Number (Item/Part Description)"
        ])
        total = len(duration)
        for i, row in enumerate(duration):
            a = scraper_BWD(row)
            print(a)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                try:
                    b.pop(3)
                except IndexError:  # some rows are shorter than expected
                    pass
                wr.writerow(b)
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break
    a = File.objects.all().count() + 1
    filename = "bwd" + str(a) + ".xlsx"
    folder = "BWD"
    url = store_s3(filecsv="bwd.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)
    return url
Example 16
def autoparts(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("DENSAutoparts.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow([
            "Input Part # (Mfg. Part Number)", "Manufacturer", "Part Type",
            "DENSO Part Number"
        ])
        total = len(duration)
        for i, row in enumerate(duration):
            a = scraper_densoautoparts(row)
            print(a)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                print(row)
                wr.writerow(b)
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break
    a = File.objects.all().count() + 1
    filename = "DENSAutoparts" + str(a) + ".xlsx"
    folder = "densoautoparts"
    url = store_s3(filecsv="DENSAutoparts.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)
    return url
Example 17
def webmotors(self, duration, fileName):
    progress_recorder = ProgressRecorder(self)
    with open("motor.csv", 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow([
            "Input Part # (Mfg. Part Number)", "Output - Part Number#",
            "Source", "Part Type"
        ])
        total = len(duration)
        for i, row in enumerate(duration):
            a = scraper_usmotorworks(row)
            print(a)
            progress_recorder.set_progress(i + 1, total, row)
            for b in a:
                wr.writerow(b)
            check = Switch_Scrap.objects.all()[0]
            stop = check.stop
            if stop:
                break

    a = File.objects.all().count() + 1
    filename = "Usmotor" + str(a) + ".xlsx"

    folder = "Usmotor"
    url = store_s3(filecsv="motor.csv",
                   folder=folder,
                   filename=filename,
                   FileName=fileName)
    return url
Example 18
def get_nasa_image(self, seconds):
    progress_recorder = ProgressRecorder(self)
    result = 0
    for i in range(seconds):
        time.sleep(4)
        result += i

        start_date = datetime.datetime.strptime('06/16/1995', '%m/%d/%Y')
        end_date = datetime.datetime.now()
        time_between_dates = end_date - start_date
        days_between_dates = time_between_dates.days
        # Loop until a random picture-of-the-day entry is a static image file
        while True:
            random_number_of_days = random.randrange(days_between_dates)
            random_date = start_date + datetime.timedelta(
                days=random_number_of_days)
            nasa_image_result = nasa.picture_of_the_day(random_date)

            if 'url' in nasa_image_result and nasa_image_result['url'].endswith(
                ('.bmp', '.gif', '.heif', '.jpeg', '.jpg', '.png', '.svg',
                 '.webp')):
                break

        progress_recorder.set_progress(i + 1,
                                       seconds,
                                       description=nasa_image_result)
    return result
Example 19
def generate_playlist(self, playlist_url):
    progress_recorder = ProgressRecorder(self)
    token = get_access_token()
    sp = get_spotify_client(token)
    uid = sp.current_user()['id']
    html = fetch_url(playlist_url)
    data = AppleMusicParser(html).extract_data()
    playlist_title = data['playlist_title']
    tracks = data['tracks']
    creator = data['playlist_creator']
    n = len(tracks)
    playlist = sp.user_playlist_create(uid, playlist_title, description=f'Originally created by {creator} on Apple Music[{playlist_url}].')
    playlist_id = playlist['id']
    tracks_uris = []
    try:
        for i, track in enumerate(tracks):
            try:
                results = sp.search(f'{track.title} {track.artist} {track.featuring}', limit=1)
                track_uri = results['tracks']['items'][0]["uri"]
                tracks_uris.append(track_uri)
                progress_recorder.set_progress(i+1, n)
            except IndexError:
                continue
        #You can add a maximum of 100 tracks per request.
        if len(tracks_uris) > 100:
            for chunk in grouper(100, tracks_uris):
                sp.user_playlist_add_tracks(uid, playlist_id, chunk)
        else:
            sp.user_playlist_add_tracks(uid, playlist_id, tracks_uris)
    except SpotifyException as e:
        # Delete playlist if error occurs while adding songs
        sp.user_playlist_unfollow(uid, playlist_id)
        raise e
    url = playlist['external_urls']['spotify']
    return url
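
The grouper helper used above is not shown. A minimal sketch based on the classic itertools slicing recipe, written so the final chunk is simply shorter rather than padded (padding with None would break the Spotify call):

from itertools import islice

def grouper(n, iterable):
    # Yield successive lists of at most n items from iterable.
    it = iter(iterable)
    while True:
        chunk = list(islice(it, n))
        if not chunk:
            return
        yield chunk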
Example 20
def sentiment_test_task(self, seconds):
    progress_recorder = ProgressRecorder(self)
    result = 0
    for i in range(seconds):
        time.sleep(1)
        result += i
        progress_recorder.set_progress(i + 1, seconds)
    return result
Example 21
def my_task(self, seconds):
    progress_recorder = ProgressRecorder(self)
    result = 0
    for i in range(seconds):
        sleep(1)
        result += i
        progress_recorder.set_progress(i + 1, seconds)
    return result
Example 22
def http_error_task(self, number):
    progress_recorder = ProgressRecorder(self)
    for i in range(number):
        time.sleep(.1)
        progress_recorder.set_progress(i + 1, number)
        if i == int(number / 2):
            raise StopIteration('We broke it!')
    return random() * 1000
Example 23
def dummy_modify_video(self, vid_name):

    progress_recorder = ProgressRecorder(self)
    print("Progress recorder object :", progress_recorder)

    print("Modifying Video....\n")
    #video_name = video_name[1:] #Need to remove the first / which is prepended - ie video name is /media/1.1.1.mov - but need media/1.1.1.mov
    print("Attempting to open: " + vid_name)

    print("About to use OpenCV VideoCapture!")
    print("Vid_name data type: ", type(vid_name))
    video = cv2.VideoCapture(str(vid_name))
    print("Just used OpenCV VideoCapture!")

    if not video.isOpened():
        print("Error opening video stream or file...\n")
    else:
        w = int(video.get(3))
        h = int(video.get(4))
        print(w, h)

        BASE_DIR_FOR_DELETION = os.path.dirname(os.path.dirname(__file__))
        fs = FileSystemStorage(os.path.join(BASE_DIR_FOR_DELETION, 'static'))
        print("Deleting previously edited video")
        fs.delete('edited_videoMP4STATIC.mp4')

        fourcc = cv2.VideoWriter_fourcc(*'H264')  # WORKING ON WINDOWS :D :D

        out = cv2.VideoWriter('static/edited_videoMP4STATIC.mp4', fourcc, 20.0,
                              (w, h))  # WORKING ON WINDOWS!!!

        frame_number = -1
        frame_limit = 500
        while video.isOpened() and (frame_number < frame_limit):

            frame_number += 1
            ret, frame = video.read()
            print(frame_number)

            progress_recorder.set_progress(frame_number, frame_limit)

            if ret:
                frame = cv2.flip(frame, 0)
                # write the flipped frame
                out.write(frame)

                #cv2.imshow('frame',frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            else:
                break

        # Release everything if job is finished
        video.release()
        out.release()
        cv2.destroyAllWindows()
        print("Done")
        return None
Example 24
def import_workshops(self, fileid):
    log.info("Workshop Importer started.Upload-PK: {}".format(fileid))
    progress_recorder = ProgressRecorder(self)
    current = 0
    csvf = Upload.objects.get(pk=fileid)
    with open(csvf.record.path, 'r', encoding='utf-8') as f:
        total = sum(1 for line in f)
        log.info("Total Rows to import: {}".format(total))
        f.seek(0)
        next(f, None)  #Skip Header
        count_created = 0
        count_updated = 0
        count_error = 0
        reader = csv.DictReader(
            f,
            fieldnames=('kuerzel', 'name', 'street', 'zip_code', 'phone',
                        'central_email', 'contact_email', 'wp_user'),
            delimiter=';')
        Workshop.objects.all().update(deleted=True)
        for counter, row in enumerate(reader):
            content = dict(row)
            c = get_zip_obj(content['zip_code'])
            try:
                new, created = Workshop.objects.update_or_create(
                    kuerzel=content['kuerzel'],
                    defaults={
                        'name': content['name'],
                        'street': content['street'],
                        'zip_code': content['zip_code'],
                        'phone': content['phone'],
                        'central_email': content['central_email'],
                        'contact_email': content['contact_email'],
                        'wp_user': content['wp_user'],
                        'city': c
                    })
                new.save()
                if created:
                    count_created += 1
                else:
                    count_updated += 1
            except Exception as e:
                log.debug("Error occured")
                log.debug(content)
                log.error(e)
                count_error += 1
                pass
            current = counter + 1
            progress_recorder.set_progress(current, total)
    Workshop.objects.filter(deleted=True).delete()
    log.info(
        "{} Workshops created, {} Workshops updated, {} failed to update/create"
        .format(count_created, count_updated, count_error))
    csvf.finished = True
    csvf.save()
    log.info("Task completed")
    print("Fertig")
    return "{} Workshops created, {} Workshops updated, {} failed to update/create".format(
        count_created, count_updated, count_error)
Example 25
    def make_tifs(self, animal, channel, njobs):
        """
        This method will:
            1. Fetch the sections from the database
            2. Yank the tif out of the czi file according to the index and channel with the bioformats tool.
            3. Then updates the database with updated meta information
        Args:
            animal: the prep id of the animal
            channel: the channel of the stack to process
            njobs: number of jobs for parallel computing

        Returns:
            number of tifs created
        """
        progress_recorder = ProgressRecorder(self)
        fileLocationManager = FileLocationManager(animal)
        INPUT = fileLocationManager.czi
        OUTPUT = fileLocationManager.tif
        os.makedirs(OUTPUT, exist_ok=True)
        sections = Section.objects.filter(prep_id=animal).filter(channel=channel)\
            .values('czi_file', 'file_name', 'scene_index',  'channel').distinct()

        commands = []
        for i, section in enumerate(sections):
            input_path = os.path.join(INPUT, section['czi_file'])
            output_path = os.path.join(OUTPUT, section['file_name'])
            progress_recorder.set_progress(i,
                                           len(sections),
                                           description='Creating tifs')

            if not os.path.exists(input_path):
                continue

            if os.path.exists(output_path):
                continue

            channel_index = str(int(section['channel']) - 1)
            cmd = [
                '/usr/local/share/bftools/bfconvert', '-bigtiff', '-separate',
                '-series',
                str(section['scene_index']), '-channel', channel_index,
                '-nooverwrite', input_path, output_path
            ]

            commands.append(cmd)

        with Pool(njobs) as p:
            p.map(workernoshell, commands)

        return len(sections)
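
Pool and workernoshell come from outside this snippet. A minimal sketch of compatible definitions, assuming Pool is multiprocessing's and re-sketching workernoshell as a plain no-shell subprocess runner (the project's real helper may differ):

from multiprocessing import Pool
import subprocess

def workernoshell(cmd):
    # Run one bfconvert command list without invoking a shell.
    subprocess.run(cmd, check=False)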
Example 26
def simulation_task(self, params):
    # Path to the sumocfg file and the simulation step count
    #SUMO_HOME = "/usr/share/sumo"
    progress_recorder = ProgressRecorder(self)
    sumo_parameters = 'simulation/sumoparams.json'
    sumoParams = None
    with open(sumo_parameters) as data_file:
        sumoParams = json.load(data_file)
    SUMO_HOME = sumoParams["sumo_path"]
    try:
        #os.environ["SUMO_HOME"] = "/home/fernando/sumo-git"
        os.environ["SUMO_HOME"] = SUMO_HOME
        tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
        sys.path.append(tools)
        import sumolib
        import traci as traci
        MEDIA = settings.MEDIA_ROOT
        try:
            path = os.path.join(MEDIA + params['simulation_path'], 'output')
            os.mkdir(path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                return os.path.join(MEDIA + params['simulation_path'],
                                    'output')
            return os.path.join(MEDIA + params['simulation_path'], 'output')
        PATH = MEDIA + params['simulation_whole_path']
        # Define the simulation outputs
        TRACE_OUT = MEDIA + params[
            'simulation_path'] + "output/resclima_trace_output.xml"
        EMISSION_OUT = MEDIA + params[
            'simulation_path'] + "output/resclima_emission_output.xml"
        SUMMARY_OUT = MEDIA + params[
            'simulation_path'] + "output/resclima_summary_output.xml"
        # Define the simulator path and run the simulation
        #sumoBinary = "/home/fernando/sumo-git/bin/sumo"
        sumoBinary = sumoParams["sumoBinary"]
        sumoCmd = [
            sumoBinary, "-c", PATH, "--fcd-output", TRACE_OUT,
            "--emission-output", EMISSION_OUT, "--summary", SUMMARY_OUT
        ]
        traci.start(sumoCmd, port=8888)
        print("Realizando la simulacion...")
        step = 0
        while step < params['simulation_step']:
            traci.simulationStep()
            # Your Simulation Script here
            print("Step:" + str(step))
            step += 1
            time.sleep(1)
            progress_recorder.set_progress(step, params['simulation_step'])
        traci.close()
        print("¡La simulacion ha terminado con exito!")
    except ImportError as e:
        traceback.print_exc()
        return str(e) + params['simulation_path']

    return 'The simulation finished successfully!'
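
The task reads its SUMO locations from simulation/sumoparams.json. A minimal example of that file, consistent with the two keys the code reads (the paths are hypothetical, echoing the ones commented out in the snippet):

# simulation/sumoparams.json
# {
#     "sumo_path": "/usr/share/sumo",
#     "sumoBinary": "/usr/share/sumo/bin/sumo"
# }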
Example 27
def update_progress(self, dayCount, intensityCount):
    # Create progress recorder instance
    progress_recorder = ProgressRecorder(self)

    if dayCount < intensityCount:
        progress_recorder.set_progress(int(dayCount), intensityCount)
        sleep(3)
    else:
        print('all done')
Example 28
def socialpath_main(self,name):

    progress_recorder = ProgressRecorder(self)
    total = len(functions_dict)
    result = 0
    for c, i in enumerate(functions_dict):
        functions_dict[i](name)
        result += c
        print(c)
        progress_recorder.set_progress(c + 1, total=total)
Example 29
def import_recipes(self, recipes, user):
    progress_recorder = ProgressRecorder(self)
    uploaded = 0
    number_of_recipes = len(recipes)
    for recipe in recipes:
        import_recipe(recipe, user)
        uploaded += 1
        progress_recorder.set_progress(uploaded, number_of_recipes,
                                       f"Uploaded {recipe['name']}")
    return uploaded
Example 30
def import_batches(self, batches, user):
    progress_recorder = ProgressRecorder(self)
    uploaded = 0
    number_of_batches = len(batches)
    for batch in batches:
        import_batch(batch, user)
        uploaded += 1
        progress_recorder.set_progress(uploaded, number_of_batches,
                                       f"Uploaded {batch['name']}")
    return uploaded
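
Every task above reports through celery-progress's result-backend plumbing; the browser side polls a URL that the package provides. A minimal sketch of that Django wiring, following the package's documented setup (placing it in the project urls module is an assumption):

# urls.py — expose the polling endpoint that celery-progress's JS helper calls
from django.urls import include, path

urlpatterns = [
    path('celery-progress/', include('celery_progress.urls')),
]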