Example #1
    def run(self, typeCode, endTime):

        plInstance, histData = plu.Pipeline(histInterval), None
        endTimeUNIX = utl.dateToUNIX(endTime)
        startDate = utl.getCurrentDateStr()
        priorDate = utl.datetimeDiff(startDate, 30)
        marketData = (self.ticker, self.tradeQuantity)
        systemData = (endTimeUNIX, histLag, systemLag, plInstance)

        if (self.ticker in cst.GDAX_TICKERS):
            gdaxTicker = cst.GDAX_TO_POLONIEX[self.ticker]
            histData = plInstance.getCryptoHistoricalData(
                gdaxTicker, priorDate, startDate)
        else:
            raise ValueError(
                'Bad ticker! Supported tickers are BTC, LTC, ETH.')

        self.generateTechIndObjects(histData)
        sysTuple = (marketData, systemData)

        if (typeCode == "BT"):
            from Pipeline import indsToDF
            techDF = indsToDF(self.techInds)
            positionData = ()
            return self.loopBacktestLogic(positionData, histData, techDF)

        if (typeCode == "PT"):
            self.loopPaperTradeLogic(*sysTuple, histData)
            return self.endPaperTrading(typeCode, sysTuple)  # was "endCode", which is undefined in this scope
Example #2
def main():
    try:
        debug = ast.literal_eval(sys.argv[1])
    except IndexError:
        debug = True

    if (debug):
        print("***************************************\n"
              "\t\t\t DEBUG \n"
              "***************************************\n")

    interaction_file = str(Path("Papers/1-s2.0-S009286741300439X-mmc1.txt"))
    log_dir = "Datafiles_Prepare/Logs/"
    tmp_dir = utils.make_tmp_dir("Datafiles_Prepare/tmp_dir", parents=True)


    organisms = ["Human"]
    for organism in organisms:
        JsonLog.set_filename(
            utils.filename_date_append(Path(log_dir) / Path("Mapping_the_Human_miRNA_" + organism + ".json")))
        JsonLog.add_to_json('file name', interaction_file)
        JsonLog.add_to_json('paper',
                            "Mapping the Human miRNA Interactome by CLASH Reveals Frequent Noncanonical Binding")
        JsonLog.add_to_json('Organism', organism)
        JsonLog.add_to_json('paper_url', "https://www.sciencedirect.com/science/article/pii/S009286741300439X")
        p = Pipeline(paper_name="Mapping_the_Human_miRNA",
                     organism=organism,
                     in_df=df_prepare(read_paper_data(interaction_file, debug)),
                     tmp_dir=tmp_dir)

        p.run()
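A note on the argument handling above: ast.literal_eval turns a command-line token such as True, False, or 0 into the corresponding Python value, and the IndexError fallback defaults debug to True when no argument is given. The same idiom recurs in Examples #4, #7, #8, and #10. A minimal standalone sketch of the pattern (the extra exception types are a defensive addition, not in the original):

import ast
import sys

def parse_debug_flag(argv):
    # Evaluate the first CLI argument as a Python literal; default to True.
    # ValueError/SyntaxError guard against non-literal input such as "yes".
    try:
        return ast.literal_eval(argv[1])
    except (IndexError, ValueError, SyntaxError):
        return True

if __name__ == "__main__":
    print(parse_debug_flag(sys.argv))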
Example #3
    def __init__(self, audiofile, strings=None, filename=None):
        self.filename = filename
        self.audiofile = audiofile
        self.touched = True

        if not strings: strings = [-5, -10, -14, -19, -24, -29]

        self.appsinkpipeline = Pipeline.AppSinkPipeline(self.audiofile)
        self.pipeline = Pipeline.Pipeline(self.audiofile)
        self.timeline = Timeline.Timeline(self, strings)
        self.timeline.show_all()
        self.control = VisualizerControl(self.pipeline)
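The default strings list looks like semitone offsets relative to A4 (440 Hz): -5, -10, -14, -19, -24, and -29 map to E4, B3, G3, D3, A2, and E2, i.e. standard guitar tuning. That reading is inferred from the values, not stated in the source; a quick check using the equal-temperament formula f = 440 * 2**(n/12):

# Print the frequency implied by each semitone offset relative to A4 (440 Hz).
A4_HZ = 440.0
offsets = [-5, -10, -14, -19, -24, -29]
names = ["E4", "B3", "G3", "D3", "A2", "E2"]
for offset, name in zip(offsets, names):
    print("%s: %.2f Hz" % (name, A4_HZ * 2 ** (offset / 12.0)))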
Example #4
def main():
    try:
        debug = ast.literal_eval(sys.argv[1])
    except IndexError:
        debug = True

    if (debug):
        print("***************************************\n"
              "\t\t\t DEBUG \n"
              "***************************************\n")

    mouse_config = {
        "organism": "Mouse",
        "interaction_file": "Papers/ncomms9864-s2.xlsx"
    }
    human_config = {
        "organism": "Human",
        "interaction_file": "Papers/ncomms9864-s4.xlsx"
    }

    tmp_dir = utils.make_tmp_dir("Datafiles_Prepare/tmp_dir", parents=True)
    log_dir = "Datafiles_Prepare/Logs/"

    for cnfg in [mouse_config, human_config]:
        organism = cnfg["organism"]
        interaction_file = cnfg["interaction_file"]

        JsonLog.set_filename(
            utils.filename_date_append(
                Path(log_dir) /
                Path("Darnell_miRNA_target_chimeras_" + organism + ".json")))
        JsonLog.add_to_json('file name', interaction_file)
        JsonLog.add_to_json(
            'paper',
            "miRNA–target chimeras reveal miRNA 3-end pairing as a major determinant of Argonaute target specificity"
        )
        JsonLog.add_to_json('Organism', organism)
        JsonLog.add_to_json('paper_url',
                            "https://www.nature.com/articles/ncomms9864")

        org = Darnell_miRNA_target_chimeras(interaction_file,
                                            tmp_dir,
                                            organism,
                                            debug=debug)
        org.run()

        print("Pipeline start")
        p = Pipeline(paper_name="Darnell_miRNA_target_chimeras",
                     organism=organism,
                     in_df=org.prepare_for_pipeline(),
                     tmp_dir=tmp_dir)

        p.run()
Example #5
class Dumper(Pipeline({"pipeline"})):
    def dump(self, fileName: str):
        """Dumps captured packets into separate files"""
        dirName = os.path.splitext(fileName)[0]
        if not os.path.isdir(dirName):
            if os.path.exists(dirName):
                raise Exception(
                    "A file named " + groups.Back.lightcyanEx(dirName) +
                    " already exists, so we cannot create a directory with that name"
                )
            else:
                os.mkdir(dirName)
        for packet in self(fileName):
            cleanName = packet.name.plain()
            ofn = os.path.join(dirName, cleanName)
            print(
                groups.Fore.lightgreenEx(
                    "writing " + groups.Back.lightyellowEx(packet.name) +
                    "..."))
            with open(ofn, "wb") as of:
                of.write(packet.body)

    def pipeline(self, stage, packet):
        packet = stage.transformPacket(self, packet)
        stage.generateName(self, packet)
        if stage.isUseful(self, packet):
            #stage.generateName(self, packet)
            return packet
        else:
            raise PacketUselessException(
                "Packet " + packet.name + " is useless", packet)

    def __call__(self, fileName):
        packets = self.stages[0].getPackets(fileName)
        for packet in packets:
            try:
                (packet, stage) = self.pipeline(packet)
                yield packet
            except PipelineInterruptedException as ex:
                (args, kwargs, stage, ex) = ex.args
                (packet,) = args  # args is a 1-tuple holding the packet; the original "(packet) = args" bound the whole tuple
                humanReadableStageName = stage.__name__[:-len("Dumping")]

                if isinstance(ex, PacketUselessException):
                    print(
                        groups.Fore.yellow(
                            groups.Fore.cyan(humanReadableStageName) + ": " +
                            groups.Fore.blue(ex.args[1].index) +
                            " considered useless"))
                else:
                    raise ex
Example #6
def main(start_fold, gpu):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    sess = tf.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras

    GetData = DataGenerator(dataset_mode='lr')
    CV = Pipeline(GetData,
                  DL_model,
                  start_fold,
                  gpu,
                  model_name=MODEL_PATH + 'LSTM_model_lr')
    score = CV.train()
    log.info(f'Model accuracy = {score}')
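The ConfigProto/Session/set_session sequence above is TensorFlow 1.x API (set_session presumably imported from keras.backend); the same pattern reappears in Example #9. Under TensorFlow 2.x, the equivalent memory-growth setting would be, as a sketch assuming a TF2 installation:

import tensorflow as tf  # TensorFlow 2.x

# Ask TF to grow GPU memory on demand instead of reserving it all up front.
for gpu_device in tf.config.list_physical_devices("GPU"):
    tf.config.experimental.set_memory_growth(gpu_device, True)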
Example #7
def main():
    try:
        debug = ast.literal_eval(sys.argv[1])
    except IndexError:
        debug = True

    if (debug):
        print("***************************************\n"
              "\t\t\t DEBUG \n"
              "***************************************\n")

    interaction_file = str(Path("Papers/1-s2.0-S1097276516305214-mmc3.xlsx"))
    log_dir = "Datafiles_Prepare/Logs/"
    tmp_dir = utils.make_tmp_dir("Datafiles_Prepare/tmp_dir", parents=True)

    organisms = ["Celegans"]
    for organism in organisms:
        JsonLog.set_filename(
            utils.filename_date_append(
                Path(log_dir) /
                Path("Pairing_Beyond_Seed_" + organism + ".json")))
        JsonLog.add_to_json('file name', interaction_file)
        JsonLog.add_to_json(
            'paper',
            "Pairing beyond the Seed Supports MicroRNA Targeting Specificity")
        JsonLog.add_to_json('Organism', organism)
        JsonLog.add_to_json(
            'paper_url',
            "https://www.sciencedirect.com/science/article/pii/S1097276516305214#mmc3"
        )

        ce = Pairing_Beyond_Seed(input_file=interaction_file,
                                 organism=organism,
                                 tmp_dir=tmp_dir,
                                 debug=debug)
        ce.run()

        p = Pipeline(paper_name="Pairing_Beyond_Seed",
                     organism=organism,
                     in_df=ce.prepare_for_pipeline(),
                     tmp_dir=tmp_dir)
        p.run()
Example #8
def main():
    try:
        debug = ast.literal_eval(sys.argv[1])
    except IndexError:
        debug = True

    if (debug):
        print("***************************************\n"
              "\t\t\t DEBUG \n"
              "***************************************\n")

    interaction_file = str(Path("Papers/41598_2017_7880_MOESM4_ESM.csv"))
    log_dir = "Datafiles_Prepare/Logs/"
    tmp_dir = utils.make_tmp_dir("Datafiles_Prepare/tmp_dir", parents=True)

    organisms = ["Cow"]
    for organism in organisms:
        JsonLog.set_filename(
            utils.filename_date_append(
                Path(log_dir) /
                Path("Global_Mapping_Cattle_" + organism + ".json")))
        JsonLog.add_to_json('file name', interaction_file)
        JsonLog.add_to_json(
            'paper',
            "Global mapping of miRNA-target interactions in cattle (Bos taurus)"
        )
        JsonLog.add_to_json('Organism', organism)
        JsonLog.add_to_json(
            'paper_url',
            "https://www.nature.com/articles/s41598-017-07880-8#MOESM1")

        cow = Global_Mapping_Cattle(input_file=interaction_file,
                                    tmp_dir=tmp_dir,
                                    debug=debug)

        cow.run()

        p = Pipeline(paper_name="Global_Mapping_Cattle",
                     organism=organism,
                     in_df=cow.prepare_for_pipeline(),
                     tmp_dir=tmp_dir)
        p.run()
Example #9
def main(start_fold, gpu, batch, add_trend, freq_enc):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    sess = tf.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras
    if add_trend:
        log.info('Will add trend to XEEK Train data')
    GetData = DataGenerator(add_trend=add_trend, dataset_mode='ud')
    CV = Pipeline(GetData,
                  DL_model,
                  start_fold,
                  gpu,
                  batch,
                  model_name=MODEL_PATH + 'LSTM_model_ud')
    score = CV.train(freq_encoder=freq_enc)
    log.info(f'Model accuracy = {score}')
Example #10
def main():
    try:
        debug = ast.literal_eval(sys.argv[1])
    except IndexError:
        debug = True

    if (debug):
        print("***************************************\n"
              "\t\t\t DEBUG \n"
              "***************************************\n")

    interaction_file = str(Path("Papers/1-s2.0-S1097276514003566-mmc3.xls"))
    log_dir = "Datafiles_Prepare/Logs/"
    tmp_dir = utils.make_tmp_dir("Datafiles_Prepare/tmp_dir", parents=True)

    organisms = ["Celegans", "Human", "Mouse"]
    for organism in organisms:
        JsonLog.set_filename(
            utils.filename_date_append(
                Path(log_dir) /
                Path("Unambiguous_Identification_" + organism + ".json")))
        JsonLog.add_to_json('file name', interaction_file)
        JsonLog.add_to_json(
            'paper',
            "Unambiguous Identification of miRNA:Target Site Interactions by Different Types of Ligation Reactions"
        )
        JsonLog.add_to_json('Organism', organism)
        JsonLog.add_to_json(
            'paper_url',
            "https://www.sciencedirect.com/science/article/pii/S1097276514003566#app3"
        )
        p = Pipeline(paper_name="Unambiguous_Identification",
                     organism=organism,
                     in_df=df_prepare(
                         read_paper_data(interaction_file, organism, debug)),
                     tmp_dir=tmp_dir)

        p.run()
Example #11
import Pipeline
from moviepy.editor import VideoFileClip

pipeline_class = Pipeline.Pipeline()
pipeline_class.get_pickle_data()

output = 'project_video_out.mp4'
clip = VideoFileClip("project_video.mp4")
out_clip = clip.fl_image(pipeline_class.pipeline)  # NOTE: this function expects color images!!
out_clip.write_videofile(output, audio=False)
Example #12
if __name__ == "__main__":
	print("In main.py")

	parser = argparse.ArgumentParser(description = "Available options")
	parser.add_argument("-d", "--data_path", type = str, metavar = "", required = True, help = "Input dataset folder path")
	parser.add_argument("-s", "--show_clouds", type = bool, default = False, metavar = "", help = "Show result point clouds")
	parser.add_argument("-n", "--no_save_clouds", type = bool, default = False, metavar = "", help = "Don't save cloud outputs")
	parser.add_argument("-f", "--file_name", metavar = "", required = True, help = "Name of output filename (without .pcd)")
	
	args = parser.parse_args()

	folder_path = os.path.abspath(args.data_path)
	show_clouds = args.show_clouds
	save_clouds = not args.no_save_clouds
	file_name = args.file_name

	############
	# Pipeline #
	############ 

	# Create instance of pipeline
	pipeline = Pipeline.Pipeline(folder_path, file_name)
	print(pipeline.folder_path)
	# and run
	pipeline.run(save_clouds, show_clouds)
Example #13
    for opt, arg in opts:
        if opt == '-h':
            print(
                'python test.py -c <config> -t <type> -p <project name> -o <output dir>'
            )
            sys.exit()
        elif opt in ("-c", "--config"):
            command['config'] = arg
        elif opt in ("-t", "--type"):
            command['type'] = arg
        elif opt in ("-p", "--project"):
            command['project_name'] = arg
        elif opt in ("-o", "--out"):
            command['project_path'] = arg
    try:
        if command['project_path'].endswith('/'):
            os.mkdir(command['project_path'] + command['project_name'])
            command['project_path'] = command['project_path'].rstrip('/')
        else:
            os.mkdir(command['project_path'] + '/' + command['project_name'])
    except Exception as e:
        print(e)
        # sys.exit(2)
    command['project_id'] = str(uuid.uuid4())
    return command


project = get_argv(sys.argv[1:])
pipeline = Pipeline(project)
# pipeline.start(project['type'])
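The directory creation above special-cases a trailing '/' by hand; os.path.join plus os.makedirs would absorb both cases. A minimal equivalent sketch (the function name is hypothetical):

import os
import uuid

def make_project_dir(project_path, project_name):
    # os.path.join normalizes the separator whether or not project_path
    # ends with '/'; exist_ok avoids the try/except around mkdir.
    target = os.path.join(project_path.rstrip('/'), project_name)
    os.makedirs(target, exist_ok=True)
    return target

print(make_project_dir('/tmp/projects/', str(uuid.uuid4())))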
Example #14
    def __init__(self, deepLib):
        self.deepLib = deepLib
        self.platform = deepLib.platform
        self.pipeline = Pipeline(GstElementFactory.pipeline())
Example #15
    def run(self):

        self.queue_complete = False
        while 1:
            # Block until there is something to process in the queue
            request = self.request_queue.get()
            # Exit if the queue item is None
            if request is None:
                break
            # Unpack the item to process from the queue entry
            n, sel = request

            # Advance the progress bar
            self.work_complete = False
            self.amount_completed = 0
            self.idevent = gobject.timeout_add(200,
                                               self.mainapp.on_ProgressBar)

            # Select the file to process
            af = sel[0]
            it = sel[1]
            try:
                self.mainapp.FileTable.tvSelection.select_iter(it)
            except:
                pass

            # Prepare the status-bar message
            self.n = n
            if af.get_tag("title") != "Unknown title" and af.get_tag(
                    "artist") != "Unknown artist":
                self.msg = af.get_tag("artist") + " - " + af.get_tag("title")
            else:
                self.msg = af.get_filename()

            # Input path
            if af.get_uri()[:7] == "file://":
                input_path = af.get_uri()[7:]
            elif af.get_uri()[:7] == "cdda://":
                input_path = af.get_uri()[7:]
            else:
                input_path = af.get_uri()

            # If GStreamer is used for the conversion
            if not bool(int(self.prefs.get_option("use-external-encoder"))):
                # Extract the conversion options
                print self.Options(af, self.prefs)
                format, mode, qual, bitrate, save_path, output_file_name, tagsv1, tagsv2 = self.Options(
                    af, self.prefs)
                # Pipeline
                converter_pipe = Pipeline(input_path, format, mode, qual,
                                          bitrate,
                                          save_path + "/" + output_file_name)

                # Stay in the loop until the pipeline has finished
                while 1:
                    state, pending, timeout = converter_pipe.pipe.get_state()
                    if pending == gst.STATE_NULL:
                        print "Finito:", input_path
                        self.work_complete = True
                        gobject.source_remove(self.idevent)
                        break
                    else:
                        position = converter_pipe.pipe.query_position(
                            gst.FORMAT_TIME, None)[0]
                        perc = float(position) / converter_pipe.duration
                        if perc > 1:
                            perc = 0
                        time.sleep(0.1)
                        self.amount_completed = perc

                # Write the tags
                af_output = AudioFile("file://" + save_path + "/" +
                                      output_file_name + "." + format)
                if tagsv1:
                    af_output.set_tags_as_dict(af.get_tags_as_dict())
                    af_output.set_tag("comment", "X Audio Copy")
                    af_output.write_metadata()
                else:
                    af_output.remove_metadata()
                if tagsv2:
                    af_output.write_ID3v2()

                if bool(int(self.prefs.get_option("playlist"))):
                    if "/CD" in save_path:
                        self.savepath = save_path[:save_path.index("/CD")]
                    else:
                        self.savepath = save_path
                    self.playlistname = af_output.get_tag(
                        "artist") + " - " + af_output.get_tag("album")
                    self.listsongs.append("#EXTINF:" +
                                          str(int(af_output.get_duration())) +
                                          "," + af_output.get_tag("artist") +
                                          " - " + af_output.get_tag("title") +
                                          "\n")
                    self.listsongs.append(save_path[save_path.index("/CD") +
                                                    1:] + "/" +
                                          af_output.get_filename() + "\n")
                self.work_complete = True

            # If an external encoder is used: decode the file first.
            elif bool(int(self.prefs.get_option(
                    "use-external-encoder"))) and self.Options(af, self.prefs):

                # Extract the conversion options
                opt_string, input_path, output_path, tagsv1, tagsv2 = self.Options(
                    af, self.prefs)

                if af.get_type() == "audio/x-wav":
                    opt_string = opt_string.replace('"temporarypath"',
                                                    '"' + input_path + '"')
                    perc = 0.0
                else:
                    # Temporary directory
                    tempdir = tempfile.mkdtemp()
                    # Pipeline to decode to wav
                    converter_pipe = Pipeline(input_path, "wav", None, None,
                                              None, tempdir + "/temp_file")

                    # Stay in the loop until the pipeline has finished
                    while 1:
                        state, pending, timeout = converter_pipe.pipe.get_state(
                        )
                        if pending == gst.STATE_NULL:
                            print "Decodifica finita"
                            break
                        else:
                            position = converter_pipe.pipe.query_position(
                                gst.FORMAT_TIME, None)[0]
                            perc = float(
                                position) / converter_pipe.duration / 50
                            if perc > 1:
                                perc = 0
                            time.sleep(0.1)
                            self.amount_completed = perc
                    # Pass the decoded file to the external encoder
                    opt_string = opt_string.replace(
                        '"temporarypath"', '"' + tempdir + '/temp_file.wav"')

                init_encoder_time = time.time()
                encoder_args = shlex.split(opt_string)
                encoder = Encoder(encoder_args)
                # Stay in the loop until the subprocess has finished
                while 1:
                    if encoder.process.poll() == 0:
                        print "Finito:", input_path
                        self.work_complete = True
                        gobject.source_remove(self.idevent)
                        break
                    else:
                        if (time.time() - init_encoder_time > 1):
                            if perc < 1:
                                tag = TagFinder("file://" + output_path)
                                encoding_done = tag.get_duration()
                                perc = float(
                                    encoding_done) / af.get_duration() / 2.1
                                print "PERCENTUALE: ", perc
                                if perc < 1:
                                    self.amount_completed = perc
                                else:
                                    self.amount_completed = 1
                            else:
                                self.amount_completed = 1
                        time.sleep(2)
                try:
                    walk = os.walk(tempdir)
                    for dirpath, subdir, filenames in walk:
                        for f in filenames:
                            os.remove(os.path.join(dirpath, f))
                    os.rmdir(tempdir)
                except:
                    pass

                # Write the playlist
                if bool(int(self.prefs.get_option("playlist"))):
                    if "/CD" in output_path:
                        self.savepath = os.path.split(output_path)[
                            0][:os.path.split(output_path)[0].index("/CD")]
                    else:
                        self.savepath = os.path.split(output_path)[0]
                    self.playlistname = af.get_tag(
                        "artist") + " - " + af.get_tag("album")
                    self.listsongs.append("#EXTINF:" +
                                          str(int(af.get_duration())) + "," +
                                          af.get_tag("artist") + " - " +
                                          af.get_tag("title") + "\n")
                    self.listsongs.append(output_path[len(self.savepath +
                                                          "/"):] + "\n")

                self.work_complete = True
            else:
                gobject.source_remove(self.idevent)
                self.msg = "nothing. No external encoder. Please choise a valid encoder"

            # Delete the original files if requested
            if bool(int(self.prefs.get_option("delete-original-files"))):
                if af.get_uri()[:7] == "file://":
                    os.remove(af.get_filepath())

        self.queue_complete = True
        self.mainapp.on_ProgressBar()
Example #16
    def func_compress(self,
                      str_file_path,
                      str_output_directory,
                      str_compression_type=STR_COMPRESSION_GZ,
                      str_compression_mode=STR_COMPRESSION_ARCHIVE,
                      f_test=False):
        """
        Convenience function which compresses a file path
        (file or directory; gz and bz2 directory are first tarred).

        * str_file_path : Path of file or folder to compress.
                        : String
        * str_output_directory : Path of the project's output directory.
                                 Files must be in the output directory
                                 in order to be compressed.
                               : String Absolute Path
        * str_compression_type : String indicator of the compression to use.
                               : A value from Compression.COMPRESION_CHOICES
        * str_compression_mode : String indicator on how compression should occur.
                                 Compression.STR_COMPRESSION_ARCHIVE archives a directory as one compressed unit.
                                 Compression.STR_COMPRESSION_FIRST_LEVEL_ONLY archives each item in the directory's root level.
                                 Files are not affected by str_compression_mode and are simply compressed.
                               : Compression.STR_COMPRESSION_ARCHIVE and Compression.STR_COMPRESSION_FIRST_LEVEL_ONLY
        * f_test : Indicates to run in test mode (True) or not
                 : Boolean
        * return : Returns a None on failure or the path of the compressed file or directory
                 : Boolean or String
        """

        # Pipeline function used to check if a file is valid to be removed (after compression)
        cur_pipeline = Pipeline.Pipeline()

        # Make sure the choice was a compression that is handled
        if str_compression_type not in LSTR_COMPRESSION_CHOICES:
            self.logr_logger.error(
                "func_compress: Please indicate a compression type from the following choices: "
                + ",".join(LSTR_COMPRESSION_CHOICES) + ".")
            return None

        # make sure the mode choices are valid
        if str_compression_mode not in LSTR_COMPRESSION_MODE_CHOICES:
            self.logr_logger.error(
                "func_compress: Please indicate a compression mode from the following choices: "
                + ",".join(LSTR_COMPRESSION_MODE_CHOICES) + ".")
            return None

        # Check if it exists
        if not str_file_path or not os.path.exists(str_file_path):
            self.logr_logger.error(
                "func_compress: The following path to compress does not exist: "
                + ("None" if str_file_path is None else str_file_path))
            return None

        # Check if it is already compressed
        if self.func_is_compressed(str_file_path):
            self.logr_logger.info(
                "func_compress: The following path was already compressed: " +
                str_file_path)
            return str_file_path

        # Zip archiving is a different pattern so handled separately
        if str_compression_type == STR_COMPRESSION_ZIP:
            i_zip_mode = None
            try:
                import zlib
                i_zip_mode = zipfile.ZIP_DEFLATED
            except:
                i_zip_mode = zipfile.ZIP_STORED
                self.logr_logger.error(
                    "func_compress: The zlib python library is needed to make ZIP archives, "
                    + "please make sure it is installed or select another compression type.")
                return None

            str_compressed_archive = str_file_path
            if str_compression_mode == STR_COMPRESSION_ARCHIVE:
                str_compressed_archive = str_compressed_archive + "." + STR_COMPRESSION_ZIP

            self.logr_logger.info(
                "func_compress: Will attempt to compress the following path with ZIP compression:"
                + str_compressed_archive)
            if not f_test:
                # Make the list of folders and files to compress
                # Archive mode
                lstr_paths = [str_file_path]
                if os.path.isdir(str_file_path) and (
                        str_compression_mode
                        == STR_COMPRESSION_FIRST_LEVEL_ONLY):
                    lstr_paths = [
                        os.path.join(str_file_path, str_path)
                        for str_path in os.listdir(str_file_path)
                    ]

                for str_path_to_compress in lstr_paths:
                    # The name of the new compressed zip archive
                    str_compressed_dir_name = str_path_to_compress + "." + STR_COMPRESSION_ZIP
                    hndl_compressed_dir = zipfile.ZipFile(
                        str_compressed_dir_name,
                        mode="w",
                        allowZip64=True,
                        compression=i_zip_mode)

                    if os.path.isdir(str_path_to_compress):
                        for str_root, _, lstr_files in os.walk(
                                str_path_to_compress):
                            for str_file in lstr_files:
                                hndl_compressed_dir.write(
                                    filename=os.path.join(str_root, str_file),
                                    arcname=os.path.basename(str_file))
                    else:
                        hndl_compressed_dir.write(
                            filename=str_path_to_compress,
                            arcname=os.path.basename(str_path_to_compress))
                    hndl_compressed_dir.close()

                # At successful archive creation
                # Check that the files are valid before trying to delete them
                f_remove_files = True
                for str_path_to_compress in lstr_paths:
                    if not cur_pipeline.func_is_valid_path_for_removal(
                            str_path=str_path_to_compress,
                            str_output_directory=str_output_directory):
                        self.logr_logger.info(
                            "func_compress: Could not remove originals after compression. Problematic path = "
                            + str_path_to_compress)
                        f_remove_files = False
                # Now remove originals
                if f_remove_files:
                    for str_path_to_compress in lstr_paths:
                        if os.path.isfile(str_path_to_compress):
                            os.remove(str_path_to_compress)
                        else:
                            shutil.rmtree(str_path_to_compress)
                else:
                    return None

            return str_compressed_archive

        # Handle gz and bz2
        # Check if it is a file or folder
        if os.path.isdir(str_file_path):
            # tar.gz and tar.bz2
            str_compressed_dir_name = str_file_path + ".tar." + str_compression_type
            str_compression_open_mode = "w:bz2" if str_compression_type == STR_COMPRESSION_BZ2 else "w:gz"

            if str_compression_mode == STR_COMPRESSION_FIRST_LEVEL_ONLY:
                self.logr_logger.info(
                    "func_compress: Will attempt to compress the contents of the following directory with "
                    + str_compression_type + " compression: " + str_file_path)
                if f_test:
                    return str_file_path
            elif str_compression_mode == STR_COMPRESSION_ARCHIVE:
                self.logr_logger.info(
                    "func_compress: Will attempt to compress the following directory with "
                    + str_compression_type + " compression: " +
                    str_compressed_dir_name)
                if f_test:
                    return str_compressed_dir_name
            else:
                self.logr_logger.info(
                    "func_compress: Unknown compression mode: " +
                    str_compression_mode)
                if f_test:
                    return None

            hndl_compressed_dir = None
            # Files to compress
            lstr_compress_files = [str_file_path]
            if str_compression_mode == STR_COMPRESSION_FIRST_LEVEL_ONLY:
                lstr_compress_files = [
                    os.path.join(str_file_path, str_file_to_compress)
                    for str_file_to_compress in os.listdir(str_file_path)
                ]
                str_compressed_dir_name = str_file_path

            for str_cur_compress_file in lstr_compress_files:
                hndl_compressed_dir = tarfile.open(
                    str_cur_compress_file + ".tar." + str_compression_type,
                    str_compression_open_mode)
                hndl_compressed_dir.add(
                    str_cur_compress_file,
                    arcname=os.path.basename(str_cur_compress_file))
                hndl_compressed_dir.close()

            # At successful archive creation
            # Check that the files are valid before trying to delete them
            f_remove_files = True
            for str_cur_compress_file in lstr_compress_files:
                if not cur_pipeline.func_is_valid_path_for_removal(
                        str_path=str_cur_compress_file,
                        str_output_directory=str_output_directory):
                    self.logr_logger.info(
                        "func_compress: Could not remove originals after compression. Problematic path = "
                        + str_cur_compress_file)
                    f_remove_files = False
            if f_remove_files:
                for str_cur_compress_file in lstr_compress_files:
                    if os.path.isfile(str_cur_compress_file):
                        if not f_test:
                            os.remove(str_cur_compress_file)
                    else:
                        if not f_test:
                            shutil.rmtree(str_cur_compress_file,
                                          ignore_errors=True)
            else:
                return None
            return str_compressed_dir_name
        else:
            # Compress files not directories
            # Gz file
            str_compressed_file_name = str_file_path + "." + str_compression_type
            self.logr_logger.info(
                "func_compress: Will attempt to compress the following file with "
                + str_compression_type + " compression.")
            if f_test:
                return str_compressed_file_name
            with open(str_file_path, "rb") as hndl_file_in:
                if str_compression_type == STR_COMPRESSION_GZ:
                    with gzip.open(str_compressed_file_name, "wb") as hndl_gz:
                        hndl_gz.writelines(hndl_file_in)
                elif str_compression_type == STR_COMPRESSION_BZ2:
                    with bz2.BZ2File(str_compressed_file_name,
                                     mode="w") as hndl_bz:
                        hndl_bz.writelines(hndl_file_in)
            if not cur_pipeline.func_is_valid_path_for_removal(
                    str_path=str_file_path,
                    str_output_directory=str_output_directory):
                self.logr_logger.info(
                    "func_compress: Could not remove originals after compression (bz2/gz). Problematic path = "
                    + str_file_path)
                return None
            else:
                if not f_test:
                    os.remove(str_file_path)
            return str_compressed_file_name
        return None
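For reference, the gz file branch at the end boils down to the standard gzip streaming idiom; a minimal standalone sketch of just that step (without the removal-validity check the method performs first):

import gzip

def compress_file_gz(str_file_path):
    # Stream the input file into a .gz archive alongside it,
    # mirroring the open/gzip.open/writelines pattern above.
    str_compressed_file_name = str_file_path + ".gz"
    with open(str_file_path, "rb") as hndl_file_in:
        with gzip.open(str_compressed_file_name, "wb") as hndl_gz:
            hndl_gz.writelines(hndl_file_in)
    return str_compressed_file_name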
Example #17
    screen.blit(ft1_surf,
                [screen.get_width() / 2 - ft1_surf.get_width() / 2, 100])
    screen.blit(ft2_surf,
                [screen.get_width() / 2 - ft2_surf.get_width() / 2, 200])
    pygame.display.flip()


if __name__ == '__main__':
    pygame.init()
    pygame.font.init()
    font = pygame.font.SysFont(None, 50)
    size = width, height = 401, 650
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("Flappy Bird")
    clock = pygame.time.Clock()
    Pipeline = Pipeline.Pipeline()  # note: rebinds the module name to an instance
    Bird = Bird.Bird()  # likewise shadows the Bird module

    while True:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if (event.type == pygame.KEYDOWN
                    or event.type == pygame.MOUSEBUTTONDOWN) and not Bird.dead:
                Bird.jump = True
                Bird.gravity = 5
                Bird.jumpSpeed = 10

        background = pygame.image.load("assets/background.png")  # reloaded every frame; hoisting above the loop would be cheaper
Example #18
#Performance metric tracking
fps_current = 0
cpu_load_pct = 0
mem_load_pct = 0


#Status LED
statusLED0 = pythonled.pythonled(0)
statusLED1 = pythonled.pythonled(1)
statusLED2 = pythonled.pythonled(2)
statusLED3 = pythonled.pythonled(3)
ledStatus = False

# Image processing pipeline (code-generated from GRIP)
procPipeline = Pipeline.Pipeline()

# Server to transmit processed data over UDP to the roboRIO
outputDataServer = UDPServer.UDPServer(send_to_address = "roborio-1736-frc.local", send_to_port = 5800)

################################################################################
# Utility Functions
################################################################################

def indicateLEDsNotRunning():
    global ledStatus  # without this, the assignment below would only create a local
    statusLED1.off()
    ledStatus = False

    
def indicateLEDsProcessingActive():
    global ledStatus
Example #19
(confusion-matrix output truncated; totals row: 50  50  50  0  0 / 150)

Top-3 Hit Ratios:
k    hit_ratio
---  -----------
1    1
2    1
3    1

Scoring History:
     timestamp            duration    number_of_trees    training_rmse      training_logloss    training_classification_error
---  -------------------  ----------  -----------------  ----------------  ------------------  -------------------------------
     2016-08-25 13:50:21  0.006 sec   0.0                0.666666666667   1.09861228867       0.66
     2016-08-25 13:50:21  0.077 sec   1.0                0.603019288754   0.924249463924      0.04
     2016-08-25 13:50:21  0.096 sec   2.0                0.545137025745   0.788619346614      0.04
     2016-08-25 13:50:21  0.110 sec   3.0                0.492902188607   0.679995476522      0.04
     2016-08-25 13:50:21  0.123 sec   4.0                0.446151758168   0.591313596193      0.04
---  ---                  ---         ---                ---              ---                 ---
     2016-08-25 13:50:21  0.419 sec   46.0               0.0489303232171  0.0192767805328     0.0
     2016-08-25 13:50:21  0.424 sec   47.0               0.0462779490149  0.0180720396825     0.0
     2016-08-25 13:50:21  0.429 sec   48.0               0.0444689238255  0.0171428314531     0.0
     2016-08-25 13:50:21  0.434 sec   49.0               0.0423442541538  0.0161938230172     0.0
     2016-08-25 13:50:21  0.438 sec   50.0               0.0403480417561  0.0152718656454     0.0

Variable Importances:
variable    relative_importance    scaled_importance    percentage
----------  ---------------------  -------------------  ------------
PC1         448.958                1                    0.982184
PC2         8.1438                 0.0181393            0.0178162
Pipeline(steps=[('standardize', <h2o.transforms.preprocessing.H2OScaler object at 0x1088c6a50>), ('pca', ), ('gbm', )])
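The repr on the last line is a scikit-learn Pipeline wrapping H2O estimators. A hedged sketch of how such a pipeline is typically assembled (module paths are from the h2o 3.x Python package of that era and may have moved since; ntrees=50 matches the scoring history above, k=2 the PC1/PC2 importances):

from sklearn.pipeline import Pipeline
from h2o.transforms.preprocessing import H2OScaler
from h2o.transforms.decomposition import H2OPCA
from h2o.estimators.gbm import H2OGradientBoostingEstimator

# Standardize, project onto two principal components, then fit a 50-tree GBM.
model = Pipeline(steps=[("standardize", H2OScaler()),
                        ("pca", H2OPCA(k=2)),
                        ("gbm", H2OGradientBoostingEstimator(ntrees=50))])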
Example #20
#!/usr/bin/env python3
# Final Project - Lichtenberg Figures

import numpy as np
import Configuration
import Visualization
import Pipeline

# Load configuration file.
configuration_info = Configuration.Configuration()

# Create the spacing needed for a correct grid scale.
x_points = np.linspace(0, 1, num=configuration_info.grid_resolution_x_points)
y_points = np.linspace(0, 1, num=configuration_info.grid_resolution_y_points)

# Calculate corresponding offset (needed so pixels are not truncated)
x_offset = x_points[1] / 2.0
y_offset = y_points[1] / 2.0

# Create a meshgrid.
xs, ys = np.meshgrid(x_points, y_points)

# Create a pipeline for performing steps and create initial points.
pipeline_process = Pipeline.Pipeline(configuration_info)

# Create animation.
visualizer = Visualization.Visualization(x_offset, y_offset,
                                         configuration_info, pipeline_process)
visualizer.create_animation()
Example #21
    def func_run_sample(self, lstr_sample_info):
        # str_script is None indicates a bash script was not made and the raw
        # command can be run. This is a case of running a pipeline locally
        # without sample.txt files or config files that update aspects of the
        # pipeline that would require a script to encapsulate those changes,
        # for example a Path change.
        # A dispatch command other than local dispatching will also
        # require a script to be run.

        # Check to see if a script needs to be made and ran.
        # This happens with certain processing requirements
        # like updating environmental variables with pipeline
        # config files.
        str_script = self.func_update_command(lstr_sample_info)
        if str_script:
            return(Commandline.Commandline().func_CMD(str_script,
                                                      f_use_bash=True))
        elif str_script is None:
            # Holds the commands to run
            lcmd_commands = []

            ## Output dir related
            # If the output dir is not specified then move and copy functions are disabled
            f_archive = True
            if(not hasattr(self.ns_arguments, Arguments.C_STR_OUTPUT_DIR)
               or not self.ns_arguments.str_out_dir):
                f_archive = False

            ## Make output directory
            PipelineRunner.func_make_output_dir(self.ns_arguments)

            # Make pipeline object and indicate Log file
            pline_cur = Pipeline.Pipeline(str_name=self.prog,
                                          str_log_to_file=self.ns_arguments.str_log_file if hasattr(self.ns_arguments, "str_log_file") else os.path.join(self.ns_arguments.str_out_dir, "custom_log.txt"),
                                          str_update_source_path=self.ns_arguments.str_update_classpath if hasattr(self.ns_arguments, "str_update_classpath") else None)
            # Update the logger with the arguments
            if self.version:
                str_version_log = "".join(["PipelineRunner.func_run_sample:: ",
                                           "Pipeline version:",
                                           str(self.version), "\n",
                                           "PipelineRunner.func_run_sample:: ",
                                           "The call to the pipeline was: ",
                                           " ".join(["\n"] + sys.argv + ["\n"]),
                                           "PipelineRunner.func_run_sample:: ",
                                           "This run was started with the ",
                                           "following arg.\n"])
                str_args_log = "\n".join([str(str_namespace_key) + " = " + str(str_namespace_value)
                                          for str_namespace_key, str_namespace_value in vars(self.ns_arguments).items()] + ["\n"])
                pline_cur.logr_logger.info(str_version_log)
                pline_cur.logr_logger.info(str_args_log)
            # Put pipeline in test mode if needed.
            if hasattr(self.ns_arguments, "f_Test") and self.ns_arguments.f_Test:
                pline_cur.func_test_mode()
            # Turn off archiving if output directory was not given
            if hasattr(self.ns_arguments, "f_archive") and not f_archive:
                pline_cur.logr_logger.warning("PipelineRunner.func_run_sample:: Turning off archiving, please specify an output directory if you want this feature enabled.")
                pline_cur.f_archive = False
            # Run the user based pipeline
            # If the commands are not existent (parsed from JSON)
            # then build them from script
            # Where variables are being used.
            #if self.ns_arguments.str_wdl:
            #    # If WDL is being output, switch the values of the arguments
            #    # with the name of the argument allowing us to track them,
            #    import inspect
            #    import copy
            #    ns_wdl_arguments = copy.deepcopy(self.ns_arguments)
            #    lstr_members = [member[0] for member in inspect.getmembers(ns_wdl_arguments)
            #                     if not (member[0].startswith("_") or member[0].endswith("_") or inspect.isroutine(member))]
            #    for str_member in lstr_members:
            #        setattr(ns_wdl_arguments, str_member, "${"+str_member+"}".encode('utf-8'))
            #    lcmd_commands = self.func_make_commands(args_parsed = ns_wdl_arguments, cur_pipeline = pline_cur)
            #else:
            lcmd_commands = self.func_make_commands(args_parsed=self.ns_arguments,
                                                    cur_pipeline=pline_cur)

            # Write JSON file
            if hasattr(self.ns_arguments, "str_json_file_out") and self.ns_arguments.str_json_file_out:
                JSONManager.JSONManager.func_pipeline_to_json(lcmd_commands=lcmd_commands,
                                                              dict_args=vars(self.ns_arguments),
                                                              str_file=self.ns_arguments.str_json_file_out,
                                                              f_pretty=True)
                pline_cur.logr_logger.info("Writing JSON file to: " + self.ns_arguments.str_json_file_out)
                return(True)

            # Run commands
            if not hasattr(self.ns_arguments, "lstr_copy"):
                setattr(self.ns_arguments, "lstr_copy", None)
            if not hasattr(self.ns_arguments, "str_move_dir"):
                setattr(self.ns_arguments, "str_move_dir", None)
            if not hasattr(self.ns_arguments, "str_compress"):
                setattr(self.ns_arguments, "str_compress",  "none")
            if not hasattr(self.ns_arguments, "f_clean"):
                setattr(self.ns_arguments, "f_clean", False)
            if not hasattr(self.ns_arguments, "i_time_stamp_diff"):
                setattr(self.ns_arguments, "i_time_stamp_diff", None)
            return(pline_cur.func_run_commands(lcmd_commands=lcmd_commands,
                                               str_output_dir=self.ns_arguments.str_out_dir,
                                               f_clean=self.ns_arguments.f_clean,
                                               f_self_organize_commands=self.ns_arguments.f_graph_organize,
                                               li_wait=[int(str_wait) for str_wait in self.ns_arguments.lstr_wait.split(",")],
                                               lstr_copy=self.ns_arguments.lstr_copy if self.ns_arguments.lstr_copy else None,
                                               str_move=self.ns_arguments.str_move_dir if self.ns_arguments.str_move_dir else None,
                                               str_compression_mode=self.ns_arguments.str_compress,
                                               i_time_stamp_wiggle=self.ns_arguments.i_time_stamp_diff,
                                               #str_wdl=self.ns_arguments.str_wdl,
                                               str_dot_file=self.ns_arguments.str_dot_path,
                                               i_benchmark_secs=self.ns_arguments.i_mem_benchmark,
                                               args_original=None ))
Example #22
def makePipeline():
    """Will be executed from the secondary process. Imports and constructs the computation environment."""
    import Pipeline
    processor = Pipeline.Pipeline("vgg16_weights.h5")
    return processor
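makePipeline is written to be called from a worker process, so the heavy import and weight loading happen in the child rather than the parent. A hypothetical driver (the multiprocessing harness is an assumption; the original caller is not shown):

import multiprocessing as mp

def _worker(_):
    # The Pipeline import and "vgg16_weights.h5" load happen here, in the child.
    processor = makePipeline()
    return type(processor).__name__

if __name__ == "__main__":
    with mp.Pool(processes=1) as pool:
        print(pool.map(_worker, [None]))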
Example #23
import cv2
from PIL import Image
from image_thresholding import *
from lane_detection import *
import copy
from Pipeline import *
import sys

from moviepy.editor import VideoFileClip

if __name__ == '__main__':
    arg_count = len(sys.argv)
    #print(arg_count)
    if arg_count != 3:
        print(
            'Run this script as python lane_finder.py your_input_file.mp4 your_output_file.mp4'
        )
    else:
        try:
            pipeline = Pipeline()
            inputfile = str(sys.argv[1])
            outputfile = str(sys.argv[2])

            clip1 = VideoFileClip(inputfile)
            white_clip = clip1.fl_image(pipeline.process_image)  # NOTE: this function expects color images!
            white_clip.write_videofile(outputfile, audio=False)
        except Exception as e:
            print(str(e))
Example #24
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default='/beamline/apps/saxs-auto/settings.conf',
        action="store",
        help="use this to set config file location for pipeline")
    parser.add_argument("-l",
                        "--lite",
                        action="store_true",
                        help="set this switch to use the local lite pipeline")

    args = parser.parse_args()

    try:
        stream = file(args.config, 'r')
    except IOError:
        print "Unable to find configuration file settings.conf, exiting."
        sys.exit(2)

    config = yaml.load(stream)

    if not args.lite:
        pipeline = Pipeline.Pipeline(config)
    else:
        pipeline = 'lite'

    print 'Listening on port 8082'
    pipeline_app.run(host='0.0.0.0', port=8082)
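This last snippet is Python 2 (the file() builtin and print statements). A Python 3 rendering of its config-loading block, assuming PyYAML, might look like:

import sys
import yaml

def load_config(path):
    # yaml.safe_load avoids executing arbitrary tags, unlike bare yaml.load.
    try:
        with open(path, "r") as stream:
            return yaml.safe_load(stream)
    except IOError:
        print("Unable to find configuration file %s, exiting." % path)
        sys.exit(2)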