Example #1
def find_downloads(args, config):
    """Finds available downloads"""

    episode_db = EpisodeDatabase.load_from_cache(config)
    from_date = datetime.strptime(args.fromdate, "%Y-%m-%d")
    to_date = datetime.strptime(args.todate, "%Y-%m-%d")

    if args.update_metadata:
        episode_db.update_all_tracked_series()
        episode_db.save_to_cache()

    print(
        f"Searching for downloads between {from_date:%Y-%m-%d} and {to_date:%Y-%m-%d}"
    )
    searches_to_perform = get_search_strings(from_date, to_date, episode_db,
                                             config.metadata)

    print("")
    if args.dry_run:
        print(
            "Dry run requested. Not performing searches. Searches to perform:")
        for search in searches_to_perform:
            print(search["query"])
    else:
        if args.create_jobs:
            job_queue = JobQueue(config)
            for search in searches_to_perform:
                job_queue.create_job(search["keyword"], search["query"],
                                     search["download_only"])
        else:
            search_for_torrents(searches_to_perform, args.retry_count,
                                args.directory)
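
The args namespace here is presumably built with argparse; the parser itself is not part of this example. A minimal sketch that supplies every attribute find_downloads reads (the flag names and defaults are assumptions derived from the attribute names):

import argparse

# Hypothetical parser wiring; only the attribute names are taken from the
# example above, the flags and defaults themselves are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument("fromdate", help="start of the search window, YYYY-MM-DD")
parser.add_argument("todate", help="end of the search window, YYYY-MM-DD")
parser.add_argument("--update-metadata", dest="update_metadata", action="store_true")
parser.add_argument("--dry-run", dest="dry_run", action="store_true")
parser.add_argument("--create-jobs", dest="create_jobs", action="store_true")
parser.add_argument("--retry-count", dest="retry_count", type=int, default=1)
parser.add_argument("--directory", default=".")
args = parser.parse_args()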
Example #2
def show_job(args, config):
    """Deletes a new queued job"""

    job_queue = JobQueue(config)
    job = job_queue.get_job_by_id(args.id)
    if job is None:
        print(f"No existing job with ID '{args.id}'")
    else:
        print(f"ID: {job.job_id}")
        print(f"Status: {job.status.value}")
        print(f"Date added: {job.added}")
        print(f"Series keyword: {job.keyword}")
        print(f"Search string: {job.query}")
        if job.magnet_link is not None:
            print(f"Torrent title: {job.title}")
            print(f"Magnet link: {job.magnet_link}")
        if job.torrent_hash is not None:
            print(f"Torrent hash: {job.torrent_hash}")
        if job.download_directory is not None:
            print(
                f"Torrent directory: {os.path.join(job.download_directory, job.name)}"
            )
        if job.is_download_only is not None:
            print(f"Is download-only job: {job.is_download_only}")
        if job.converted_file_name is not None:
            print(
                f"File name of converted file: {job.converted_file_name}.mp4")
Example #3
def clear_jobs(args, config):
    """Clears the job queue"""

    status = args.status
    job_queue = JobQueue(config)
    jobs = job_queue.load_jobs()
    for job in jobs:
        if status is None or status == job.status:
            job.delete()
Example #4
def list_jobs(args, config):
    """Lists all jobs in the job queue"""

    status = args.status
    job_queue = JobQueue(config)
    jobs = job_queue.load_jobs()
    for job in jobs:
        if status is None or status == job.status:
            print(f"{job.job_id} {job.status_description}")
Example #5
def delete_job(args, config):
    """Deletes a new queued job"""

    job_queue = JobQueue(config)
    job = job_queue.get_job_by_id(args.id)
    if job is None:
        print(f"No existing job with ID '{args.id}'")
    else:
        job.delete()
Example #6
def update_job(args, config):
    """Updates a new queued job"""

    job_queue = JobQueue(config)
    job = job_queue.get_job_by_id(args.id)
    if job is None:
        print(f"No existing job with ID '{args.id}'")
    else:
        if args.status not in set(item.value for item in JobStatus):
            print(f"Unknown status value '{args.status}'")
        else:
            job.status = JobStatus(args.status)
            job.save(logging.getLogger())
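
update_job only accepts a status string that matches one of the JobStatus values, while clear_jobs and list_jobs above compare the raw status string against job.status with ==. Both patterns can coexist if JobStatus is a str-valued Enum; the sketch below is an assumption (the real member names are not visible in these examples):

from enum import Enum

# Hypothetical JobStatus definition; the member names are invented. The str
# mixin lets a raw string compare equal to a member, which is what the
# `status == job.status` filters in Examples #3 and #4 rely on.
class JobStatus(str, Enum):
    PENDING = "pending"
    COMPLETED = "completed"

assert "pending" in set(item.value for item in JobStatus)
assert JobStatus("pending") is JobStatus.PENDING
assert "pending" == JobStatus.PENDING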
Example #7
class Visualiser(object):
    def __init__(self, base_directory):
        self.d_loss_real = []
        self.d_loss_fake = []
        self.g_loss = []
        self.base_directory = base_directory

    def __enter__(self):
        self.queue = JobQueue(num_processes=1)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.queue.join()

    def _get_directory(self):
        os.makedirs(self.base_directory, exist_ok=True)
        return self.base_directory

    def step(self, d_loss_real, d_loss_fake, g_loss):
        self.d_loss_real.append(d_loss_real)
        self.d_loss_fake.append(d_loss_fake)
        self.g_loss.append(g_loss)

    def step_autoencoder(self, loss):
        self.g_loss.append(loss)

    def test(self, epoch, size_first, discriminator, generator, noise, real):
        generator.eval()
        discriminator.eval()
        out = generator(noise)
        self.queue.submit(
            GANTest(directory=self._get_directory(),
                    epoch=epoch,
                    size_first=size_first,
                    gen_out=out.cpu().data.numpy(),
                    real_out=real.cpu().data.numpy(),
                    discriminator_out=discriminator(out).cpu().data.numpy(),
                    discriminator_real=discriminator(real).cpu().data.numpy()))
        generator.train()
        discriminator.train()

    def test_autoencoder(self, epoch, generator, real):
        generator.eval()
        self.queue.submit(
            AutoEncoderTest(directory=self._get_directory(),
                            epoch=epoch,
                            out=generator(real[:10]).cpu().data.numpy(),
                            real=real[:10].cpu().data.numpy()))
        generator.train()

    def plot_training(self, epoch):
        self.queue.submit(
            PlotLearning(directory=self._get_directory(),
                         epoch=epoch,
                         d_loss_real=self.d_loss_real,
                         d_loss_fake=self.d_loss_fake,
                         g_loss=self.g_loss))
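
Since __enter__ creates the JobQueue and __exit__ joins it, Visualiser is intended to be used as a context manager so that all queued plotting jobs finish before the block exits. A minimal usage sketch (the directory and loss values are placeholders):

# Usage sketch: leaving the `with` block calls queue.join(), so every
# submitted plotting job completes before execution continues.
with Visualiser("./plots") as vis:
    for epoch in range(10):
        vis.step(d_loss_real=0.7, d_loss_fake=0.3, g_loss=1.2)
        vis.plot_training(epoch)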
Example #8
def process_jobs(args, config):
    """Executes current jobs in the job queue"""

    job_queue = JobQueue(config)
    if not args.skip_search:
        airdate = datetime.now()
        job_queue.perform_searches(
            datetime(month=airdate.month, day=airdate.day, year=airdate.year),
            args.unattended)

    if not args.skip_add_downloads:
        job_queue.add_torrents()

    if not args.skip_query_downloads:
        job_queue.query_torrents_status()

    if not args.skip_convert:
        job_queue.perform_conversions(args.unattended)
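
The datetime(month=..., day=..., year=...) call rebuilds today's date with all time fields zeroed, i.e. it truncates now() to midnight so searches key off the air date alone. An equivalent spelling:

from datetime import datetime

# Equivalent to the datetime(...) reconstruction above: today at midnight.
airdate = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)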
Example #9
    def __call__(self, infilename, outfilename, **kwargs):
        self._fft_size = kwargs.get('fft_size', 4096)
        self._ffts_per_job = kwargs.get('ffts_per_job', 128)

        job_queue = JobQueue(self._workers)

        with HDF5Observation(infilename) as infile:
            with HDF5FFTDataSet(outfilename) as outfile:
                for i in range(infile.num_channels):
                    channel_name = 'channel_{0}'.format(i)
                    channel = infile[channel_name]

                    num_ffts_out = channel.length // self._fft_size
                    if channel.length % self._fft_size != 0:
                        num_ffts_out += 1

                    if self._fft_size % 2 == 0:
                        # Size of output for rfft is (n/2)+1
                        fft_size_out = self._fft_size // 2 + 1
                    else:
                        # Size of output for rfft is (n+1)/2
                        fft_size_out = (self._fft_size + 1) // 2

                    out_channel = outfile.create_channel(channel_name,
                                                         shape=(num_ffts_out,
                                                                fft_size_out,
                                                                2),
                                                         dtype=np.float32)
                    out_channel.min_angle = math.inf
                    out_channel.max_angle = -math.inf
                    out_channel.min_abs = math.inf
                    out_channel.max_abs = -math.inf

                    LOG.info("Processing channel {0} with {1} ffts".format(
                        channel_name, num_ffts_out))
                    self._process_channel(job_queue, channel, out_channel,
                                          num_ffts_out)

        job_queue.join()
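
The branch on self._fft_size encodes the output length of a real-input FFT: numpy's rfft of n samples yields n/2 + 1 complex bins for even n and (n + 1)/2 for odd n (both reduce to n//2 + 1 in integer arithmetic). A quick check:

import numpy as np

# Verify the rfft output-size arithmetic used in the example above.
for n in (4096, 4097):
    expected = n // 2 + 1 if n % 2 == 0 else (n + 1) // 2
    assert len(np.fft.rfft(np.zeros(n))) == expected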
Example #10
def create_job(args, config):
    """Creates a new queued job"""

    job_queue = JobQueue(config)
    job = job_queue.create_job(args.keyword, args.search_term)
    job.save(logging.getLogger())
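
Note that Example #1 passes a third download_only argument to job_queue.create_job, while this example passes only two; the parameter presumably has a default:

# Assumed signature reconciling Example #1 (three arguments) with the
# two-argument call above; the default value is a guess.
# def create_job(self, keyword, query, download_only=False): ...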
Example #11
File: plots.py  Project: PuYezi/rfi_ml
 def __init__(self, workers):
     self.queue = JobQueue(workers)
Example #12
 def __enter__(self):
     self.queue = JobQueue(num_processes=1)
     return self
Example #13
                self.frequency = None

                # Create merged spectrograms for this p
                merged = self.merge_spectrograms(spectrograms)
                merged_normalised = self.merge_spectrograms(
                    spectrograms, normalise_local=True)
                self.save_spectrogram(merged, "merged",
                                      "spectrogram_merged.png")
                self.save_spectrogram(merged_normalised,
                                      "merged local normalisation",
                                      "spectrogram_merged_normalised.png")


if __name__ == "__main__":
    queue = JobQueue(8)

    # Load each file using a process pool
    num_samples = 102400
    queue.submit(
        LBAPlotter("../data/v255ae_At_072_060000.lba",
                   "./At_out/",
                   num_samples=num_samples))
    queue.submit(
        LBAPlotter("../data/v255ae_Mp_072_060000.lba",
                   "./Mp_out/",
                   num_samples=num_samples))
    queue.submit(
        LBAPlotter("../data/vt255ae_Pa_072_060000.lba",
                   "./Pa_out/",
                   num_samples=num_samples))
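
Unlike Example #9, this excerpt never joins the queue after submitting; presumably the full script does so before exiting. If the submitted plot jobs must complete before the process ends, a final join is needed:

# Wait for all submitted LBAPlotter jobs to finish (an assumption about
# what the full script does; this excerpt ends at the last submit).
queue.join()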
Example #14
 def start_save_process(cls):
     # Use a single worker process to perform the saves, since saving can
     # take a while when done on the main thread.
     cls.job_queue = JobQueue(1)