Code example #1
File: scraper.py Project: nderimx/autobewerber
def scrape_links(search_term):
    # Build the search URL (host_url is a module-level constant in this project).
    mod_search_term = search_term.replace(" ", "%20")
    search_url = host_url + "/en/vacancies/?term=" + mod_search_term

    search_results_page = requests.get(search_url)
    page_html = BeautifulSoup(search_results_page.text, 'html.parser')

    jobs = Jobs()

    # Collect the job links from the first results page.
    jobs.links = [link['href'] for link in page_html.find_all('a', {"data-cy": "job-link"})]
    # The third token of the page-count element's text is the total number of pages.
    pages = int(page_html.find('span', {"data-cy": "page-count"}).string.split(' ')[2])

    # Fetch the remaining pages concurrently; each thread appends into jobs.links.
    threads = []
    for page in range(2, pages + 1):
        threads.append(Thread(target=jobs.retrieve_page, args=(page, search_url)))
        threads[page - 2].start()

    for thread in threads:
        thread.join()

    return jobs.links
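
The Jobs helper used above is not shown in this excerpt. A minimal sketch of what it might look like, assuming the results pages accept a page query parameter (the "&page=" name is an assumption) and that the threads share one link list (list.append is atomic under CPython's GIL):

import requests
from bs4 import BeautifulSoup

class Jobs:
    def __init__(self):
        self.links = []

    def retrieve_page(self, page, search_url):
        # Fetch one results page and collect its job links.
        html = BeautifulSoup(
            requests.get(search_url + "&page=" + str(page)).text, 'html.parser')
        for link in html.find_all('a', {"data-cy": "job-link"}):
            self.links.append(link['href'])
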
Code example #2
File: cleanr.py Project: MobiusM/Cleanr
    def __init__(self, token):
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        self.updater = Updater(token)
        self.job_queue = self.updater.job_queue
        self.handlers = Handlers(self.updater, command_handlers=COMMAND_HANDLERS)
        self.jobs = Jobs(self.job_queue, repeated_jobs=REPEATING_JOBS)
Code example #3
def main():
    # Seed mars_explorer.db with sample users and jobs, then start the web app.
    db_session.global_init("mars_explorer.db")
    user = User()
    user.surname = "Scott"
    user.name = "Ridley"
    user.age = 21
    user.position = "captain"
    user.speciality = "research engineer"
    user.address = "module_1"
    user.email = "*****@*****.**"
    db_sess = db_session.create_session()
    db_sess.add(user)
    db_sess.commit()
    user = User()
    user.surname = "Smith"
    user.name = "Jacob"
    user.age = 18
    user.position = "colonist"
    user.speciality = "farmer"
    user.address = "module_2"
    user.email = "*****@*****.**"
    db_sess.add(user)
    db_sess.commit()
    user = User()
    user.surname = "Bruh"
    user.name = "Duh"
    user.age = 22
    user.position = "colonist"
    user.speciality = "engineer"
    user.address = "module_3"
    user.email = "*****@*****.**"
    db_sess.add(user)
    db_sess.commit()
    user = User()
    user.surname = "Bennet"
    user.name = "-_-"
    user.age = 16
    user.position = "colonist"
    user.speciality = "warrior"
    user.address = "module_4"
    user.email = "*****@*****.**"
    db_sess.add(user)
    db_sess.commit()
    jobs = Jobs(team_leader=1,
                job="deployment of residential modules 1 and 2",
                work_size=15,
                collaborators="2, 3",
                is_finished=False)
    db_sess.add(jobs)
    db_sess.commit()
    jobs = Jobs(team_leader=1,
                job="deployment of residential modules 3 and 4",
                work_size=20,
                collaborators="3, 4",
                is_finished=True)
    db_sess.add(jobs)
    db_sess.commit()
    app.run()
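
The User and Jobs models are defined elsewhere in this project. A plausible sketch of the Jobs model, inferred only from the fields used in these examples (column types and the base class are assumptions):

import sqlalchemy
from sqlalchemy.orm import declarative_base

SqlAlchemyBase = declarative_base()  # the project presumably supplies this via its db_session module

class Jobs(SqlAlchemyBase):
    __tablename__ = 'jobs'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    team_leader = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('users.id'))
    job = sqlalchemy.Column(sqlalchemy.String)
    work_size = sqlalchemy.Column(sqlalchemy.Integer)
    collaborators = sqlalchemy.Column(sqlalchemy.String)
    start_date = sqlalchemy.Column(sqlalchemy.DateTime)
    end_date = sqlalchemy.Column(sqlalchemy.DateTime)
    is_finished = sqlalchemy.Column(sqlalchemy.Boolean)
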
Code example #4
File: cleanr.py Project: MobiusM/Cleanr
class Cleanr:
    def __init__(self, token):
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        self.updater = Updater(token)
        self.job_queue = self.updater.job_queue
        self.handlers = Handlers(self.updater, command_handlers=COMMAND_HANDLERS)
        self.jobs = Jobs(self.job_queue, repeated_jobs=REPEATING_JOBS)

    def start_bot(self):
        self.handlers.start_handlers()
        self.jobs.start_jobs()

        self.updater.start_polling()
        self.updater.idle()
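
Starting the bot is then a two-liner (a usage sketch; the token value is hypothetical):

bot = Cleanr("123456:ABC-DEF-hypothetical-token")
bot.start_bot()
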
Code example #5
def jobs_list(js_code):
    sector = Job_sector.objects(js_code=js_code)
    job_sector = sector[0]['job_sector']
    jobs_list = Jobs.objects(job_sector=job_sector)
    return render_template('job_des.html',
                           j_list=jobs_list,
                           js_code=js_code,
                           job_sector=job_sector)
Code example #6
    def __init__(self):
        self.jobs = Jobs()
        self.db = dict()
        self.db['name'] = 'theCommonResume'
        self.db['user'] = '******'
        self.db['host'] = '127.0.0.1'
        self.info = dict()
        self.xtra = dict()
Code example #7
def main(argv):
    daemon = Pyro4.Daemon()
    ns = Pyro4.locateNS()
    jobs = Jobs()
    uri = daemon.register(jobs)
    ns.register("jobs", uri)

    daemon.requestLoop()
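
A client can then resolve the registered object through the Pyro4 name server (a sketch; the method name is hypothetical, since the Jobs interface is not shown):

import Pyro4

jobs = Pyro4.Proxy("PYRONAME:jobs")  # looked up via the name server registered above
# Remote calls now proxy to the server-side Jobs instance, e.g.:
# jobs.submit(...)
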
Code example #8
def add_jobs(db_sess):
    jobs = Jobs()
    jobs.team_leader = 2
    jobs.job = "search for water"
    jobs.work_size = 20  # numeric field, not the string "20"
    jobs.collaborators = "5, 7"  # stored as a comma-separated string
    jobs.is_finished = False
    db_sess.add(jobs)
    db_sess.commit()
Code example #9
File: stack.py Project: mohseniaref/Doris
    def unpack_image(self, dest_folder=''):

        if not dest_folder:
            dest_folder = os.path.join(self.path, 'slc_data_files')
            self.unzip_path = dest_folder
        if not os.path.exists(dest_folder):
            os.mkdir(dest_folder)

        jobList1 = []

        # Unpack the images needed for processing; images that fail to unpack are removed.
        for imagefile in self.images:

            zipped_folder = imagefile.zip_path
            if zipped_folder.endswith('.SAFE.zip'):
                imagefile.unzip_path = os.path.join(
                    dest_folder,
                    os.path.basename(zipped_folder[:-9] + '.SAFE'))
            elif zipped_folder.endswith('.zip'):
                imagefile.unzip_path = os.path.join(
                    dest_folder,
                    os.path.basename(zipped_folder[:-4] + '.SAFE'))
            shapefile = self.shape_filename
            pol = self.polarisation[0]
            overwrite = False
            command1 = ('python ' + self.function_path +
                        'load_shape_unzip.py ' + zipped_folder + ' ' +
                        dest_folder + ' ' + shapefile + ' ' + pol + ' ' +
                        str(overwrite))
            jobList1.append({"path": self.path, "command": command1})
            if not self.parallel:
                os.chdir(self.path)
                # Run the unpack command serially.
                os.system(command1)

        if self.parallel:
            jobs = Jobs(self.nr_of_jobs, self.doris_parameters)
            jobs.run(jobList1)
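
The Doris Jobs(nr_of_jobs, doris_parameters) runner is not part of this excerpt. A minimal sketch of the interface it appears to implement, assuming it simply fans the shell commands out over worker processes:

from multiprocessing import Pool
import subprocess

def _run_job(job):
    # Each entry is {"path": ..., "command": ...}; run the command in that directory.
    return subprocess.call(job["command"], shell=True, cwd=job["path"])

class Jobs:
    def __init__(self, nr_of_jobs, doris_parameters=None):
        self.nr_of_jobs = nr_of_jobs  # doris_parameters kept for interface parity; unused in this sketch

    def run(self, job_list):
        with Pool(self.nr_of_jobs) as pool:
            pool.map(_run_job, job_list)
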
Code example #10
    def post(self):
        args = job_parser.parse_args()
        session = db_session.create_session()
        jobs = Jobs(
            team_leader=args['team_leader'],
            job=args['job'],
            work_size=args['work_size'],
            collaborators=args['collaborators'],
            start_date=args['start_date'],
            end_date=args['end_date'],
            is_finished=args['is_finished']
        )
        session.add(jobs)
        session.commit()
        return jsonify({'success': 'OK'})
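
The resource can be exercised with any HTTP client (a sketch; the URL and field values are hypothetical and depend on how the resource is registered):

import requests

resp = requests.post('http://localhost:5000/api/jobs', json={
    'team_leader': 1,
    'job': 'water delivery',
    'work_size': 10,
    'collaborators': '2, 3',
    'start_date': None,
    'end_date': None,
    'is_finished': False,
})
print(resp.json())  # expected: {'success': 'OK'}
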
Code example #11
File: main.py Project: BigBush3/flask
def add_news():
    form = JobsForm()
    if form.validate_on_submit():
        db_sess = db_session.create_session()
        jobs = Jobs()
        jobs.job = form.job.data
        jobs.team_leader = form.team_leader.data
        jobs.duration = form.duration.data
        jobs.collaborators = form.collaborators.data
        jobs.is_finished = form.is_finished.data
        current_user.jobs.append(jobs)
        db_sess.merge(current_user)
        db_sess.commit()
        return redirect('/')
    return render_template('jobs.html', title='add work', form=form)
Code example #12
File: main.py Project: jmabuin/PyBLASpark
import os
import sys
import time

from pyspark import SparkContext, SparkConf, SQLContext

sys.path.insert(0, 'modules.zip')

from options.Options import *
from options.Mode import *
from jobs.Jobs import *

if __name__ == '__main__':
    initTime = time.perf_counter()  # time.clock() is deprecated and was removed in Python 3.8

    conf = SparkConf()
    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)

    myOptions = Options()

    job = Jobs(myOptions, sc)

    endTime = time.perf_counter()

    print("Total time used: " + str(endTime - initTime) + "\n")
Code example #13
def jobs(js_code, code):
    sector = Job_sector.objects(js_code=js_code)
    jobs_list = Jobs.objects(code=code)
    js_code = sector[0]['js_code']
    jobs = jobs_list[0]
    return render_template('detail_des.html', j=jobs)
Code example #14
File: network_stack.py Project: mohseniaref/Doris
    def create_network(self):
        # This function creates the network interferograms for the single master stack. This includes:
        # > creation of new data folders
        # > copying of the ifgs.res to the right folders
        # > making ifgs from former ifgs to create the new ifgs. (This is a current hack, which should be improved
        #       later on.)

        # First create folder structure and master/slave result files
        for ifg_key, ifg_pair in zip(self.ifgs_keys, self.ifgs_list):
            # Loop over all the different ifgs pairs.
            folder = os.path.join(self.stack_folder, ifg_key + '_ifg')
            if not os.path.exists(folder):
                os.mkdir(folder)
            self.stack[ifg_key] = dict()

            date_1 = ifg_key[:10]
            date_2 = ifg_key[11:21]
            for burst_key in self.datastack[date_1].keys():
                swath = burst_key[:7]
                burst = burst_key[8:]

                swath_folder = os.path.join(self.stack_folder,
                                            ifg_key + '_ifg', swath)
                if not os.path.exists(swath_folder):
                    os.mkdir(swath_folder)

                burst_folder = os.path.join(self.stack_folder,
                                            ifg_key + '_ifg', swath, burst)
                if not os.path.exists(burst_folder):
                    os.mkdir(burst_folder)
                ifgs_path = os.path.join(self.stack_folder, ifg_key + '_ifg',
                                         swath, burst, 'ifgs.res')
                master_path = os.path.join(self.stack_folder, ifg_key + '_ifg',
                                           'master.res')
                slave_path = os.path.join(self.stack_folder, ifg_key + '_ifg',
                                          'slave.res')
                self.stack[ifg_key][burst_key] = dict()

                if date_1 == self.master_date:
                    self.stack[ifg_key][burst_key]['ifgs'] = copy.deepcopy(
                        self.datastack[date_2][burst_key]['ifgs'])
                else:
                    self.stack[ifg_key][burst_key]['ifgs'] = copy.deepcopy(
                        self.datastack[date_1][burst_key]['ifgs'])

                # For the master and slave entries the slave result files are used,
                # except for the single-master master date, where the master file is used.
                if not date_1 == self.master_date:
                    self.stack[ifg_key][burst_key]['master'] = copy.deepcopy(
                        self.datastack[date_1][burst_key]['slave'])
                else:
                    self.stack[ifg_key][burst_key]['master'] = copy.deepcopy(
                        self.datastack[date_2][burst_key]['master'])
                if not date_2 == self.master_date:
                    self.stack[ifg_key][burst_key]['slave'] = copy.deepcopy(
                        self.datastack[date_2][burst_key]['slave'])
                else:
                    self.stack[ifg_key][burst_key]['slave'] = copy.deepcopy(
                        self.datastack[date_1][burst_key]['master'])

                self.stack[ifg_key][burst_key]['ifgs'].res_path = ifgs_path
                self.stack[ifg_key][burst_key]['master'].res_path = master_path
                self.stack[ifg_key][burst_key]['slave'].res_path = slave_path

            # Create the full_swath resfiles.
            ifgs_path = os.path.join(self.stack_folder, ifg_key + '_ifg',
                                     'ifgs.res')
            self.full_swath[ifg_key] = dict()
            self.full_swath[ifg_key]['ifgs'] = copy.deepcopy(
                self.full_swath_datastack[date_1]['ifgs'])
            self.full_swath[ifg_key]['ifgs'].res_path = ifgs_path

        # Remove the interferogram, coherence, filtering and unwrapping steps if needed.
        self.del_process('interfero', type='ifgs')
        self.del_process('coherence', type='ifgs')
        self.del_process('filtphase', type='ifgs')
        self.del_process('unwrap', type='ifgs')
        # Now write the .res files to disk.
        self.update_res()

        # Create the corrected slave images (earth phase / dem phase).
        # This step is done per burst and parallelized.
        date = list(self.stack.keys())[0]
        job_list = []
        for burst in self.stack[date].keys():
            path = self.stack_folder
            command = ('python ' + self.function_path + 'corrected_slave.py ' +
                       self.stack_folder + ' ' + burst + ' ' + self.master_date)
            job_list.append({"path": path, "command": command})
            if not self.parallel:
                os.chdir(path)
                os.system(command)
        if self.parallel:
            jobs = Jobs(self.nr_of_jobs, self.doris_parameters)
            jobs.run(job_list)

        # Create links to the different images for coherence calculations.
        for ifg_key, ifg_pair in zip(self.ifgs_keys, self.ifgs_list):
            # Loop over all the different ifgs pairs.
            date_1 = ifg_key[:10]
            date_2 = ifg_key[11:21]
            master_key = self.master_date[:4] + self.master_date[
                5:7] + self.master_date[8:]

            for burst_key in self.datastack[date_1].keys():
                swath = burst_key[:7]
                burst = burst_key[8:]
                new_slave = os.path.join(self.stack_folder, ifg_key + '_ifg',
                                         swath, burst, 'slave_corrected.raw')
                new_master = os.path.join(self.stack_folder, ifg_key + '_ifg',
                                          swath, burst, 'master_corrected.raw')

                # Make a link to the right data file depending on whether we have a slave or master image.
                if not date_1 == self.master_date:
                    old_master = os.path.join(
                        self.stack_folder, master_key + '_' + date_1, swath,
                        burst, self.stack[ifg_key][burst_key]
                        ['master'].processes['resample']['Data_output_file'])
                    self.stack[ifg_key][burst_key]['master'].processes[
                        'resample'][
                            'Data_output_file'] = 'master_corrected.raw'
                else:
                    old_master = os.path.join(
                        self.stack_folder, date_1 + '_' + date_2, swath, burst,
                        self.stack[ifg_key][burst_key]
                        ['master'].processes['crop']['Data_output_file'])
                    self.stack[ifg_key][burst_key]['master'].processes['crop'][
                        'Data_output_file'] = 'master_corrected.raw'
                if not date_2 == self.master_date:
                    old_slave = os.path.join(
                        self.stack_folder, master_key + '_' + date_2, swath,
                        burst, self.stack[ifg_key][burst_key]
                        ['slave'].processes['resample']['Data_output_file'])
                    self.stack[ifg_key][burst_key]['slave'].processes[
                        'resample']['Data_output_file'] = 'slave_corrected.raw'
                else:
                    old_slave = os.path.join(
                        self.stack_folder, date_2 + '_' + date_1, swath, burst,
                        self.stack[ifg_key][burst_key]
                        ['slave'].processes['crop']['Data_output_file'])
                    self.stack[ifg_key][burst_key]['slave'].processes['crop'][
                        'Data_output_file'] = 'slave_corrected.raw'

                os.link(old_master, new_master)
                os.link(old_slave, new_slave)

        self.update_res()
Code example #15
from elasticsearch import Elasticsearch

from jobs import Jobs
from tika import Tika


jobs = Jobs()
text_extractor = Tika()
es = Elasticsearch()
for job in jobs.iterate():
    es.index(index="memory", doc_type="pdf",
             body={"content": text_extractor.get_file_content(job), "metadata": text_extractor.get_file_metadata(job)})
Code example #16
File: stack.py Project: mohseniaref/Doris
    def write_stack(self, write_path='', no_data=False):
        # This function writes the full datastack to a given folder using the dates / swaths / bursts setup. This
        # also generates the res readfiles data.
        if write_path and os.path.exists(write_path):
            self.path = write_path
        if (not write_path
                or not os.path.exists(write_path)) and not self.path:
            warnings.warn(
                'Please specify a path that exists to write the data')
            return

        write_jobs = []
        burst_num = []

        for date in self.datastack.keys():

            date_basic = date.replace('-', '')  # strip dashes from the date string
            date_path = os.path.join(self.path, date_basic)
            if not os.path.exists(date_path):
                os.mkdir(date_path)

            for swath in self.datastack[date].keys():

                swath_path = os.path.join(date_path, swath)
                if not os.path.exists(swath_path):
                    os.mkdir(swath_path)

                for burst in self.datastack[date][swath].keys():
                    # Finally write the bursts with their res files and precise orbits
                    xml = self.datastack[date][swath][burst].swath_xml
                    data = self.datastack[date][swath][burst].swath_data
                    image_no = str(
                        self.datastack[date][swath][burst].burst_num)
                    stack_no = burst[6:]
                    xml_base = os.path.basename(xml)
                    res_name = os.path.join(
                        swath_path, xml_base[15:23] + '_iw_' + xml_base[6] +
                        '_burst_' + stack_no + '.res')
                    outdata = os.path.join(
                        swath_path, xml_base[15:23] + '_iw_' + xml_base[6] +
                        '_burst_' + stack_no + '.raw')

                    self.datastack[date][swath][burst].write(res_name)
                    if not os.path.exists(res_name) or not os.path.exists(
                            outdata):

                        write_jobs.append('python ' + self.function_path +
                                          'sentinel_dump_data_function.py ' +
                                          data + ' ' + res_name)
                        burst_num.append(stack_no + '_' + xml_base[6] + '_' +
                                         xml_base[15:23])

        # Bursts are sorted so that reads are spread over different data files: by burst, then swath, then date.
        ids = sorted(range(len(burst_num)), key=lambda x: burst_num[x])

        jobList1 = []
        for id_val in ids:
            jobList1.append({"path": self.path, "command": write_jobs[id_val]})
            if not self.parallel:
                os.chdir(self.path)
                # Run the dump command serially.
                os.system(write_jobs[id_val])
        if self.parallel:
            jobs = Jobs(self.nr_of_jobs, self.doris_parameters)
            jobs.run(jobList1)
Code example #17
from jobs import Jobs
import argparse
import os

job_mgr = Jobs()
parser = argparse.ArgumentParser()

parser.add_argument('--gridpack', default='.')
parser.add_argument('-j', '--jobs', default=1, type=int, help="Number of jobs")
parser.add_argument('-e',
                    '--events',
                    default=5000,
                    type=int,
                    help="Number of events per job")
parser.add_argument('-s',
                    '--initial-seed',
                    type=int,
                    default=1,
                    help="Initial random number seed")
parser.add_argument('-p',
                    '--plugins',
                    type=str,
                    default='',
                    help="Comma separated list of rivet plugins to run")
parser.add_argument('-o',
                    '--outdir',
                    type=str,
                    default='.',
                    help="Output directory for yoda files")
parser.add_argument(
    '--env',
Code example #18
File: add_folder.py Project: xabufr/memory_searcher
import sys
import os

from jobs import Jobs


folders = sys.argv[1:]
jobs = Jobs()

for folder in folders:
    files_names = os.listdir(folder)
    files = [os.path.join(folder, file_name) for file_name in files_names if os.path.splitext(file_name)[1] == ".pdf"]
    for file_path in files:
        jobs.add_job(file_path)
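
Invocation takes the folders to scan as positional arguments, e.g. python add_folder.py ~/papers ~/reports (the paths are hypothetical).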