Code example #1
File: client.py Project: ketoni/pixer
# Imports this class relies on (Archiver is assumed to be defined elsewhere in the pixer project)
from abc import ABCMeta, abstractmethod
import time

import requests
from bs4 import BeautifulSoup


class WebClient(metaclass=ABCMeta):

    def __init__(self):
        self.session = requests.Session()
        self.archiver = Archiver()
        self.response = ""
        self.html_soup = ""

    @abstractmethod
    def login(self):
        pass

    def tryGet(self, url, trials = 5):
        attempts = trials
        while trials:
            try: return self.session.get(url)
            except Exception as e:
                self.archiver.log(str(e))
                time.sleep(1)
                trials -= 1
        raise RuntimeError("GET to " + url + " failed after " + str(attempts) + " trials")

    def traverse(self, url):
        self.response = self.tryGet(url)
        self.html_soup = BeautifulSoup(self.response.text, "html.parser")
        return self.response.status_code

    def saveFile(self, url, savepath):
        self.archiver.file(savepath).write(self.tryGet(url).content)
        return self.response.status_code

    def executeJS(self, jspath):
        ret = self.archiver.execute("phantomjs --ssl-protocol=any " + jspath + " \"" + self.response.url + '\"')
        if ret['err']:
            pass # something went wrong
        return [i.strip() for i in ret['out'].split('\n') if i]

    def setReferer(self, ref):
        self.session.headers['Referer'] = ref

    def parseForm(self, action):
        # Return a dict of input name -> value pairs for the first form whose action matches
        form = self.html_soup.find("form", action = action)
        return { e.get("name", ""): e.get("value", "") or "" for e in form.find_all("input") if e.get("name", "") }

    def parseAllElements(self, *tags):
        # Return an array of all html elements that match tags (hierarchically, going up)
        tags = [[t.split(" ")[0], "" if len(t.split(" ")) == 1 else dict([tuple(t.split(" ")[1].split('='))])] for t in tags]
        hits = []
        for elem in self.html_soup.find_all(*tags[0]):
            res = elem
            for tag in tags[1:]:
                res = res.find_parent(*tag)
                if res is None: break
            if res: hits.append(elem)
        return hits

    def parseElement(self, *tags):
        # See parseAllElements, this returns just the first match
        try: return self.parseAllElements(*tags)[0]
        except IndexError: return {}
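
As a hedged illustration (not part of the pixer project), a concrete subclass only needs to implement login(); the site URL and form field names below are made-up placeholders:

class ExampleSiteClient(WebClient):
    # Hypothetical subclass - "https://example.com/login" and the input names are assumptions.
    def login(self):
        self.traverse("https://example.com/login")
        form = self.parseForm("/login")      # inputs of the form posting to /login
        form["username"] = "me"              # assumed field name
        form["password"] = "secret"          # assumed field name
        return self.session.post("https://example.com/login", data=form)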
Code example #2
File: async_pipeline.py Project: CollComm/Tuan
def main(source_, citylist_, period_):
	global error_cities
	global error_messages
	source = source_
	citylist = citylist_
	period = period_
	if source:
		source = source.split(",")
		print "source: ", source
	if citylist:
		citylist = citylist.split(",")
		print "city list: ", citylist

	while True:
		if source:
			app = AsyncPipeline()
			app.start(source, citylist)
		else:
			break

		# rescue
		if len(error_cities) > 0:
			for src in error_cities.iterkeys():
				print "Try to rescue", src
				remain_cities = error_cities[src]
				error_cities[src] = []
				error_messages[src] = []
				app = AsyncPipeline()
				app.start([src], remain_cities)

		# archive first
		archiver = Archiver()
		for src in source:
			archiver.archive(src, src, True)  # False archives locally, True archives to S3

		# repeat
		if not period: break
		time.sleep( int(period) * 3600 )

		error_cities = {}
		error_messages = {}

		# check config
		stop_crawl = 0
		check_config = CheckConfig()
		config = check_config.check('crawl_config')
		for src in source:
			if src in config:
				if "period" in config[src]:
					period = config[src]["period"]
				if "stop" in config[src]:
					stop_crawl = config[src]["stop"]
				break
		if stop_crawl == 1:
			break
Code example #3
File: tasks.py Project: gravic/wordpress-deploy
def deploy(slug, testing_url, production_url, theme_url, production_server, production_dir):
    build_dir = os.path.join(SETTINGS.BUILD_DIR, slug)
    archive_dir = os.path.join(SETTINGS.ARCHIVE_DIR, slug)

    compiler = Compiler(build_dir, testing_url, production_url, theme_url)
    compiler.compile()

    archiver = Archiver(slug, build_dir, archive_dir)
    archive = archiver.archive()

    deployer = Deployer(production_server, SETTINGS.SSH_KEY, archive_dir, production_dir)
    deployer.deploy(archive)

    return True
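
A hypothetical invocation of deploy(); the slug, URLs, server, and directory below are placeholders rather than values from the wordpress-deploy project:

deploy(slug="example-site",
       testing_url="https://staging.example.com",
       production_url="https://www.example.com",
       theme_url="https://www.example.com/wp-content/themes/example",
       production_server="deploy@web.example.com",
       production_dir="/var/www/example")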
Code example #4
File: database.py Project: hhfernald/notepath
    def _archive_and_delete_notes(self,
                                  notepaths: Union[NotePath, List[NotePath]],
                                  cursor
                                  ) -> None:
        if isinstance(notepaths, NotePath):
            notepaths = [notepaths]

        # Archive the notes before deleting them from the database.
        note_ids = self._get_note_ids(notepaths, cursor)
        archiver = Archiver(self._archive_path)
        old_notes = self._get_notes_by_id(note_ids, cursor)
        if old_notes:
            archiver.append_notes(old_notes)

        self._delete_notes_by_path(notepaths, cursor)
Code example #5
File: main.py Project: xfyer/Rover
def main(arguments: argparse.Namespace):
    # Set Logging Level
    logging.Logger.setLevel(system_helpers.logger,
                            arguments.logLevel)  # DoltPy's Log Level
    logger.setLevel(arguments.logLevel)  # This Script's Log Level

    rover: Rover = Rover(threadID=1,
                         name="Rover",
                         requested_wait_time=arguments.wait * 60,
                         reply=arguments.reply,
                         threadLock=threadLock)
    archiver: Archiver = Archiver(threadID=2,
                                  name="Archiver",
                                  requested_wait_time=arguments.wait * 60,
                                  commit=arguments.commit,
                                  threadLock=threadLock)
    server: WebServer = WebServer(
        threadID=3, name="Analysis Server"
    )  # https://www.tutorialspoint.com/python3/python_multithreading.htm

    # Start Archiver
    if arguments.archive:
        archiver.start()

    # Start Rover
    if arguments.rover:
        rover.start()

    # Start Webserver
    if arguments.server:
        server.start()
Code example #6
def getArchiverData(startTime,
                    numPoints,
                    signals,
                    timeInt=ARCHIVER_TIME_INTERVAL):
    # type: (datetime, int, List[str], int) -> Optional[ArchiverData]
    archiver = Archiver("lcls")
    try:
        data = archiver.getDataWithTimeInterval(
            pvList=signals,
            startTime=startTime,
            endTime=(startTime + timedelta(seconds=(numPoints * timeInt))),
            timeDelta=timedelta(seconds=timeInt))
        return data

    except ConnectTimeout:
        writeAndFlushStdErr("Archiver timed out - are you VPNed in?")
        return None
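
A minimal sketch of calling getArchiverData(); the start time and PV names are placeholders, not real LCLS signals:

from datetime import datetime

data = getArchiverData(startTime=datetime(2021, 1, 1, 8, 0),
                       numPoints=100,
                       signals=["EXAMPLE:PV:A", "EXAMPLE:PV:B"])
if data is None:
    print("No data returned - check that the Archiver service is reachable")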
Code example #7
 def __init__(self, config_file_or_database,
              db_engine=None, user=None, pw=None, host=None, port=5432):
     if not db_engine:
         config = read_config_file(config_file_or_database)
         db_engine = config['db_engine'] if 'db_engine' in config else db_engine
     else:
         config = {
             'database': config_file_or_database,
             'user': user,
             'password': pw,
             'host': host,
             'port': port,
         }
     self.archiver = Archiver(db_engine, config)
     self.rpa = False
     self.dry_run = False
     self.generator = None
Code example #8
File: scheduler.py Project: jeffdc/destalinator
def destalinate_job():
    print("Destalinating")
    if "SB_TOKEN" not in os.environ or "API_TOKEN" not in os.environ:
        print "ERR: Missing at least one Slack environment variable."
    else:
        warner = Warner()
        archiver = Archiver()
        announcer = Announcer()
        flagger = Flagger()
        print("Warning")
        warner.warn()
        print("Archiving")
        archiver.archive()
        print("Announcing")
        announcer.announce()
        print("Flagging")
        flagger.flag()
        print("OK: destalinated")
    print("END: destalinate_job")
Code example #9
def parse_xml(xml_file, output_format, connection, config):
    output_format = output_format.lower()
    if not os.path.exists(xml_file):
        sys.exit('Could not find input file: ' + xml_file)
    BUFFER_SIZE = 65536
    archiver = Archiver(connection, config)
    if output_format in SUPPORTED_OUTPUT_FORMATS:
        handler = SUPPORTED_OUTPUT_FORMATS[output_format](archiver)
    else:
        raise Exception("Unsupported report format '{}'".format(output_format))
    parser = xml.sax.make_parser()
    parser.setContentHandler(handler)
    with open(xml_file) as file:
        buffer = file.read(BUFFER_SIZE)
        while buffer:
            parser.feed(buffer)
            buffer = file.read(BUFFER_SIZE)
    if len(archiver.stack) != 1:
        raise Exception('File parse error. Please check you used proper output format (default: robotframework).')
    else:
        archiver.end_test_run()
Code example #10
    def __init__(self, user_nickname='alex', **kwargs):
        config = configparser.ConfigParser()

        try:
            self.dev_mode = kwargs['dev']
        except KeyError:
            self.dev_mode = False

        try:
            self.archive_mode = kwargs['archive']
        except KeyError:
            self.archive_mode = True

        try:
            self.prevent_shutdown = kwargs['prevent_shutdown']
        except KeyError:
            self.prevent_shutdown = False

        try:
            use_gui = kwargs['use_gui']
        except KeyError:
            use_gui = False

        config.read('test_config.ini' if self.dev_mode else 'live_config.ini')

        self.user = User(user_nickname)

        self.webdriver = get_webdriver(self.user.id, use_gui)
        self.archiver = Archiver(self.webdriver) if self.archive_mode else None
        self.fb = FacebookHandler(self.webdriver)

        try:
            self.init_selenium()
        except Exception as err:
            print(f'Encountered exception in selenium init:{err.__repr__()}')
            self.webdriver.quit()
            raise err

        self.pending_auctions = get_unexpired_auctions(dev=self.dev_mode)
Code example #11
 def __init__(self,
              config_file_or_database,
              db_engine=None,
              user=None,
              pw=None,
              host=None,
              port=5432):
     if not db_engine:
         config = read_config_file(config_file_or_database)
     else:
         config = {
             'database': config_file_or_database,
             'db_engine': db_engine,
             'user': user,
             'password': pw,
             'host': host,
             'port': port,
         }
     database = database_connection(config)
     self.archiver = Archiver(database, config)
     self.archiver.test_type = "Robot Framework"
     self.rpa = False
     self.dry_run = False
     self.generator = None
Code example #12
class Snapper:
    def __init__(self, cam_config, storage):
        self.storage = storage
        self.cam = IpCamSnapper(cam_config)
        self.writer = FileDao()
        self.archiver = Archiver(self.storage, self.cam)
        self.store_in = self.storage.get_cam_snap_path(self.cam.name)

    def snap(self):
        snap = None
        try:
            snap = self.cam.snap()
        except Exception as e:
            print(e.args)
            print("exception in snap")
            # TODO: log exception

        if snap:
            filename = snap.name + os.extsep + self.cam.ext
            self.writer.set_directory(self.store_in).write(
                filename, snap.content)

    def archive(self):
        self.archiver.archive()
Code example #13
class Supervisor:
    auctionpost = None
    constraints = None
    extensions_remaining = None
    fbclock = None
    countdown = None

    valid_bid_history = None

    initial_snipe_performed = False
    snipers_spotted = False
    most_recent_bid_submission = None

    def __init__(self, user_nickname='alex', **kwargs):
        config = configparser.ConfigParser()

        try:
            self.dev_mode = kwargs['dev']
        except KeyError:
            self.dev_mode = False

        try:
            self.archive_mode = kwargs['archive']
        except KeyError:
            self.archive_mode = True

        try:
            self.prevent_shutdown = kwargs['prevent_shutdown']
        except KeyError:
            self.prevent_shutdown = False

        try:
            use_gui = kwargs['use_gui']
        except KeyError:
            use_gui = False

        config.read('test_config.ini' if self.dev_mode else 'live_config.ini')

        self.user = User(user_nickname)

        self.webdriver = get_webdriver(self.user.id, use_gui)
        self.archiver = Archiver(self.webdriver) if self.archive_mode else None
        self.fb = FacebookHandler(self.webdriver)

        try:
            self.init_selenium()
        except Exception as err:
            print(f'Encountered exception in selenium init:{err.__repr__()}')
            self.webdriver.quit()
            raise err

        self.pending_auctions = get_unexpired_auctions(dev=self.dev_mode)

    def init_selenium(self):
        self.fb.login_with(self.user)
        self.user.id = self.fb.get_facebook_id(dev=self.dev_mode)

    def run(self):
        print(
            f'Covering the following auctions:\n{["    " + auction.auction_post.id + " " + str(auction.constraints.expiry) for auction in self.pending_auctions]}')
        try:
            for auction in self.pending_auctions:
                print('#' * 100 + '\nStarting new auction\n' + '#' * 100)
                self.prepare_for_auction(auction)
                self.perform_main_loop()
        finally:
            if not self.prevent_shutdown:
                print('Quitting webdriver...')
                self.webdriver.quit()
            else:
                print("Webdriver quit intentionally prevented (running in shutdown=False mode)")

        print('Finished gracefully')

    def prepare_for_auction(self, auction_instance):
        self.fbgroup = FbGroup(id=auction_instance.auction_post.group_id)  # todo make this less terrible
        self.auctionpost = auction_instance.auction_post
        self.constraints = auction_instance.constraints
        self.extensions_remaining = auction_instance.constraints.extensions
        self.fbclock = FacebookAuctionClock(self.fb, auction_instance.constraints, self.dev_mode)
        self.countdown = CountdownTimer(self.fbclock)

        self.valid_bid_history = None

        self.initial_snipe_performed = False
        self.snipers_spotted = False
        self.most_recent_bid_submission = None

        self.load_auction_page()
        self.auctionpost.name = self.fb.get_auction_name()
        self.print_preamble()
        self.refresh_bid_history(True)
        self.print_bid_history()

    def load_auction_page(self):
        self.fb.load_auction_page(self.fbgroup, self.auctionpost)

    def print_preamble(self):
        print(
            f'Bidding as {self.user.id} on {self.auctionpost.name} to a maximum of {self.constraints.max_bid} '
            f'in steps of {self.constraints.min_bid_step}')
        print(f'    Auction ends {self.constraints.expiry.astimezone(self.user.tz)} user-locale time')

    def perform_main_loop(self):
        try:
            while not self.fbclock.auction_last_call():
                self.iterate()
        except Exception as err:
            print(f'Error in perform_main_loop(): {err.__repr__()}')
            self.save_error_dump_html()
            raise err
        finally:
            self.perform_final_state_output()

    def save_error_dump_html(self):
        try:
            base_dir = os.getcwd()
            timestamp = str(datetime.now().timestamp())
            with open(os.path.join(base_dir, 'err_dump', f'{timestamp}.html'), 'wb+') as out:
                out.write(self.webdriver.page_source.encode('utf-8'))
                out.close()
        except Exception as err:
            print(f'Error writing error dump: {err.__repr__()}')

    def iterate(self):
        try:
            self.sync_clock_if_required()
            self.refresh_bid_history()
            self.countdown.proc()

            if self.snipers_present() and not self.snipers_spotted:
                print('Auction is contested - someone is typing!')
                self.snipers_spotted = True

            if not self.valid_bid_history:
                raise MissingBidHistoryException()

            if self.winning() and not self.snipers_spotted:
                pass

            elif not self.can_bid():
                pass

            # elif self.initial_snipe_ready():
            #     print('time for initial snipe')
            #     if self.constraints.max_bid >= self.get_lowest_valid_bid_value() + 8:
            #         # Add a lucky 8 to the initial snipe
            #         self.make_bid(1, 8)
            #     else:
            #         self.make_bid()
            #
            #     self.initial_snipe_performed = True

            elif self.final_snipe_ready():
                print('time for final snipe')

                if self.snipers_spotted and not self.extensions_remaining:
                    self.make_bid(0, self.get_countersnipe_increase())
                else:
                    self.make_bid()

        except StaleElementReferenceException as err:
            print(f'Stale:{err.__repr__()}')
        except MissingBidHistoryException as err:
            print(err.__repr__())
            print(f'History: {self.valid_bid_history}')

    def sync_clock_if_required(self):
        if self.fbclock.sync_required():
            self.fbclock.init_maximal_delay(10, self.get_auction_url())

    def get_auction_url(self):
        return f'https://www.facebook.com/groups/{self.fbgroup.id}/permalink/{self.auctionpost.id}/'

    def winning(self):
        try:
            return self.valid_bid_history[-1].bidder == self.user.id
        except IndexError:
            return False

    def can_bid(self):
        return self.get_lowest_valid_bid_value() <= self.constraints.max_bid

    def get_lowest_valid_bid_value(self, steps=1):
        try:
            return max(self.constraints.starting_bid,
                       self.valid_bid_history[-1].value + self.constraints.min_bid_step * steps)
        except IndexError:
            return self.constraints.starting_bid

    def snipers_present(self):
        begin_checking_at_datetime = self.constraints.expiry - timedelta(seconds=90)
        return self.fbclock.get_current_time() > begin_checking_at_datetime and self.fb.someone_is_typing()

    def initial_snipe_ready(self):
        initial_snipe_threshold = timedelta(seconds=4)
        return self.fbclock.get_current_time() > self.constraints.expiry - initial_snipe_threshold \
               and (not self.initial_snipe_performed) \
               and (not self.snipers_spotted)

    def final_snipe_ready(self):
        return self.fbclock.auction_last_call()

    def make_bid(self, steps=1, extra=8):

        # Add extra to the bid if possible, else just bid the minimum valid amount
        if self.get_lowest_valid_bid_value(steps) + extra > self.constraints.max_bid:
            bid_value = self.get_lowest_valid_bid_value(steps)
        else:
            bid_value = self.get_lowest_valid_bid_value(steps) + extra

        # Check for bid values that indicate empty (ie erroneous) bid history
        if bid_value < 208:
            raise RuntimeError(
                f'Bid value of {bid_value}NTD seems too low - something has gone wrong when parsing bids. Aborting.')

        # Make sure you didn't already just submit a bid for this amount
        if bid_value != self.most_recent_bid_submission:
            print(f'Preparing to bid {bid_value}')
            if bid_value > self.constraints.max_bid:
                raise ValueError("make_bid(): You cannot bid more than your max_bid_amount!")
            comment_content = f'{bid_value}(autobid)' if self.dev_mode else str(bid_value)
            print(f'Submitting "{comment_content}"')
            self.fb.post_comment(comment_content)

            self.most_recent_bid_submission = bid_value
            self.trigger_extension()
            sleep(0.05)
        else:
            print('Duplicate bid submission avoided')

    def get_countersnipe_increase(self):
        affordable_bid = min(self.get_lowest_valid_bid_value(4) + 8, self.constraints.max_bid)
        return affordable_bid - self.valid_bid_history[-1].value

    def trigger_extension(self):
        if self.extensions_remaining > 0 \
                and self.fbclock.get_time_remaining() < timedelta(minutes=5):
            self.constraints.expiry += timedelta(minutes=(1 if self.dev_mode else 5))
            self.countdown.reset()
            self.extensions_remaining -= 1
            print(f'Bid placed in final 5min - auction time extended to {self.constraints.expiry}')

    def perform_final_state_output(self):
        sleep(1)
        self.refresh_final_state()
        if self.archiver:
            self.archive_final_state()
        self.print_final_state()

    def refresh_final_state(self):
        print('Performing final refresh of page and bid history...')
        self.load_auction_page()
        self.refresh_bid_history(True)
        print('    Refreshed!')

    def archive_final_state(self):
        self.archiver.take_screenshot()
        self.archiver.save_final_state_html()

    def print_final_state(self):
        print('Final Auction State:')
        self.print_bid_history()

    def refresh_bid_history(self, force_accurate=False):
        self.fb.remove_all_child_comments()
        comment_elem_list = self.fb.get_comments()

        # If response speed is more critical than maintaining an accurate record
        if self.critical_period_active() and not force_accurate:
            new_valid_bid_history = self.get_bid_history_quickly(comment_elem_list)
        # Else if operating in non-critical mode
        else:
            new_valid_bid_history = self.get_bid_history_accurately(comment_elem_list)

        if new_valid_bid_history:
            # Only update the bid history if it is not empty
            self.valid_bid_history = new_valid_bid_history

    def get_bid_history_quickly(self, comment_elem_list):
        new_valid_bid_history = []

        # for idx, comment in enumerate(comment_elem_list, start=len(self.valid_bid_history)):
        for idx, comment in enumerate(comment_elem_list):
            try:
                candidate_bid = bidparse.comment_parse(comment)

                if candidate_bid.timestamp >= self.constraints.expiry:
                    raise ValueError()

                if self.potentially_contested(idx, comment_elem_list):
                    print(f"User's bid of {candidate_bid.value} may be contested - disregarding bid")
                    raise ValueError

                if self.bid_is_valid(candidate_bid, new_valid_bid_history):
                    # If this isn't running during initialisation and bid is new
                    if self.valid_bid_history and self.bid_is_new(candidate_bid):
                        print(f'New bid detected! (fast-mode)')
                        self.print_bid(candidate_bid)
                        self.relax_if_warranted()

                    new_valid_bid_history.append(candidate_bid)

            except ValueError:
                pass
            except NoSuchElementException:
                pass
            except Exception:
                pass  # todo figure out what can trigger this and explicitly handle it

        return new_valid_bid_history

    # Detects user's bids that appear to precede competing bids in client but may not on server
    def potentially_contested(self, comment_idx, comment_elem_list):
        if comment_idx != len(comment_elem_list) - 1:
            comment_elem = comment_elem_list[comment_idx]
            next_comment_elem = comment_elem_list[comment_idx + 1]
            if comment_elem.author == self.user.id \
                    and (comment_elem.timestamp == next_comment_elem.timestamp \
                         or comment_elem.timestamp == next_comment_elem.timestamp + timedelta(seconds=1)) \
                    and not bidparse.comment_parse(comment_elem).value > bidparse.comment_parse(
                next_comment_elem).value + self.constraints.min_bid_step:
                return True
        return False

    # Returns whether bid is valid with respect to last enumerated valid bid
    def bid_is_valid(self, candidate_bid, bid_history):
        return (not bid_history
                or candidate_bid.value >= bid_history[-1].value + self.constraints.min_bid_step)

    # Returns whether bid has been detected in any prior iteration
    def bid_is_new(self, candidate_bid):
        return not self.valid_bid_history or candidate_bid > self.valid_bid_history[-1]

    # Reset countersnipe detection if non-paranoid and there are no other countersnipers
    def relax_if_warranted(self):
        if self.snipers_spotted and not self.constraints.paranoid_mode and not self.fb.someone_is_typing():
            print('Countersniper has bid - relaxing posture')
            self.snipers_spotted = False

    def get_bid_history_accurately(self, comment_elem_list):
        new_valid_bid_history = []

        for comment in comment_elem_list:
            try:
                candidate_bid = bidparse.comment_parse(comment)

                if candidate_bid.timestamp >= self.constraints.expiry:
                    raise ValueError()

                if self.bid_is_valid(candidate_bid, new_valid_bid_history):
                    # If this isn't running during initialisation and bid is new
                    if self.valid_bid_history and self.bid_is_new(candidate_bid):
                        print(f'New bid detected!')
                        self.print_bid(candidate_bid)
                        self.relax_if_warranted()

                    new_valid_bid_history.append(candidate_bid)

            except ValueError:
                pass
            except NoSuchElementException:
                pass
            except Exception as err:
                pass

        return new_valid_bid_history

    def critical_period_active(self):
        return self.fbclock.get_time_remaining() < timedelta(seconds=5)

    def print_bid(self, bid):
        max_placed_bid_digits = len(str(self.valid_bid_history[-1].value))
        print(f'    {str(bid.value).rjust(max_placed_bid_digits)}NTD '
              f'at {bid.timestamp.month}月{bid.timestamp.day}日 {bid.timestamp.strftime("%H:%M:%S")} '
              f'({bid.bidder})')

    def print_bid_history(self):
        print('Current Bid History:')
        for bid in self.valid_bid_history:
            self.print_bid(bid)
        if self.fbclock.auction_last_call():
            self.print_auction_result()

    def print_auction_result(self):
        try:
            if self.auction_won():
                print(f'Auction won for {self.valid_bid_history[-1].value}NTD')
            else:
                print(f'Auction lost to {self.valid_bid_history[-1].bidder} ({self.valid_bid_history[-1].value}NTD)')
        except IndexError:
            print(f'Auction contains no valid bids... wtf happened?')

    def auction_won(self):
        try:
            return self.valid_bid_history[-1].bidder == self.user.id \
                   and self.valid_bid_history[-1].timestamp < self.constraints.expiry
        except IndexError:
            return False
Code example #14
def main():
    try:
        os.makedirs(args.output_directory)
    except:
        pass

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < args.initial_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 4.5
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Code example #15
    def parse(self):

        h_data_type = "invoice-headers"
        d_data_type = "invoice-line-items"

        proc_dir = "processing/invoices"
        json_dir = "{}/{}".format(self.data_dir, proc_dir)
        json_files = [f for f in os.listdir(json_dir) if f.endswith('.json')]

        if not json_files:
            self.log.write(
                "ERROR {}/*.json files do not exist, did you forget to extract this?"
                .format(json_dir))
            return False

        xero_url_accrec = "https://go.xero.com/AccountsReceivable/View.aspx?InvoiceID="
        xero_url_accpay = "https://go.xero.com/AccountsPayable/View.aspx?InvoiceID="

        csv_header_file_name = "{}/{}-delta.csv".format(json_dir, h_data_type)
        csv_detail_file_name = "{}/{}-delta.csv".format(json_dir, d_data_type)

        header_col_header = """
    Type,InvoiceID,InvoiceNumber,Reference,
    AmountDue,AmountPaid,AmountCredited,CurrencyRate,IsDiscounted,HasAttachments,HasErrors,
    ContactID,Name,Date,BrandingThemeID,BrandingThemeName,Status,LineAmountTypes,
    SubTotal,TotalTax,Total,UpdatedDateUTC,CurrencyCode,ProcessingNotes, URL
    """

        detail_col_header = """
    InvoiceID,InvoiceNumber,Type,Date,LineItemNumber,
    LineItemID,ItemCode,ItemCode2,Description,UnitAmount,TaxAmount,LineAmount,AccountCode,Quantity
    """

        csv_header_file = open(csv_header_file_name, 'w', encoding='utf-8')
        csv_header_file.write(
            re.sub(r"[\n\t\s]", "", header_col_header) + "\n")

        csv_detail_file = open(csv_detail_file_name, 'w', encoding='utf-8')
        csv_detail_file.write(
            re.sub(r"[\n\t\s]", "", detail_col_header) + "\n")

        # read in branding themes
        themes_csv_file = self.data_dir + "/processing/default/branding-themes.csv"
        if not os.path.isfile(themes_csv_file):
            msg = "ERROR {} file does not exist, did you forget to extract this?"
            print(msg)
            self.log.write(msg)
            return
        themes = {}
        with open(themes_csv_file, "r") as f:
            reader = csv.reader(f, delimiter=",")
            for j, line in enumerate(reader):
                if j > 0:
                    themes[line[0]] = line[1]

        i = 0
        j = 0

        for file in json_files:

            i = i + 1

            self.log.write(
                "INFO [invoice-line-items] processing file {}".format(file))

            json_file_name = "{}/{}".format(json_dir, file)

            with open(json_file_name, encoding='utf-8') as f:
                data = json.load(f)

            collection = 'Invoices'
            if collection not in data:
                self.log.write(
                    "ERROR '{}' collection not found in JSON file {}".format(
                        collection, file))
                continue

            # zero ts in json header
            zero_created_datetime = clean_date(data['DateTimeUTC'])

            # single invoice, but is part of a collection
            invoice = data['Invoices'][0]

            type = invoice['Type'] if 'Type' in invoice else ''
            invoice_id = invoice['InvoiceID']
            invoice_number = invoice[
                'InvoiceNumber'] if 'InvoiceNumber' in invoice else ''
            reference = invoice['Reference'] if 'Reference' in invoice else ''
            amount_due = invoice[
                'AmountDue'] if 'AmountDue' in invoice else 0.00
            amount_paid = invoice[
                'AmountPaid'] if 'AmountPaid' in invoice else 0.00
            amount_credited = invoice[
                'AmountCredited'] if 'AmountCredited' in invoice else 0.00
            currency_rate = invoice[
                'CurrencyRate'] if 'CurrencyRate' in invoice else 0.00
            is_discounted = invoice[
                'IsDiscounted'] if 'IsDiscounted' in invoice else ''
            has_attachments = invoice[
                'HasAttachments'] if 'HasAttachments' in invoice else ''
            has_errors = invoice['HasErrors'] if 'HasErrors' in invoice else ''

            if 'Contact' in invoice and invoice['Contact']:
                contact = invoice['Contact']
                contact_id = contact['ContactID']
                name = contact['Name'] if 'Name' in contact else ''
            else:
                contact = ""
                contact_id = ""
                name = ""

            # use DateString
            date = (
                invoice['DateString'])[:10] if 'DateString' in invoice else ''

            branding_theme_id = invoice[
                'BrandingThemeID'] if 'BrandingThemeID' in invoice else ''
            status = invoice['Status'] if 'Status' in invoice else ''
            line_amount_types = invoice[
                'LineAmountTypes'] if 'LineAmountTypes' in invoice else ''
            sub_total = invoice['SubTotal'] if 'SubTotal' in invoice else ''
            total_tax = invoice['TotalTax'] if 'TotalTax' in invoice else ''
            total = invoice['Total'] if 'Total' in invoice else ''
            updated_date_utc = clean_date(
                invoice['UpdatedDateUTC']
            ) if 'UpdatedDateUTC' in invoice else ''
            currency_code = invoice[
                'CurrencyCode'] if 'CurrencyCode' in invoice else ''

            if type == "ACCPAY":
                url = xero_url_accpay + invoice_id
            elif type == "ACCREC":
                url = xero_url_accrec + invoice_id
            else:
                url = ""

            # get branding theme name
            processing_notes = ""
            if branding_theme_id in themes.keys():
                branding_theme_name = themes[branding_theme_id]
            else:
                branding_theme_name = ""
                processing_notes = "branding theme id not found"

            columns = [
                type, invoice_id, invoice_number, reference, amount_due,
                amount_paid, amount_credited, currency_rate, is_discounted,
                has_attachments, has_errors, contact_id, name, date,
                branding_theme_id, branding_theme_name, status,
                line_amount_types, sub_total, total_tax, total,
                updated_date_utc, currency_code, processing_notes, url
            ]

            prep_columns = list(
                map(lambda col: "\"" + str(col) + "\"", columns))
            line = ",".join(prep_columns) + "\n"
            csv_header_file.write(line)

            # process line items
            if 'LineItems' not in invoice:
                self.log.write(
                    "WARN no line items found for invoice {}".format(
                        invoice_id))
                continue

            line_items = invoice['LineItems']

            # artificial li number
            line_item_num = 0
            for line_item in line_items:

                # total line items
                j = j + 1

                line_item_num = line_item_num + 1

                li_lid = line_item['LineItemID']
                li_item_code = line_item[
                    'ItemCode'] if 'ItemCode' in line_item else ''
                li_desc = line_item[
                    'Description'] if 'Description' in line_item else ''
                li_unit_amt = line_item[
                    'UnitAmount'] if 'UnitAmount' in line_item else 0.00
                li_tax_amt = line_item[
                    'TaxAmount'] if 'TaxAmount' in line_item else 0.00
                li_line_amt = line_item[
                    'LineAmount'] if 'LineAmount' in line_item else 0.00
                li_acct_code = line_item[
                    'AccountCode'] if 'AccountCode' in line_item else ''
                li_qty = line_item[
                    'Quantity'] if 'Quantity' in line_item else 0.00

                # desc field allows cr's, strip here
                li_desc = li_desc.strip("\n")

                # some codes have an additional code piped on to them
                li_item_code2 = ""
                parts = li_item_code.split("|")
                if len(parts) == 2:
                    li_item_code = parts[0].strip()
                    li_item_code2 = parts[1].strip()

                columns = [
                    invoice_id, invoice_number, type, date, line_item_num,
                    li_lid, li_item_code, li_item_code2, li_desc, li_unit_amt,
                    li_tax_amt, li_line_amt, li_acct_code, li_qty
                ]

                prep_columns = list(
                    map(lambda col: "\"" + str(col) + "\"", columns))
                line = ",".join(prep_columns) + "\n"
                csv_detail_file.write(line)

        csv_header_file.close()
        self.log.write("INFO [{}] CSV file created {} ({:,} records)".format(
            'invoice-headers', csv_header_file_name, i))

        csv_detail_file.close()
        self.log.write("INFO [{}] CSV file created {} ({:,} records)".format(
            'invoice-line-items', csv_detail_file_name, j))

        ark = Archiver(self.log)
        files = list(
            map(lambda file: "{}/{}".format(json_dir, file), json_files))
        ark.archive('invoices', files)
        ark.archive(h_data_type, csv_header_file_name)
        ark.archive(d_data_type, csv_detail_file_name)
Code example #16
def main():
    try:
        os.makedirs(args.output_directory)
    except:
        pass

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < args.initial_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Initialize colors
    color_candidates = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_candidates.append((red, green, blue))

    scene, cube_nodes = build_scene(args.num_cubes, color_candidates)
    camera = OrthographicCamera(xmag=0.9, ymag=0.9)
    camera_node = Node(camera=camera)
    scene.add_node(camera_node)
    renderer = OffscreenRenderer(
        viewport_width=args.image_size, viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    for scene_index in tqdm(range(total_scenes_to_render)):

        camera_distance = 2
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

        # Change cube color and position
        update_cube_color_and_position(cube_nodes, color_candidates)

        # Transfer changes to the vertex buffer on gpu
        udpate_vertex_buffer(cube_nodes)

    renderer.delete()
Code example #17
def main():
    try:
        os.makedirs(args.output_directory)
    except:
        pass

    # Load MNIST images
    mnist_images = load_mnist_images()

    # Set GPU device
    rtx.set_device(args.gpu_device)

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)
    camera_distance = 2

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            camera_position = np.array(
                (rand_position_xz[0], wall_height / 2, rand_position_xz[1]))
            center = np.array((0, wall_height / 2, 0))

            # Compute yaw and pitch
            camera_direction = camera_position - center
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera.look_at(tuple(camera_position), tuple(center), up=(0, 1, 0))
            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)
Code example #18
 def __init__(self, cam_config, storage):
     self.storage = storage
     self.cam = IpCamSnapper(cam_config)
     self.writer = FileDao()
     self.archiver = Archiver(self.storage, self.cam)
     self.store_in = self.storage.get_cam_snap_path(self.cam.name)
Code example #19
    def parse(self):

        data_type = "branding-themes"

        proc_dir = "processing/default"
        json_file_name = "{}/{}/{}.json".format(self.data_dir, proc_dir,
                                                data_type)
        csv_file_name = "{}/{}/{}.csv".format(self.data_dir, proc_dir,
                                              data_type)

        if not os.path.isfile(json_file_name):
            self.log.write(
                "ERROR {} file does not exist, did you forget to extract this?"
                .format(json_file_name))
            return False

        with open(json_file_name, encoding='utf-8') as f:
            data = json.load(f)

        collection = 'BrandingThemes'
        if collection not in data:
            self.log.write(
                "ERROR '{}' collection not found in JSON file".format(
                    collection))
            return

        # zero ts in json header
        zero_created_datetime = clean_date(data['DateTimeUTC'])

        col_header = "BrandingThemeID,Name,LogoUrl,Type,SortOrder,CreatedDateUTC"

        csv_file = open(csv_file_name, 'w', encoding='utf-8')
        csv_file.write(re.sub(r'\n', '', col_header) + "\n")

        i = 0

        for theme in data['BrandingThemes']:

            i = i + 1

            id = theme['BrandingThemeID']
            name = theme['Name']
            url = theme['LogoUrl'] if 'LogoUrl' in theme else ''
            type = theme['Type'] if 'Type' in theme else ''
            sort_order = theme['SortOrder'] if 'SortOrder' in theme else ''
            created_date = clean_date(theme['CreatedDateUTC'])

            columns = [id, name, url, type, sort_order, created_date]

            prep_columns = list(
                map(lambda col: "\"" + str(col) + "\"", columns))
            line = ",".join(prep_columns) + "\n"

            csv_file.write(line)

        csv_file.close()
        self.log.write("INFO [{}] CSV file created {} ({:,} records)".format(
            data_type, csv_file_name, i))

        ark = Archiver(self.log)
        ark.archive(data_type, json_file_name)
Code example #20
    def merge_invoice_delta(self):

        # master + (daily) delta files
        header_master_file = "{}/master/invoice-headers.csv".format(
            self.data_dir)
        header_delta_file = "{}/processing/invoices/invoice-headers-delta.csv".format(
            self.data_dir)
        detail_master_file = "{}/master/invoice-line-items.csv".format(
            self.data_dir)
        detail_delta_file = "{}/processing/invoices/invoice-line-items-delta.csv".format(
            self.data_dir)

        # read in as df's
        df_header_master = pd.read_csv(header_master_file,
                                       index_col='InvoiceID')
        df_header_delta = pd.read_csv(header_delta_file, index_col='InvoiceID')
        df_detail_master = pd.read_csv(
            detail_master_file)  #, index_col='LineItemID')
        df_detail_delta = pd.read_csv(
            detail_delta_file)  #, index_col='LineItemID')

        hm_cnt = df_header_master.shape[0]
        hd_cnt = df_header_delta.shape[0]
        #print("{:,} rows in header master".format(hm_cnt))
        #print("{:,} rows in header delta".format(hd_cnt))

        dm_cnt = df_detail_master.shape[0]
        dd_cnt = df_detail_delta.shape[0]
        #print("{:,} rows in detail master".format(dm_cnt))
        #print("{:,} rows in detail delta".format(dd_cnt))

        h_del_cnt = 0
        d_del_cnt = 0
        # loop through invoice header delta
        for id, row in df_header_delta.iterrows():
            # id record exists delete it (will be re-inserted next)
            if id in df_header_master.index.values:
                # delete header row
                h_del_cnt = h_del_cnt + 1
                df_header_master.drop(id, inplace=True)
                # delete related detail rows
                d_del_cnt = d_del_cnt + df_detail_master[
                    df_detail_master['InvoiceID'] == id].shape[0]
                df_detail_master.drop(df_detail_master[
                    df_detail_master['InvoiceID'] == id].index,
                                      inplace=True)

        # concat master files (with deletes) + delta files = UPSERTED master files
        df_new_header_master = pd.concat([df_header_master, df_header_delta])
        df_new_detail_master = pd.concat([df_detail_master, df_detail_delta])

        new_header_master_file = "{}/processing/invoices/invoice-headers.csv".format(
            self.data_dir)
        new_detail_master_file = "{}/processing/invoices/invoice-line-items.csv".format(
            self.data_dir)

        if os.path.exists(new_header_master_file):
            os.remove(new_header_master_file)
        df_new_header_master.to_csv(new_header_master_file,
                                    header=True,
                                    index=True,
                                    quoting=csv.QUOTE_ALL)

        if os.path.exists(new_detail_master_file):
            os.remove(new_detail_master_file)
        df_new_detail_master.to_csv(new_detail_master_file,
                                    header=True,
                                    index=False,
                                    quoting=csv.QUOTE_ALL)

        self.log.write(
            "INFO [invoice-headers] {:,} invoice records inserted into header master"
            .format(df_new_header_master.shape[0] - hm_cnt))
        self.log.write(
            "INFO [invoice-headers] {:,} invoice records updated in header master"
            .format(hd_cnt - (df_new_header_master.shape[0] - hm_cnt)))
        self.log.write(
            "INFO [invoice-headers] master file written to {}".format(
                new_header_master_file))

        self.log.write(
            "INFO [invoice-details] {:,} invoice records inserted into detail master"
            .format(df_new_detail_master.shape[0] - dm_cnt))
        self.log.write(
            "INFO [invoice-details] {:,} invoice records updated in detail master"
            .format(dd_cnt - (df_new_detail_master.shape[0] - dm_cnt)))
        self.log.write(
            "INFO [invoice-details] master file written to {}".format(
                new_detail_master_file))

        ark = Archiver(self.log)
        ark.archive('invoice-headers', new_header_master_file)
        ark.archive('invoice-line-items', new_detail_master_file)
        ark.copy('invoice-headers', new_header_master_file, 'master')
        ark.copy('invoice-line-items', new_detail_master_file, 'master')

        formats = [
            '', '', '', '', '0.00', '0.00', '0.00', '0.00', '', '', '', '', '',
            'short', '', '', '', '', '0.00', '0.00', '0.00', 'long', '', '', ''
        ]
        ark.copy('invoice-headers',
                 new_header_master_file,
                 'current',
                 excelize=True,
                 xlsx_formats=formats)

        formats = [
            '', '', '', 'short', '0', '', '', '', '', '0.00', '0.00', '0.00',
            '', '0.00'
        ]
        ark.copy('invoice-line-items',
                 new_detail_master_file,
                 'current',
                 excelize=True,
                 xlsx_formats=formats)
Code example #21
File: pipeline.py Project: CollComm/Tuan
def main(source_, citylist_, period_):
	source = source_
	citylist = citylist_
	period = period_
	if source:
		source = source.split(",")
		print "source: ", source
	if citylist:
		citylist = citylist.split(",")
		print "city list: ", citylist

	while True:
		if not source: break
		sleep_interval = (0,30)
		if "meituan" in source:
			meituan_app = Pipeline(MeituanCrawler(), MeituanParser(), None, "meituan")
			if not citylist:
				error = meituan_app.start(sleep_interval)
				if len(error) > 0:
					meituan_app.rescue(error, sleep_interval)
			else:
				meituan_app.rescue(citylist, sleep_interval)
		if "nuomi" in source:
			nuomi_app = Pipeline(NuomiCrawler(), NuomiParser(), None, "nuomi")
			if not citylist:
				error = nuomi_app.start(sleep_interval)
				if len(error) > 0:
					nuomi_app.rescue(error, sleep_interval)
			else:
				nuomi_app.rescue(citylist, sleep_interval)
		if "lashou" in source:
			lashou_app = Pipeline(LashouCrawler(), LashouParser(), None, "lashou")
			if not citylist:
				error = lashou_app.start(sleep_interval)
				if len(error) > 0:
					lashou_app.rescue(error, sleep_interval)
			else:
				lashou_app.rescue(citylist, sleep_interval)
		if "wowo" in source:
			wowo_app = Pipeline(WowoCrawler(), WowoParser(), None, "wowo")
			if not citylist:
				error = wowo_app.start(sleep_interval)
				if len(error) > 0:
					wowo_app.rescue(error, sleep_interval)
			else:
				wowo_app.rescue(citylist, sleep_interval)
		if "dida" in source:
			dida_app = Pipeline(DidaCrawler(), DidaParser(), None, "dida")
			if not citylist:
				error = dida_app.start(sleep_interval)
				if len(error) > 0:
					dida_app.rescue(error, sleep_interval)
			else:
				dida_app.rescue(citylist, sleep_interval)
		if "dianping" in source:
			dianping_app = Pipeline(DianpingCrawler(), DianpingParser(), None, "dianping")
			if not citylist:
				error = dianping_app.start(sleep_interval)
				if len(error) > 0:
					dianping_app.rescue(error, sleep_interval)
			else:
				dianping_app.rescue(citylist, sleep_interval)
		if "manzuo" in source:
			manzuo_app = Pipeline(ManzuoCrawler(), ManzuoParser(), None, "manzuo")
			if not citylist:
				error = manzuo_app.start(sleep_interval)
				if len(error) > 0:
					manzuo_app.rescue(error, sleep_interval)
			else:
				manzuo_app.rescue(citylist, sleep_interval)
		if "ftuan" in source:
			ftuan_app = Pipeline(FtuanCrawler(), FtuanParser(), None, "ftuan")
			if not citylist:
				error = ftuan_app.start(sleep_interval)
				if len(error) > 0:
					ftuan_app.rescue(error, sleep_interval)
			else:
				ftuan_app.rescue(citylist, sleep_interval)
		if "wuba" in source:
			wuba_app = Pipeline(WubaCrawler(), WubaParser(), None, "wuba")
			if not citylist:
				error = wuba_app.start(sleep_interval)
				if len(error) > 0:
					wuba_app.rescue(error, sleep_interval)
			else:
				wuba_app.rescue(citylist, sleep_interval)

		# archive first
		archiver = Archiver()
		for src in source:
			archiver.archive(src, src, True)  # False archives locally, True archives to S3

		# repeat
		if not period: break
		time.sleep( int(period) * 3600 )

		# check config file
		stop_crawl = 0
		check_config = CheckConfig()
		config = check_config.check('crawl_config')
		for src in source:
			if src in config:
				if "period" in config[src]:
					period = config[src]["period"]
				if "stop" in config[src]:
					stop_crawl = config[src]["stop"]
				break
		if stop_crawl == 1:
			break
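
The nine per-source blocks above differ only in which crawler and parser classes they instantiate. A minimal sketch (not part of the project) of how they could be collapsed into a table plus a loop, assuming the Pipeline(crawler, parser, None, name) constructor and the start/rescue methods used above:

PIPELINES = {
    "meituan": (MeituanCrawler, MeituanParser),
    "nuomi": (NuomiCrawler, NuomiParser),
    "lashou": (LashouCrawler, LashouParser),
    "wowo": (WowoCrawler, WowoParser),
    "dida": (DidaCrawler, DidaParser),
    "dianping": (DianpingCrawler, DianpingParser),
    "manzuo": (ManzuoCrawler, ManzuoParser),
    "ftuan": (FtuanCrawler, FtuanParser),
    "wuba": (WubaCrawler, WubaParser),
}

def run_sources(source, citylist, sleep_interval=(0, 30)):
    # Run each requested source through the same start/rescue flow.
    for name in source:
        if name not in PIPELINES:
            continue
        crawler_cls, parser_cls = PIPELINES[name]
        app = Pipeline(crawler_cls(), parser_cls(), None, name)
        if citylist:
            app.rescue(citylist, sleep_interval)
        else:
            error = app.start(sleep_interval)
            if len(error) > 0:
                app.rescue(error, sleep_interval)
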
Code example #22
File: client.py Project: ketoni/pixer
 def __init__(self):
     self.session = requests.Session()
     self.archiver = Archiver()
     self.response = ""
     self.html_soup = ""
Code example #23
#! /usr/bin/env python

from warner import Warner
from archiver import Archiver

if __name__ == "__main__":
    warner = Warner()
    archiver = Archiver()
    warner.warn()
    archiver.archive()
Code example #24
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.ambient_light_intensity = 0.05

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    camera = rtx.OrthographicCamera()

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(args.num_cubes, color_array)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)

        camera_distance = 1

        for _ in range(args.num_observations_per_scene):
            # Generate random point on a sphere
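            # (normalizing an isotropic Gaussian sample yields a direction uniformly distributed on the unit sphere)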
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            center = (0, 0, 0)
            camera.look_at(tuple(camera_position), center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
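            # (approximates the sRGB transfer function with a plain 1/2.2 gamma before quantizing to 8 bits)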
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

        archiver.add(scene_data)
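
The scene-generation examples above and below call a compute_yaw_and_pitch helper that is not shown in these snippets. A minimal sketch of what such a helper might look like, assuming a y-up coordinate system with yaw measured in the x-z plane (the conventions in the original projects may differ):

import math
import numpy as np

def compute_yaw_and_pitch(direction):
    # Hypothetical sketch: yaw is the heading of the direction vector in the
    # x-z plane, pitch is its elevation above that plane.
    x, y, z = direction
    norm = np.linalg.norm(direction)
    yaw = math.atan2(x, z) % (2 * math.pi)
    pitch = math.asin(y / norm)
    return yaw, pitch
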
Code example #25
def main():
    try:
        os.makedirs(args.output_directory)
    except:
        pass

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera_distance = 3
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-3, 3, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
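
This example (and the pyrender examples below) also relies on a genearte_camera_quaternion helper that is not shown. One plausible construction, offered only as a hypothetical sketch: compose a yaw rotation about the y axis with a pitch rotation about the x axis, returning the quaternion in (x, y, z, w) order, which is the glTF-style order pyrender's Node.rotation uses:

import math

def generate_camera_quaternion(yaw, pitch):
    # Hypothetical sketch; the project's own helper may use different axes or ordering.
    cy, sy = math.cos(yaw / 2), math.sin(yaw / 2)
    cp, sp = math.cos(pitch / 2), math.sin(pitch / 2)
    # q_yaw = (0, sy, 0, cy) composed with q_pitch = (sp, 0, 0, cp)
    return [cy * sp, sy * cp, -sy * sp, cy * cp]
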
Code example #26
class ArchiverListener:
    ROBOT_LISTENER_API_VERSION = 2

    def __init__(self,
                 config_file_or_database,
                 db_engine=None,
                 user=None,
                 pw=None,
                 host=None,
                 port=5432):
        if not db_engine:
            config = read_config_file(config_file_or_database)
        else:
            config = {
                'database': config_file_or_database,
                'db_engine': db_engine,
                'user': user,
                'password': pw,
                'host': host,
                'port': port,
            }
        database = database_connection(config)
        self.archiver = Archiver(database, config)
        self.archiver.test_type = "Robot Framework"
        self.rpa = False
        self.dry_run = False
        self.generator = None

    def start_suite(self, name, attrs):
        if not self.archiver.test_run_id:
            self.archiver.begin_test_run('ArchiverListener', None,
                                         self.generator, self.rpa,
                                         self.dry_run)
        self.archiver.begin_suite(name)

    def end_suite(self, name, attrs):
        self.archiver.end_suite(attrs)

    def start_test(self, name, attrs):
        self.archiver.begin_test(name)

    def end_test(self, name, attrs):
        self.archiver.end_test(attrs)

    def start_keyword(self, name, attrs):
        self.archiver.begin_keyword(attrs['kwname'], attrs['libname'],
                                    attrs['type'], attrs['args'])

    def end_keyword(self, name, attrs):
        self.archiver.end_keyword(attrs)

    def log_message(self, message):
        self.archiver.begin_log_message(message['level'], message['timestamp'])
        self.archiver.end_log_message(message['message'])

    def message(self, message):
        if not self.generator:
            self.generator = message['message']
        elif message['message'].startswith('Settings:'):
            self.process_settings(message['message'])

    def process_settings(self, settings):
        settings = dict([row.split(':', 1) for row in settings.split('\n')])

        self.rpa = bool('RPA' in settings
                        and settings['RPA'].strip() == 'True')
        self.dry_run = bool(settings['DryRun'].strip() == 'True')

    def close(self):
        self.archiver.end_test_run()
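
For context, Robot Framework listeners such as this one are normally attached on the command line, with the listener's constructor arguments separated by colons, e.g. robot --listener ArchiverListener.py:config.json tests/ (the file and directory names here are purely illustrative).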
Code example #27
  def parse(self):

    data_type = "items"
    xero_url = "https://go.xero.com/Accounts/Inventory/"

    proc_dir = "processing/default"
    json_file_name = "{}/{}/{}.json".format(self.data_dir, proc_dir, data_type)
    csv_file_name = "{}/{}/{}.csv".format(self.data_dir, proc_dir, data_type)

    if not os.path.isfile(json_file_name):
      self.log.write("ERROR {} file does not exist, did you forget to extract this?".format(json_file_name))
      return False

    with open(json_file_name, encoding='utf-8') as f:
      data = json.load(f)

    collection = 'Items'
    if collection not in data:
      self.log.write("ERROR '{}' collection not found in JSON file".format(collection))
      return

    # zero ts in json header
    zero_created_datetime = clean_date(data['DateTimeUTC'])

    col_header = """
    ItemID,Code,Code2,Description,PurchaseDescription,UpdatedDateUTC,PurchasedUnitPrice,PurchasedCOGSAccountCode,
    PurchasedTaxType,SalesUnitPrice,SalesAccountCode,SalesTaxType,Name,IsTrackedAsInventory,
    InventoryAssetAccountCode,TotalCostPool,QuantityOnHand,IsSold,IsPurchased,
    SupplierCode,ProductDescription,ProductSegment1,ProductSegment2,
    PurchaseSupplierCode,PurchaseProductDescription,PurchaseProductSegment1,PurchaseProductSegment2,
    ProcessingNotes, URL 
    """

    csv_file = open(csv_file_name, 'w', encoding='utf-8')
    csv_file.write(re.sub(r"[\n\t\s]", "", col_header) + "\n")

    i = 0

    for item in data[collection]:

      i = i + 1

      item_id = item['ItemID']
      code = item['Code']
      description = item['Description']

      purchase_description = item['PurchaseDescription'] if 'PurchaseDescription' in item else ''
      updated_date_utc = clean_date(item['UpdatedDateUTC']) if 'UpdatedDateUTC' in item else ''

      if 'PurchaseDetails' in item and item['PurchaseDetails']:
        details = item['PurchaseDetails']
        purchase_unit_price = details['UnitPrice'] if 'UnitPrice' in details else ''
        purchase_cogs_account_code = details['COGSAccountCode'] if 'COGSAccountCode' in details else ''
        purchase_tax_type = details['TaxType'] if 'TaxType' in details else ''
      else:
        purchase_unit_price = ''
        purchase_cogs_account_code = ''
        purchase_tax_type = ''

      if 'SalesDetails' in item and item['SalesDetails']:
        details = item['SalesDetails']
        sales_unit_price = details['UnitPrice'] if 'UnitPrice' in details else ''
        sales_account_code = details['AccountCode'] if 'AccountCode' in details else ''
        sales_tax_type = details['TaxType'] if 'TaxType' in details else ''
      else:
        sales_unit_price = ''
        sales_account_code = ''
        sales_tax_type = ''

      name = item['Name']
      is_tracked_as_inventory = item['IsTrackedAsInventory'] if 'IsTrackedAsInventory' in item else ''
      inventory_asset_account_code = item['InventoryAssetAccountCode'] if 'InventoryAssetAccountCode' in item else ''
      total_cost_pool = item['TotalCostPool'] if 'TotalCostPool' in item else ''
      quantity_on_hand = item['QuantityOnHand'] if 'QuantityOnHand' in item else ''
      is_sold = item['IsSold'] if 'IsSold' in item else ''
      is_purchased = item['IsPurchased'] if 'IsPurchased' in item else ''

      # some codes have an additional code piped onto them
      code2 = ""
      parts = code.split("|")
      if len(parts) == 2:
        code = parts[0].strip()
        code2 = parts[1].strip()

      processing_notes = ""

      supplier_code = ""
      product_description = ""
      product_segment_1 = ""
      product_segment_2 = ""

      purchase_supplier_code = ""
      purchase_product_description = ""
      purchase_product_segment_1 = ""
      purchase_product_segment_2 = ""

      # parse desc's for supplier code, desc, product segment 1, product segment 2
      parts = description.split("|")
      if len(parts) != 4:
        processing_notes = "malformed [Description] field"
      else:
        supplier_code = parts[0].strip()
        product_description = parts[1].strip()
        product_segment_1 = parts[2].strip()
        product_segment_2 = parts[3].strip()

      parts = purchase_description.split("|")
      if len(parts) != 4:
        if not processing_notes:
          processing_notes = "malformed [PurchaseDescription] field"
        else:
          processing_notes = processing_notes + "/" + "malformed [PurchaseDescription] field"
      else:
        purchase_supplier_code = parts[0].strip()
        purchase_product_description = parts[1].strip()
        purchase_product_segment_1 = parts[2].strip()
        purchase_product_segment_2 = parts[3].strip()

      url = xero_url + item_id

      columns = [
        item_id, code, code2, description, purchase_description, updated_date_utc, purchase_unit_price,
        purchase_cogs_account_code,
        purchase_tax_type, sales_unit_price, sales_account_code, sales_tax_type, name, is_tracked_as_inventory,
        inventory_asset_account_code, total_cost_pool, quantity_on_hand, is_sold, is_purchased,
        supplier_code, product_description, product_segment_1, product_segment_2,
        purchase_supplier_code, purchase_product_description, purchase_product_segment_1, purchase_product_segment_2,
        processing_notes, url
      ]

      prep_columns = list(map(lambda col: "\"" + str(col) + "\"", columns))
      line = ",".join(prep_columns) + "\n"

      csv_file.write(line)

    csv_file.close()
    self.log.write("INFO [{}] CSV file created {} ({:,} records)".format(data_type, csv_file_name, i))

    formats = [
      '', '', '', '', '', 'long', '0.00',
      '',
      '', '0.00', '', '', '', '',
      '', '0.00', '0.00', '', '',
      '', '', '', '',
      '', '', '', '',
      '', ''
    ]

    ark = Archiver(self.log)
    ark.archive(data_type, json_file_name)
    ark.archive(data_type, csv_file_name)
    ark.copy(data_type, csv_file_name, 'master')
    ark.copy(data_type, csv_file_name, 'current', excelize=True, xlsx_formats=formats)
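
One caveat with the row writing above: wrapping each value in literal double quotes produces a broken row as soon as a field itself contains a double quote or a newline. A minimal sketch of the same step using the standard csv module instead, assuming the header columns and per-item column lists built above (the names header_columns and item_rows are illustrative):

import csv

with open(csv_file_name, 'w', encoding='utf-8', newline='') as out:
    writer = csv.writer(out, quoting=csv.QUOTE_ALL)
    writer.writerow(header_columns)   # hypothetical list of column names
    for columns in item_rows:         # hypothetical list of per-item column lists
        writer.writerow(columns)      # csv.writer escapes embedded quotes and newlines
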
Code example #28
  def parse(self):

    data_type = "invoices"
    xero_url_accrec = "https://go.xero.com/AccountsReceivable/View.aspx?InvoiceID="
    xero_url_accpay = "https://go.xero.com/AccountsPayable/View.aspx?InvoiceID="

    proc_dir = "processing/default"
    json_file_name = "{}/{}/{}.json".format(self.data_dir, proc_dir, data_type)
    csv_file_name = "{}/{}/{}.csv".format(self.data_dir, proc_dir, data_type)

    if not os.path.isfile(json_file_name):
      self.log.write("ERROR {} file does not exist, did you forget to extract this?".format(json_file_name))
      return False

    with open(json_file_name, encoding='utf-8') as f:
      data = json.load(f)

    collection = 'Invoices'
    if collection not in data:
      self.log.write("ERROR '{}' collection not found in JSON file".format(collection))
      return

    # zero ts in json header
    zero_created_datetime = clean_date(data['DateTimeUTC'])

    col_header = """
    InvoiceID,Type,InvoiceNumber,Reference,
    AmountDue,AmountPaid,AmountCredited,CurrencyRate,IsDiscounted,HasAttachments,HasErrors,
    ContactID,Name,Date,BrandingThemeID,BrandingThemeName,Status,LineAmountTypes,
    SubTotal,TotalTax,Total,UpdatedDateUTC,CurrencyCode,ProcessingNotes, URL
    """

    csv_file = open(csv_file_name, 'w', encoding='utf-8')
    csv_file.write(re.sub(r"[\n\t\s]", "", col_header) + "\n")

    # read in branding themes
    themes_csv_file = self.data_dir + "/processing/default/branding-themes.csv"
    if not os.path.isfile(themes_csv_file):
      self.log.write("ERROR {} file does not exist, did you forget to extract this?".format(themes_csv_file))
      return
    themes = {}
    with open(themes_csv_file, "r") as f:
      reader = csv.reader(f, delimiter=",")
      for j, line in enumerate(reader):
        if j > 0:
          themes[line[0]] = line[1]

    i = 0

    for invoice in data['Invoices']:

      i = i + 1

      type = invoice['Type'] if 'Type' in invoice else ''
      invoice_id = invoice['InvoiceID']
      invoice_number = invoice['InvoiceNumber'] if 'InvoiceNumber' in invoice else ''
      reference = invoice['Reference'] if 'Reference' in invoice else ''
      amount_due = invoice['AmountDue'] if 'AmountDue' in invoice else 0.00
      amount_paid = invoice['AmountPaid'] if 'AmountPaid' in invoice else 0.00
      amount_credited = invoice['AmountCredited'] if 'AmountCredited' in invoice else 0.00
      currency_rate = invoice['CurrencyRate'] if 'CurrencyRate' in invoice else 0.00
      is_discounted = invoice['IsDiscounted'] if 'IsDiscounted' in invoice else ''
      has_attachments = invoice['HasAttachments'] if 'HasAttachments' in invoice else ''
      has_errors = invoice['HasErrors'] if 'HasErrors' in invoice else ''

      if 'Contact' in invoice and invoice['Contact']:
        contact = invoice['Contact']
        contact_id = contact['ContactID']
        name = contact['Name'] if 'Name' in contact else ''
      else:
        contact = ""
        contact_id = ""
        name = ""

      # use DateString
      date = (invoice['DateString'])[:10] if 'DateString' in invoice else ''

      branding_theme_id = invoice['BrandingThemeID'] if 'BrandingThemeID' in invoice else ''
      status = invoice['Status'] if 'Status' in invoice else ''
      line_amount_types = invoice['LineAmountTypes'] if 'LineAmountTypes' in invoice else ''
      sub_total = invoice['SubTotal'] if 'SubTotal' in invoice else ''
      total_tax = invoice['TotalTax'] if 'TotalTax' in invoice else ''
      total = invoice['Total'] if 'Total' in invoice else ''
      updated_date_utc = clean_date(invoice['UpdatedDateUTC']) if 'UpdatedDateUTC' in invoice else ''
      currency_code = invoice['CurrencyCode'] if 'CurrencyCode' in invoice else ''

      if type == "ACCPAY":
        url  = xero_url_accpay + invoice_id
      elif type == "ACCREC":
        url  = xero_url_accrec + invoice_id
      else:
        url = ""

      # get branding theme name
      processing_notes = ""
      if branding_theme_id in themes.keys():
        branding_theme_name = themes[branding_theme_id]
      else:
        branding_theme_name = ""
        processing_notes = "branding theme id not found"

      columns = [
        invoice_id, type, invoice_number, reference, amount_due,
        amount_paid, amount_credited, currency_rate, is_discounted, has_attachments, has_errors, contact_id,
        name,  date,
        branding_theme_id, branding_theme_name, status, line_amount_types, sub_total, total_tax,
        total, updated_date_utc, currency_code, processing_notes, url
      ]

      prep_columns = list(map(lambda col: "\"" + str(col) + "\"", columns))
      line = ",".join(prep_columns) + "\n"

      csv_file.write(line)

    csv_file.close()
    self.log.write("INFO [{}] CSV file created {} ({:,} records)".format(data_type, csv_file_name, i))

    formats = [
      '', '', '', '', '0.00',
      '0.00', '0.00', '0.00', '', '', '', '',
      '', 'short',
      '', '', '', '', '0.00', '0.00',
      '0.00', 'long', '', '', ''
    ]

    ark = Archiver(self.log)
    ark.archive(data_type, json_file_name)
    ark.archive(data_type, csv_file_name)
    ark.copy(data_type, csv_file_name, 'master')
    ark.copy(data_type, csv_file_name, 'current', excelize=True, xlsx_formats=formats)
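
Because xlsx_formats is matched to columns purely by position, the two lists can silently drift apart as columns are added. A small hypothetical sanity check (not part of the project) that could be run against the col_header and formats defined above:

header_columns = re.sub(r"[\n\t\s]", "", col_header).split(",")
assert len(header_columns) == len(formats), (len(header_columns), len(formats))
for column, fmt in zip(header_columns, formats):
    print(column, fmt or "(default)")
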
Code example #29
def main():
    try:
        os.makedirs(args.output_directory)
    except:
        pass

    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for scene_index in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)

        camera_distance = 4
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for observation_index in range(args.num_observations_per_scene):
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)

            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
Code example #30
    print('  original aspect ratio).')
    print('')
    print('Tarball archiving: (NOT implemented yet)')
    print('  All files contained in the target folder will be compressed')
    print('  into a .tar.xz archive using the LZMA2 compression filter.')
    print('')


def print_error(msg):
    print('archive: ' + str(msg))
    print('Use "archive --help" for info on the usage')


if __name__ == '__main__':
    print_header()
    archiver = Archiver()

    try:
        opts, args = getopt.getopt(sys.argv[1:], "hpvt",
                                   ["help", "pictures", "videos", "tarball"])
    except getopt.GetoptError as err:
        print_error(err)
        sys.exit(2)

    for o, v in opts:
        if o in ('-h', '--help'):
            print_help()
            sys.exit(0)
        elif o in ('-p', '--pictures'):
            archiver.rescale_pictures = True
        elif o in ('-v', '--videos'):
Code example #31
File: skycolor.py Project: mynameisfiber/skycolor
from flask import Flask, jsonify, send_file, request
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop

from archiver import Archiver
from presets import PRESETS
from utils import route_preset, load_webcam
from utils import average_color, image_color
from utils import draw_rectangle, img_to_io


app = Flask(__name__)
ARCHIVER = Archiver(PRESETS, callback_minutes=15).start()
route_preset.app = app
route_preset.presets = PRESETS


@app.route("/archive/<string:location>")
def get_archive(location):
    if location not in PRESETS:
        return jsonify([])
    try:
        N = int(request.args.get("N", 96))
    except ValueError:
        N = 96
    finally:
        N = min(N, 96*7)
    return jsonify(ARCHIVER.get_last_N(location, N))

Code example #32
def main():
    try:
        os.makedirs(args.output_directory)
    except:
        pass

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < args.initial_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)
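    # Example with hypothetical numbers: initial_file_number=1, num_scenes_per_file=1000,
    # total_scenes=10000 and files 1.h5-3.h5 already on disk resume rendering at file 4,
    # so total_scenes_to_render = 10000 - 1000 * 3 = 7000.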

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)
    camera_distance = 2

    for _ in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            camera_position = np.array(
                (rand_position_xz[0], wall_height / 2, rand_position_xz[1]))
            center = np.array((0, wall_height / 2, 0))

            # Compute yaw and pitch
            camera_direction = camera_position - center
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera.look_at(tuple(camera_position), tuple(center), up=(0, 1, 0))
            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)
Code example #33
    def parse(self):

        data_type = "contacts"
        xero_url = "https://go.xero.com/Contacts/View/"

        proc_dir = "processing/default"
        json_file_name = "{}/{}/{}.json".format(self.data_dir, proc_dir,
                                                data_type)
        csv_file_name = "{}/{}/{}.csv".format(self.data_dir, proc_dir,
                                              data_type)

        if not os.path.isfile(json_file_name):
            self.log.write(
                "ERROR {} file does not exist, did you forget to extract this?"
                .format(json_file_name))
            return False

        with open(json_file_name, encoding='utf-8') as f:
            data = json.load(f)

        collection = 'Contacts'
        if collection not in data:
            self.log.write(
                "ERROR '{}' collection not found in JSON file".format(
                    collection))
            return

        # zero ts in json header
        #zero_created_datetime = clean_date(data['DateTimeUTC'])

        col_header = """
    ContactID,AccountNumber,ContactStatus,Name,FirstName,LastName,EmailAddress,SkypeUserName,Segment1,Segment2,Segment3,BankAccountDetails,TaxNumber,
    Street_City,Street_Region,Street_PostalCode,Street_Country,
    POBOX_AddressLine1,POBOX_AddressLine2,POBOX_AddressLine3,POBOX_AddressLine4,POBOX_City,POBOX_Region,POBOX_PostalCode,POBOX_Country,POBOX_AttentionTo,
    DEFAULT_PhoneNumber,DEFAULT_PhoneAreaCode,DEFAULT_PhoneCountryCode,
    MOBILE_PhoneNumber,MOBILE_PhoneAreaCode,MOBILE_PhoneCountryCode,
    FAX_PhoneNumber,FAX_PhoneAreaCode,FAX_PhoneCountryCode,
    DDI_PhoneNumber,DDI_PhoneAreaCode,DDI_PhoneCountryCode,
    UpdatedDateUTC,IsSupplier,IsCustomer,
    ProcessingNotes,URL 
    """

        csv_file = open(csv_file_name, 'w', encoding='utf-8')
        csv_file.write(re.sub(r"[\n\t\s]", "", col_header) + "\n")

        i = 0

        for contact in data[collection]:

            i = i + 1

            contact_id = contact['ContactID']

            account_number = contact[
                'AccountNumber'] if 'AccountNumber' in contact else ''
            contact_status = contact[
                'ContactStatus'] if 'ContactStatus' in contact else ''
            name = contact['Name'] if 'Name' in contact else ''
            first_name = contact['FirstName'] if 'FirstName' in contact else ''
            last_name = contact['LastName'] if 'LastName' in contact else ''
            email_address = contact[
                'EmailAddress'] if 'EmailAddress' in contact else ''

            # parse segments
            skype_user_name = contact[
                'SkypeUserName'] if 'SkypeUserName' in contact else ''
            parts = skype_user_name.split("|")
            if len(parts) == 3:
                segment_1 = parts[0].strip()
                segment_2 = parts[1].strip()
                segment_3 = parts[2].strip()
                processing_notes = ""
            else:
                segment_1 = ""
                segment_2 = ""
                segment_3 = ""
                processing_notes = "malformed [SkypeUserName] field"

            bank_account_details = contact[
                'BankAccountDetails'] if 'BankAccountDetails' in contact else ''
            tax_number = zero_to_empty(
                contact['TaxNumber']) if 'TaxNumber' in contact else ''

            # (ugly) initializer
            street_city, street_region, street_postalcode, street_country = "", "", "", ""
            pobox_addressline1, pobox_addressline2, pobox_address_line3, pobox_address_line4, pobox_city, pobox_region = "", "", "", "", "", ""
            pobox_postal_code, pobox_country, pobox_attention_to = "", "", ""

            if 'Addresses' in contact and contact['Addresses']:
                addresses = contact['Addresses']
                for address in addresses:
                    if address['AddressType'] == 'STREET':
                        street_city = zero_to_empty(
                            address['City']) if 'City' in address else ''
                        street_region = zero_to_empty(
                            address['Region']) if 'Region' in address else ''
                        street_postalcode = zero_to_empty(
                            address['PostalCode']
                        ) if 'PostalCode' in address else ''
                        street_country = zero_to_empty(
                            address['Country']) if 'Country' in address else ''
                    elif address['AddressType'] == 'POBOX':
                        pobox_addressline1 = zero_to_empty(
                            address['AddressLine1']
                        ) if 'AddressLine1' in address else ''
                        pobox_addressline2 = zero_to_empty(
                            address['AddressLine2']
                        ) if 'AddressLine2' in address else ''
                        pobox_address_line3 = zero_to_empty(
                            address['AddressLine3']
                        ) if 'AddressLine3' in address else ''
                        pobox_address_line4 = zero_to_empty(
                            address['AddressLine4']
                        ) if 'AddressLine4' in address else ''
                        pobox_city = zero_to_empty(
                            address['City']) if 'City' in address else ''
                        pobox_region = zero_to_empty(
                            address['Region']) if 'Region' in address else ''
                        pobox_postal_code = zero_to_empty(
                            address['PostalCode']
                        ) if 'PostalCode' in address else ''
                        pobox_country = zero_to_empty(
                            address['Country']) if 'Country' in address else ''
                        pobox_attention_to = zero_to_empty(
                            address['AttentionTo']
                        ) if 'AttentionTo' in address else ''
                    else:
                        # TODO : other type of address (write note to log)
                        pass

            # (ugly) initializer
            ddi_phone_number, ddi_phone_area_code, ddi_phone_country_code = "", "", ""
            default_phone_number, default_phone_area_code, default_phone_country_code = "", "", ""
            fax_phone_number, fax_phone_area_code, fax_phone_country_code = "", "", ""
            mobile_phone_number, mobile_phone_area_code, mobile_phone_country_code = "", "", ""

            if 'Phones' in contact and contact['Phones']:
                phones = contact['Phones']
                for phone in phones:
                    if phone['PhoneType'] == 'DDI':
                        ddi_phone_number = zero_to_empty(
                            phone['PhoneNumber']
                        ) if 'PhoneNumber' in phone else ''
                        ddi_phone_area_code = zero_to_empty(
                            phone['PhoneAreaCode']
                        ) if 'PhoneAreaCode' in phone else ''
                        ddi_phone_country_code = zero_to_empty(
                            phone['PhoneCountryCode']
                        ) if 'PhoneCountryCode' in phone else ''
                    elif phone['PhoneType'] == 'DEFAULT':
                        default_phone_number = zero_to_empty(
                            phone['PhoneNumber']
                        ) if 'PhoneNumber' in phone else ''
                        default_phone_area_code = zero_to_empty(
                            phone['PhoneAreaCode']
                        ) if 'PhoneAreaCode' in phone else ''
                        default_phone_country_code = zero_to_empty(
                            phone['PhoneCountryCode']
                        ) if 'PhoneCountryCode' in phone else ''
                    elif phone['PhoneType'] == 'FAX':
                        fax_phone_number = zero_to_empty(
                            phone['PhoneNumber']
                        ) if 'PhoneNumber' in phone else ''
                        fax_phone_area_code = zero_to_empty(
                            phone['PhoneAreaCode']
                        ) if 'PhoneAreaCode' in phone else ''
                        fax_phone_country_code = zero_to_empty(
                            phone['PhoneCountryCode']
                        ) if 'PhoneCountryCode' in phone else ''
                    elif phone['PhoneType'] == 'MOBILE':
                        mobile_phone_number = zero_to_empty(
                            phone['PhoneNumber']
                        ) if 'PhoneNumber' in phone else ''
                        mobile_phone_area_code = zero_to_empty(
                            phone['PhoneAreaCode']
                        ) if 'PhoneAreaCode' in phone else ''
                        mobile_phone_country_code = zero_to_empty(
                            phone['PhoneCountryCode']
                        ) if 'PhoneCountryCode' in phone else ''
                    else:
                        # TODO : other type of phone (write note to log)
                        pass

            updated_date_utc = clean_date(
                contact['UpdatedDateUTC']
            ) if 'UpdatedDateUTC' in contact else ''
            is_supplier = contact[
                'IsSupplier'] if 'IsSupplier' in contact else ''
            is_customer = contact[
                'IsCustomer'] if 'IsCustomer' in contact else ''

            url = xero_url + contact_id

            columns = [
                contact_id, account_number, contact_status, name, first_name,
                last_name, email_address, skype_user_name, segment_1,
                segment_2, segment_3, bank_account_details, tax_number,
                street_city, street_region, street_postalcode, street_country,
                pobox_addressline1, pobox_addressline2, pobox_address_line3,
                pobox_address_line4, pobox_city, pobox_region,
                pobox_postal_code, pobox_country, pobox_attention_to,
                default_phone_number, default_phone_area_code,
                default_phone_country_code, mobile_phone_number,
                mobile_phone_area_code, mobile_phone_country_code,
                fax_phone_number, fax_phone_area_code, fax_phone_country_code,
                ddi_phone_number, ddi_phone_area_code, ddi_phone_country_code,
                updated_date_utc, is_supplier, is_customer, processing_notes,
                url
            ]

            prep_columns = list(
                map(lambda col: "\"" + str(col) + "\"", columns))
            line = ",".join(prep_columns) + "\n"

            csv_file.write(line)

        csv_file.close()
        self.log.write("INFO [{}] CSV file created {} ({:,} records)".format(
            data_type, csv_file_name, i))

        formats = [
            '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
            '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
            '', '', '', '', 'long', '', '', '', ''
        ]

        ark = Archiver(self.log)
        ark.archive(data_type, json_file_name)
        ark.archive(data_type, csv_file_name)
        ark.copy(data_type, csv_file_name, 'master')
        ark.copy(data_type,
                 csv_file_name,
                 'current',
                 excelize=True,
                 xlsx_formats=formats)