Example #1
    def __init__(self, wnck_window, group):
        self.group_r = weakref.ref(group)
        self.globals = Globals()
        self.opacify_obj = Opacify()
        connect(self.globals, "show-only-current-monitor-changed",
                             self.__on_show_only_current_monitor_changed)
        self.screen = wnck.screen_get_default()
        self.wnck = wnck_window
        self.deopacify_sid = None
        self.opacify_sid = None
        self.select_sid = None
        self.xid = self.wnck.get_xid()
        self.is_active_window = False
        self.on_current_desktop = self.is_on_current_desktop()
        self.monitor = self.get_monitor()

        self.state_changed_event = self.wnck.connect("state-changed",
                                                self.__on_window_state_changed)
        self.icon_changed_event = self.wnck.connect("icon-changed",
                                                self.__on_window_icon_changed)
        self.name_changed_event = self.wnck.connect("name-changed",
                                                self.__on_window_name_changed)
        self.geometry_changed_event = self.wnck.connect("geometry-changed",
                                                self.__on_geometry_changed)

        self.item = WindowItem(self, group)
        self.needs_attention = self.wnck.needs_attention()
        self.item.show()
        self.__on_show_only_current_monitor_changed()
Example #2
def main():
  connection = common.connect()
  region = common.prompt_region(connection)
  connection = common.connect(region)
  zone = common.prompt_zone(connection)
  security_group = common.prompt_security_group(connection)
  prefix = "{}-{}-".format(security_group, zone.split("-")[-1])
  name = _prompt_name(connection, prefix)
  instance_type = _prompt_instance_type()
  key_path = common.prompt_key_path()
  key_name = os.path.basename(key_path).split(".")[0]

  arguments = _LaunchArguments(instance_type=instance_type,
                               key_name=key_name,
                               name=name,
                               security_group=security_group,
                               zone=zone)

  env.host_string = _launch(connection, arguments, region)
  env.key_filename = key_path
  env.user = _USERNAME
  common.wait_until_remote_reachable()
  sudo("hostname {}".format(name))
  _update_system_files(name)
  _install()
  _update_installed_files()
  reboot()

  if instance_type.ephemeral_disk_count > 1:
    _create_ephemeral_raid(instance_type.ephemeral_disk_count)
  
  if _GIT_REPO:
    _clone()
Example #3
def main():
  connection = common.connect()

  prefix = raw_input(
    "Do you want the section of {} to have a prefix? (default '')".format(
      _HOSTS_PATH))
  if prefix:
    prefix = prefix + " "
  region = common.prompt_region(connection)
  type_ = common.prompt_choice("Type", ["private", "public"], "public")

  begin_marker = "# {}{} begin".format(prefix, region.name)
  end_marker = "# {}{} end".format(prefix, region.name)

  with open(_HOSTS_PATH) as hosts_file:
    content = hosts_file.read()

  content = re.sub(r"\s*{}.*{}".format(begin_marker, end_marker), "", content,
                   flags=re.DOTALL)

  connection = common.connect(region)
  mapping = sorted(_get_mapping(connection, type_ == "public"))
  content += _generate_mapping_content(begin_marker, end_marker, mapping)

  with tempfile.NamedTemporaryFile(delete=False) as temporary_file:
    temporary_file.write(content)

  subprocess.check_call([
    "sudo",
    "cp",
    temporary_file.name,
    _HOSTS_PATH
  ])
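The _generate_mapping_content helper is elided above; a hypothetical stand-in, shown only to illustrate the marker-delimited block that the re.sub() call strips on the next run (the field order of mapping is an assumption):

def _generate_mapping_content(begin_marker, end_marker, mapping):
  # Hypothetical sketch: emit an /etc/hosts-style block wrapped in the
  # begin/end markers so it can be removed and regenerated later.
  lines = ["", begin_marker]
  for hostname, address in mapping:  # field order is an assumption
    lines.append("{} {}".format(address, hostname))
  lines.append(end_marker)
  return "\n".join(lines) + "\n"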
Example #4
    def __init__(self, group, class_group=None,
                 desktop_entry=None, identifier=None):
        self.dockbar_r = weakref.ref(group.dockbar_r())
        self.theme = Theme()
        self.globals = Globals()
        connect(self.globals, "color-changed", self.reset_surfaces)
        self.desktop_entry = desktop_entry
        self.identifier = identifier
        self.class_group = class_group

        # Setting size to something other than zero to
        # avoid crashes if surface_update() is run
        # before the size is set.
        self.size = 15

        self.icon = None
        self.surfaces = {}

        self.average_color = None

        self.max_win_nr = self.theme.get_windows_cnt()
        self.types_in_theme = 0
        for type in self.theme.get_types():
            if type not in self.TYPE_DICT:
                continue
            self.types_in_theme = self.types_in_theme | self.TYPE_DICT[type]
Example #5
def parse_arguments(arguments):
    """Parse command-line arguments."""
    option_parser = OptionParser()
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #6
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, arguments)."""
    option_parser = OptionParser()
    option_parser.add_option("-y", "--year", default=date.today().year)
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #7
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, runners).
    May raise a ConnectionError"""
    option_parser = OptionParser("%prog RUNNER_ID [RUNNER_IDS...]")
    option_parser.add_option("-t", "--sort-times", action="store_true", default=False)
    options, runner_ids = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #8
def parse_arguments(arguments):
    """Parse the command-line arguments.  Returns the tuple (options,
    arguments)."""
    option_parser = OptionParser()
    option_parser.add_option("--show-venues", action="store_true",
                             default=False, help="Show the venue where each "
                             "race was held.")
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #9
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options,
    arguments)."""
    option_parser = OptionParser()
    option_parser.add_option("-g", "--gender", action="callback",
                             callback=gender_callback, type="str")
    option_parser.add_option("--school")
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #10
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, arguments)."""
    option_parser = OptionParser()
    option_parser.add_option("-p", "--pretty", action="store_true",
                             default=False, help="Print database ID and "
                             "official name.  If not set, the program prints "
                             "out a list of all nicknames as well.")
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #11
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, race_id)."""
    option_parser = OptionParser()
    option_parser.set_usage("%prog [options] RACE_ID")
    option_parser.add_option("-y", "--year", default=date.today().year,
                             help="Year in which the race occurred.",
                             type="int")
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
        race = Races.get(int(arguments[0]))
    except ConnectionError, error:
        option_parser.error(error)
Example #12
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, (school,)).
    May raise a ConnectionError."""
    option_parser = OptionParser("%prog SCHOOL")
    option_parser.add_option("-g", "--sort-by-gender", action="store_true",
                             default=False)
    option_parser.add_option("-y", "--year", default=date.today().year,
                             type="int")
    options, arguments = option_parser.parse_args(arguments[1:])
    try:
        connect(options.server)
    except ConnectionError, error:
        option_parser.error(error)
Example #13
    def __init__(self, window, group):
        CairoButton.__init__(self)
        self.set_no_show_all(True)

        self.window_r = weakref.ref(window)
        self.group_r = weakref.ref(group)
        self.globals = Globals()

        self.opacify_sid = None
        self.deopacify_sid = None
        self.press_sid = None
        self.pressed = False

        self.close_button = CairoCloseButton()
        self.close_button.set_no_show_all(True)
        if self.globals.settings["show_close_button"]:
            self.close_button.show()
        self.label = gtk.Label()
        self.label.set_ellipsize(pango.ELLIPSIZE_END)
        self.label.set_alignment(0, 0.5)
        self.__update_label()
        self.area.set_needs_attention(window.wnck.needs_attention())
        hbox = gtk.HBox()
        icon = window.wnck.get_mini_icon()
        self.icon_image = gtk.image_new_from_pixbuf(icon)
        hbox.pack_start(self.icon_image, False)
        hbox.pack_start(self.label, True, True, padding = 4)
        alignment = gtk.Alignment(1, 0.5, 0, 0)
        alignment.add(self.close_button)
        hbox.pack_start(alignment, False, False)

        vbox = gtk.VBox()
        vbox.pack_start(hbox, False)
        self.preview_box = gtk.Alignment(0.5, 0.5, 0, 0)
        self.preview_box.set_padding(4, 2, 0, 0)
        self.preview = gtk.Image()
        self.preview_box.add(self.preview)
        self.preview.show()
        vbox.pack_start(self.preview_box, True, True)
        self.add(vbox)
        self.preview_box.set_no_show_all(True)
        vbox.show_all()
        
        self.show_all()
        self.update_show_state()

        self.drag_dest_set(0, [], 0)
        self.drag_entered = False

        self.close_button.connect("button-press-event", self.disable_click)
        self.close_button.connect("clicked", self.__on_close_button_clicked)
        self.close_button.connect("leave-notify-event",
                                  self.__on_close_button_leave)
        connect(self.globals, "show-close-button-changed",
                              self.__on_show_close_button_changed)
        connect(self.globals, "color-changed", self.__update_label)
        connect(self.globals, "preview-size-changed", self.update_preview)
        connect(self.globals, "window-title-width-changed",
                              self.__update_label)
Example #14
def walldistdbread(logger, connfile, table, begin, end, crits, users, hosts,
                   title, plan, norm):
    '''
    Connect to DB, run query and store x values into a list which is returned.

    Specific to getting data for plotting a histogram of the waiting
    distribution, i.e. dbread() isn't really suitable for doing it (not
    least because we don't have time on the x axis, we have walltimes).
    '''

    # Connect
    c = common.connect(logger, os.path.expanduser(connfile))
    cursor = c.cursor()

    # Queues or groups
    critcond, crits = mkcritcond(crits)

    # Submit hosts
    hostcond, hosts = mkcond(hosts, 'fromHost')

    # Users
    usercond, users = mkcond(users, 'userName')

    # Normalisation factor
    if norm is None:
        factor = ''
    else:
        factor = ' * hostFactor * %s' % norm

    # Query
    # Difference is already in days by virtue of Oracle
    sel = "SELECT (eventTime - startTime) * 24 %s" % factor
    tab = "FROM %s" % table

    # Time condition is compulsory and there are default values anyway
    timecond = "WHERE eventTime BETWEEN :begin AND :end"
    span = [datetime.date.fromtimestamp(begin),
            datetime.date.fromtimestamp(end)]

    # Statement
    stmt = '%s %s %s AND %s AND %s %s' % \
        (sel, tab, timecond, STTCOND, CPUCOND, critcond + hostcond + usercond)
    params = span + [EPOCH] + crits + hosts + users

    print "Querying..."
    t = time.time()
    if plan:
        mkplan(cursor, stmt)

    cursor.execute(stmt, params)

    # Run query and store values
    f = open(title.translate(TRANS) + '.data', 'w')
    xs = []
    for x, in cursor:
        print >>f, '%f' % x
        xs.append(x)
    print "Queried in %f s" % (time.time() - t)
    f.close()
    return xs
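A hypothetical call of the function above, with every value a placeholder (the table name, connection file and queue name are assumptions; the logger is assumed to be configured as in Example #26): read the waiting-time distribution, in hours, for jobs that finished during 2013.

# Placeholder values only -- not taken from the original project.
xs = walldistdbread(logger, "~/.acctdb", "LSF_EVENTS",
                    begin=1356998400, end=1388534399,  # 2013 as epoch seconds
                    crits=["grid"], users=[], hosts=[],
                    title="wait 2013", plan=False, norm=None)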
Example #15
def SenderClient(nick):
    sock = connect()
    sendCmd(sock, "LOGIN %s\r\n" % nick)
    sendCmd(sock, "JOIN #foo\r\n")
    sendCmd(sock, "MSG #foo Hello!\r\n")
    sendCmd(sock, "PART #foo\r\n")
    sendCmd(sock, "LOGOUT\r\n")
    sock.close()
Example #16
def main():

  if com.is_db_exists():
    print('The file already exists')
    return
  
  com.DB = com.connect()
  com.make_table() 
Example #17
File: basic.py  Project: unshift/ita
def NormalOp(i):
    print "Go! %d"%i
    sock = connect()
    sendCmd(sock, "LOGIN foo%d\r\n" % i)
    sendCmd(sock, "JOIN #foo\r\n")
    sendCmd(sock, "MSG #foo hello\r\n")
    sendCmd(sock, "MSG foo%d sup\r\n" % i)
    sendCmd(sock, "PART #foo\r\n")
    sendCmd(sock, "LOGOUT\r\n")
    disconnect(sock)
Example #18
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, (given_name,
    surname, school, gender)).  May raise a ConnectionError."""
    option_parser = OptionParser()
    option_parser.set_usage("%prog [options] GIVEN_NAME SURNAME SCHOOL GENDER")
    option_parser.add_option("--competition-year", default=date.today().year,
                             type="int")
    option_parser.add_option("-n", "--nicknames")
    option_parser.add_option("-y", "--year", help="Graduation year", type="int")
    options, arguments = option_parser.parse_args(arguments[1:])
    index = count(0)
    try:
        connect(options.server)
        given_name = arguments[index.next()]
        surname = arguments[index.next()]
        school_id = Schools.get(arguments[index.next()])
        gender = parse_gender(arguments[index.next()])
    except ConnectionError, error:
        option_parser.error(error)
Example #19
def parse_arguments(arguments):
    "Parse command line arguments.  Returns the tuple (options, (gender,))."
    option_parser = race_display_options(GenderedOptionParser())
    option_parser.add_option("-c", "--conference", help="Include schools from "
                             "only the specified conference.")
    option_parser.add_option("-d", "--dist-limit", help="Minimum race "
                             "distance.", type="int")
    option_parser.add_option("-f", "--filter", help="Include or exclude the "
                             "specified schools.")
    option_parser.add_option("-p", "--previous-years", default=0, type="int")
    option_parser.add_option("-y", "--year", default=date.today().year,
                             type="int")
    options, arguments = option_parser.parse_args(arguments[1:])
    index = count()
    try:
        connect(options.server)
        gender = arguments[next(index)]
    except IndexError:
        option_parser.error("")
    except ConnectionError, error:
        option_parser.error(error)
Example #20
def parse_arguments(arguments):
    """Parse command line arguments.  Returns the tuple (options, (name,
    date, venue, city, state))."""
    option_parser = OptionParser()
    option_parser.set_usage("%prog [options] RACE_NAME DATE VENUE")
    option_parser.add_option("-c", "--comments", help="Comments on the race, "
                             "such as conditions, intensity of competition, "
                             "etc.")
    option_parser.add_option("-e", "--elevation", type="int")
    option_parser.add_option("-m", "--mens-distance", help="The length of "
                             "the men's race.", type="int")
    option_parser.add_option("-w", "--womens-distance", help="The length of "
                             "the women's race.", type="int")
    options, arguments = option_parser.parse_args(arguments[1:])
    index = count()
    try:
        connect(options.server)
        race_name = arguments[next(index)]
        date = Date.from_string(arguments[next(index)])
        venue = arguments[next(index)]
        venue = Venues.get(venue)
    except ConnectionError, error:
        option_parser.error(error)
Example #21
def main():
    socket = connect()
    if not common.__dict__.has_key(sys.argv[1]):
        raise Exception("Invalid request type")
    req_type = common.__dict__[sys.argv[1]]
    args = []
    for pair in sys.argv[2:]:
        pair_parts = pair.split(':', 1)
        if len(pair_parts) == 1 or pair_parts[0] == 's':
            args.append(pair_parts[-1])
        elif pair_parts[0] == 'i':
            args.append(int(pair_parts[1]))
    send(socket, req_type, *args)
    resp = recv_all(socket)
    print resp
    print "PAYLOAD SIZE: %d" % (len(resp['payload']),)
Example #22
def connect_facebook(keyword, field):
    photos = []
    is_end_of_page = False
    page = 1
    while not is_end_of_page:
        url = generate_url(keyword, str(page), field)
        json = connect(url)
        for p in json['photos']['photo']:
            photos.append(p)
        pages = json['photos']['pages']
        if pages > page and page < DEFAULT_MAX_PAGE_NUM:
            page = page + 1
        else:
            is_end_of_page = True
    print 'total size : ' + str(len(photos))
    return photos
Example #23
def main():
    '''
    Set up pylsf to read event records and set up pyinotify to notice them
    as they come. Nothing expected, nothing returned.
    '''

    # Read arguments
    p = optparse.OptionParser()
    help = "user/passwd@dsn-formatted database connection file absolute path"
    p.add_option("-c", "--connfile", help=help)
    help = "LSF accounting log file absolute path"
    p.add_option("-a", "--acctfile", help=help)
    help = "PID file absolute path (defaults to %s)" % PIDFILE
    p.add_option("-p", "--pidfile", help=help, default=PIDFILE)
    help = "log file absolute path (defaults to %s)" % LOGFILE
    p.add_option("-l", "--logfile", help=help, default=LOGFILE)
    help = "how many minutes between log heart beats (defaults to %d)" % \
           common.HBDELTA
    p.add_option("-b", "--heartbeatdelta", type='int',
                 help=help, default=common.HBDELTA)
    help = "Don't touch the DB"
    p.add_option("-d", "--dryrun", action='store_true', help=help)
    options, args = p.parse_args()

    # Set up logging
    #fmt = '%(asctime)s %(levelname)s %(message)s'
    #logging.basicConfig(level=logging.INFO, format=fmt,
    #                    filename='/tmp/acct.log')

    #h = logging.handlers.SysLogHandler(address='/dev/log')
    h = logging.FileHandler(options.logfile)
    #fmt = "%(name)s: %(levelname)s %(message)s"
    fmt = "%(asctime)s %(name)s: %(levelname)s %(message)s"
    h.setFormatter(logging.Formatter(fmt, common.LOGDATEFMT))
    logger = logging.getLogger(common.LOGGER)
    logger.addHandler(h)
    logger.setLevel(logging.INFO)

    # Try to connect before daemonising to exit with a useful code
    try:
        if not options.dryrun:
            logger.info("Trying DB connection...")
            connection = common.connect(logger, options.connfile)
            connection.close()
    except common.AcctDBError, e:
        logger.error(e)
        return 1
Example #24
def main():
  ec2_connection = common.connect()
  region = common.prompt_region(ec2_connection)
  elb_connection = common.connect_elb_region(region.name)
  elb = common.prompt_elb(elb_connection, _DEFAULT_ELB_NAME)

  if not elb:
    print("No ELBs exist for region {}".format(region.name))
    return

  choice = common.prompt_choice("Choice", _ELB_ACTIONS, _DEFAULT_ELB_ACTION)
  while (choice != "Exit"):
    elb_info = _get_elb_info(ec2_connection, elb)
    _handle_user_choice(choice, elb, elb_info.zones,
                        elb_info.registered_instances,
                        elb_info.unregistered_instances)
    choice = common.prompt_choice("Choice", _ELB_ACTIONS, _DEFAULT_ELB_ACTION)
  return
Example #25
def ListenerClient(nick):
    global status
    
    sock = connect()
    sendCmd(sock, "LOGIN %s\r\n" % nick)
    sock.recv(1024)
    sendCmd(sock, "JOIN #foo\r\n")
    data = ""
    while 1:
        data = sock.recv(1024)
        if not data: break
        if data == "OK\r\n":
            continue
        if "GOTROOMMSG" in data:
            break
        
    print "[%s] %s" % (nick, data.strip())
    sendCmd(sock, "PART #foo\r\n")
    sendCmd(sock, "LOGOUT\r\n")
    sock.close()
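SenderClient (Example #15) and ListenerClient above are the two halves of the same chat-server test; a minimal sketch of driving them together, assuming both functions live in the same module and that a one-second pause is enough for the listener to log in and join #foo:

import threading
import time

def run_pair():
    listener = threading.Thread(target=ListenerClient, args=("listener",))
    listener.start()
    time.sleep(1)           # give the listener time to LOGIN and JOIN #foo
    SenderClient("sender")  # its "MSG #foo Hello!" should reach the listener
    listener.join()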
Example #26
def main():
    # Read arguments
    p = optparse.OptionParser()
    help = "user/passwd@dsn-formatted database connection file path"
    p.add_option("-c", "--connfile", help=help)
    help = "table name"
    p.add_option("-t", "--table", help=help)
    help="don't do anything, only SQL-print what would be done"
    p.add_option("-d", "--dryrun", action='store_true', help=help)
    help = "log file absolute path (defaults to %s)" % LOGFILE
    p.add_option("-l", "--logfile", help=help, default=LOGFILE)
    help = "how many months to plan ahead (defaults to %d)" % AHEAD
    p.add_option("-p", "--plan", help=help, type='int', default=AHEAD)
    options, args = p.parse_args()

    if None in (options.connfile, options.table):
        p.print_help()
        return 1

    # Set up logging
    h = logging.FileHandler(options.logfile)
    fmt = "%(asctime)s %(name)s: %(levelname)s %(message)s"
    h.setFormatter(logging.Formatter(fmt, common.LOGDATEFMT))
    logger = logging.getLogger(common.LOGGER)
    logger.addHandler(h)
    logger.setLevel(logging.INFO)

    # DB
    connection = common.connect(logger, options.connfile)
    cursor = connection.cursor()

    # Create partitions
    last = lastpartition(cursor, options.table)
    stmts, months = createpartitions(options.table.upper(), last, options.plan)
    for stmt, month in izip(stmts, months):
        logger.info("Adding partition for %s to %s" % (month, options.table))
        logger.info(stmt)
        if not options.dryrun:
            cursor.execute(stmt)
    logger.info("Done")
Example #27
def main():
    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    common.connect()

    tags = os.environ.get('RD_CONFIG_TAGS')
    mappingList = os.environ.get('RD_CONFIG_MAPPING')
    defaults = os.environ.get('RD_CONFIG_ATTRIBUTES')

    running = False
    if os.environ.get('RD_CONFIG_RUNNING') == 'true':
        running = True

    boEmoticon = False
    if os.environ.get('RD_CONFIG_EMOTICON') == 'true':
        boEmoticon = True

    field_selector = None
    if os.environ.get('RD_CONFIG_FIELD_SELECTOR'):
        field_selector = os.environ.get('RD_CONFIG_FIELD_SELECTOR')

    label_selector = None

    if os.environ.get('RD_CONFIG_LABEL_SELECTOR'):
        label_selector = os.environ.get('RD_CONFIG_LABEL_SELECTOR')

    node_set = []
    v1 = client.CoreV1Api()

    log.debug(label_selector)
    log.debug(field_selector)

    if field_selector and label_selector:
        ret = v1.list_pod_for_all_namespaces(
            watch=False,
            field_selector=field_selector,
            label_selector=label_selector,
        )

    if field_selector and label_selector is None:
        ret = v1.list_pod_for_all_namespaces(
            watch=False,
            field_selector=field_selector,
        )

    if label_selector and field_selector is None:
        ret = v1.list_pod_for_all_namespaces(
            watch=False,
            label_selector=label_selector,
        )

    if label_selector is None and field_selector is None:
        ret = v1.list_pod_for_all_namespaces(watch=False, )

    for i in ret.items:
        for container in i.spec.containers:
            log.debug("%s\t%s\t%s\t%s" %
                      (i.status.pod_ip, i.metadata.namespace, i.metadata.name,
                       container.name))

            node_data = nodeCollectData(i, container, defaults, tags,
                                        mappingList, boEmoticon)

            if running is False:
                if (node_data["terminated"] is False):
                    node_set.append(node_data)

            if running is True:
                if node_data["status"].lower() == "running":
                    node_set.append(node_data)

    print(json.dumps(node_set, indent=4, sort_keys=True))
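The four selector branches above could also be collapsed by building the keyword arguments first; a minimal equivalent sketch:

# Pass only the selectors that are actually set, then make a single call.
kwargs = {"watch": False}
if field_selector:
    kwargs["field_selector"] = field_selector
if label_selector:
    kwargs["label_selector"] = label_selector
ret = v1.list_pod_for_all_namespaces(**kwargs)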
Example #28
def connect_flickr(keyword, field):
    url = generate_url(keyword, str(1), field)
    return connect(url)
Example #29
  def run(self):
    print "Starting " + self.name

    common.vectorClock[:1] = [[str(common.port), str(0)]]
    common.findNeighbors()
    common.connect()
    common.updateList()

    count = 0
    while self.run_client:
      #if count % 2:
      common.askingToWrite()
      #else:
        #common.askingToRead()

      if common.read == False and common.write == False:
        continue

      #print "Esperando..."
      while True:
        if common.waiting() == True:
          print "\nAcessou a memoria compartilhada!"
          localtime = time.localtime()
          print time.strftime("%Y/%m/%d %H:%M:%S", localtime)
          print common.vectorClock
          break

      common.toDoSomething()

      count = count + 1




# from here down is the hash-cracking handling
      #for s in server.connected_servers:
        #try:
          #import pdb; pdb.set_trace()
        #  if s.ping():
        #   p = s.getPort()
        #    if p not in self.broken_hashes_server and p != self.port:
        #      vector = s.getHashes()
        #      print "\nCracking hashes..."
        #      v = self.breakHashes(vector)
        #      if v is not None:
        #        h = []
        #        for i in v:
        #          h.append(md5(i).hexdigest())

        #        if s.sendPasswords(v, h, server.vectorClock) == 1:
        #          self.broken_hashes_server.append(s.getPort())

        #          print "...Hashes quebradas!"
        #          print "Lista de pares cujas hashes jah foram quebradas:"
        #          print self.broken_hashes_server

        #          print "Lista de senhas:"
        #          print v

        #          print "Lista de hashes:"
        #          print h


        #        else:
        #         continue

        #except:
        #  continue

    print "Exiting" + self.name
Example #30
    score = 0.0
    matched = 0
    for n in nearest:
        items = n[2]
        if items.has_key(item):
            matched += 1
            score += float(items[item])
    # average
    if matched > 0:
        score = score / matched
    return score


if __name__ == '__main__':
    
    common.connect()
    
    client = common.client
    login = common.login
    
    #loadMLFile(sys.argv[1])
    #loadMoviesFile(sys.argv[1])
    
    # find 10 nearest neighbors (similar viewers)
    #nearest = kNearestNeighbors('ml', 'user_1', 10)

    # predict rating for a given movie
    #predictedScore = recommendedScore(nearest, '')

    
Example #31
File: malicious.py  Project: unshift/ita
#!/usr/bin/python

import unittest
import socket
import sys

from common import connect, disconnect, sendCmd

print "infinite stream..."
try:
    sock = connect()
    while 1:
        sock.send("x")
    sock.close()
except:
    print "got exception!"
    print sys.exc_info()
    

print "login w/2KB username..."
try:
    sock = connect()
    cmd = "LOGIN "
    for i in range(1, 2048):
        cmd += "x"
    cmd += "\r\n"
    sendCmd(sock, cmd)
    data = sock.recv(1024)
    print "data = %s" % data   
    sendCmd(sock, "JOIN #foo")
Example #32
def main():
    # parse options
    (options, args) = parse_options()

    if os.isatty(sys.stdin.fileno()):
        raise RuntimeError('Need configuration in stdin.')
    config = common.read_config(sys.stdin)
    conn = common.connect(config.s3)
    bucket = None

    try:
        # setup
        real_stdout = sys.stdout
        sys.stdout = sys.stderr

        # verify all required config items are present
        if 'roundtrip' not in config:
            raise RuntimeError('roundtrip section not found in config')
        for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
            if item not in config.roundtrip:
                raise RuntimeError(
                    "Missing roundtrip config item: {item}".format(item=item))
        for item in ['num', 'size', 'stddev']:
            if item not in config.roundtrip.files:
                raise RuntimeError(
                    "Missing roundtrip config item: files.{item}".format(
                        item=item))

        seeds = dict(config.roundtrip.get('random_seed', {}))
        seeds.setdefault('main', random.randrange(2**32))

        rand = random.Random(seeds['main'])

        for name in ['names', 'contents', 'writer', 'reader']:
            seeds.setdefault(name, rand.randrange(2**32))

        print 'Using random seeds: {seeds}'.format(seeds=seeds)

        # setup bucket and other objects
        bucket_name = common.choose_bucket_prefix(config.roundtrip.bucket,
                                                  max_len=30)
        bucket = conn.create_bucket(bucket_name)
        print "Created bucket: {name}".format(name=bucket.name)
        objnames = realistic.names(
            mean=15,
            stddev=4,
            seed=seeds['names'],
        )
        objnames = itertools.islice(objnames, config.roundtrip.files.num)
        objnames = list(objnames)
        files = realistic.files(
            mean=1024 * config.roundtrip.files.size,
            stddev=1024 * config.roundtrip.files.stddev,
            seed=seeds['contents'],
        )
        q = gevent.queue.Queue()

        logger_g = gevent.spawn_link_exception(yaml.safe_dump_all,
                                               q,
                                               stream=real_stdout)

        print "Writing {num} objects with {w} workers...".format(
            num=config.roundtrip.files.num,
            w=config.roundtrip.writers,
        )
        pool = gevent.pool.Pool(size=config.roundtrip.writers)
        start = time.time()
        for objname in objnames:
            fp = next(files)
            pool.spawn_link_exception(
                writer,
                bucket=bucket,
                objname=objname,
                fp=fp,
                queue=q,
            )
        pool.join()
        stop = time.time()
        elapsed = stop - start
        q.put(
            dict(
                type='write_done',
                duration=int(round(elapsed * NANOSECOND)),
            ))

        print "Reading {num} objects with {w} workers...".format(
            num=config.roundtrip.files.num,
            w=config.roundtrip.readers,
        )
        # avoid accessing them in the same order as the writing
        rand.shuffle(objnames)
        pool = gevent.pool.Pool(size=config.roundtrip.readers)
        start = time.time()
        for objname in objnames:
            pool.spawn_link_exception(
                reader,
                bucket=bucket,
                objname=objname,
                queue=q,
            )
        pool.join()
        stop = time.time()
        elapsed = stop - start
        q.put(
            dict(
                type='read_done',
                duration=int(round(elapsed * NANOSECOND)),
            ))

        q.put(StopIteration)
        logger_g.get()

    finally:
        # cleanup
        if options.cleanup:
            if bucket is not None:
                common.nuke_bucket(bucket)
Example #33
def main():

    common.connect()

    api = core_v1_api.CoreV1Api()

    namespace = os.environ.get('RD_CONFIG_NAMESPACE')
    name = os.environ.get('RD_CONFIG_NAME')
    container = os.environ.get('RD_CONFIG_CONTAINER_NAME')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s", name)
    log.debug("Namespace: %s", namespace)
    log.debug("Container: %s", container)
    log.debug("--------------------------")

    data = {}

    data["api_version"] = os.environ.get('RD_CONFIG_API_VERSION')
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["container_name"] = os.environ.get('RD_CONFIG_CONTAINER_NAME')
    data["image"] = os.environ.get('RD_CONFIG_IMAGE')
    data["ports"] = os.environ.get('RD_CONFIG_PORTS')
    data["replicas"] = os.environ.get('RD_CONFIG_REPLICAS')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')
    data["labels"] = os.environ.get('RD_CONFIG_LABELS')
    if os.environ.get('RD_CONFIG_ENVIRONMENTS'):
        data["environments"] = os.environ.get('RD_CONFIG_ENVIRONMENTS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS'):
        evs = os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS')
        data["environments_secrets"] = evs

    if os.environ.get('RD_CONFIG_LIVENESS_PROBE'):
        data["liveness_probe"] = os.environ.get('RD_CONFIG_LIVENESS_PROBE')

    if os.environ.get('RD_CONFIG_READINESS_PROBE'):
        data["readiness_probe"] = os.environ.get('RD_CONFIG_READINESS_PROBE')

    if os.environ.get('RD_CONFIG_VOLUME_MOUNTS'):
        data["volume_mounts"] = os.environ.get('RD_CONFIG_VOLUME_MOUNTS')

    if os.environ.get('RD_CONFIG_VOLUMES'):
        data["volumes"] = os.environ.get('RD_CONFIG_VOLUMES')

    if os.environ.get('RD_CONFIG_CONTAINER_COMMAND'):
        cc = os.environ.get('RD_CONFIG_CONTAINER_COMMAND')
        data["container_command"] = cc

    if os.environ.get('RD_CONFIG_CONTAINER_ARGS'):
        data["container_args"] = os.environ.get('RD_CONFIG_CONTAINER_ARGS')

    if os.environ.get('RD_CONFIG_RESOURCES_REQUESTS'):
        rr = os.environ.get('RD_CONFIG_RESOURCES_REQUESTS')
        data["resources_requests"] = rr

    if os.environ.get('RD_CONFIG_WAITREADY'):
        data["waitready"] = os.environ.get('RD_CONFIG_WAITREADY')

    if os.environ.get('RD_CONFIG_IMAGEPULLSECRETS'):
        data["image_pull_secrets"] = os.environ.get('RD_CONFIG_IMAGEPULLSECRETS')

    pod = create_pod(data)
    resp = None
    try:
        resp = api.create_namespaced_pod(namespace=namespace,
                                         body=pod,
                                         pretty="True")

        print("Pod Created successfully")

    except ApiException:
        log.exception("Exception creating pod:")
        exit(1)

    if not resp:
        print("Pod %s does not exist" % name)
        exit(1)
Example #34
    def __init__(self, astergui, parent=None):
        """
        Create view.

        Arguments:
            astergui (AsterGui): *AsterGui* instance.
            parent (Optional[QWidget]): Parent widget. Defaults to
                *None*.
        """
        super(DataFiles, self).__init__(parent)
        self.astergui = astergui
        self.setObjectName("DataFilesBase")
        self.ops = {}

        # Files tree
        self.view = FilesView(self)
        self.view.setObjectName('DataFilesView')
        connect(self.view.clicked, self.updateButtonsState)
        connect(self.view.doubleClicked, self._doubleClicked)

        # Toolbar
        self.toolbar = Q.QToolBar(self)
        self.toolbar.setToolButtonStyle(Q.Qt.ToolButtonIconOnly)
        self.toolbar.setObjectName("DataFilesToolbar")

        # - add file
        action = Q.QAction(translate("AsterStudy", "&Add File"), self)
        action.setToolTip(translate("AsterStudy", "Add file"))
        action.setStatusTip(
            translate("AsterStudy", "Add a data file to the stage"))
        action.setIcon(load_icon("as_pic_new_file.png"))
        connect(action.triggered, self.add)
        self.ops[DataFiles.Add] = action

        # - edit file
        action = Q.QAction(translate("AsterStudy", "&Edit File"), self)
        action.setToolTip(translate("AsterStudy", "Edit file"))
        action.setStatusTip(
            translate("AsterStudy", "Edit properties of selected data file"))
        action.setIcon(load_icon("as_pic_edit_file.png"))
        connect(action.triggered, self.edit)
        self.ops[DataFiles.Edit] = action

        # - view file
        action = Q.QAction(translate("AsterStudy", "&View File"), self)
        action.setToolTip(translate("AsterStudy", "View file"))
        action.setStatusTip(
            translate("AsterStudy", "View properties of selected data file"))
        action.setIcon(load_icon("as_pic_view_file.png"))
        connect(action.triggered, self.edit)
        self.ops[DataFiles.View] = action

        # - remove file
        action = Q.QAction(translate("AsterStudy", "&Remove File"), self)
        action.setToolTip(translate("AsterStudy", "Remove file"))
        action.setStatusTip(
            translate("AsterStudy", "Remove selected data file "
                      "from the stage"))
        action.setIcon(load_icon("as_pic_remove_file.png"))
        connect(action.triggered, self.remove)
        self.ops[DataFiles.Remove] = action

        # - go to
        action = Q.QAction(translate("AsterStudy", "&Go To"), self)
        action.setToolTip(translate("AsterStudy", "Go to"))
        action.setStatusTip(
            translate("AsterStudy", "Go to the selected command"))
        action.setIcon(load_icon("as_pic_goto.png"))
        connect(action.triggered, self.goto)
        self.ops[DataFiles.GoTo] = action

        # - fill in toolbar
        self.toolbar.addAction(self.ops[DataFiles.Add])
        self.toolbar.addAction(self.ops[DataFiles.Edit])
        self.toolbar.addAction(self.ops[DataFiles.View])
        self.toolbar.addAction(self.ops[DataFiles.Remove])
        self.toolbar.addAction(self.ops[DataFiles.GoTo])

        # Layout widgets
        vbox_layout = Q.QVBoxLayout(self)
        vbox_layout.setContentsMargins(5, 5, 5, 5)
        vbox_layout.addWidget(self.view)
        vbox_layout.addWidget(self.toolbar)
Example #35
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["type"] = os.environ.get('RD_CONFIG_TYPE')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    common.connect()

    try:
        if data["type"] == "Deployment":
            k8s_beta = client.ExtensionsV1beta1Api()
            resp = k8s_beta.delete_namespaced_deployment(
                name=data["name"],
                namespace=data["namespace"],
                body=client.V1DeleteOptions(propagation_policy='Foreground',
                                            grace_period_seconds=5),
                pretty="true")

            print("Deployment deleted. status='%s'" % str(resp.status))

        if data["type"] == "Service":
            api_instance = client.CoreV1Api()
            resp = api_instance.delete_namespaced_service(
                namespace=data["namespace"],
                name=data["name"],
                body=client.V1DeleteOptions(propagation_policy='Foreground',
                                            grace_period_seconds=5),
                pretty="true")
            print("Service deleted. status='%s'" % str(resp.status))

        if data["type"] == "Ingress":
            k8s_beta = client.ExtensionsV1beta1Api()
            body = client.V1DeleteOptions()
            resp = k8s_beta.delete_namespaced_ingress(
                name=data["name"],
                namespace=data["namespace"],
                body=body,
                pretty="true")
            print("Ingress deleted. status='%s'" % str(resp.status))

        if data["type"] == "Job":
            api_instance = client.BatchV1Api()
            resp = api_instance.delete_namespaced_job(
                namespace=data["namespace"], name=data["name"], pretty="true")
            print("Job deleted. status='%s'" % str(resp.status))

        if data["type"] == "StorageClass":
            api_instance = client.StorageV1Api()

            resp = api_instance.delete_storage_class(
                name=data["name"],
                body=client.V1DeleteOptions(),
                pretty="true")
            print("Storage Class deleted. status='%s'" % str(resp))

        if data["type"] == "PersistentVolumeClaim":
            api_instance = client.CoreV1Api()

            resp = api_instance.delete_namespaced_persistent_volume_claim(
                namespace=data["namespace"],
                body=client.V1DeleteOptions(),
                name=data["name"],
                pretty="true")
            print("PVC  deleted. status='%s'" % str(resp.status))

        if data["type"] == "Secret":
            api_instance = client.CoreV1Api()

            resp = api_instance.delete_namespaced_secret(
                namespace=data["namespace"],
                name=data["name"],
                body=client.V1DeleteOptions(),
                pretty="true")
            print("Secret  deleted. status='%s'" % str(resp.status))

        if data["type"] == "PersistentVolume":
            api_instance = client.CoreV1Api()

            # PersistentVolumes are cluster-scoped, so no namespace argument
            resp = api_instance.delete_persistent_volume(
                name=data["name"],
                body=client.V1DeleteOptions(),
                pretty="true")
            print("PV deleted. status='%s'" % str(resp.status))

    except ApiException as e:
        log.error("Exception error creating: %s\n" % e)
        sys.exit(1)
Example #36
def main():
    parser = OptionParser(usage='%prog [options]')
    parser.add_option(
        '-n',
        '--dry-run',
        action='store_true',
        default=False,
        help='show what would be inserted without actually doing it')
    options, args = parser.parse_args()

    # temporary table to receive new runs
    db = connect()
    db.autocommit = False

    #cursor = db.cursor()
    #cursor.execute('SELECT run_id FROM run_suppress UNION SELECT run_id FROM run')
    #known = frozenset(row[0] for row in results(cursor))
    #print 'known:', len(known)
    #del cursor
    #return 13

    db.cursor().execute("""
      CREATE TEMPORARY TABLE upload_run (
          run_id VARCHAR(24) PRIMARY KEY NOT NULL,
          application_name VARCHAR(50) NOT NULL,
          application_version VARCHAR(50) NOT NULL,
          application_release VARCHAR(50) NOT NULL,
          build_distribution VARCHAR(50) NOT NULL,
          version VARCHAR(255) NOT NULL,
          sparsity INTEGER NOT NULL CHECK (sparsity > 0),
          exit_signal SMALLINT NOT NULL CHECK (exit_signal >= 0),
          exit_status SMALLINT NOT NULL CHECK (exit_status >= 0),
          CHECK (exit_status = 0 OR exit_signal = 0),
          date TIMESTAMP NOT NULL,
          server_name VARCHAR(50) NOT NULL
      )
      """)

    if args:
        subdirs = args
    else:
        subdirs = sys.stdin
        subdirs = imap(str.rstrip, subdirs)

    # upload new runs into temporary table
    subdirs = imap(dirname, subdirs)
    runs = uploadRuns(db, subdirs)

    # remove anything we already knew about
    print 'discard already-known runs'
    cursor = db.cursor()
    cursor.execute("""
      DELETE FROM upload_run
        USING run
        WHERE upload_run.run_id = run.run_id
    """)
    print '\t%d discarded' % cursor.rowcount
    del cursor

    # merge new runs into main table
    db.cursor().execute("""
      INSERT INTO run SELECT
          run_id,
          build_id,
          version,
          sparsity,
          exit_signal,
          exit_status,
          date,
          server_name
      FROM upload_run
      NATURAL LEFT JOIN build
    """)

    # finish up with database
    if options.dry_run:
        db.rollback()
    else:
        db.commit()

    db.close()

    # archive new runs
    for run in runs:
        archive = archives[run.server_name]
        year, month = run.date.split('-')[0:2]
        destdir = '%s/%s/%s' % (archive, year, month)
        destination = '%s/%s' % (destdir, run.run_id)
        print 'archive: %s -> %s' % (run.subdir, destdir)
        if not options.dry_run:
            if not exists(destdir):
                makedirs(destdir)
            rename(run.subdir, destination)
Example #37
    def __init__(self, node, astergui, parent=None):  # pragma pylint: disable=too-many-locals
        """
        Create editor panel.

        Arguments:
            node (Stage, Unit): Object to manage.
            astergui (AsterGui): *AsterGui* instance.
            parent (Optional[QWidget]): Parent widget. Defaults to
                *None*.
        """
        #----------------------------------------------------------------------
        super(UnitPanel, self).__init__(parent=parent,
                                        name="",
                                        astergui=astergui)

        #----------------------------------------------------------------------
        self.node = node
        self.prev_unit = None
        self.unit = None

        #----------------------------------------------------------------------
        # set title
        title = translate("UnitPanel", "Edit data file") \
            if self.mode == UnitPanel.EditMode \
            else translate("UnitPanel", "Add data file")
        self._controllername = title
        self.setWindowTitle(title)
        # set icon
        pixmap = load_pixmap("as_pic_edit_file.png") \
            if self.mode == UnitPanel.EditMode \
            else load_pixmap("as_pic_new_file.png")
        self.setPixmap(pixmap)

        #----------------------------------------------------------------------
        # top-level layout
        v_layout = Q.QVBoxLayout(self)
        v_layout.setContentsMargins(0, 0, 0, 0)

        #----------------------------------------------------------------------
        # top level widget to easily manage read-only mode
        self.frame = Q.QWidget(self)
        v_layout.addWidget(self.frame)

        #----------------------------------------------------------------------
        # main layout
        glayout = Q.QGridLayout(self.frame)
        glayout.setContentsMargins(0, 0, 0, 0)

        #----------------------------------------------------------------------
        # 'Mode' controls
        label = Q.QLabel(translate("DataFiles", "Mode"), self.frame)
        glayout.addWidget(label, 0, 0)

        self.attr_combo = Q.QComboBox(self.frame)
        self.attr_combo.setObjectName("Mode")
        attr_list = [FileAttr.In, FileAttr.Out, FileAttr.InOut]
        for attr in attr_list:
            self.attr_combo.addItem(FileAttr.value2str(attr), attr)
        self.attr_combo.setCurrentIndex(-1)
        glayout.addWidget(self.attr_combo, 0, 1)

        #----------------------------------------------------------------------
        label = Q.QLabel(translate("DataFiles", "Filename"), self.frame)
        glayout.addWidget(label, 1, 0)

        self.file_combo = Q.QComboBox(self.frame)
        self.file_combo.setObjectName("Filename")
        glayout.addWidget(self.file_combo, 1, 1, 1, 2)

        self.file_btn = Q.QToolButton(self.frame)
        self.file_btn.setText("...")
        self.file_btn.setObjectName("Filename")
        glayout.addWidget(self.file_btn, 1, 3)

        #----------------------------------------------------------------------
        # 'Unit' controls
        label = Q.QLabel(translate("DataFiles", "Unit"), self.frame)
        glayout.addWidget(label, 2, 0)

        self.unit_edit = Q.QLineEdit(self.frame)
        self.unit_edit.setObjectName("Unit")
        self.unit_edit.setValidator(Q.QIntValidator(2, 99, self.unit_edit))
        glayout.addWidget(self.unit_edit, 2, 1)

        #----------------------------------------------------------------------
        # 'Exists' controls
        label = Q.QLabel(translate("DataFiles", "Exists"), self.frame)
        glayout.addWidget(label, 3, 0)

        self.exists_check = Q.QCheckBox(self.frame)
        self.exists_check.setObjectName("Exists")
        self.exists_check.setEnabled(False)
        glayout.addWidget(self.exists_check, 3, 1)

        #----------------------------------------------------------------------
        # 'Embedded' controls
        label = Q.QLabel(translate("DataFiles", "Embedded"), self.frame)
        glayout.addWidget(label, 4, 0)

        self.embedded_check = Q.QCheckBox(self.frame)
        self.embedded_check.setObjectName("Embedded")
        glayout.addWidget(self.embedded_check, 4, 1)

        #----------------------------------------------------------------------
        # tune layout
        glayout.setColumnStretch(1, 2)
        glayout.setColumnStretch(2, 2)
        glayout.setRowStretch(glayout.rowCount(), 10)

        #----------------------------------------------------------------------
        # initialize unit model
        file_model = UnitModel(self.stage)
        self.file_combo.setModel(file_model)
        self.file_combo.setCurrentIndex(-1)

        #----------------------------------------------------------------------
        # initialize controls from data model object
        if self.mode == UnitPanel.EditMode:
            self.setEditData(node)

        #----------------------------------------------------------------------
        # connections
        connect(self.file_combo.currentIndexChanged, self.updateControls)
        connect(self.file_combo.currentIndexChanged, self.updateButtonStatus)
        connect(self.file_btn.clicked, self.browseFile)
        connect(self.unit_edit.textChanged, self.updateButtonStatus)
        connect(self.attr_combo.currentIndexChanged, self.updateButtonStatus)
        connect(self.embedded_check.toggled, self.embeddedChanged)
        connect(file_model.rowsAboutToBeInserted, self.beforeUpdate)
        connect(file_model.rowsInserted, self.afterUpdate)
        connect(file_model.rowsAboutToBeRemoved, self.beforeUpdate)
        connect(file_model.rowsRemoved, self.afterUpdate)

        #----------------------------------------------------------------------
        # update status
        self.updateControls()
Example #38
def main():
    # parse options
    (options, args) = parse_options()

    if os.isatty(sys.stdin.fileno()):
        raise RuntimeError('Need configuration in stdin.')
    config = common.read_config(sys.stdin)
    conn = common.connect(config.s3)
    bucket = None

    try:
        # setup
        real_stdout = sys.stdout
        sys.stdout = sys.stderr

        # verify all required config items are present
        if 'readwrite' not in config:
            raise RuntimeError('readwrite section not found in config')
        for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
            if item not in config.readwrite:
                raise RuntimeError(
                    "Missing readwrite config item: {item}".format(item=item))
        for item in ['num', 'size', 'stddev']:
            if item not in config.readwrite.files:
                raise RuntimeError(
                    "Missing readwrite config item: files.{item}".format(
                        item=item))

        seeds = dict(config.readwrite.get('random_seed', {}))
        seeds.setdefault('main', random.randrange(2**32))

        rand = random.Random(seeds['main'])

        for name in ['names', 'contents', 'writer', 'reader']:
            seeds.setdefault(name, rand.randrange(2**32))

        print 'Using random seeds: {seeds}'.format(seeds=seeds)

        # setup bucket and other objects
        bucket_name = common.choose_bucket_prefix(config.readwrite.bucket,
                                                  max_len=30)
        bucket = conn.create_bucket(bucket_name)
        print "Created bucket: {name}".format(name=bucket.name)

        # check flag for deterministic file name creation
        if not config.readwrite.get('deterministic_file_names'):
            print 'Creating random file names'
            file_names = realistic.names(
                mean=15,
                stddev=4,
                seed=seeds['names'],
            )
            file_names = itertools.islice(file_names,
                                          config.readwrite.files.num)
            file_names = list(file_names)
        else:
            print 'Creating file names that are deterministic'
            file_names = []
            for x in xrange(config.readwrite.files.num):
                file_names.append('test_file_{num}'.format(num=x))

        files = realistic.files2(
            mean=1024 * config.readwrite.files.size,
            stddev=1024 * config.readwrite.files.stddev,
            seed=seeds['contents'],
        )
        q = gevent.queue.Queue()

        # warmup - get initial set of files uploaded if there are any writers specified
        if config.readwrite.writers > 0:
            print "Uploading initial set of {num} files".format(
                num=config.readwrite.files.num)
            warmup_pool = gevent.pool.Pool(size=100)
            for file_name in file_names:
                fp = next(files)
                warmup_pool.spawn_link_exception(
                    write_file,
                    bucket=bucket,
                    file_name=file_name,
                    fp=fp,
                )
            warmup_pool.join()

        # main work
        print "Starting main worker loop."
        print "Using file size: {size} +- {stddev}".format(
            size=config.readwrite.files.size,
            stddev=config.readwrite.files.stddev)
        print "Spawning {w} writers and {r} readers...".format(
            w=config.readwrite.writers, r=config.readwrite.readers)
        group = gevent.pool.Group()
        rand_writer = random.Random(seeds['writer'])

        # Don't create random files if deterministic_files_names is set and true
        if not config.readwrite.get('deterministic_file_names'):
            for x in xrange(config.readwrite.writers):
                this_rand = random.Random(rand_writer.randrange(2**32))
                group.spawn_link_exception(
                    writer,
                    bucket=bucket,
                    worker_id=x,
                    file_names=file_names,
                    files=files,
                    queue=q,
                    rand=this_rand,
                )

        # Since the loop generating readers already uses config.readwrite.readers
        # and the file names are already generated (randomly or deterministically),
        # this loop needs no additional qualifiers. If zero readers are specified,
        # it will behave as expected (no data is read)
        rand_reader = random.Random(seeds['reader'])
        for x in xrange(config.readwrite.readers):
            this_rand = random.Random(rand_reader.randrange(2**32))
            group.spawn_link_exception(
                reader,
                bucket=bucket,
                worker_id=x,
                file_names=file_names,
                queue=q,
                rand=this_rand,
            )

        def stop():
            group.kill(block=True)
            q.put(StopIteration)

        gevent.spawn_later(config.readwrite.duration, stop)

        # wait for all the tests to finish
        group.join()
        print 'post-join, queue size {size}'.format(size=q.qsize())

        if q.qsize() > 0:
            for temp_dict in q:
                if 'error' in temp_dict:
                    raise Exception('exception:\n\t{msg}\n\t{trace}'.format(
                        msg=temp_dict['error']['msg'],
                        trace=temp_dict['error']['traceback']))
                else:
                    yaml.safe_dump(temp_dict, stream=real_stdout)

    finally:
        # cleanup
        if options.cleanup:
            if bucket is not None:
                common.nuke_bucket(bucket)
Example #39
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}

    data["api_version"] = os.environ.get('RD_CONFIG_API_VERSION')
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["container_name"] = os.environ.get('RD_CONFIG_CONTAINER_NAME')
    data["image"] = os.environ.get('RD_CONFIG_IMAGE')
    data["ports"] = os.environ.get('RD_CONFIG_PORTS')
    data["replicas"] = os.environ.get('RD_CONFIG_REPLICAS')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')
    data["labels"] = os.environ.get('RD_CONFIG_LABELS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS'):
        data["environments"] = os.environ.get('RD_CONFIG_ENVIRONMENTS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS'):
        evs = os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS')
        data["environments_secrets"] = evs

    if os.environ.get('RD_CONFIG_LIVENESS_PROBE'):
        data["liveness_probe"] = os.environ.get('RD_CONFIG_LIVENESS_PROBE')

    if os.environ.get('RD_CONFIG_READINESS_PROBE'):
        data["readiness_probe"] = os.environ.get('RD_CONFIG_READINESS_PROBE')

    if os.environ.get('RD_CONFIG_VOLUME_MOUNTS'):
        data["volume_mounts"] = os.environ.get('RD_CONFIG_VOLUME_MOUNTS')

    if os.environ.get('RD_CONFIG_VOLUMES'):
        data["volumes"] = os.environ.get('RD_CONFIG_VOLUMES')

    if os.environ.get('RD_CONFIG_CONTAINER_COMMAND'):
        cc = os.environ.get('RD_CONFIG_CONTAINER_COMMAND')
        data["container_command"] = cc

    if os.environ.get('RD_CONFIG_CONTAINER_ARGS'):
        data["container_args"] = os.environ.get('RD_CONFIG_CONTAINER_ARGS')

    if os.environ.get('RD_CONFIG_RESOURCES_REQUESTS'):
        rr = os.environ.get('RD_CONFIG_RESOURCES_REQUESTS')
        data["resources_requests"] = rr

    if os.environ.get('RD_CONFIG_ANNOTATIONS'):
        data["annotations"] = os.environ.get('RD_CONFIG_ANNOTATIONS')

    if os.environ.get('RD_CONFIG_IMAGEPULLSECRETS'):
        data["image_pull_secrets"] = os.environ.get(
            'RD_CONFIG_IMAGEPULLSECRETS')

    log.debug("Creating job from data:")
    log.debug(data)

    common.connect()

    apiV1 = client.AppsV1Api()

    deployment = create_deployment_object(data)
    create_deployment(apiV1, deployment, data)
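
The helper create_deployment_object(data) is assumed by this example and not shown. A minimal sketch of what it could look like with the official kubernetes Python client is given below; the way ports and labels are derived from the RD_CONFIG_* strings is an assumption for illustration only.

from kubernetes import client

def create_deployment_object(data):
    # One container built from the plugin data; ports are assumed to be a
    # comma-separated list of integers.
    ports = [client.V1ContainerPort(container_port=int(p))
             for p in (data.get("ports") or "").split(",") if p]
    container = client.V1Container(
        name=data["container_name"],
        image=data["image"],
        ports=ports)
    labels = {"app": data["name"]}
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels=labels),
        spec=client.V1PodSpec(containers=[container]))
    spec = client.V1DeploymentSpec(
        replicas=int(data["replicas"]),
        selector=client.V1LabelSelector(match_labels=labels),
        template=template)
    return client.V1Deployment(
        api_version=data.get("api_version", "apps/v1"),
        kind="Deployment",
        metadata=client.V1ObjectMeta(name=data["name"],
                                     namespace=data["namespace"]),
        spec=spec)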
示例#40
0
# -*- coding: utf-8 -*-

from hbase.ttypes import TPut, TColumnValue, TGet, TDelete, TColumn
from common import connect

with connect() as client:
    table = 'tsdata'

    # put multiple
    tputs = [
        TPut('sys.cpu.user:20180421:192.168.1.1', [
            TColumnValue('cf', '1015', '0.28'),
            TColumnValue('cf', '1016', '0.35'),
            TColumnValue('cf', '1017', '0.25'),
        ]),
        TPut('sys.cpu.user:20180421:192.168.1.2', [
            TColumnValue('cf', '1015', '0.45'),
            TColumnValue('cf', '1016', '0.32'),
            TColumnValue('cf', '1017', '0.58'),
        ]),
    ]

    client.putMultiple(table, tputs)

    # get multiple
    tgets = [
        TGet('sys.cpu.user:20180421:192.168.1.1', [TColumn('cf', '1015')]),
        TGet('sys.cpu.user:20180421:192.168.1.2', [TColumn('cf', '1015')])
    ]
    for tresult in client.getMultiple(table, tgets):
        print(tresult)
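
The connect() context manager imported from common is not part of this example. A minimal sketch, assuming an HBase Thrift2 gateway and a Thrift-generated hbase.THBaseService module; host and port are placeholders.

from contextlib import contextmanager
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hbase import THBaseService

@contextmanager
def connect(host='127.0.0.1', port=9090):
    # Plain buffered socket transport against the Thrift2 gateway.
    transport = TTransport.TBufferedTransport(TSocket.TSocket(host, port))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = THBaseService.Client(protocol)
    transport.open()
    try:
        yield client
    finally:
        transport.close()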
示例#41
0
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}

    data["type"] = os.environ.get('RD_CONFIG_TYPE')
    data["yaml"] = os.environ.get('RD_CONFIG_YAML')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    common.connect()

    try:
        if data["type"] == "Deployment":
            dep = yaml.load(data["yaml"])
            k8s_beta = client.ExtensionsV1beta1Api()
            resp = k8s_beta.create_namespaced_deployment(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Service":
            api_instance = client.CoreV1Api()
            dep = yaml.load(data["yaml"])
            resp = api_instance.create_namespaced_service(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Ingress":
            dep = yaml.load(data["yaml"])
            k8s_beta = client.ExtensionsV1beta1Api()
            resp = k8s_beta.create_namespaced_ingress(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Job":
            api_instance = client.BatchV1Api()
            dep = yaml.load(data["yaml"])
            resp = api_instance.create_namespaced_job(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "StorageClass":
            dep = yaml.load(data["yaml"])
            api_instance = client.StorageV1Api()

            resp = api_instance.create_storage_class(body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "PersistentVolumeClaim":
            dep = yaml.load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_namespaced_persistent_volume_claim(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Secret":
            dep = yaml.load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_namespaced_secret(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "PersistentVolume":
            dep = yaml.load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_persistent_volume(
                body=dep, pretty="true")

            print(common.parseJson(resp.status))

    except ApiException as e:
        log.error("Exception error creating: %s\n" % e)
        sys.exit(1)
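
The chain of if-blocks above repeats the same create-and-print pattern for each resource type. One possible way to condense it is a dispatch table; the sketch below is a hypothetical refactor that covers only the namespaced resources and reuses the API classes and method names from the example.

import yaml
from kubernetes import client

# Hypothetical mapping mirroring the calls used in the example above.
NAMESPACED_CREATORS = {
    "Deployment": (client.ExtensionsV1beta1Api, "create_namespaced_deployment"),
    "Service": (client.CoreV1Api, "create_namespaced_service"),
    "Ingress": (client.ExtensionsV1beta1Api, "create_namespaced_ingress"),
    "Job": (client.BatchV1Api, "create_namespaced_job"),
    "PersistentVolumeClaim": (client.CoreV1Api,
                              "create_namespaced_persistent_volume_claim"),
    "Secret": (client.CoreV1Api, "create_namespaced_secret"),
}

def create_namespaced_resource(data):
    api_cls, method = NAMESPACED_CREATORS[data["type"]]
    body = yaml.safe_load(data["yaml"])
    return getattr(api_cls(), method)(
        namespace=data["namespace"], body=body, pretty="true")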
示例#42
0
def _main():

    # Get some time interval options.
    parser = argparse.ArgumentParser(description="Import time entries from Toggl to Shotgun")
    add_common_arguments(parser)
    parser.add_argument(
        "--start", "-s",
        action="store",
        required=False,
        default=None,
        help="First day to import data for in the YYYY-MM-DD format. Defaults to 5 days ago at midnight."
    )
    parser.add_argument(
        "--end", "-e",
        action="store",
        required=False,
        default=None,
        help="Last day to import data for in the YYYY-MM-DD format. Defaults to current time."
    )

    # Read the options from the command line.
    args = parser.parse_args()

    if args.start is not None:
        start = _user_str_to_utc_timezone(args.start)
    else:
        # Go back as far as 7 days ago to import data.
        today_at_midnight = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        start = today_at_midnight - datetime.timedelta(days=7) + UTC_OFFSET

    if args.end is not None:
        end = _user_str_to_utc_timezone(args.end)
    else:
        # Otherwise, now is pretty much the furthest something can be logged.
        end = datetime.datetime.utcnow()

    # Log into Shotgun and toggl.
    (sg, sg_self), (toggl, wid) = connect(args.headless)

    # Get Toggl project information
    toggl_projects = get_projects_from_toggl(toggl)
    # Create a map that goes from Toggl project id to a Shotgun ticket id.
    toggl_projects_to_sg = {project_id: ticket_id for ticket_id, (_, project_id) in toggl_projects}

    # Get the entries that the user requested.
    time_entries = toggl.TimeEntries.get(
        start_date=_to_toggl_date_format(start),
        end_date=_to_toggl_date_format(end)
    )

    previous_day = None
    # Group tasks by day, project and task name so we can compute and save a duration for a given task
    # on a given project on a given day.
    for (day, pid, task_name), time_entries in _sort_time_entries(_massage_time_entries(time_entries)):

        # Task names are optional. If missing, default to an empty string.
        task_name = task_name or ""

        # if the project is not tracked in Shotgun, skip it!
        ticket_id = toggl_projects_to_sg.get(pid)
        if ticket_id is None:
            continue

        # If we're on a new day, print its header.
        if previous_day != day:
            print day
            previous_day = day

        # Sum all the durations, excluding any entry still in progress (duration < 0), and convert to minutes.
        total_task_duration = int(sum((entry["duration"] for entry in time_entries if entry["duration"] >= 0)) / 60.0)

        # Show some progress.
        print "   Ticket %s, Task %s %s" % (
            ticket_id,
            task_name.ljust(40),
            _to_hours_minutes(total_task_duration)
        )

        ticket_link = {"type": "Ticket", "id": ticket_id}

        # Find if we have an entry for this time log.
        timelog_entity = sg.find_one(
            "TimeLog",
            [
                ["entity", "is", ticket_link],
                ["description", "is", task_name],
                ["date", "is", day]
            ]
        )

        # Create or update the entry in Shotgun.
        if timelog_entity:
            sg.update(
                "TimeLog",
                timelog_entity["id"],
                {"duration": total_task_duration}
            )
        else:
            sg.create("TimeLog", {
                "entity": ticket_link,
                "description": task_name,
                "duration": max(total_task_duration, 1),
                "project": {"type": "Project", "id": 12},
                "date": day
            })
示例#43
0
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}

    data["api_version"] = os.environ.get('RD_CONFIG_API_VERSION')
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["container_name"] = os.environ.get('RD_CONFIG_CONTAINER_NAME')
    data["image"] = os.environ.get('RD_CONFIG_IMAGE')
    if os.environ.get('RD_CONFIG_PORTS'):
        data["ports"] = os.environ.get('RD_CONFIG_PORTS')

    data["replicas"] = os.environ.get('RD_CONFIG_REPLICAS')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    if os.environ.get('RD_CONFIG_LABELS'):
        data["labels"] = os.environ.get('RD_CONFIG_LABELS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS'):
        data["environments"] = os.environ.get('RD_CONFIG_ENVIRONMENTS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS'):
        evs = os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS')
        data["environments_secrets"] = evs

    if os.environ.get('RD_CONFIG_LIVENESS_PROBE'):
        data["liveness_probe"] = os.environ.get('RD_CONFIG_LIVENESS_PROBE')

    if os.environ.get('RD_CONFIG_READINESS_PROBE'):
        data["readiness_probe"] = os.environ.get('RD_CONFIG_READINESS_PROBE')

    if os.environ.get('RD_CONFIG_CONTAINER_COMMAND'):
        cc = os.environ.get('RD_CONFIG_CONTAINER_COMMAND')
        data["container_command"] = cc

    if os.environ.get('RD_CONFIG_CONTAINER_ARGS'):
        data["container_args"] = os.environ.get('RD_CONFIG_CONTAINER_ARGS')

    if os.environ.get('RD_CONFIG_RESOURCES_REQUESTS'):
        rr = os.environ.get('RD_CONFIG_RESOURCES_REQUESTS')
        data["resources_requests"] = rr

    log.debug("Updating Deployment data:")
    log.debug(data)

    common.connect()

    try:
        extensions_v1beta1 = client.ExtensionsV1beta1Api()
        deployment = create_deployment_object(data)

        log.debug("deployment object: ")
        log.debug(deployment)

        update_deployment(extensions_v1beta1, deployment, data)
    except ApiException as e:
        log.error("Exception updating deployment: %s\n" % e)
        sys.exit(1)
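
update_deployment itself is assumed by this example. A minimal sketch that patches the existing Deployment with the newly built object through the same ExtensionsV1beta1Api instance:

def update_deployment(api_instance, deployment, data):
    # Patch rather than replace so unspecified fields keep their current values.
    return api_instance.patch_namespaced_deployment(
        name=data["name"],
        namespace=data["namespace"],
        body=deployment,
        pretty="true")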
示例#44
0
def wait():
    try:
        name = environ.get("RD_CONFIG_NAME")
        namespace = environ.get("RD_CONFIG_NAMESPACE")
        container = environ.get("RD_CONFIG_CONTAINER")
        retries = int(environ.get("RD_CONFIG_RETRIES"))
        sleep = float(environ.get("RD_CONFIG_SLEEP"))
        show_log = environ.get("RD_CONFIG_SHOW_LOG") == "true"

        # Poll for completion if retries
        retries_count = 0
        completed = False


        while True:

            common.connect()

            batch_v1 = client.BatchV1Api()
            core_v1 = client.CoreV1Api()

            api_response = batch_v1.read_namespaced_job_status(
                name,
                namespace,
                pretty="True"
            )
            log.debug(api_response)

            #for condition in api_response.status.conditions:
            #    log.info(condition.type)

            retries_count = retries_count + 1
            if retries_count > retries:
                log.error("Number of retries exceeded")
                completed = True

            if api_response.status.conditions:
                for condition in api_response.status.conditions:
                    if condition.type == "Failed":
                        completed = True


            if api_response.status.completion_time:
                completed = True

            if show_log:
                log.debug("Searching for pod associated with job")

                schedule_start_time = time.time()
                schedule_timeout = 600
                while True:
                    try:
                        pod_list = core_v1.list_namespaced_pod(
                            namespace,
                            label_selector="job-name==" + name
                        )
                        first_item = pod_list.items[0]
                        pod_name = first_item.metadata.name
                        break
                    except IndexError:
                        log.warning("Still waiting for pod to be scheduled")
                        time.sleep(60)
                        if schedule_timeout and time.time() - schedule_start_time > schedule_timeout:  # pragma: no cover
                            raise TimeoutError

                log.info("Fetching logs from pod: {0}".format(pod_name))

                # time.sleep(15)
                log.info("========================== job log start ==========================")
                start_time = time.time()
                timeout = 300
                log.debug(container)
                while True:
                    try:
                        if container:
                            core_v1.read_namespaced_pod_log(name=pod_name,
                                                            namespace=namespace, container=container)
                        else:
                            core_v1.read_namespaced_pod_log(name=pod_name,
                                                            namespace=namespace)

                        break
                    except ApiException as ex:
                        log.warning("Pod is not ready, status: {}".format(ex.status))
                        if ex.status == 200:
                            break
                        else:
                            log.info("waiting for log")
                            time.sleep(15)
                            if timeout and time.time() - start_time > timeout:  # pragma: no cover
                                raise TimeoutError

                w = watch.Watch()
                for line in w.stream(core_v1.read_namespaced_pod_log,
                                        name=pod_name,
                                        container=container,
                                        namespace=namespace):
                    print(line)

                log.info("=========================== job log end ===========================")

            if completed:
                break

            log.info("Waiting for job completion")
            show_log = False
            time.sleep(sleep)

        if api_response.status.succeeded:
            log.info("Job succeeded")
            sys.exit(0)
        else:
            log.info("Job failed")
            sys.exit(1)

    except ApiException as e:
        log.error("Exception waiting for job: %s\n" % e)
        sys.exit(1)
示例#45
0
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    common.connect()

    data = {}
    data["api_version"] = os.environ.get('RD_CONFIG_API_VERSION')
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')
    data["container_name"] = os.environ.get('RD_CONFIG_CONTAINER_NAME')
    data["container_image"] = os.environ.get('RD_CONFIG_CONTAINER_IMAGE')

    data["image_pull_policy"] = os.environ.get('RD_CONFIG_IMAGE_PULL_POLICY')

    if os.environ.get('RD_CONFIG_PARALLELISM'):
        data["parallelism"] = os.environ.get('RD_CONFIG_PARALLELISM')

    if os.environ.get('RD_CONFIG_LABELS'):
        data["labels"] = os.environ.get('RD_CONFIG_LABELS')

    if os.environ.get('RD_CONFIG_ANNOTATIONS'):
        data["annotations"] = os.environ.get('RD_CONFIG_ANNOTATIONS')

    if os.environ.get('RD_CONFIG_SELECTORS'):
        data["selectors"] = os.environ.get('RD_CONFIG_SELECTORS')

    if os.environ.get('RD_CONFIG_CONTAINER_COMMAND'):
        cmd = os.environ.get('RD_CONFIG_CONTAINER_COMMAND')
        data["container_command"] = cmd

    if os.environ.get('RD_CONFIG_CONTAINER_ARGS'):
        data["container_args"] = os.environ.get('RD_CONFIG_CONTAINER_ARGS')

    if os.environ.get('RD_CONFIG_RESOURCES_REQUESTS'):
        req = os.environ.get('RD_CONFIG_RESOURCES_REQUESTS')
        data["resources_requests"] = req

    if os.environ.get('RD_CONFIG_VOLUME_MOUNTS'):
        data["volume_mounts"] = os.environ.get('RD_CONFIG_VOLUME_MOUNTS')

    if os.environ.get('RD_CONFIG_VOLUMES'):
        data["volumes"] = os.environ.get('RD_CONFIG_VOLUMES')

    if os.environ.get('RD_CONFIG_JOB_RESTART_POLICY'):
        rpolicy = os.environ.get('RD_CONFIG_JOB_RESTART_POLICY')
        data["job_restart_policy"] = rpolicy

    if os.environ.get('RD_CONFIG_COMPLETIONS'):
        data["completions"] = os.environ.get('RD_CONFIG_COMPLETIONS')

    if os.environ.get('RD_CONFIG_ACTIVE_DEADLINE_SECONDS'):
        active_ds = os.environ.get('RD_CONFIG_ACTIVE_DEADLINE_SECONDS')
        data["active_deadline_seconds"] = active_ds

    if os.environ.get('RD_CONFIG_BACKOFF_LIMIT'):
        backoff_limit = os.environ.get('RD_CONFIG_BACKOFF_LIMIT')
        data["backoff_limit"] = backoff_limit

    if os.environ.get('RD_CONFIG_ENVIRONMENTS'):
        data["environments"] = os.environ.get('RD_CONFIG_ENVIRONMENTS')

    if os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS'):
        esecret = os.environ.get('RD_CONFIG_ENVIRONMENTS_SECRETS')
        data["environments_secrets"] = esecret

    log.debug("Creating job")
    log.debug(data)

    job = create_job_object(data)

    log.debug("new job: ")
    log.debug(job)

    try:

        k8s_client = client.BatchV1Api()
        api_response = k8s_client.create_namespaced_job(
            body=job, namespace=data["namespace"])

        print(common.parseJson(api_response.status))

    except ApiException as e:
        log.error("Exception creating job: %s\n" % e)
        sys.exit(1)
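
create_job_object(data) is assumed by this example. A minimal sketch covering only the core fields read above; optional settings such as volumes, probes, selectors and environment variables are omitted.

from kubernetes import client

def create_job_object(data):
    container = client.V1Container(
        name=data["container_name"],
        image=data["container_image"],
        image_pull_policy=data.get("image_pull_policy"))
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(name=data["name"]),
        spec=client.V1PodSpec(
            containers=[container],
            restart_policy=data.get("job_restart_policy", "Never")))
    spec = client.V1JobSpec(
        template=template,
        backoff_limit=int(data.get("backoff_limit", 6)),
        completions=int(data["completions"]) if data.get("completions") else None)
    return client.V1Job(
        api_version=data.get("api_version", "batch/v1"),
        kind="Job",
        metadata=client.V1ObjectMeta(name=data["name"],
                                     namespace=data["namespace"]),
        spec=spec)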
示例#46
0
def main():
    # parse options
    (options, args) = parse_options()

    if os.isatty(sys.stdin.fileno()):
        raise RuntimeError('Need configuration in stdin.')
    config = common.read_config(sys.stdin)
    conn = common.connect(config.s3)
    bucket = None

    try:
        # setup
        real_stdout = sys.stdout
        sys.stdout = sys.stderr

        # verify all required config items are present
        if 'readwrite' not in config:
            raise RuntimeError('readwrite section not found in config')
        for item in ['readers', 'writers', 'duration', 'files', 'bucket']:
            if item not in config.readwrite:
                raise RuntimeError("Missing readwrite config item: {item}".format(item=item))
        for item in ['num', 'size', 'stddev']:
            if item not in config.readwrite.files:
                raise RuntimeError("Missing readwrite config item: files.{item}".format(item=item))

        seeds = dict(config.readwrite.get('random_seed', {}))
        seeds.setdefault('main', random.randrange(2**32))

        rand = random.Random(seeds['main'])

        for name in ['names', 'contents', 'writer', 'reader']:
            seeds.setdefault(name, rand.randrange(2**32))

        print 'Using random seeds: {seeds}'.format(seeds=seeds)

        # setup bucket and other objects
        bucket_name = common.choose_bucket_prefix(config.readwrite.bucket, max_len=30)
        bucket = conn.create_bucket(bucket_name)
        print "Created bucket: {name}".format(name=bucket.name)

        # check flag for deterministic file name creation
        if not config.readwrite.get('deterministic_file_names'):
            print 'Creating random file names'
            file_names = realistic.names(
                mean=15,
                stddev=4,
                seed=seeds['names'],
                )
            file_names = itertools.islice(file_names, config.readwrite.files.num)
            file_names = list(file_names)
        else:
            print 'Creating file names that are deterministic'
            file_names = []
            for x in xrange(config.readwrite.files.num):
                file_names.append('test_file_{num}'.format(num=x))

        files = realistic.files2(
            mean=1024 * config.readwrite.files.size,
            stddev=1024 * config.readwrite.files.stddev,
            seed=seeds['contents'],
            )
        q = gevent.queue.Queue()

        
        # warmup - get initial set of files uploaded if there are any writers specified
        if config.readwrite.writers > 0:
            print "Uploading initial set of {num} files".format(num=config.readwrite.files.num)
            warmup_pool = gevent.pool.Pool(size=100)
            for file_name in file_names:
                fp = next(files)
                warmup_pool.spawn_link_exception(
                    write_file,
                    bucket=bucket,
                    file_name=file_name,
                    fp=fp,
                    )
            warmup_pool.join()

        # main work
        print "Starting main worker loop."
        print "Using file size: {size} +- {stddev}".format(size=config.readwrite.files.size, stddev=config.readwrite.files.stddev)
        print "Spawning {w} writers and {r} readers...".format(w=config.readwrite.writers, r=config.readwrite.readers)
        group = gevent.pool.Group()
        rand_writer = random.Random(seeds['writer'])

        # Don't create random files if deterministic_file_names is set and true
        if not config.readwrite.get('deterministic_file_names'):
            for x in xrange(config.readwrite.writers):
                this_rand = random.Random(rand_writer.randrange(2**32))
                group.spawn_link_exception(
                    writer,
                    bucket=bucket,
                    worker_id=x,
                    file_names=file_names,
                    files=files,
                    queue=q,
                    rand=this_rand,
                    )

        # Since the loop generating readers already uses config.readwrite.readers
        # and the file names are already generated (randomly or deterministically),
        # this loop needs no additional qualifiers. If zero readers are specified,
        # it will behave as expected (no data is read)
        rand_reader = random.Random(seeds['reader'])
        for x in xrange(config.readwrite.readers):
            this_rand = random.Random(rand_reader.randrange(2**32))
            group.spawn_link_exception(
                reader,
                bucket=bucket,
                worker_id=x,
                file_names=file_names,
                queue=q,
                rand=this_rand,
                )
        def stop():
            group.kill(block=True)
            q.put(StopIteration)
        gevent.spawn_later(config.readwrite.duration, stop)

        # wait for all the tests to finish
        group.join()
        print 'post-join, queue size {size}'.format(size=q.qsize())

        if q.qsize() > 0:
            for temp_dict in q:
                if 'error' in temp_dict:
                    raise Exception('exception:\n\t{msg}\n\t{trace}'.format(
                                    msg=temp_dict['error']['msg'],
                                    trace=temp_dict['error']['traceback'])
                                   )
                else:
                    yaml.safe_dump(temp_dict, stream=real_stdout)

    finally:
        # cleanup
        if options.cleanup:
            if bucket is not None:
                common.nuke_bucket(bucket)
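
The write_file helper used in the warmup loop is assumed; a minimal sketch using the boto S3 API that the rest of these tests rely on:

def write_file(bucket, file_name, fp):
    # Upload one generated file object under the given key, rewinding first so
    # the whole content is written.
    key = bucket.new_key(file_name)
    key.set_contents_from_file(fp, rewind=True)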
示例#47
0
def main():

    common.connect()

    api = core_v1_api.CoreV1Api()
    namespace = os.environ.get('RD_CONFIG_NAMESPACE')
    name = os.environ.get('RD_CONFIG_NAME')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s" % name)
    log.debug("Namespace: %s " % namespace)
    log.debug("--------------------------")

    delete_on_fail = False
    if os.environ.get('RD_CONFIG_DELETEONFAIL') == 'true':
        delete_on_fail = True

    resp = None
    try:
        resp = api.read_namespaced_pod(name=name, namespace=namespace)
    except ApiException as e:
        if e.status != 404:
            log.error("Unknown error: %s" % e)
            exit(1)

    if not resp:
        log.error("Pod %s does not exits." % name)
        exit(1)

    core_v1 = client.CoreV1Api()
    response = core_v1.read_namespaced_pod_status(name=name,
                                                  namespace=namespace,
                                                  pretty="True")

    if response.spec.containers:
        container = response.spec.containers[0].name
    else:
        log.error("Container not found")
        exit(1)

    script = os.environ.get('RD_CONFIG_SCRIPT')
    invocation = "/bin/bash"
    if 'RD_CONFIG_INVOCATION' in os.environ:
        invocation = os.environ.get('RD_CONFIG_INVOCATION')

    destination_path = "/tmp"

    if 'RD_NODE_FILE_COPY_DESTINATION_DIR' in os.environ:
        destination_path = os.environ.get('RD_NODE_FILE_COPY_DESTINATION_DIR')

    temp = tempfile.NamedTemporaryFile()
    destination_file_name = os.path.basename(temp.name)
    full_path = destination_path + "/" + destination_file_name

    try:
        temp.write(script)
        temp.seek(0)

        log.debug("coping script from %s to %s" % (temp.name, full_path))

        common.copy_file(name=name,
                         namespace=namespace,
                         container=container,
                         source_file=temp.name,
                         destination_path=destination_path,
                         destination_file_name=destination_file_name)

    finally:
        temp.close()

    permissions_command = ["chmod", "+x", full_path]

    log.debug("setting permissions %s" % permissions_command)
    resp = common.run_command(name=name,
                              namespace=namespace,
                              container=container,
                              command=permissions_command)

    if resp.peek_stdout():
        print(resp.read_stdout())

    if resp.peek_stderr():
        print(resp.read_stderr())
        sys.exit(1)

    # calling exec and wait for response.
    exec_command = invocation.split(" ")
    exec_command.append(full_path)

    if 'RD_CONFIG_ARGUMENTS' in os.environ:
        arguments = os.environ.get('RD_CONFIG_ARGUMENTS')
        exec_command.append(arguments)

    log.debug("running script %s" % exec_command)

    resp, error = common.run_interactive_command(name=name,
                                                 namespace=namespace,
                                                 container=container,
                                                 command=exec_command)
    if error:
        log.error("error running script")

        if delete_on_fail:
            log.info("removing POD on fail")
            data = {}
            data["name"] = name
            data["namespace"] = namespace
            common.delete_pod(api, data)

            log.info("POD deleted")
        sys.exit(1)

    rm_command = ["rm", full_path]

    log.debug("removing file %s" % rm_command)
    resp = common.run_command(name=name,
                              namespace=namespace,
                              container=container,
                              command=rm_command)

    if resp.peek_stdout():
        log.debug(resp.read_stdout())

    if resp.peek_stderr():
        log.debug(resp.read_stderr())
        sys.exit(1)
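
common.run_command is assumed by this example. A minimal sketch built on the kubernetes stream API, returning the same kind of response object whose peek_stdout/read_stdout methods are used above; the run_forever timeout is an assumption.

from kubernetes import client
from kubernetes.stream import stream

def run_command(name, namespace, container, command):
    api = client.CoreV1Api()
    resp = stream(api.connect_get_namespaced_pod_exec,
                  name,
                  namespace,
                  command=command,
                  container=container,
                  stderr=True,
                  stdin=False,
                  stdout=True,
                  tty=False,
                  _preload_content=False)
    # Let the command finish so the caller can peek stdout/stderr afterwards.
    resp.run_forever(timeout=60)
    return resp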
示例#48
0
    def __init__(self, astergui):
        """
        Create dialog.

        Arguments:
            astergui (AsterGui): Parent AsterGui instance.
        """
        # pragma pylint: disable=too-many-statements

        Dialog.__init__(self, astergui.mainWindow())
        self.setObjectName("Preferences")

        self.astergui = astergui
        self.widgets = {}
        self.changes = []

        self.setWindowTitle(translate("PrefDlg", "Preferences"))

        items = []
        values = []
        icons = []

        def _add_item(_text, _value=None, _icon=None, _clear=False):
            if _clear:
                items[:] = []
                values[:] = []
                icons[:] = []
            items.append(_text)
            if _value is not None:
                values.append(_value)
            if _icon is not None:
                icons.append(_icon)

        # 'General' tab
        title = translate("PrefDlg", "General")
        tab = self.addTab(title)
        tab.setObjectName("Preferences_General")

        # Language
        title = translate("PrefDlg", "Language")
        _add_item("English", "en", "as_lang_en.png", _clear=True)
        _add_item("Français", "fr", "as_lang_fr.png")
        self._addSelectorItem(tab, title, "language", items, icons, values)
        self.widgets["language"].need_restart = True

        # Add spacing
        self._addSpacing(tab, 10)

        # code_aster version
        title = translate("PrefDlg", "Version of code_aster")
        _add_item(translate("PrefDlg", "Use default"), "default", _clear=True)
        _add_item(translate("PrefDlg", "Ask"), "ask")
        self._addSelectorItem(tab, title, "code_aster_version", items, icons,
                              values)

        # Add spacing
        self._addSpacing(tab, 10)

        # Toolbar buttons style
        title = translate("PrefDlg", "Toolbar button style")
        _add_item(translate("PrefDlg", "Only display icon"),
                  "icon_only",
                  _clear=True)
        _add_item(translate("PrefDlg", "Only display text"), "text_only")
        _add_item(translate("PrefDlg", "Text appears beside icon"),
                  "text_beside_icon")
        _add_item(translate("PrefDlg", "Text appears under icon"),
                  "text_under_icon")
        _add_item(translate("PrefDlg", "Follow style"), "follow_style")
        self._addSelectorItem(tab, title, "toolbar_button_style", items, icons,
                              values)

        # Workspace tabs position
        title = translate("PrefDlg", "Workspace tab pages position")
        _add_item(translate("PrefDlg", "North"), "north", _clear=True)
        _add_item(translate("PrefDlg", "South"), "south")
        _add_item(translate("PrefDlg", "West"), "west")
        _add_item(translate("PrefDlg", "East"), "east")
        self._addSelectorItem(tab, title, "workspace_tab_position", items,
                              icons, values)

        # Add spacing
        self._addSpacing(tab, 10)

        # Strict import mode
        title = translate("PrefDlg", "Strict import mode")
        self._addSwitchItem(tab, title, "strict_import_mode")

        title = translate("PrefDlg",
                          "Limit of number of lines for graphical mode")
        self._addIntegerItem(tab, title, "nblines_limit", 1, 100000)

        # Add spacing
        self._addSpacing(tab, 10)

        # Switch on/off Undo/Redo feature
        title = translate("PrefDlg", "Disable Undo/Redo feature")
        self._addSwitchItem(tab, title, "disable_undo_redo")

        self._addSpacing(tab, 10)

        # Use CodeAster native naming
        # (switch off business-oriented translation)
        title = translate("PrefDlg", "Use business-oriented translations")
        self._addSwitchItem(tab, title, "use_business_translations")
        if behavior().forced_native_names:
            force = behavior().force_native_names
            self.widgets["use_business_translations"].setChecked(not force)
            self.widgets["use_business_translations"].setDisabled(True)
            self.widgets["use_business_translations"].ignore = True

        self._addSpacing(tab, 10)

        # Documentation url
        title = translate("PrefDlg", "Documentation website")
        self._addStringItem(tab, title, "doc_base_url")

        # Add stretch
        self._addSpacing(tab, 0, True)

        # --- end of 'General' tab

        # 'Interface' tab
        title = translate("PrefDlg", "Interface")
        tab = self.addTab(title)
        tab.setObjectName("Preferences_Interface")

        # *** Data Settings

        # Show catalogue name in Data Settings panel
        title = translate("PrefDlg",
                          "Show catalogue name in Data Settings panel")
        self._addSwitchItem(tab, title, "show_catalogue_name")

        # Show comments in Data Settings panel
        title = translate("PrefDlg", "Show comments in Data Settings panel")
        self._addSwitchItem(tab, title, "show_comments")

        # Enable auto-hide feature for search tool in Data Settings panel
        title = translate("PrefDlg", "Auto-hide search panel")
        self._addSwitchItem(tab, title, "auto_hide_search")

        # Add spacing
        self._addSpacing(tab, 10)

        # *** Data Files

        # Sort stages in Data Files panel
        title = translate("PrefDlg", "Sort stages in Data Files panel")
        self._addSwitchItem(tab, title, "sort_stages")

        # Show related concepts in Data Files panel
        title = translate("PrefDlg",
                          "Show related concepts in Data Files panel")
        self._addSwitchItem(tab, title, "show_related_concepts")

        # Join similar files in Data Files panel
        title = translate("PrefDlg",
                          "Join similar data files in Data Files panel")
        self._addSwitchItem(tab, title, "join_similar_files")

        # Add spacing
        self._addSpacing(tab, 10)

        # *** Operations

        # Auto-edit command
        title = translate("PrefDlg", "Automatically activate command edition")
        self._addSwitchItem(tab, title, "auto_edit")

        # Add spacing
        self._addSpacing(tab, 10)

        # *** Windows

        # Show read-only banner
        title = translate("PrefDlg", "Show read-only banner")
        self._addSwitchItem(tab, title, "show_readonly_banner")

        # Add spacing
        self._addSpacing(tab, 10)

        # *** Parameter panel

        # Content label mode
        title = translate("PrefDlg", "Parameter content display mode")
        _add_item(translate("PrefDlg", "None"), "none", _clear=True)
        _add_item(translate("PrefDlg", "Parameters"), "parameters")
        _add_item(translate("PrefDlg", "Keywords"), "keywords")
        _add_item(translate("PrefDlg", "Values"), "values")
        self._addSelectorItem(tab, title, "content_mode", items, icons, values)

        # Show tooltip for 'into' items
        title = translate("PrefDlg", "Show identifier for selector items")
        self._addSwitchItem(tab, title, "show_selector_value")

        # Sort selector items in Parameters panel
        title = translate("PrefDlg", "Sort selector items")
        self._addSwitchItem(tab, title, "sort_selector_values")

        # Show catalogue name for command selector items
        title = translate("PrefDlg",
                          "Show catalogue name in command selector items")
        self._addSwitchItem(tab, title, "show_catalogue_name_in_selectors")

        # External list
        title = translate("PrefDlg", "Edit list-like keywords in sub-panel")
        self._addSwitchItem(tab, title, "external_list")

        # Add spacing
        self._addSpacing(tab, 10)

        # *** Other features

        # Allow delete case used by other case(s)
        title = translate("PrefDlg",
                          "Allow deleting cases used by other case(s)")
        self._addSwitchItem(tab, title, "allow_delete_cases")

        # Add stretch
        self._addSpacing(tab, 0, True)

        # --- end of 'Interface' tab

        # 'Editor' tab
        title = translate("PrefDlg", "Editor")
        tab = self.addTab(title)
        tab.setObjectName("Preferences_Editor")

        # - External editor
        title = translate("PrefDlg", "External editor")
        self._addFileItem(tab, title, "external_editor", self._browseEditor)

        # Add spacing
        self._addSpacing(tab, 10)

        # Use external editor for text stage
        title = translate("PrefDlg",
                          "Use external editor for text stage edition")
        self._addSwitchItem(tab, title, "use_external_editor_stage")

        # Use external editor for data files edition
        title = translate("PrefDlg",
                          "Use external editor for data files edition")
        self._addSwitchItem(tab, title, "use_external_editor_data_file")

        # Use external editor for message files viewing
        title = translate("PrefDlg",
                          "Use external editor for message files viewing")
        self._addSwitchItem(tab, title, "use_external_editor_msg_file")

        # Add spacing
        self._addSpacing(tab, 10)

        # Warn for file's size limit
        title = translate("PrefDlg", "Warn when viewing file larger than")
        self._addIntegerItem(tab, title, "file_size_limit", 1, 100000, " KB")

        try:
            import PyEditorPy
            if hasattr(PyEditorPy, "PyEditor_Widget"):
                # Add spacing
                self._addSpacing(tab, 10)

                # Python editor group
                title = translate("PrefDlg", "Python editor")
                grp = self._addGroupBox(tab, title, obj_name="python_editor")

                # - Font
                title = translate("PrefDlg", "Font")
                self._addFontItem(grp, title, "PyEditor/font")

                # - Enable current line highlight
                title = translate("PrefDlg", "Enable current line highlight")
                self._addSwitchItem(grp, title,
                                    "PyEditor/highlightcurrentline")

                # - Enable text wrapping
                title = translate("PrefDlg", "Enable text wrapping")
                self._addSwitchItem(grp, title, "PyEditor/textwrapping")

                # - Center cursor on scroll
                title = translate("PrefDlg", "Center cursor on scroll")
                self._addSwitchItem(grp, title,
                                    "PyEditor/centercursoronscroll")

                # - Display line numbers area
                title = translate("PrefDlg", "Display line numbers area")
                self._addSwitchItem(grp, title, "PyEditor/linenumberarea")

                # Add spacing
                self._addSpacing(grp, 10)

                # - Completion mode
                title = translate("PrefDlg", "Completion mode")
                _add_item(translate("PrefDlg", "None"), "none", _clear=True)
                _add_item(translate("PrefDlg", "Auto"), "auto")
                _add_item(translate("PrefDlg", "Manual"), "manual")
                _add_item(translate("PrefDlg", "Always"), "always")
                self._addSelectorItem(grp, title, "PyEditor/completionpolicy",
                                      items, icons, values)

                # Add spacing
                self._addSpacing(grp, 10)

                # - Display tab delimiters
                title = translate("PrefDlg", "Display tab delimiters")
                self._addSwitchItem(grp, title, "PyEditor/tabspacevisible")

                # - Tab size
                title = "\t" + translate("PrefDlg", "Tab size")
                self._addIntegerItem(grp, title, "PyEditor/tabsize", 1, 99)

                # Add spacing
                self._addSpacing(grp, 10)

                # - Display vertical edge
                title = translate("PrefDlg", "Display vertical edge")
                self._addSwitchItem(grp, title, "PyEditor/verticaledge")
                connect(self.widgets["PyEditor/verticaledge"].stateChanged,
                        self._updateState)

                # - Number of columns
                title = "\t" + translate("PrefDlg", "Number of columns")
                self._addIntegerItem(grp, title, "PyEditor/numbercolumns", 1,
                                     200)

        except ImportError:
            pass

        # Add stretch
        self._addSpacing(tab, 0, True)

        # --- end of 'Editor' tab

        # 'Confirmations' tab
        title = translate("PrefDlg", "Confirmations")
        tab = self.addTab(title)
        tab.setObjectName("Preferences_Confirmations")

        # - Delete object
        title = translate("PrefDlg", "Delete object")
        self._addSwitchItem(tab, title, "msgbox_delete")

        # - Undefined files
        title = translate("PrefDlg", "Undefined files")
        self._addSwitchItem(tab, title, "msgbox_undefined_files")

        # - Break operation
        title = translate("PrefDlg", "Break current operation")
        self._addSwitchItem(tab, title, "msgbox_break")

        # - Delete case used by other case(s)
        title = translate("PrefDlg", "Delete case used by other case(s)")
        self._addSwitchItem(tab, title, "msgbox_delete_case")

        # - Delete child stages
        title = translate("PrefDlg", "Delete child stages")
        self._addSwitchItem(tab, title, "msgbox_delete_stages")

        # - Convert invalid graphical stage
        title = translate("PrefDlg", "Convert invalid graphical stage")
        self._addSwitchItem(tab, title, "msgbox_convert_invalid_graphic_stage")

        # - Close the parameter panel
        title = translate("PrefDlg",
                          "Close parameter panel with modifications")
        self._addSwitchItem(tab, title, "msgbox_parampanel_close")

        # - Abort the parameter panel
        title = translate("PrefDlg", "Abort command edition")
        self._addSwitchItem(tab, title, "msgbox_parampanel_abort")

        # Add stretch
        self._addSpacing(tab, 0, True)

        # --- end of 'Confirmations' tab

        # 'Catalogs' tab
        title = translate("PrefDlg", "Catalogs")
        tab = self.addTab(title)
        tab.setObjectName("Preferences_Catalogs")

        # User's catalogs
        title = translate("PrefDlg", "User's catalogs")
        grp = self._addGroupBox(tab, title, obj_name="user_catalogs")
        self._addDirItem(grp, "user_catalogs")

        # --- end of 'Catalogs' tab

        title = translate("PrefDlg", "Defaults")
        def_btn = self.addButton(title)
        def_btn.setObjectName("Dialog_defaults")
        connect(def_btn.clicked, self._fromResources)

        self.okButton().setObjectName("Dialog_ok")
        self.cancelButton().setObjectName("Dialog_cancel")

        self._fromResources(False)

        self.tabWidget().setCurrentIndex(PrefDlg.last_tab)
示例#49
0
def parse_weather(lat, lon):
    url = generate_url(lat, lon)
    place = connect(url)
    woeid = place['query']['results']['Result']['woeid']
    return connect(get_weather_url(woeid))
示例#50
0
#!/usr/bin/python

import unittest
import socket
import sys

from common import connect, disconnect, sendCmd

print "infinite stream..."
try:
    sock = connect()
    while 1:
        sock.send("x")
    sock.close()
except:
    print "got exception!"
    print sys.exc_info()

print "login w/2KB username..."
try:
    sock = connect()
    cmd = "LOGIN "
    for i in range(1, 2048):
        cmd += "x"
    cmd += "\r\n"
    sendCmd(sock, cmd)
    data = sock.recv(1024)
    print "data = %s" % data
    sendCmd(sock, "JOIN #foo")
    data = sock.recv(1024)
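
The connect() and sendCmd() helpers imported from common are not shown here (the example itself is truncated). A minimal sketch, assuming a plain TCP test server on localhost; the port is a placeholder.

import socket

def connect(host="127.0.0.1", port=6667):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return sock

def sendCmd(sock, cmd):
    # Commands are line-oriented; append CRLF if the caller did not.
    if not cmd.endswith("\r\n"):
        cmd += "\r\n"
    sock.send(cmd)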
示例#51
0
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}

    data["type"] = os.environ.get('RD_CONFIG_TYPE')
    data["yaml"] = os.environ.get('RD_CONFIG_YAML')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    common.connect()

    try:
        if data["type"] == "Deployment":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.AppsV1Api()
            resp = api_instance.create_namespaced_deployment(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "ConfigMap":
            api_instance = client.CoreV1Api()
            dep = yaml.safe_load(data["yaml"])
            resp = api_instance.create_namespaced_config_map(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "StatefulSet":
            dep = yaml.safe_load(data["yaml"])
            k8s_beta = client.AppsV1Api()
            resp = k8s_beta.create_namespaced_stateful_set(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Service":
            api_instance = client.CoreV1Api()
            dep = yaml.safe_load(data["yaml"])
            resp = api_instance.create_namespaced_service(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Ingress":
            dep = yaml.safe_load(data["yaml"])
            k8s_beta = client.ExtensionsV1beta1Api()
            resp = k8s_beta.create_namespaced_ingress(
                body=dep, namespace=data["namespace"], pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Job":
            api_instance = client.BatchV1Api()
            dep = yaml.safe_load(data["yaml"])
            resp = api_instance.create_namespaced_job(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "StorageClass":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.StorageV1Api()

            resp = api_instance.create_storage_class(body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "PersistentVolumeClaim":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_namespaced_persistent_volume_claim(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "Secret":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_namespaced_secret(
                namespace=data["namespace"], body=dep, pretty="true")

            print(common.parseJson(resp.metadata))

        if data["type"] == "PersistentVolume":
            dep = yaml.safe_load(data["yaml"])
            api_instance = client.CoreV1Api()

            resp = api_instance.create_persistent_volume(body=dep,
                                                         pretty="true")

            print(common.parseJson(resp.status))

        if data["type"] == "BuildConfig":
            dep = yaml.safe_load(data["yaml"])

            k8s_client = config.new_client_from_config()
            openshift_client = DynamicClient(k8s_client)
            v1_bc = openshift_client.resources.get(
                api_version='build.openshift.io/v1', kind='BuildConfig')

            resp = v1_bc.create(body=dep, namespace=data["namespace"])

            print(common.parseJson(resp.metadata))
    except ApiException as e:
        log.error("Exception error creating: %s\n" % e)
        sys.exit(1)
示例#52
0
def main():

    common.connect()
    api = core_v1_api.CoreV1Api()

    name = os.environ.get('RD_NODE_DEFAULT_NAME')
    namespace = os.environ.get('RD_NODE_DEFAULT_NAMESPACE')
    container = os.environ.get('RD_NODE_DEFAULT_CONTAINER_NAME')

    log.debug("--------------------------")
    log.debug("Pod Name:  %s" % name)
    log.debug("Namespace: %s " % namespace)
    log.debug("Container: %s " % container)
    log.debug("--------------------------")

    resp = None
    try:
        resp = api.read_namespaced_pod(name=name, namespace=namespace)
    except ApiException as e:
        if e.status != 404:
            print("Unknown error: %s" % e)
            exit(1)

    if not resp:
        print("Pod %s does not exits." % name)
        exit(1)

    source_file = os.environ.get('RD_FILE_COPY_FILE')
    destination_file = os.environ.get('RD_FILE_COPY_DESTINATION')

    # force print destination to avoid error with node-executor
    print destination_file

    log.debug("Copying file from %s to %s" % (source_file, destination_file))

    destination_path = os.path.dirname(destination_file)
    destination_file_name = os.path.basename(destination_file)

    # Copying file client -> pod
    exec_command = ['tar', 'xvf', '-', '-C', '/']
    resp = stream(api.connect_get_namespaced_pod_exec,
                  name,
                  namespace,
                  command=exec_command,
                  container=container,
                  stderr=True,
                  stdin=True,
                  stdout=True,
                  tty=False,
                  _preload_content=False)

    with TemporaryFile() as tar_buffer:
        with tarfile.open(fileobj=tar_buffer, mode='w') as tar:
            tar.add(name=source_file,
                    arcname=destination_path + "/" + destination_file_name)

        tar_buffer.seek(0)
        commands = []
        commands.append(tar_buffer.read())

        while resp.is_open():
            resp.update(timeout=1)
            if resp.peek_stdout():
                print("STDOUT: %s" % resp.read_stdout())
            if resp.peek_stderr():
                print("STDERR: %s" % resp.read_stderr())
            if commands:
                c = commands.pop(0)
                # print("Running command... %s\n" % c)
                resp.write_stdin(c)
            else:
                break
        resp.close()
示例#53
0
def main():

    if os.environ.get('RD_CONFIG_DEBUG') == 'true':
        log.setLevel(logging.DEBUG)
        log.debug("Log level configured for DEBUG")

    data = {}
    data["name"] = os.environ.get('RD_CONFIG_NAME')
    data["type"] = os.environ.get('RD_CONFIG_TYPE')
    data["namespace"] = os.environ.get('RD_CONFIG_NAMESPACE')

    common.connect()

    try:
        if data["type"] == "Deployment":
            apps_v1 = client.AppsV1Api()
            resp = apps_v1.delete_namespaced_deployment(
                name=data["name"],
                namespace=data["namespace"],
                body=client.V1DeleteOptions(propagation_policy='Foreground',
                                            grace_period_seconds=5),
                pretty="true")

        if data["type"] == "ConfigMap":
            apps_v1 = client.CoreV1Api()
            resp = apps_v1.delete_namespaced_config_map(
                name=data["name"],
                namespace=data["namespace"],
                body=client.V1DeleteOptions(propagation_policy='Foreground',
                                            grace_period_seconds=5),
                pretty="true")

        if data["type"] == "StatefulSet":
            apps_v1 = client.AppsV1Api()
            resp = apps_v1.delete_namespaced_stateful_set(
                name=data["name"],
                namespace=data["namespace"],
                body=client.V1DeleteOptions(propagation_policy='Foreground',
                                            grace_period_seconds=5),
                pretty="true")

        if data["type"] == "Service":
            apps_v1 = client.CoreV1Api()
            resp = apps_v1.delete_namespaced_service(
                namespace=data["namespace"],
                name=data["name"],
                body=client.V1DeleteOptions(propagation_policy='Foreground',
                                            grace_period_seconds=5),
                pretty="true")

        if data["type"] == "Ingress":
            apps_v1 = client.ExtensionsV1beta1Api()
            body = client.V1DeleteOptions()
            resp = apps_v1.delete_namespaced_ingress(
                name=data["name"],
                namespace=data["namespace"],
                body=body,
                pretty="true")

        if data["type"] == "Job":
            api_instance = client.BatchV1Api()

            resp = api_instance.delete_namespaced_job(
                name=data["name"],
                namespace=data["namespace"],
                body=client.V1DeleteOptions(api_version='v1',
                                            kind="DeleteOptions",
                                            propagation_policy="Background"),
                pretty="true")

        if data["type"] == "StorageClass":
            api_instance = client.StorageV1Api()

            resp = api_instance.delete_storage_class(
                name=data["name"],
                body=client.V1DeleteOptions(),
                pretty="true")

        if data["type"] == "PersistentVolumeClaim":
            api_instance = client.CoreV1Api()

            resp = api_instance.delete_namespaced_persistent_volume_claim(
                namespace=data["namespace"],
                body=client.V1DeleteOptions(),
                name=data["name"],
                pretty="true")

        if data["type"] == "Secret":
            api_instance = client.CoreV1Api()

            resp = api_instance.delete_namespaced_secret(
                namespace=data["namespace"],
                name=data["name"],
                body=client.V1DeleteOptions(),
                pretty="true")

        if data["type"] == "PersistentVolume":
            api_instance = client.CoreV1Api()

            resp = api_instance.delete_persistent_volume(
                name=data["name"],
                body=client.V1DeleteOptions(),
                pretty="true")

        print(common.parseJson(resp))

    except ApiException as e:
        log.error("Exception error creating: %s\n" % e)
        sys.exit(1)