def peer_auth(name, live=False, cjdroute=False, yrd=False):
    'add a password for inbound connections'
    if '/' in name:
        yield 'nope'
        exit(1)

    path = os.path.join(YRD_PEERS, name)
    if os.path.exists(path):
        with open(path) as f:
            password = json.load(f)['password']
    else:
        password = utils.generate_key(31)
        info = {'type': 'in', 'name': name, 'password': password}

        if not live:
            with open(path, 'w') as f:
                f.write(json.dumps(info))

    conf = utils.load_conf(CJDROUTE_CONF)
    c = cjdns.connect(password=conf['admin']['password'])
    c.addPassword(name, password)
    c.disconnect()

    publicKey = conf['publicKey']
    port = conf['interfaces']['UDPInterface'][0]['bind'].split(':')[1]

    if (not cjdroute and not yrd) or cjdroute:
        yield utils.to_credstr(utils.get_ip(), port, publicKey, password)

    if not cjdroute and not yrd:
        yield ''

    if (not cjdroute and not yrd) or yrd:
        yield 'yrd peer add namehere %s:%s %s %s' % (utils.get_ip(), port,
                                                     publicKey, password)
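# A minimal sketch of what a generate_key() helper like the one above might do
# -- an assumption, not the actual yrd implementation: build a random
# alphanumeric password of the requested length from a CSPRNG.
import random
import string

def generate_key(length):
    rng = random.SystemRandom()
    alphabet = string.ascii_letters + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))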
def __enter__(self): """ Set up the route reflector clusters when entering context. :return: self. """ # Construct the common environment variables passed in when starting # the route reflector. etcd_auth = "-e ETCD_AUTHORITY=%s:2379" % get_ip() # Create the route reflector hosts, grouped by redundancy. for ii in range(self.num_redundancy_groups): cluster_id = str(IPAddress(0xFF000001 + ii)) redundancy_group = [] for jj in range(self.num_in_redundancy_group): rr = DockerHost('RR.%d.%d' % (ii, jj), start_calico=False) ip = "-e IP=%s" % rr.ip rr.execute("docker load --input /code/routereflector.tar") # Check which type of etcd is being run, then invoke the # suggested curl command to add the RR entry to etcd. # # See https://github.com/projectcalico/calico-bird/tree/feature-ipinip/build_routereflector # for details. if os.getenv("ETCD_SCHEME", None) == "https": # Etcd is running with SSL/TLS, pass the key values rr.execute("docker run --privileged --net=host -d " "--name rr %s " "-e ETCD_AUTHORITY=%s:2379 " "-e ETCD_CA_CERT_FILE=%s " "-e ETCD_CERT_FILE=%s " "-e ETCD_KEY_FILE=%s " "-e ETCD_SCHEME=https " "-v %s/certs:%s/certs " "calico/routereflector" % (ip, ETCD_HOSTNAME_SSL, ETCD_CA, ETCD_CERT, ETCD_KEY, CHECKOUT_DIR, CHECKOUT_DIR)) rr.execute( r'curl --cacert %s --cert %s --key %s ' r'-L https://%s:2379/v2/keys/calico/bgp/v1/rr_v4/%s ' r'-XPUT -d value="{' r'\"ip\":\"%s\",' r'\"cluster_id\":\"%s\"' r'}"' % (ETCD_CA, ETCD_CERT, ETCD_KEY, ETCD_HOSTNAME_SSL, rr.ip, rr.ip, cluster_id)) else: rr.execute("docker run --privileged --net=host -d " "--name rr %s %s " "calico/routereflector" % (etcd_auth, ip)) rr.execute( r'curl -L http://%s:2379/v2/keys/calico/bgp/v1/rr_v4/%s ' r'-XPUT -d value="{' r'\"ip\":\"%s\",' r'\"cluster_id\":\"%s\"' r'}"' % (get_ip(), rr.ip, rr.ip, cluster_id)) # Store the redundancy group. redundancy_group.append(rr) self.redundancy_groups.append(redundancy_group) return self
def __init__(self, name, start_calico=True, dind=True, additional_docker_options="", post_docker_commands=["docker load -i /code/calico-node.tar", "docker load -i /code/busybox.tar"]): self.name = name self.dind = dind self.workloads = set() # This variable is used to assert on destruction that this object was # cleaned up. If not used as a context manager, users of this object self._cleaned = False if dind: log_and_run("docker rm -f %s || true" % self.name) log_and_run("docker run --privileged -tid " "-v %s/docker:/usr/local/bin/docker " "-v %s:/code --name %s " "calico/dind:latest docker daemon --storage-driver=aufs %s" % (CHECKOUT_DIR, CHECKOUT_DIR, self.name, additional_docker_options)) self.ip = log_and_run("docker inspect --format " "'{{.NetworkSettings.Networks.bridge.IPAddress}}' %s" % self.name) # Make sure docker is up docker_ps = partial(self.execute, "docker ps") retry_until_success(docker_ps, ex_class=CalledProcessError, retries=10) for command in post_docker_commands: self.execute(command) else: self.ip = get_ip(v6=False) self.ip6 = get_ip(v6=True) if start_calico: self.start_calico_node()
def r_3login_callback(src):
    """ Callback for third-party login. """
    args = casts(request.args, code=str, callUrl=str)
    if not args.code:
        abort(403, 'missing parameter: code')
    code = args.code
    callUrl = args.callUrl

    is_wxpaper = False  # whether the request comes from wxpaper.ldbcom.com
    is_wxpaper_need_data = False
    if callUrl.find("wxpaper.ldbcom.com") != -1:
        is_wxpaper = True
        if callUrl.find("auth_back") != -1:
            is_wxpaper_need_data = True

    user_id = 0
    if src == 'weibo':
        token_info = weibo_login.get_access_token(code)
        if not token_info:
            abort(403, 'no token')
        user = weibo_login.get_user_info(token_info.access_token,
                                         token_info.uid)
        ip = get_ip()
        user_id = api_user.reg_or_log_weibo(user, ip)
    elif src == 'qq':
        pass  # handled in r_3login_callback_qq instead
    elif src == 'wx_gzh':
        token_info = wx_gzh_login.get_access_token(code)
        if not token_info:
            abort(403, 'no token')
        if not is_wxpaper:
            # normal request
            user = wx_gzh_login.get_user_info(token_info.access_token,
                                              token_info.openid)
            ip = get_ip()
            user_id = api_user.reg_or_log_wx(user, ip)
        else:
            # request coming from wxpaper.ldbcom.com
            user_json = wx_gzh_login.get_user_info(token_info.access_token,
                                                   token_info.openid, True)
            api_user.set_wxpaper_cookie(user_json)
            if is_wxpaper_need_data:
                import urllib.parse
                data_args = urllib.parse.urlencode({'data': user_json})
                return redirect(callUrl + '&' + data_args)
            else:
                return redirect(callUrl)

    if user_id:
        sess_result = api_user.set_sess(user_id)
        if sess_result == -1:
            return redirect('/feedback/?f=user_baned')
        if callUrl:
            return redirect(callUrl)
        else:
            return redirect('/')
    else:
        abort(403, "args err")
def __init__(self, name, start_calico=True, dind=True, additional_docker_options="", post_docker_commands=["docker load -i /code/calico-node.tar", "docker load -i /code/busybox.tar"], calico_node_autodetect_ip=False): self.name = name self.dind = dind self.workloads = set() self.ip = None """ An IP address value to pass to calicoctl as `--ip`. If left as None, no value will be passed, forcing calicoctl to do auto-detection. """ self.ip6 = None """ An IPv6 address value to pass to calicoctl as `--ipv6`. If left as None, no value will be passed. """ # This variable is used to assert on destruction that this object was # cleaned up. If not used as a context manager, users of this object self._cleaned = False docker_args = "--privileged -tid " \ "-v /lib/modules:/lib/modules " \ "-v %s/certs:%s/certs -v %s:/code --name %s" % \ (CHECKOUT_DIR, CHECKOUT_DIR, CHECKOUT_DIR, self.name) if ETCD_SCHEME == "https": docker_args += " --add-host %s:%s" % (ETCD_HOSTNAME_SSL, get_ip()) if dind: log_and_run("docker rm -f %s || true" % self.name) # Pass the certs directory as a volume since the etcd SSL/TLS # environment variables use the full path on the host. # Set iptables=false to prevent iptables error when using dind libnetwork log_and_run("docker run %s " "calico/dind:latest " " --storage-driver=aufs " "--iptables=false " "%s" % (docker_args, additional_docker_options)) self.ip = log_and_run("docker inspect --format " "'{{.NetworkSettings.Networks.bridge.IPAddress}}' %s" % self.name) # Make sure docker is up docker_ps = partial(self.execute, "docker ps") retry_until_success(docker_ps, ex_class=CalledProcessError, retries=10) for command in post_docker_commands: self.execute(command) elif not calico_node_autodetect_ip: # Find the IP so it can be specified as `--ip` when launching node later. self.ip = get_ip(v6=False) self.ip6 = get_ip(v6=True) if start_calico: self.start_calico_node()
def set_ip_address(options):
    utils.log('Setting IP Address')
    wireless_ip = utils.get_ip('10.{0}.{1}.42', options.team_number)
    ethernet_ip = utils.get_ip('10.{0}.{1}.6', options.team_number)
    if options.wireless_mac_address != '[None]':
        configure_ip.switch_to_robot(
            options.wireless_mac_address, wireless_ip)
    if options.ethernet_mac_address != '[None]':
        configure_ip.switch_to_robot(
            options.ethernet_mac_address, ethernet_ip)
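# A hedged sketch of the get_ip() helper these templates imply -- an assumption
# based on the FRC "10.TE.AM.x" addressing convention (team 1234 -> 10.12.34.x),
# not necessarily the project's real implementation.
def get_ip(template, team_number):
    # Split the team number into the two middle octets of the address.
    return template.format(team_number // 100, team_number % 100)

# e.g. get_ip('10.{0}.{1}.2', 1234) -> '10.12.34.2'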
def __init__(self, name, start_calico=True, dind=True, additional_docker_options="", post_docker_commands=[ "docker load -i /code/calico-node.tar", "docker load -i /code/busybox.tar" ]): self.name = name self.dind = dind self.workloads = set() # This variable is used to assert on destruction that this object was # cleaned up. If not used as a context manager, users of this object self._cleaned = False docker_args = "--privileged -tid -v %s/docker:/usr/local/bin/docker " \ "-v %s/certs:%s/certs -v %s:/code --name %s" % \ (CHECKOUT_DIR, CHECKOUT_DIR, CHECKOUT_DIR, CHECKOUT_DIR, self.name) if ETCD_SCHEME == "https": docker_args += " --add-host %s:%s" % (ETCD_HOSTNAME_SSL, get_ip()) if dind: log_and_run("docker rm -f %s || true" % self.name) # Pass the certs directory as a volume since the etcd SSL/TLS # environment variables use the full path on the host. log_and_run("docker run %s " "calico/dind:latest " "docker daemon --storage-driver=aufs %s" % (docker_args, additional_docker_options)) self.ip = log_and_run( "docker inspect --format " "'{{.NetworkSettings.Networks.bridge.IPAddress}}' %s" % self.name) # Make sure docker is up docker_ps = partial(self.execute, "docker ps") retry_until_success(docker_ps, ex_class=CalledProcessError, retries=10) for command in post_docker_commands: self.execute(command) else: self.ip = get_ip(v6=False) self.ip6 = get_ip(v6=True) if start_calico: self.start_calico_node()
def main():
    videoPath = str(sys.argv[1])
    print("Your ip is: {}".format(utils.get_ip()))

    commands = []
    Collector_Receiving_Ports = []
    Collector_Sending_Ports = []

    # Generate needed random free ports
    producerPort = str(utils.get_ip()) + ":" + str(utils.find_free_port())
    for i in range(math.ceil(utils.N / 2)):
        Collector_Receiving_Ports.append(
            str(utils.get_ip()) + ":" + str(utils.find_free_port()))
        Collector_Sending_Ports.append(
            str(utils.get_ip()) + ":" + str(utils.find_free_port()))

    # Send Collector Ports to the second computer
    try:
        ipPortConnection = str(utils.SENDER) + ":" + utils.CONNECTION_PORT
        senderSocket, senderContext = utils.configure_port(
            ipPortConnection, zmq.PUSH, "bind")
        data = pickle.dumps(Collector_Sending_Ports)
        senderSocket.send(data)
        print("Ports data has been sent...")
    except:
        print("Machine 1 (Sender) ERROR IN SENDING CONNECTION DATA, " +
              "Try changing the CONNECTION_PORT in the utils.py file")

    # Generate needed Processes
    # Generate Producer
    commands.append('python Producer.py {} {}'.format(videoPath, producerPort))

    # Generate N Consumers1
    for i in range(utils.N):
        commands.append('python Consumer1.py {} {}'.format(
            producerPort, Collector_Receiving_Ports[math.floor(i / 2)]))

    # Generate N / 2 Collectors
    for i in range(math.ceil(utils.N / 2)):
        commands.append('python Collector.py {} {}'.format(
            Collector_Receiving_Ports[i], Collector_Sending_Ports[i]))

    # Run in parallel
    processes = [Popen(cmd, shell=True) for cmd in commands]
    for p in processes:
        p.wait()

    senderSocket.close()
    senderContext.destroy()
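# A plausible sketch of the find_free_port() helper relied on above -- an
# assumption about utils.py, not its actual contents: bind a throwaway socket
# to port 0 and let the OS pick an unused port. (There is a small race: the
# port could be taken again between closing the socket and reusing the number.)
import socket

def find_free_port():
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(('', 0))            # port 0 asks the kernel for any free port
        return s.getsockname()[1]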
def vote(): """ Endpoint to create a new transaction via our application. """ author = get_ip(request.remote_addr) questionid = request.args.get('id') answer = request.args.get('answer') post_object = { 'type': 'vote', 'content': { 'questionid': questionid, 'author': author + ':5000', 'vote': answer, 'timestamp': time.time() } } # Submit a transaction new_tx_address = "{}/new_transaction".format(CONNECTED_NODE_ADDRESS) requests.post(new_tx_address, json=post_object, headers={'Content-type': 'application/json'}) return redirect('/')
def preProcessArgs(args):
    #
    # First check for the boot server IP. If the option is not specified,
    # then get the current IP on the first NIC of the current OS.
    #
    if 'bootServerIP' not in args or args.bootServerIP is None:
        args.bootServerIP = get_ip()

    #
    # Create the config instance which will be used to generate the SFTP and files.
    #
    args.bootServerListenPort = 8888
    args.cfg = Config(args.config, args.bootServerIP)
    args.valid_tags = args.cfg.getMachineTags()

    #
    # Setup tags param (if --tag used)
    #
    if 'tag' in args and args.tag:
        if ',' in args.tag:
            args.tags = args.tag.split(',')
        elif args.tag == 'all':
            args.tags = args.valid_tags
        else:
            args.tags = [args.tag]

        # Once tags are set, verify they are valid tags.
        for tag in args.tags:
            if tag not in args.valid_tags:
                print("\nERROR: invalid tag specified. Valid values are: %s\n"
                      % ', '.join(args.valid_tags))
                sys.exit(1)

    #
    # Setup hostnames param (if --hostname used)
    #
    if 'hostname' in args and args.hostname:
        if ',' in args.hostname:
            args.hostnames = args.hostname.split(',')
        else:
            args.hostnames = [args.hostname]

    #
    # First retrieve the mentioned devices and also any existing host entries
    # in the dhcp config.
    #
    args.slHelper = SoftLayerHelper()
    try:
        args.adminSubnet = args.slHelper.getSubnet(args.cfg.subnet[Config.SUBNET_ADMIN])
    except ObjectNotFoundException:
        print("\nERROR: Cannot find admin subnet as specified in configuration file. (subnet id=%s)"
              % args.cfg.subnet[Config.SUBNET_ADMIN])
        sys.exit(1)

    # Load the DHCP conf into the args
    loadDhcpConf(args)
    if args.dhcpSharedNet is None or args.dhcpGroup is None:
        print("\nERROR: The dhcpd.conf file does not have the expected structure. "
              "Run the configuration on the bootserver again.")
        sys.exit(1)
    args.hosts = args.dhcpGroup.getChildren(DhcpConfEntry.Type.Host)

    return args
def validate_connection(): data = request.get_json() request_addr = get_ip(request.remote_addr) if not data: return 'Invalid data', 400 # node = data['ipaddress'] node = request_addr + ':' + str(data['port']) if not node: return 'Invalid data', 400 peers.add(node) # add some role with node in here # set permission for node if node not in groups: groups[node] = 'peer' url = 'http://{}:5002/add_node'.format(orderer) response = requests.post(url, json={ 'ipaddress': request_addr, 'port': data['port'] }) if response.status_code >= 400: return 'Error to connect to orderer', 400 return "Success", 201
def start_calico_node_with_docker(self): """ Start calico in a container inside a host by calling docker directly. """ if ETCD_SCHEME == "https": etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL ssl_args = "-e ETCD_CA_CERT_FILE=%s " \ "-e ETCD_CERT_FILE=%s " \ "-e ETCD_KEY_FILE=%s " \ "-v %s/certs:%s/certs " \ % (ETCD_CA, ETCD_CERT, ETCD_KEY, CHECKOUT_DIR, CHECKOUT_DIR) else: etcd_auth = "%s:2379" % get_ip() ssl_args = "" # If the hostname has been overridden on this host, then pass it in # as an environment variable. if self.override_hostname: hostname_args = "-e HOSTNAME=%s" % self.override_hostname else: hostname_args = "" self.execute("docker run -d --net=host --privileged " "--name=calico-node " "%s " "-e IP=%s " "-e ETCD_ENDPOINTS=%s://%s %s " "-v /var/log/calico:/var/log/calico " "-v /var/run/calico:/var/run/calico " "%s" % (hostname_args, self.ip, ETCD_SCHEME, etcd_auth, ssl_args, NODE_CONTAINER_NAME))
def server_connect(self): if self.ip: return False self.ip = str(utils.get_ip()) if (not self.ip) or self.ip == SERVER_IP or self.connectedToServer: print "no connection to the network" return False d = self.connect_end_point() def c(ampProto): return ampProto.callRemote(Connect, ip=self.ip) d.addCallback(c) d.addErrback(err) reactor.callLater(10, d.cancel) def connected_server(args): pid = args['id'] otherPids = args['cur'] mapName = args['map'] # callback for after connection, arg:pid of self and server if pid != -1: # check no error print "my pid is ", pid self.pid = pid self.playerList = otherPids self.map = self._init_map_and_cm(mapName) else: print "Connected server but can't play game, map is full or game already started" d.addCallback(connected_server) d.addErrback(err) reactor.callLater(10, d.cancel) return True
def update_chaincode(): file = os.path.join(__location__, 'chaincode.py') code = '' with codecs.open(file, encoding='utf8', mode='r') as inp: code = inp.read() author = get_ip(request.remote_addr) post_object = { 'type': 'smartcontract', 'content': { 'code': code, 'author': author + ':5000', 'timestamp': time.time() } } # Submit a transaction new_tx_address = "{}/new_transaction".format(CONNECTED_NODE_ADDRESS) requests.post(new_tx_address, json=post_object, headers={'Content-type': 'application/json'}) return redirect('/')
def hola(solicitud): if solicitud.method == 'POST' and solicitud.POST.get('email') and solicitud.POST.get('nombre'): pais = get_pais(solicitud.META) email = solicitud.POST['email'] nombre = solicitud.POST['nombre'] payload = { 'email_address': email, 'apikey': settings.MAILCHIMP_APIKEY, 'merge_vars': { 'FNAME': nombre, 'OPTINIP': get_ip(solicitud.META), 'OPTIN_TIME': time.time(), 'PAIS': pais }, 'id': settings.MAILCHIMP_LISTID, 'email_type': 'html' } r = requests.post('http://us4.api.mailchimp.com/1.3/?method=listSubscribe', simplejson.dumps(payload)) return HttpResponse(r.text) return render_to_response('./hola.html')
def post(self): db = SETTINGS['db'] body = json.loads(self.request.body) item = body['image'] ip = utils.get_ip(self.request) # Get item db_item = yield motor.Op(db.images.find_one, {'unixtime': item['unixtime']}) _id = db_item['_id'] # Save like/unlike on item if ip in db_item['likes']['data']: db_item['likes']['data'].remove(ip) db_item['likes']['count'] -= 1 else: db_item['likes']['data'].append(ip) db_item['likes']['count'] += 1 # Update item update_command = {'$set': {'likes': db_item['likes']}} yield motor.Op(db.images.update, {'_id': _id}, update_command) data = {'likes': db_item['likes']} self.finish(data)
def share(filename, forever): """Share a file in the local network.""" ip = utils.get_ip() # port = get_port() # Bind to port 0. OS assigns a random open port. server = httpserver.HTTPServer((ip, 0), utils.LocalFileHandler) port = server.server_port server.filename = filename zc_info = zeroconf.ServiceInfo( "_http._tcp.local.", "%s._http._tcp.local." % filename, utils.ip_to_bytes(ip), port, 0, 0, {'filename': filename} ) url = "http://" + ip + ":" + str(port) + "/" + urllib.pathname2url(filename) zc_instance = zeroconf.Zeroconf() try: zc_instance.register_service(zc_info) click.echo('Sharing %s at %s' % (filename, url)) if forever: server.serve_forever(poll_interval=0.5) else: server.handle_request() click.echo('File downloaded by peer. Exiting') sys.exit(0) except KeyboardInterrupt: pass
def signup(): error = None form = SignUpForm() if form.validate_on_submit(): user = User( name=form.username.data, email=form.email.data, password=form.password.data, ip=get_ip()) try: db.session.add(user) db.session.commit() login_user(user) flash("You just added user <strong>%s</strong>" % user.name, "success") next = request.args.get("next") if not is_safe_url(next): return flask.abort(400) return redirect(next or url_for("index")) except: flash("That username already exists", "danger") return redirect(url_for("signup")) return render_template( "signup.html", error=error, form=form )
def transform(self):
    user_json = None
    if type(self.node) == Vmess:
        self.client_config = self.load_template('client.json')
        user_json = self.client_config["outbounds"][0]["settings"]["vnext"][0]
        user_json["users"][0]["id"] = self.node.password
        user_json["users"][0]["alterId"] = self.node.alter_id
    elif type(self.node) == Socks:
        self.client_config = self.load_template('client_socks.json')
        user_json = self.client_config["outbounds"][0]["settings"]["servers"][0]
        user_json["users"][0]["user"] = self.node.user_info
        user_json["users"][0]["pass"] = self.node.password
    elif type(self.node) == SS:
        self.client_config = self.load_template('client_ss.json')
        user_json = self.client_config["outbounds"][0]["settings"]["servers"][0]
        user_json["method"] = self.node.method
        user_json["password"] = self.node.password
    elif type(self.node) == Mtproto:
        print("\nThe MTProto protocol only supports Telegram traffic, "
              "so no client config file can be generated!\n")
        exit(-1)

    user_json["port"] = int(self.group.port)

    if type(self.node) != SS:
        self.client_config["outbounds"][0]["streamSettings"] = \
            self.config["inbounds"][self.group.index]["streamSettings"]

    if self.group.tls == 'tls':
        content = self.config_factory.get_data("domain")
        user_json["address"] = str(content)
        self.client_config["outbounds"][0]["streamSettings"]["tlsSettings"] = {}
    else:
        user_json["address"] = str(get_ip())
def get_proxies(self):
    ip = get_ip(3)
    if len(ip) > 0 and ':' in ip:
        proxies = {'http': ip, 'https': ip}
        return proxies
    return None
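# Hypothetical usage of get_proxies() above with the requests library. The
# `crawler` instance name and the target URL are placeholders; the helper is
# only assumed to return a "host:port" pair usable as an HTTP/HTTPS proxy.
import requests

proxies = crawler.get_proxies()   # e.g. {'http': '1.2.3.4:8080', 'https': '1.2.3.4:8080'}
if proxies:
    resp = requests.get('http://example.com', proxies=proxies, timeout=10)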
def announce_new_transaction():
    """
    A function to announce to the network once a transaction has been added.
    Other nodes can simply verify the proof of work and add it to their
    respective chains.
    """
    data = request.get_json()
    if not data:
        return "Invalid data at announce_new_block", 400

    request_addr = get_ip(request.remote_addr)
    offline_node = []
    for peer in peers:
        try:
            if peer.find(request_addr) != -1:
                continue
            url = "http://{}/get_transaction".format(peer)
            requests.post(url, json=data)
        except requests.exceptions.ConnectionError:
            print("Can't connect to node {}. Removing it from the peers list".format(peer))
            offline_node.append(peer)

    for peer in offline_node:
        peers.remove(peer)

    return "Success", 201
def calicoctl(self, command): """ Convenience function for abstracting away calling the calicoctl command. Raises a CommandExecError() if the command returns a non-zero return code. :param command: The calicoctl command line parms as a single string. :return: The output from the command with leading and trailing whitespace removed. """ calicoctl = os.environ.get("CALICOCTL", "/code/dist/calicoctl") if ETCD_SCHEME == "https": etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL else: etcd_auth = "%s:2379" % get_ip() # Export the environment, in case the command has multiple parts, e.g. # use of | or ; # # Pass in all etcd params, the values will be empty if not set anyway calicoctl = "export ETCD_AUTHORITY=%s; " \ "export ETCD_SCHEME=%s; " \ "export ETCD_CA_CERT_FILE=%s; " \ "export ETCD_CERT_FILE=%s; " \ "export ETCD_KEY_FILE=%s; %s" % \ (etcd_auth, ETCD_SCHEME, ETCD_CA, ETCD_CERT, ETCD_KEY, calicoctl) return self.execute(calicoctl + " " + command)
def calicoctl(self, command): """ Convenience function for abstracting away calling the calicoctl command. Raises a CommandExecError() if the command returns a non-zero return code. :param command: The calicoctl command line parms as a single string. :return: The output from the command with leading and trailing whitespace removed. """ if os.environ.get("CALICOCTL"): calicoctl = os.environ["CALICOCTL"] else: if self.dind: calicoctl = "/code/dist/calicoctl" else: calicoctl = "dist/calicoctl" etcd_auth = "ETCD_AUTHORITY=%s:2379" % get_ip() # Export the environment, in case the command has multiple parts, e.g. # use of | or ; calicoctl = "export %s; %s" % (etcd_auth, calicoctl) return self.execute(calicoctl + " " + command)
def start_calico_node_with_docker(self): """ Start calico in a container inside a host by calling docker directly. """ if ETCD_SCHEME == "https": etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL ssl_args = "-e ETCD_CA_CERT_FILE=%s " \ "-e ETCD_CERT_FILE=%s " \ "-e ETCD_KEY_FILE=%s " \ "-v %s/certs:%s/certs " \ % (ETCD_CA, ETCD_CERT, ETCD_KEY, CHECKOUT_DIR, CHECKOUT_DIR) else: etcd_auth = "%s:2379" % get_ip() ssl_args = "" # If the hostname has been overridden on this host, then pass it in # as an environment variable. if self.override_hostname: hostname_args = "-e HOSTNAME=%s" % self.override_hostname else: hostname_args = "" self.execute("docker run -d --net=host --privileged " "--name=calico-node " "%s " "-e IP=%s " "-e ETCD_AUTHORITY=%s -e ETCD_SCHEME=%s %s " "-v /var/log/calico:/var/log/calico " "-v /var/run/calico:/var/run/calico " "calico/node:latest" % (hostname_args, self.ip, etcd_auth, ETCD_SCHEME, ssl_args))
def flush_data(self, data, now, compress=False): # Collect data until it's time to send it out self.data.extend(data) if (not self.data) or (now - self.last_send < self.send_delta): return logging.info('Sending data for processing at {}'.format(now)) with NamedTemporaryFile() as f: if compress: self._write_compressed(f) else: f.writelines(self.data) f.flush() fsync(f.fileno()) remote_path = self.api.send_file(DATA_TYPE, f.name, now, suffix=self.log_type) if remote_path is not None: data = { 'path': remote_path, 'log_type': self.log_type, 'utcoffset': utcoffset(), 'ip': get_ip(), } self.api.send_signal(DATA_TYPE, data) self.checkpoint(now)
def r_3login_callback_qq():
    args = casts(request.args, code=str, callUrl=str)
    if not args.code:
        abort(403, 'missing parameter: code')
    code = args.code
    callUrl = args.callUrl

    user_id = 0
    token_info = qq_login.get_access_token(code)
    if not token_info:
        abort(403, 'no token')
    openid_info = qq_login.get_openid(token_info.access_token)
    if not openid_info:
        abort(403, "no openid")
    user = qq_login.get_user_info(token_info.access_token, openid_info.openid)
    ip = get_ip()
    user_id = api_user.reg_or_log_qq(user, ip, openid_info.openid)

    if user_id:
        sess_result = api_user.set_sess(user_id)
        if sess_result == -1:
            return redirect('/feedback/?f=user_baned')
        if callUrl:
            return redirect(callUrl)
        else:
            return redirect('/')
    else:
        abort(403, "args err")
def start_calico_node_with_docker(self): """ Start calico in a container inside a host by calling docker directly. """ if ETCD_SCHEME == "https": etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL ssl_args = "-e ETCD_CA_CERT_FILE=%s " \ "-e ETCD_CERT_FILE=%s " \ "-e ETCD_KEY_FILE=%s " \ "-v %s/certs:%s/certs " \ % (ETCD_CA, ETCD_CERT, ETCD_KEY, CHECKOUT_DIR, CHECKOUT_DIR) else: etcd_auth = "%s:2379" % get_ip() ssl_args = "" self.execute("docker run -d --net=host --privileged " "--name=calico-node " "-e IP=%s -e ETCD_AUTHORITY=%s " "-e ETCD_SCHEME=%s %s " "-v /var/log/calico:/var/log/calico " "-v /var/run/calico:/var/run/calico " "calico/node:latest" % (self.ip, etcd_auth, ETCD_SCHEME, ssl_args))
def __init__(self, *args, **kwargs): log_path = kwargs.pop('log_path', AUDIT_LOG_PATH) kwargs.setdefault('poll_seconds', POLL_SECONDS) super(ShareWatcher, self).__init__(*args, **kwargs) self.share_dir = getenv('OBSRVBL_SHARE_DIR', DEFAULT_SHARE_DIR) self.share_file = getenv('OBSRVBL_SHARE_FILE', DEFAULT_SHARE_FILE) self.file_path = join(self.share_dir, self.share_file) # If we're in read only mode, track what's in the file currently. # The Samba audit log will be empty, so don't bother tracking it. if getenv('OBSRVBL_SHARE_READ_ONLY', 'false') == 'true': self.contents = self._read_contents() log_path = '/dev/null' self.source_ip = getenv('OBSRVBL_SHARE_IP') # If we're in read-write mode, dynamically generate a file and monitor # the Samba audit log else: try: remove(self.file_path) except (OSError, IOError): pass data = self._generate_contents() with io.open(self.file_path, 'wb') as outfile: outfile.write(data.encode('ascii')) fchmod(outfile.fileno(), 0o666) self.contents = self._read_contents() self.source_ip = get_ip() self.log_node = SambaAuditLogNode( log_type='samba_audit', api=self.api, log_path=log_path )
def post(self, request): serializer = UploadFileSerializer(request.POST, request.FILES) serializer.is_valid(raise_exception=True) filename, full_filename = serializer.save() update_task(pk="upload", filename=full_filename, sourceip=get_ip(request)) return Response({"resourceName": filename, "status": 0})
def execute(self, *args, **kwargs): result_list = super(SubDomainListDecorator, self).execute(*args, **kwargs) result_dict = {} if result_list is not None: for i in result_list: result_dict[i] = utils.get_ip(i) return result_dict
def process_exception(self, request, exception): """ Middleware displays bug information when the request comes from an superuser or INTERNAL_IPS user. """ # print(request.user.is_superuser or get_ip(request) in settings.INTERNAL_IPS) if request.user.is_superuser or get_ip( request) in settings.INTERNAL_IPS: return technical_500_response(request, *sys.exc_info())
def upload_file(request): serializer = UploadFileSerializer(request.POST, request.FILES) serializer.is_valid(raise_exception=True) filename, full_filename = serializer.save() status = update_task(pk="upload", filename=full_filename, sourceip=get_ip(request)) data = {"resourceName": filename, "status": int(not status)} return Response(data)
def null(request):
    widget_null = WidgetNull()
    widget_null.host = request.get_host()
    widget_null.real_ip = get_ip(request)
    widget_null.user_agent = request.META.get('HTTP_USER_AGENT', '')
    widget_null.referer = request.META.get('HTTP_REFERER', '')
    widget_null.save()
    return HttpResponse(u"empty request")
def setUp(self):
    """
    Clean up host containers before every test.
    """
    containers = docker.ps("-qa").split()
    for container in containers:
        delete_container(container)
    self.ip = get_ip()
    self.start_etcd()
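# A common way to implement a host-level get_ip() helper like the one used in
# these test snippets -- a hedged sketch, not necessarily the framework's
# actual version: connect a UDP socket towards a public address (no packets
# are sent) and read back the local address the kernel would route from.
import socket

def get_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))   # UDP connect only selects a route, nothing is transmitted
        return s.getsockname()[0]
    finally:
        s.close()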
def get_jenkins_url(settings, docker):
    containers = docker.containers.list(filters={"name": master.MASTER_IMAGE})
    if len(containers) == 0:
        print("No master container running, can't configure it correctly.")
        return None
    master_container = containers[0]
    return "http://{ip}:8080".format(
        ip=get_ip(master_container, docker, settings))
def search(self): ip = utils.get_ip(request) data = None try: data = Api(self._query, ip).fetch() except Exception as e: return None if data: return data
def transfer_file(options):
    utils.log('Transferring code to robot')
    target_ip = utils.get_ip('10.{0}.{1}.2', options.team_number)
    ftp = ftplib.FTP(target_ip)
    binary = utils.get(
        options.download_target, options.build_target, options.binary_name,
        r'Debug', options.binary_name + r'.out')
    command = 'STOR {0}'.format(binary)
    ftp.storbinary(command, open(binary, 'rb'))
def process_request(self, request):
    key = '_tracking_banned_ips'
    ips = cache.get(key)
    if ips is None:
        # Compile a list of all banned IP addresses
        log.info('Updating banned IPs cache')
        ips = [b.ip_address for b in BannedIP.objects.all()]
        cache.set(key, ips, 3600)

    # Check to see if the current user's IP address is in that list
    if utils.get_ip(request) in ips:
        raise Http404
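# The Django snippets in this collection pass a request object to get_ip(); a
# typical implementation looks like the sketch below (an assumption, not the
# actual utils module): prefer the first X-Forwarded-For hop when behind a
# proxy, otherwise fall back to REMOTE_ADDR.
def get_ip(request):
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        # X-Forwarded-For may hold a comma-separated chain of proxies;
        # the left-most entry is the original client.
        return forwarded.split(',')[0].strip()
    return request.META.get('REMOTE_ADDR')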
def __enter__(self): """ Set up the route reflector clusters when entering context. :return: self. """ # Construct the common environment variables passed in when starting # the route reflector. etcd_auth = "-e ETCD_AUTHORITY=%s:2379" % get_ip() # Create the route reflector hosts, grouped by redundancy. for ii in range(self.num_redundancy_groups): cluster_id = str(IPAddress(0xFF000001 + ii)) redundancy_group = [] for jj in range(self.num_in_redundancy_group): rr = DockerHost('RR.%d.%d' % (ii, jj), start_calico=False) ip = "-e IP=%s" % rr.ip rr.execute( "docker load --input /code/calico_containers/routereflector.tar" ) rr.execute("docker run --privileged --net=host -d " "--name rr %s %s " "calico/routereflector" % (etcd_auth, ip)) # Invoke the suggested curl command to add the RR entry to # etcd. # # See https://github.com/projectcalico/calico-bird/tree/feature-ipinip/build_routereflector # for details. rr.execute( r'curl -L http://%s:2379/v2/keys/calico/bgp/v1/rr_v4/%s ' r'-XPUT -d value="{' r'\"ip\":\"%s\",' r'\"cluster_id\":\"%s\"' r'}"' % (get_ip(), rr.ip, rr.ip, cluster_id)) # Store the redundancy group. redundancy_group.append(rr) self.redundancy_groups.append(redundancy_group) return self
def main():
    outputPath = str(sys.argv[1])
    print("Your ip is: {}".format(utils.get_ip()))

    commands = []
    Collector_Sending_Ports = []

    # Generate needed random free ports
    finalCollectorPort = str(utils.get_ip()) + ":" + \
        str(utils.find_free_port())

    # Receive Collector Ports from the first computer
    # try:
    ipPortConnection = str(utils.SENDER) + ":" + utils.CONNECTION_PORT
    receiverSocket, receiverContext = utils.configure_port(
        ipPortConnection, zmq.PULL, "connect")
    Collector_Sending_Ports = pickle.loads(receiverSocket.recv())
    print("Port has been received from the sender's collector")
    # except:
    #     print("Machine 2 (Receiver) ERROR IN RECEIVING CONNECTION DATA, " +
    #           "Try changing the CONNECTION_PORT in the utils.py file")

    # Generate needed Processes
    # Generate N Consumers2
    for i in range(utils.N):
        commands.append('python Consumer2.py {} {}'.format(
            Collector_Sending_Ports[int(math.floor(i / 2))],
            finalCollectorPort))

    # Generate Final Collector
    commands.append('python Final_Collector.py {} {}'.format(
        outputPath, finalCollectorPort))

    # Run in parallel
    processes = [Popen(cmd, shell=True) for cmd in commands]
    for p in processes:
        p.wait()

    receiverSocket.close()
    receiverContext.destroy()
def submit_textarea(): """ Endpoint to create a new transaction via our application. """ author = get_ip(request.remote_addr) questionid = request.form["questionid"] question = request.form["question"] answersList = request.form["answer"].split('|') opening_time = int(request.form["opening_time"]) * 60 answers = {} for answer in answersList: answers[answer] = [] post_object = { 'type': 'open', 'content': { 'questionid': questionid, 'question': question, 'answers': answers, 'opening_time': opening_time, 'status': 'opening', 'author': author + ':5000', 'timestamp': time.time() } } # Submit a transaction new_tx_address = "{}/new_transaction".format(CONNECTED_NODE_ADDRESS) requests.post(new_tx_address, json=post_object, headers={'Content-type': 'application/json'}) #call smart contract to count down contract_object = { 'type': 'execute', 'content': { 'contract': 'count_down_opening_time', 'arguments': [opening_time, author, questionid, CONNECTED_NODE_ADDRESS], 'author': author + ':5000' } } requests.post(new_tx_address, json=contract_object, headers={'Content-type': 'application/json'}) return redirect('/')
def post_ip(secret):
    secret = get_thing(secret)
    ip = get_ip()
    while True:
        try:
            print("Posting", ip)
            dweepy.dweet_for(secret, {'master_ip': ip})
            break
        except Exception as e:
            print(e)
            print("Reposting!")
            time.sleep(random.randint(3, 40))
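# A hedged sketch of the consumer side of post_ip(): another machine reads the
# advertised address back from dweet.io. get_thing() and the 'master_ip' key
# mirror the snippet above; the call is assumed to follow dweepy's documented
# get_latest_dweet_for() API, which returns a list of dweet dicts.
import dweepy

def fetch_master_ip(secret):
    thing = get_thing(secret)
    latest = dweepy.get_latest_dweet_for(thing)[0]
    return latest['content']['master_ip']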
def run_cni(self, add_or_del, ip=None): adding = (add_or_del == "ADD") workload_pid = self.host.execute( "docker inspect --format '{{.State.Pid}}' %s" % self.name) container_id = self.host.execute( "docker inspect --format '{{.Id}}' %s" % self.name) ip_json = (',"args":{"ip":"%s"}' % ip) if (ip and adding) else '' ip_args = ('CNI_ARGS=IP=%s ' % ip) if (ip and adding) else '' etcd_json = '"etcd_endpoints":"http://%s:2379",' % get_ip() if ETCD_SCHEME == "https": etcd_json = ( '"etcd_endpoints":"https://%s:2379",' % ETCD_HOSTNAME_SSL + '"etcd_ca_cert_file":"%s",' % ETCD_CA + '"etcd_cert_file":"%s",' % ETCD_CERT + '"etcd_key_file":"%s",' % ETCD_KEY) command = ('echo \'{' + '"name":"%s",' % self.network + '"type":"calico-cni-plugin",' + etcd_json + '"ipam":{"type":"calico-ipam-plugin"%s}' % ip_json + '}\' | ' + 'CNI_COMMAND=%s ' % add_or_del + 'CNI_CONTAINERID=%s ' % container_id + 'CNI_NETNS=/proc/%s/ns/net ' % workload_pid + 'CNI_IFNAME=eth0 ' + 'CNI_PATH=/code/dist ') # Optionally add namespace (we want to be able to call CNI without specifying a # namespace to check CNI defaults correctly). if self.namespace: command = command + 'K8S_POD_NAMESPACE=%s ' % self.namespace command = command + ip_args + '/code/dist/calico-cni-plugin' output = self.host.execute(command) if adding: # The CNI plugin writes its logging to stderr and its JSON output - # including the IP address that we need - to stdout, but # unfortunately 'docker exec' combines these into its own stdout, # and that is what 'output' contains here. So we need heuristics # to ignore the logging lines and pick up the JSON. Writing out # the JSON is the last thing that the CNI plugin does, so it should # be robust to ignore everything before a line that begins with a # curly bracket. json_text = "" json_started = False for line in output.split('\n'): if not json_started and line.strip() == "{": json_started = True if json_started: json_text = json_text + line logger.debug("JSON text from Calico CNI = %s", json_text) result = json.loads(json_text) self.ip = result["ip4"]["ip"].split('/')[0]
def run_cni(self, add_or_del, ip=None): adding = (add_or_del == "ADD") workload_pid = self.host.execute( "docker inspect --format '{{.State.Pid}}' %s" % self.name) container_id = self.host.execute( "docker inspect --format '{{.Id}}' %s" % self.name) ip_json = (',"args":{"ip":"%s"}' % ip) if (ip and adding) else '' ip_args = ('CNI_ARGS=IP=%s ' % ip) if (ip and adding) else '' etcd_json = '"etcd_endpoints":"http://%s:2379",' % get_ip() if ETCD_SCHEME == "https": etcd_json = ('"etcd_endpoints":"https://%s:2379",' % ETCD_HOSTNAME_SSL + '"etcd_ca_cert_file":"%s",' % ETCD_CA + '"etcd_cert_file":"%s",' % ETCD_CERT + '"etcd_key_file":"%s",' % ETCD_KEY) command = ('echo \'{' + '"name":"%s",' % self.network + '"type":"calico-cni-plugin",' + etcd_json + '"ipam":{"type":"calico-ipam-plugin"%s}' % ip_json + '}\' | ' + 'CNI_COMMAND=%s ' % add_or_del + 'CNI_CONTAINERID=%s ' % container_id + 'CNI_NETNS=/proc/%s/ns/net ' % workload_pid + 'CNI_IFNAME=eth0 ' + 'CNI_PATH=/code/dist ' + ip_args + '/code/dist/calico-cni-plugin') output = self.host.execute(command) if adding: # The CNI plugin writes its logging to stderr and its JSON output - # including the IP address that we need - to stdout, but # unfortunately 'docker exec' combines these into its own stdout, # and that is what 'output' contains here. So we need heuristics # to ignore the logging lines and pick up the JSON. Writing out # the JSON is the last thing that the CNI plugin does, so it should # be robust to ignore everything before a line that begins with a # curly bracket. json_text = "" json_started = False for line in output.split('\n'): if not json_started and line.strip() == "{": json_started = True if json_started: json_text = json_text + line logger.debug("JSON text from Calico CNI = %s", json_text) result = json.loads(json_text) self.ip = result["ip4"]["ip"].split('/')[0]
def execute(self, command, use_powerstrip=False, **kwargs): """ Pass a command into a host container. Appends some environment variables and then calls out to DockerHost._listen. This uses stdin via 'bash -s' which is more forgiving of bash syntax than 'bash -c'. :param use_powerstrip: When true this sets the DOCKER_HOST env var. This routes through Powerstrip, so that Calico can be informed of the changes. """ etcd_auth = "export ETCD_AUTHORITY=%s:2379;" % get_ip() stdin = ' '.join([etcd_auth, command]) if use_powerstrip: docker_host = "export DOCKER_HOST=localhost:2377;" stdin = ' '.join([docker_host, stdin]) return self._listen(stdin, **kwargs)
def finish_callback(self, result, error): """Finish data retrieving""" if error: raise error elif result: data = { 'images': result, 'client_ip': utils.get_ip(self.request), 'sort_by': self.sort_by, 'sort_criterias': self.SORT_CRITERIAS } self.finish(data) else: # No data, trying to import existing files into database utils.import_files_to_mongo()
def post(self): body = json.loads(self.request.body) ext = body['image'].split('.')[-1] tmp_image_path = os.path.join(SETTINGS['upload_tmp'], body['image']) image_name = '{0}.{1}'.format(time.time(), ext) image_path = os.path.join(SETTINGS['saved_files'], image_name) small_image_path = os.path.join( SETTINGS['saved_files'], 's' + image_name ) os.rename(tmp_image_path, image_path) img = image.Image(filename=image_path) width, height = img.size() # Create cropped and resized thumbnail img.image.crop(width/2-120, height/2-120, width=width/3, height=width/3) img.resize(120) img.save(small_image_path) new_image = { 'src': image_name, 'date': utils.from_unix(image_name[:-4]), 'unixtime': float(image_name[:-4]), 'ip': utils.get_ip(self.request), 'likes': database.images['likes'] } # Inserting item into db self.insert(new_image.copy()) data = {'new_image': new_image} # Send new image data to websocket clients UpdatesHandler.send_updates(data) self.finish(data)
def video(solicitud, video_slug):
    # look up the video by slug (name)
    video = get_object_or_404(Video, slug=video_slug)

    # if this is data from the comment form
    if solicitud.method == 'POST':
        form = VideoComentarioForm(solicitud.POST)
        # validate the data
        if(form.is_valid()):
            ip = get_ip(solicitud.META)
            if not VideoComentarioSpamIP.objects.filter(ip=ip).exists():
                # attach the video to the comment
                comentario = form.save(commit=False)
                comentario.ip = ip
                comentario.video = video
                # spam detection
                api = Akismet(key=settings.AKISMET_API_KEY,
                              blog_url=settings.AKISMET_URL,
                              agent=settings.AKISMET_AGENT)
                if api.verify_key():
                    # in case the user is behind a proxy
                    if not api.comment_check(comment=comentario.content.encode('utf-8'), data={
                        'user_ip': ip,
                        'user_agent': solicitud.META['HTTP_USER_AGENT']
                    }):
                        # save the comment
                        comentario.save()
    else:
        form = VideoComentarioForm()

    comentarios = VideoComentario.objects.filter(video_id=video.id, activado=True).\
        order_by('-fecha', '-id')

    return render_to_response('website/video.html', {
        'video': video,               # data for this particular video
        'form': form,                 # comment form
        'comentarios': comentarios    # comments on the video
    })
def calicoctl(self, command, version=None): """ Convenience function for abstracting away calling the calicoctl command. Raises a CommandExecError() if the command returns a non-zero return code. :param command: The calicoctl command line parms as a single string. :param version: The calicoctl version to use (this is appended to the executable name. It is assumed the Makefile will ensure the required versions are downloaded. :return: The output from the command with leading and trailing whitespace removed. """ if not version: calicoctl = os.environ.get("CALICOCTL", "/code/dist/calicoctl") else: calicoctl = "/code/dist/calicoctl-" + version if ETCD_SCHEME == "https": etcd_auth = "%s:2379" % ETCD_HOSTNAME_SSL else: etcd_auth = "%s:2379" % get_ip() # Export the environment, in case the command has multiple parts, e.g. # use of | or ; # # Pass in all etcd params, the values will be empty if not set anyway calicoctl = "export ETCD_AUTHORITY=%s; " \ "export ETCD_SCHEME=%s; " \ "export ETCD_CA_CERT_FILE=%s; " \ "export ETCD_CERT_FILE=%s; " \ "export ETCD_KEY_FILE=%s; %s" % \ (etcd_auth, ETCD_SCHEME, ETCD_CA, ETCD_CERT, ETCD_KEY, calicoctl) # If the hostname is being overriden, then export the HOSTNAME # environment. if self.override_hostname: calicoctl = "export HOSTNAME=%s; %s" % ( self.override_hostname, calicoctl) return self.execute(calicoctl + " " + command)
def qdou3_video(request):
    title = request.REQUEST.get("title", "")
    if not title:
        return HttpResponse(u"The resource file does not exist, please contact the administrator")

    from django.db import connections
    cursor = connections["p2ps"].cursor()
    sql = "select path from p2p_video where title='%s'" % title
    cursor.execute(sql)
    row = cursor.fetchone()
    video_path = ""
    if row and row[0]:
        video_path = row[0]
    cursor.close()

    if not video_path:
        return HttpResponse(u"The resource file does not exist, please contact the administrator")
    #return HttpResponseRedirect(DOU_RES_HOST + video_path + '/vod_player.swf')

    from utils import get_ip
    ip_address = get_ip(request)
    stream_host = "800li.3qdou.com"
    if is_lan(ip_address):
        stream_host = "192.168.0.252"

    return render(request, "3qdou_video.html",
                  {"stream_host": stream_host, "video_path": video_path})
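# A minimal sketch of the is_lan() check used above -- an assumption, not the
# project's real helper: treat private (RFC 1918) and loopback addresses as
# LAN clients so they get the internal streaming host. Uses the stdlib
# ipaddress module (Python 3).
import ipaddress

def is_lan(ip_address):
    try:
        addr = ipaddress.ip_address(ip_address)
    except ValueError:
        return False
    return addr.is_private or addr.is_loopback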
def _upload(self, now, compress=False): ''' Upload log files. Hopefully just one, but maybe the last one failed so we need to pick it up too... ''' pattern = os.path.join( self.log_dir, '{}.*.archived'.format(SURICATA_LOGNAME) ) for file_path in glob.iglob(pattern): if compress: file_path = _compress_log(file_path) path = self.api.send_file(DATA_TYPE, file_path, now, suffix=self.log_type) data = { 'path': path, 'log_type': self.log_type, 'utcoffset': utcoffset(), 'ip': get_ip(), } self.api.send_signal(DATA_TYPE, data) os.remove(file_path)