Example No. 1
def read_config(args):
    if not os.path.isfile("config.json"):
        print "config.json does not exist! Please copy config-sample.json to config.json and edit to your liking, then run the script."
        sys.exit(1)

    countries = args.country
    if isinstance(countries, basestring):
        countries = [countries]
    countries = [country.lower().strip() for country in countries]

    for country in countries:
        if not os.path.isfile("proxies/proxies-%s.json" % country):
            print "The proxy configuration file proxies-%s.json does not exist! Exiting." % country
            sys.exit(1)
    content = util.get_contents("config.json")
    config = util.json_decode(content)
    if args.ip:
        config["public_ip"] = args.ip
    if args.bind_ip:
        config["bind_ip"] = args.ip
    if args.base_ip:
        config["base_ip"] = args.base_ip
    if args.base_port:
        config["base_port"] = args.base_port

    if not config["public_ip"]:
        try:
            print("Autodetecting public IP address...")
            public_ip = urllib2.urlopen("http://l2.io/ip").read().strip()
            print("Detected public IP as %s. If it's wrong, please cancel the script now and set it in config.json or specify with --ip" % public_ip)
            time.sleep(1)
            config["public_ip"] = public_ip
        except:
            print("Could not detect public IP. Please update the public_ip setting in config.json or specify with --ip.")
            sys.exit(1)

    if args.save:
        util.put_contents('config.json', util.json_encode(config))

    groups = {}
    for country in countries:
        groups.update(util.json_decode(util.get_contents("proxies/proxies-%s.json" % country)))

    if args.only:
        only = set(args.only)
        for item in args.only:
            if item not in groups:
                print "Nonexistent Item: %s, exiting" % item
                sys.exit()
        for item in groups.keys():
            if item not in only:
                del groups[item]
    elif args.skip:
        for item in args.skip:
            groups.pop(item, None)

    config["groups"] = groups

    return config
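
The excerpt never shows how the args namespace is built. Below is a minimal sketch of an argparse setup that would satisfy read_config(), inferred solely from the attributes the function reads (country, ip, bind_ip, base_ip, base_port, save, only, skip); the actual flag names, defaults, and help texts of the real script are not part of this excerpt and are assumptions here.

import argparse

# Hypothetical CLI definition matching the attributes read_config() accesses.
parser = argparse.ArgumentParser()
parser.add_argument("--country", nargs="+", default=["us"],
                    help="country code(s); each needs a proxies/proxies-<country>.json")
parser.add_argument("--ip", help="public IP to use instead of autodetection")
parser.add_argument("--bind-ip", help="IP address the generated services bind to")
parser.add_argument("--base-ip", help="base IP for DNAT mode")
parser.add_argument("--base-port", type=int, help="base port for DNAT mode")
parser.add_argument("--save", action="store_true",
                    help="write the merged settings back to config.json")
parser.add_argument("--only", nargs="*", help="restrict generation to these proxy groups")
parser.add_argument("--skip", nargs="*", help="proxy groups to leave out")

args = parser.parse_args()
config = read_config(args)  # exits if config.json or a proxies file is missing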
Example No. 2
def synchronize():
    """ driver to perform syncrhonize """
    global machine_path, errors_count
                                                 
    try:  
        # the sync target for a given machine comes from the "target" config setting
        machine_path = get_config()["target"]
        
        # if there is no connection to the target then exit
        if not fs_test( machine_path, verbose, get_config ):
            return 0
    
        # get the remote processed files so we can check for deletes
        if os.path.exists(remote_processed_files_name):
            for line in open(remote_processed_files_name):
                remote_processed_files[line.strip()] = True
            
        # start the logger thread
        start_logger()
    
        # fire up the worker threads
        start_workers()
    
        # loop over the paths provided and add them to the work queue
        for d in get_config()["dirs"]:
            sync_directory( d )
    
        # wait for queue to empty
        wait_for_workers()
        
        # drop all our sync markers after any copies complete
        for sync_marker_path,sync_marker_node in pending_markers:
            put_contents(sync_marker_path, sync_marker_node, "synchronized %s" % time.ctime(), dryrun, get_config, verbose)
            
        # write out the processed files
        if not dryrun:
            processed_out = open(remote_processed_files_name,"w")
            for fpath in processed_files.iterkeys():
                print >>processed_out,fpath
            processed_out.close()

        # wait for the logger to finish
        wait_for_logger()
        
    finally:
        stop_workers()
        stop_logger()
    
    if errors_count:
        return 1
    else:
        return 0
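
synchronize() reads only two settings through get_config(): the sync "target" and the list of "dirs" to queue. A minimal sketch of what such a configuration might contain is shown below; the concrete values, and whatever else the real config file holds, are assumptions rather than part of the excerpt.

# Hypothetical configuration as get_config() might return it; only the two keys
# used by synchronize() are shown.
example_config = {
    "target": "remote-host:/srv/mirror",    # where fs_test() and the workers copy to
    "dirs": [                               # local directories handed to sync_directory()
        "/home/alice/documents",
        "/etc",
    ],
}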
Example No. 3
def backup():
    """ driver to perform backup """
    global machine_path, start_time, end_time, backup_path, remote_log_name, local_log_name, errors_count, backedup_files
                                                 
    try:  
        # check for any aborted backups and send an e-mail about them
        check_interrupted()
        
        # the backups for a given machine will be in s3://bucket/bkp/machine_name
        machine_path = bkp_conf.get_config()["bucket"]+"/bkp/"+platform.node()
        
        # get the backed up files for this machine
        backedup_files = get_backedup_files(machine_path)
    
        # the start time for the next backup is in the "next" file in the root for that machine
        # if it is empty or doesn't exist then we start from the beginning of time
        # first thing we do is write the current time to the "next" file for the next backup
        # even if two backups are running concurrently they shouldn't interfere since the files shouldn't overlap
        next = get_contents( machine_path, "next", verbose)
        if next:
            start_time = float(next)
        else:
            start_time = 0.0
        end_time = time.time()
        put_contents( machine_path, "next", end_time, dryrun, bkp_conf.get_config, verbose )
        end_time_t = time.localtime(end_time)
        bkp_conf.get_config()["start_time"] = start_time
        bkp_conf.get_config()["end_time"] = end_time
    
        # the backup root path is  s3://bucket/bkp/machine_name/datetime
        timestamp = "%04d.%02d.%02d.%02d.%02d.%02d"%(end_time_t.tm_year, end_time_t.tm_mon, end_time_t.tm_mday, end_time_t.tm_hour, end_time_t.tm_min, end_time_t.tm_sec)
        backup_path = machine_path + "/" + timestamp
    
        # we log locally and snapshot the log to a remote version in the backup
        # directory
        remote_log_name = backup_path + "/bkp/bkp."+ timestamp + ".log"
        local_log_name = os.path.expanduser("~/.bkp/bkp."+timestamp+".log")
    
        # write config and restart info to the start of the local log
        bkp_conf.save_config(open(local_log_name,"a+"),True)
    
        # start the logger thread
        start_logger( perform_logging )
    
        # fire up the worker threads
        start_workers()
    
        # loop over the paths provided and add them to the work queue
        for d in bkp_conf.get_config()["dirs"]:
            backup_directory( d )
    
        # wait for queue to empty
        wait_for_workers()
    
        # wait for the logger to finish
        wait_for_logger()
        
        # snapshot the log
        if not dryrun:
            fs_mod.fs_put(local_log_name,remote_log_name, verbose=verbose)
    finally:
        stop_workers()
        stop_logger()
    
    # send the log to the logging e-mail
    if errors_count:
        mail_error( None, open(local_log_name,"r"), verbose )
        os.remove(local_log_name)
        return 1
    else:
        mail_log( None, open(local_log_name,"r"), False, verbose )
        os.remove(local_log_name)
        return 0
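
The comments in backup() describe the remote layout as s3://bucket/bkp/machine_name/datetime. The short sketch below reproduces just that path construction with the same timestamp format, so the layout can be seen in isolation; the bucket name is invented.

import platform
import time

# Same timestamp format as backup(); the bucket name is a placeholder.
end_time_t = time.localtime(time.time())
timestamp = "%04d.%02d.%02d.%02d.%02d.%02d" % (
    end_time_t.tm_year, end_time_t.tm_mon, end_time_t.tm_mday,
    end_time_t.tm_hour, end_time_t.tm_min, end_time_t.tm_sec)

machine_path = "s3://my-bucket" + "/bkp/" + platform.node()
backup_path = machine_path + "/" + timestamp
remote_log_name = backup_path + "/bkp/bkp." + timestamp + ".log"
# e.g. s3://my-bucket/bkp/myhost/2016.03.01.14.05.30/bkp/bkp.2016.03.01.14.05.30.log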
Example No. 4
def main(args):
    config = read_config(args)

    print ""

    # Empty the output directory
    shutil.rmtree(args.output_dir, ignore_errors=True)
    # Create the output dir if it doesn't exist
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    # Choose from the available modes
    if args.mode == "sni":
        files = ["haproxy", "dnsmasq", "hosts"]
        dnat = False
    elif args.mode == "dnat":
        files = ["haproxy", "dnsmasq", "hosts", "iptables", "iproute2"]
        dnat = True
    elif args.mode == "local":
        files = ["haproxy", "hosts", "rinetd", "netsh"]
        dnat = True
    else:
        files = args.output
        dnat = args.dnat
        # Work around an argparse bug that appends to the default list rather
        # than replacing it (see the short demonstration after this example).
        if len(files) > 1:
            files = files[1:]

    # Set dnat specific options, make sure required configuration is present
    if dnat:
        print "Please be aware that this is an advanced option. For most cases, pure-sni will be enough."
        if not config["base_ip"]:
            print "Missing base_ip! Update config.json and re-run the script."
            sys.exit(1)
        if not config["base_port"]:
            print "Missing base_port! Update config.json and re-run the script."
            sys.exit(1)
        dnat = True
        print_ips(config)

    for output in set(files):
        if output == "haproxy":
            print_firewall(config, dnat=dnat)
            if config["stats"]["enabled"] and not config["stats"]["password"]:
                print ""
                print "Missing haproxy stats password! Autogenerating one..."
                config["stats"]["password"] = ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(10))
                print("HAProxy stats password is %s, please make a note of it." % config["stats"]["password"])
            print ""
            haproxy_content = generators.generate_haproxy(config, dnat=dnat, test=args.test)
            util.put_contents(args.haproxy_filename, haproxy_content, base_dir=args.output_dir)
            print 'File generated: ' + args.haproxy_filename
        elif output == "dnsmasq":
            print ""
            print '***********************************************************************************************'
            print 'Caution: It\'s possible to run a (recursive) DNS forwarder on your remote server ' + config["public_ip"] + '.'
            print 'If you leave the DNS port wide open to everyone, your server will most likely get terminated'
            print 'sooner or later because of abuse (DDoS amplification attacks).'
            print '***********************************************************************************************'
            print ""

            dnsmasq_content = generators.generate_dnsmasq(config, dnat=dnat, test=args.test)
            util.put_contents(args.dnsmasq_filename, dnsmasq_content, base_dir=args.output_dir)
            print 'File generated: ' + args.dnsmasq_filename
        elif output == "hosts":
            hosts_content = generators.generate_hosts(config, dnat=dnat, test=args.test)
            util.put_contents(args.hosts_filename, hosts_content, base_dir=args.output_dir)
            print 'File generated: ' + args.hosts_filename
        elif not dnat:
            print "Output %s cannot be generated" % output
            continue
        elif output == "iptables":
            iptables_content = generators.generate_iptables(config)
            util.put_contents(args.iptables_filename, iptables_content, base_dir=args.output_dir)
            print 'File generated: ' + args.iptables_filename
        elif output == "iproute2":
            if not config.get("local_subnet", False) or not config.get("local_device", False):
                print 'Output iproute2 cannot be generated: Missing local_subnet and/or local_device in config.json'
            else:
                iproute2_content = generators.generate_iproute2(config)
                util.put_contents(args.iproute2_filename, iproute2_content, base_dir=args.output_dir)
                print 'File generated: ' + args.iproute2_filename
        elif output == "netsh":
            netsh_content = generators.generate_netsh(config)
            util.put_contents(args.netsh_filename, netsh_content, base_dir=args.output_dir)
            print 'File generated: ' + args.netsh_filename
        elif output == "rinetd":
            rinetd_content = generators.generate_rinetd(config)
            util.put_contents(args.rinetd_filename, rinetd_content, base_dir=args.output_dir)
            print 'File generated: ' + args.rinetd_filename
        else:
            print "Output %s cannot be generated" % output