Example #1
    def run(self):
        self._logger.debug('Starting dnotify reading thread')
        (infd, outfd) = os.pipe()
        pid = os.fork()
        if pid == 0:
            os.close(infd)
            misc.dup2(outfd, 1)
            args = ['dnotify', '-m', '-c', '-d', '-a', '-r'] + list(self._dirs) + ['-e', 'printf', '"{}\\0"']
            os.execv('/usr/bin/dnotify', args)
            os._exit(1)  # child: use os._exit(), not the nonexistent os.exit()

        os.close(outfd)
        stdout = os.fdopen(infd)
        c = 'x'
        while c != '':
            curline = ''
            c = stdout.read(1)
            while c != '' and c != '\0':
                curline += c
                c = stdout.read(1)
            if c == '':
                break
            self._logger.debug('Directory "%s" changed' % (curline,))
            self._queue.put(curline)
        (pid, status) = os.waitpid(pid, 0)
        if status is None:
            ecode = 0
        else:
            ecode = os.WEXITSTATUS(status)
        raise DnotifyException("dnotify exited with code %s" % (ecode,))
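Note: os.exit() does not exist in the Python standard library (calling it raises AttributeError). A forked child that failed to exec should call os._exit(), and ordinary error paths should call sys.exit(). As an alternative to the raw fork/exec plumbing above, here is a minimal sketch built on subprocess; watch_dirs and the queue parameter are illustrative names, and it assumes a dnotify binary on $PATH.

import subprocess

def watch_dirs(dirs, queue):
    # -e printf '{}\0' makes dnotify emit each changed directory NUL-terminated
    args = ['dnotify', '-m', '-c', '-d', '-a', '-r'] + list(dirs) + ['-e', 'printf', '{}\\0']
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    buf = b''
    while True:
        byte = proc.stdout.read(1)
        if not byte:                 # EOF: dnotify exited
            break
        if byte == b'\0':            # end of one directory name
            queue.put(buf.decode())
            buf = b''
        else:
            buf += byte
    return proc.wait()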
Example #2
def main():
    logger = get_logger()
    if not PASSWORD:
        logger.error(u'Please write the password in the settings.py')
        sys.exit(2)
    if not DEBUG:
        try:
            with open(PIDPATH, 'r') as f: os.kill(int(f.read()), 9)
        except: pass
        try:
            pid = os.fork()
            if pid > 0: sys.exit(0)
        except OSError, e:
            logger.error("Fork #1 failed: %d (%s)", e.errno, e.strerror)
            sys.exit(1)
        os.setsid()
        os.umask(0)
        try:
            pid = os.fork()
            if pid > 0:
                logger.info("Daemon PID %d" , pid)
                with open(PIDPATH, 'w') as f: f.write(str(pid))
                sys.exit(0)
        except OSError, e:
            logger.error("Daemon started failed: %d (%s)", e.errno, e.strerror)
            sys.exit(1)  # os.exit() does not exist
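The fragment above stops short of the daemon body. For reference, a minimal self-contained double-fork sketch, assuming a POSIX system; the pidfile path is a stand-in, not the PIDPATH from settings.py.

import os
import sys

def daemonize(pidfile='/tmp/mydaemon.pid'):
    if os.fork() > 0:
        sys.exit(0)      # first parent exits
    os.setsid()          # become session leader, detach from the controlling tty
    os.umask(0)
    if os.fork() > 0:
        sys.exit(0)      # second parent exits; the grandchild is the daemon
    with open(pidfile, 'w') as f:
        f.write(str(os.getpid()))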
Example #3
def exec_cmd(cmd, role, taskid, pass_env):
    if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt':
        cmd[0] = './' + cmd[0]
    cmd = ' '.join(cmd)
    env = os.environ.copy()
    for k, v in pass_env.items():
        env[k] = str(v)

    env['DMLC_TASK_ID'] = str(taskid)
    env['DMLC_ROLE'] = role

    ntrial = 0
    while True:
        if os.name == 'nt':
            env['DMLC_NUM_ATTEMPT'] = str(ntrial)
            ret = subprocess.call(cmd, shell=True, env = env)
            if ret == 254:
                ntrial += 1
                continue
        else:
            bash = keepalive % (cmd)
            ret = subprocess.call(bash, shell=True, executable='bash', env = env)
        if ret == 0:
            logging.debug('Thread %d exited with 0', taskid)
            return
        else:
            if os.name == 'nt':
                sys.exit(-1)  # os.exit() does not exist
            else:
                raise Exception('Get nonzero return code=%d' % ret)
Example #4
def acc_to_description_generator(filename1, descriptions,
                                 outfile):
    """opens up a fasta file and makes a tab separeted
    databse of gi to description for use with the diamond
    to tax info program."""
    f_out = open(outfile, "w")
    nr_fasta = open(filename1, "r")
    for line in nr_fasta:
        if line.startswith("#"):
            continue  # comment line
        if not line.strip():
            continue  # if the last line is blank
        if line.startswith(">"):
            line = line.rstrip("\n")
            acc_number = get_accession_number(line)
            annot = get_annotation(line, descriptions)
            try:
                data_formatted = "%s\t%s\n" %(acc_number,
                                              annot)
            except ValueError:
                print ("something failed in getting the descriptions")
                sys.exit(1)  # os.exit() does not exist
            # print (data_formatted)
            f_out.write(data_formatted)
    nr_fasta.close()
    f_out.close()
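If Biopython is available, the header bookkeeping above can be delegated to SeqIO. A sketch, where "nr.fasta" is a placeholder filename:

from Bio import SeqIO

for record in SeqIO.parse("nr.fasta", "fasta"):
    # record.id is the accession, record.description the full header line
    print("%s\t%s" % (record.id, record.description))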
Example #5
def main(argv):
    if not os.environ.get('NANOCUBE_SRC'):
        print("You must set the NANOCUBE_SRC environment variable before " +
                "running this script.")
        sys.exit(1)  # os.exit() does not exist

    # If the NANOCUBE_BIN environment variable is not set, set it to
    # $NANOCUBE_SRC/bin.
    if not os.environ.get('NANOCUBE_BIN'):
        nanobin = os.environ.get('NANOCUBE_SRC') + "/bin"
        print("Setting NANOCUBE_BIN to " + nanobin)
        os.environ['NANOCUBE_BIN'] = nanobin

    (numclusters, filename, sep, timecol, latcol, loncol, catcol, master_port) = getArgs()
    splitcsv(filename, numclusters, sep, timecol)
    executor = concurrent.futures.ThreadPoolExecutor(
        multiprocessing.cpu_count())
    futures = [
        executor.submit(
            hostdmp,
            filename,
            sep,
            timecol,
            latcol,
            loncol,
            catcol,
            i) for i in range(numclusters)]
    concurrent.futures.wait(futures)
    dist_call = "go run cmd/cli/distnano.go -p " + str(master_port) + " "
    for port in ports:
        dist_call += "-a http://localhost:" + str(port) + " "
    # now run distnano.go using the ports as arguments
    os.system(dist_call)
Example #6
    def run(self):
        while 1:
            if host_list.empty():
                break
            time.sleep(1)
            host = str(host_list.get())
            msg("Pentest " + host + " rsync")
            if ping(host, 873):
                msg("Rsync server running", 1)
                rsync_client = "bin\\rsync.exe  "  # rsync client path
                pipe = os.popen(rsync_client + host + "::")
                msg_text = pipe.read()
                pipe.close()
                msg_arr = []
                if msg_text:
                    msg_arr = msg_text.split('\n')
                if len(msg_arr) > 0:
                    msg("%d modules Found" % len(msg_arr), 1)
                    for module in msg_arr:
                        if not module:
                            continue
                        msg("Test %s::%s" % (host, module))
                        module = module.strip()
                        p = Popen(rsync_client + host + "::" + module, stdin=PIPE, stdout=PIPE)
                        req = p.stdout.readline()
                        # the original test len(req and "@ERROR") was always true for a
                        # non-empty line; an anonymous module is one with no @ERROR reply
                        if req and "@ERROR" not in req:
                            msg("Anonymous rsync module:" + module + " found !!!", 1)
                        else:
                            print req
                else:
                    msg("No modules Found", 2)
Example #7
def main():
    """
    Main stuff
    """
    # Catch ^C
    signal.signal(signal.SIGINT, abort)

    # Create our API instance
    api = Api()

    # Get e-mail address
    if 'GMUSIC_ADDRESS' in os.environ:
        email = os.environ['GMUSIC_ADDRESS']
        print "Detected Google Music Address: %s" % email
    else:
        email = ''
        while not validate_email(email):
            email = raw_input('Google Music E-mail Address: ')

    # Read password from user
    password = getpass.getpass()
    login(api, email, password)

    # Setup output directory
    if not os.path.exists(BASEDIR):
        try:
            os.mkdir(BASEDIR)
        except Exception, e:
            print "Unable to create directory %s." % BASEDIR
            sys.exit(1)  # os.exit() does not exist
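On Python 3 the try/except around mkdir collapses to a single call. A sketch, using the BASEDIR from above:

import os

os.makedirs(BASEDIR, exist_ok=True)  # succeeds silently if BASEDIR already exists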
Example #8
    def _test(self, deltas):

        elapsed = time.time() - mydd.startTime
        if (elapsed > mydd.timeleft):
            sys.exit(1)  # os.exit() does not exist; the return below it was unreachable
        
        # Build input
        input = ""
        for (index, delta) in deltas:
            input = input + delta

        tname = str(os.getpid()) + ".test"
        # Write input to `input.c'
        out = open(tname, 'w')
        out.write(input)
        out.close()

        #print self.coerce(deltas)

        result = R.failSame(tname, self.originalOutput, operator.eq, self.js, self.timeout, self.verbose)
        os.remove(tname)

        if result:
            return self.FAIL
        else:
            return self.PASS
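A hedged variant of the temp-file handling above: tempfile picks a unique name instead of relying on the pid (input_text stands in for the assembled deltas).

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.test', delete=False) as out:
    out.write(input_text)
    tname = out.name
# ... hand tname to the test runner, then clean up ...
os.remove(tname)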
Example #9
def startHelper(fromPort, toPort, btControl):
	pingHandler = PingHandler(btControl)
	path = os.path.abspath(os.path.join(os.environ['RESOURCEPATH'], ITORRENT_HELPER))
	for _ in range(2):
		pipe = popen2.Popen3('%s %d %d %d' % (path, fromPort, toPort, pingHandler.address[1]), False, 0)
		text = pipe.fromchild.read(5)
		if text == 'ready':
			pingHandler.start()
			return
		exitCode = os.WEXITSTATUS(pipe.wait())
		# If iTorrent-helper returns with EADDRINUSE, it may be because an old copy of
		# iTorrent-helper is still hanging around. Establishing a network connection to
		# it should be enough to kill it.
		if exitCode != errno.EADDRINUSE:
			break
		try:
			s = socket.socket()
			s.connect(('localhost', fromPort))
			s.close()
		except:
			pass
		# end try
	message = '%s: Port %d cannot be used.' % (os.strerror(exitCode), fromPort)
	if exitCode in [errno.EADDRINUSE, errno.EADDRNOTAVAIL, errno.EACCES]:
		message += '\n(Is another copy of iTorrent already running?)\n' \
			'Use --control_port option to select a different port.'
	# TODO use display here
	print message
	sys.exit(1)  # os.exit() does not exist
Example #10
def doHashDirUpgrade():
	print "Upgrading your XDB structure to use hashed directories for speed...",

	# Do avatars...
	pre = os.path.join(os.path.abspath(config.spooldir), config.jid) + X + "avatars" + X
	if os.path.exists(pre):
		for file in os.listdir(pre):
			if os.path.isfile(pre + file):
				pre2 = pre + file[0:3] + X
				if not os.path.exists(pre2):
					os.makedirs(pre2)
				shutil.move(pre + file, pre2 + file)
	
	# Do spool files...
	pre = os.path.join(os.path.abspath(config.spooldir), config.jid) + X
	if os.path.exists(pre):
		for file in os.listdir(pre):
			if os.path.isfile(pre + file) and file.find(".xml") != -1:  # find() returns -1 (truthy) when absent
				hash = file[0:2]
				pre2 = pre + hash + X
				if not os.path.exists(pre2):
					os.makedirs(pre2)

				if os.path.exists(pre2 + file):
					print "Need to move", file, "to", pre2 + file, "but the latter exists!\nAborting!"
					sys.exit(1)  # os.exit() does not exist
				else:
					shutil.move(pre + file, pre2 + file)

	print "done"
Example #11
def RunPager(globalConfig):
  global active

  if not os.isatty(0) or not os.isatty(1):
    return
  pager = _SelectPager(globalConfig)
  if pager == '' or pager == 'cat':
    return

  # This process turns into the pager; a child it forks will
  # do the real processing and output back to the pager. This
  # is necessary to keep the pager in control of the tty.
  #
  try:
    r, w = os.pipe()
    pid = os.fork()
    if not pid:
      os.dup2(w, 1)
      os.dup2(w, 2)
      os.close(r)
      os.close(w)
      active = True
      return

    os.dup2(r, 0)
    os.close(r)
    os.close(w)

    _BecomePager(pager)
  except Exception:
    print >>sys.stderr, "fatal: cannot start pager '%s'" % pager
    sys.exit(255)  # os.exit() does not exist
Example #12
def start():
	global dev
	global arduino

	if use_arduino:
		# if device is not specified, pick the first one in the list
		if dev == '':
			devlist = subprocess.check_output(['python', '-m', 'serial.tools.list_ports'])
			dev = devlist.split()[0]

		# check or guess if Arduino is connected
		has_arduino = False
		for initial in DEV_INITIALS:
			if dev.startswith(initial):
				has_arduino = True
		
		# didn't find Arduino, so exit the program
		if not has_arduino:
			log.fail('Didn\'t find an Arduino port.')
			sys.exit(1)  # os.exit() does not exist

		log.msg('Connecting to Arduino at %s' % dev)
		arduino = serial.Serial(dev, 115200, timeout=1)
		arduino.read()
		log.ok('Arduino is connected')
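pyserial can enumerate ports directly, which avoids parsing the output of a child process. A sketch of that alternative, assuming pyserial 3.x:

from serial.tools import list_ports

candidates = [p.device for p in list_ports.comports()]
if not candidates:
    raise SystemExit("Didn't find any serial port.")
dev = candidates[0]  # pick the first port, as the code above does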
Example #13
    def handle(self):
        print 'worker <{0}> connected:'.format(self.client_address[0])
        self.task_num = 0
        self.data = self.request.recv(1).strip()
        if self.data == '1':
            if all_task:
                task = all_task.pop()
                data_length = len(json.dumps(task))
                data_length = (5 - len(str(data_length))) * '0' + str(data_length)
                self.request.sendall(data_length)
                self.request.sendall(json.dumps(task))
                print 'dispatched a task to worker <{0}>'.format(self.client_address[0])
            else:
                self.request.sendall('99999')
        elif self.data == '2':
            data = self.request.recv(8).strip()
            if data == 'start':
                print 'worker <{0}> started working'.format(self.client_address[0])
            elif data == 'finish':
                print 'worker <{0}> finished working'.format(self.client_address[0])
        elif self.data == '3':
            self.task_num += 1
            data_length = self.request.recv(5).strip()
            data = json.loads(self.request.recv(int(data_length)).strip())
            print 'got a list of gif links from worker <{0}>, starting download...'.format(self.client_address[0])
            multi_download_fig(gif_dir, data)
            self.task_num -= 1
        else:
            if self.task_num == 0 and all_task == []:
                print 'all tasks are finished'
                os._exit(0)  # os.exit() does not exist; hard-exit the whole server
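The handler above uses a 5-digit, zero-padded length prefix before each JSON payload. A small sketch of that framing as reusable helpers; send_framed and recv_framed are illustrative names, and recv() can return short reads, which the original also ignores:

import json

def send_framed(sock, obj):
    payload = json.dumps(obj).encode()
    sock.sendall(str(len(payload)).zfill(5).encode())  # fixed-width 5-digit header
    sock.sendall(payload)

def recv_framed(sock):
    length = int(sock.recv(5).strip())
    return json.loads(sock.recv(length))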
Example #14
def getInjByTime(time, injections):
    import itertools
    # materialize the iterator: len() and indexing do not work on ifilter objects
    injections = list(itertools.ifilter(lambda a: abs(float(a.get_end()) - time) < 0.1, injections))
    if len(injections) != 1:
        print 'ERROR: Found more than one injection with end time %f\n Please specify input files in order and right number' % (time)
        sys.exit(1)  # os.exit() does not exist
    return injections[0]
Example #15
def cron_stop(arg):
	try:
		stop_flag = 0
		cron_save()
		rfile = open("cronsave.txt", "r")
		while 1:
			line = rfile.readline()
			if line:
				index = line.find(str(arg.stop[0]))
				if index != -1:
					# use sed with a regex to comment the job out:
					# prepend '#' to matching lines that do not already start with '#'
					command = '''crontab -l |sed "/^[^#].*%s/s/^/#/"|crontab''' % str(arg.stop[0])
					os.system(command)
					stop_flag = stop_flag + 1
			else:
				break
		if stop_flag == 0:
			print("         crontab does not have this job, please make sure the job name is right")
		else:
			print("         stop %s successful" % str(arg.stop[0]))
		rfile.close()
		cron_delt()
	except:
		print("		something went wrong")
		rfile.close()
		cron_delt()
		sys.exit()  # os.exit() does not exist
Example #16
def cron_start(arg):
	try:
		start_flag = 0
		cron_save()
		rfile = open("cronsave.txt")
		while 1:
			line = rfile.readline()
			if line:
				index = line.find(str(arg.start[0]))  # this line contains the job name
				if index != -1:
					# use sed with a regex to re-enable the job:
					# strip the leading '#' from commented-out lines matching the job
					command = '''crontab -l |sed "/^#.*%s/s/^#//"|crontab''' % str(arg.start[0])
					os.system(command)
					start_flag = start_flag + 1  # the job name may match several jobs
			else:
				break
		if start_flag == 0:
			print("         crontab does not have this job, please make sure the job name is right")
		else:
			print("         start %s successful" % str(arg.start[0]))
		rfile.close()
		cron_delt()
	except:
		print("		something went wrong")
		rfile.close()
		cron_delt()
		sys.exit()  # os.exit() does not exist
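Both functions shell out through a temp file and sed. A hedged pure-subprocess sketch of the same toggle; set_job_commented is an illustrative name, and it assumes `crontab -l` and `crontab -` behave as on a typical Linux:

import subprocess

def set_job_commented(jobname, commented):
    lines = subprocess.check_output(['crontab', '-l']).decode().splitlines()
    out = []
    for line in lines:
        if jobname in line:
            if commented and not line.startswith('#'):
                line = '#' + line           # disable: prepend '#'
            elif not commented and line.startswith('#'):
                line = line.lstrip('#')     # enable: strip leading '#'
        out.append(line)
    new_tab = '\n'.join(out) + '\n'
    subprocess.run(['crontab', '-'], input=new_tab.encode(), check=True)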
Example #17
def __import_coregen_files():
	xilinx_dir = __os.getenv("XILINX")
	if xilinx_dir == None:
		print("[genrams] FATAL ERROR: XILINX environment variable not set. It must provide the path to the ISE_DS directory in the ISE installation folder (follow Xilinx instructions).")
		__os._exit(-1)  # os.exit() does not exist; _exit() is the closest os-level call

	coregen_path = xilinx_dir + "/ISE/coregen/ip/xilinx/primary/com/xilinx/ip/"
	if not __os.path.isdir(coregen_path):
		print("[genrams]: FATAL ERROR: XILINX environment variable seems to be set incorrectly. It must point to the ISE_DS directory in the ISE installation folder. For example: XILINX=/opt/Xilinx/ISE_DS")
		__os._exit(-1)

	work_dir = __manifest + "/coregen_ip"

	if __os.path.isdir(work_dir):
		return

	print("[genrams] creating workdir " + work_dir)
	__os.mkdir(work_dir)

	print("[genrams] copying ISE files...")
	__import_coregen_module(coregen_path, "blk_mem_gen_v4_1", work_dir)
	__import_coregen_module(coregen_path, "fifo_generator_v6_1", work_dir)
Example #18
    def verify(self, filename, sigfilename=None):
        (stdin, stdout) = os.pipe()
        pid = os.fork()
        if pid == 0:
            os.close(stdin)
            misc.dup2(stdout, 1)
            misc.dup2(stdout, 2)
            args = []
            for keyring in self._keyrings:
                args.append('--keyring')
                args.append(keyring)
            if sigfilename:
                args.append(sigfilename)
            args = [self._gpgv] + args + [filename]
            os.execv(self._gpgv, args)
            os._exit(1)  # child: use os._exit(), not the nonexistent os.exit()
        os.close(stdout)
        output = os.fdopen(stdin).readlines()
        (pid, status) = os.waitpid(pid, 0)
        if not (status is None or (os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0)):
            if os.WIFEXITED(status):
                msg = "gpgv exited with error code %d" % (os.WEXITSTATUS(status),)
            elif os.WIFSTOPPED(status):
                msg = "gpgv stopped unexpectedly with signal %d" % (os.WSTOPSIG(status),)
            elif os.WIFSIGNALED(status):
                msg = "gpgv died with signal %d" % (os.WTERMSIG(status),)
            raise GPGSigVerificationFailure(msg, output)
        return output
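The status decoding in verify() generalizes to a small helper. A sketch using only documented os.* macros:

import os

def describe_status(status):
    if os.WIFEXITED(status):
        return "exited with code %d" % os.WEXITSTATUS(status)
    if os.WIFSIGNALED(status):
        return "died with signal %d" % os.WTERMSIG(status)
    if os.WIFSTOPPED(status):
        return "stopped by signal %d" % os.WSTOPSIG(status)
    return "unrecognized wait status %r" % (status,)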
Example #19
def add_update_network_policy(policy):
    """
    Takes a new network policy from the Kubernetes API and
    creates the corresponding Calico policy configuration.
    """
    # Determine the name for this policy.
    name = "%s.%s" % (policy["metadata"]["namespace"],
                      policy["metadata"]["name"])
    _log.debug("Adding new network policy: %s", name)

    try:
        parser = PolicyParser(policy)
        selector = parser.calculate_pod_selector()
        inbound_rules = parser.calculate_inbound_rules()
    except Exception:
        # If the Policy is malformed, log the error and kill the controller.
        # Kubernetes will restart us.
        _log.exception("Error parsing policy: %s",
                       json.dumps(policy, indent=2))
        sys.exit(1)  # os.exit() does not exist
    else:
        rules = Rules(id=name,
                      inbound_rules=inbound_rules,
                      outbound_rules=[Rule(action="allow")])

        # Create the network policy using the calculated selector and rules.
        client.create_policy(NET_POL_TIER_NAME,
                             name,
                             selector,
                             order=NET_POL_ORDER,
                             rules=rules)
        _log.debug("Updated policy '%s' for NetworkPolicy", name)
Example #20
def run():
    if os.path.exists('simkai.ttf'):
        font = pygame.font.Font('simkai.ttf', 35)
    else:
        font = pygame.font.SysFont("宋体", 35)
    game_info = font.render(u'Snake', True, (0, 255, 255))
    screen.blit(game_info, (260, 40))
    game_info = font.render(u'How to play:', True, (255, 255, 255))
    screen.blit(game_info, (100, 100))
    game_info = font.render(u'1. Use the arrow keys to steer.', True, (255, 255, 255))
    screen.blit(game_info, (130, 150))
    game_info = font.render(u'2. Press ESC to quit.', True, (255, 255, 255))
    screen.blit(game_info, (130, 200))
    game_info = font.render(u'3. Press P to pause/resume.', True, (255, 255, 255))
    screen.blit(game_info, (130, 250))
    game_info = font.render(u'>> Press Enter to start <<', True, (255, 255, 255))
    screen.blit(game_info, (260, 330))
    pygame.display.update()
    while True:
        event = pygame.event.wait()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:    # ESC quits
                exit(0)
            if event.key == pygame.K_RETURN:    # Enter starts the game
                break
Example #21
def decipher(ciphermsg, keytext, mode):
    hexIv=ciphermsg[:32]
    iv=hexIv.decode('hex')
    realCiphermsg=ciphermsg[32:].decode('hex')
    key=keytext.decode('hex')

    if mode==AES.MODE_CBC:
        aesObj=AES.new(key, mode, iv)
    elif mode==AES.MODE_CTR:
        first_value=int(hexIv, base=16)
        ctrObj=Counter.new(128,initial_value=first_value)
        aesObj=AES.new(key, mode, counter=ctrObj)
    else:
        print 'Invalid encryption mode specified!'
        sys.exit(1)  # os.exit() does not exist

    plainmsg=aesObj.decrypt(realCiphermsg)

    if mode==AES.MODE_CBC:                        # CBC padding handling
        lastValue=ord(plainmsg[-1])
        if 1 <= lastValue <= 16:                  # PKCS5 padding check (the original 'or' was always true)
            paddingLength=lastValue
            plainmsg=plainmsg[:-paddingLength]

    print 'Msg deciphered: ' + plainmsg
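Strict PKCS#7 unpadding should verify every padding byte, not just the last one. A Python 3 sketch:

def pkcs7_unpad(data: bytes, block_size: int = 16) -> bytes:
    pad = data[-1]
    if not 1 <= pad <= block_size or data[-pad:] != bytes([pad]) * pad:
        raise ValueError("bad PKCS#7 padding")
    return data[:-pad]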
Example #22
def generate_context_feature(in_data_dir1, in_data_dir2, out_data_dir, dimension1, dimension2):

    if not os.path.exists(out_data_dir):
        os.makedirs(out_data_dir)

    file_paths, filenames = read_file_list(in_data_dir1)

    context_features = None  # was "= numpy", a typo; reassigned per file inside the loop

    i = 0
    for file_path, filename in zip(file_paths, filenames):
        features1, frame_number1 = load_binary_file(file_path, dimension1)
        features2, frame_number2 = load_binary_file(os.path.join(in_data_dir2, filename), dimension2)
        if frame_number1 != frame_number2:
            print(dimension2)
            print(filename)
            print("%s %d != %d" %(filename, frame_number1, frame_number2))
            print(features1.shape, features2.shape)
            sys.exit(1)  # os.exit() does not exist

        context_features = numpy.zeros((frame_number1, dimension1+dimension2))

        context_features[0:frame_number1, 0:dimension1] = features1
        context_features[0:frame_number2, dimension1:dimension1+dimension2] = features2

        print(filename, features1.shape, features2.shape, context_features.shape)

        context_filename = out_data_dir + '/' + filename

        context_features = numpy.asarray(context_features, 'float32')
        fid = open(context_filename, 'wb')
        context_features.tofile(fid)
        fid.close()
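load_binary_file is not shown above. A plausible sketch, under the assumption that the files are raw float32 frames of `dimension` values each:

import numpy

def load_binary_file(path, dimension):
    data = numpy.fromfile(path, dtype='float32')
    frame_number = data.size // dimension
    features = data[:frame_number * dimension].reshape((frame_number, dimension))
    return features, frame_number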
Example #23
def run(name, connector, inventory):
    """Run the bot.

    By default will run with the first available connector.
    """
    connectors = get_connectors()
    if len(connectors) == 0:
        print("ERROR: No available connectors!")
        sys.exit(1)  # os.exit() does not exist

    conn_pkg = None
    for c in connectors:
        if c.name == connector:
            conn_pkg = c.load()

    if conn_pkg is None:
        conn_pkg = connectors[0].load()

    inventories = get_inventories()
    if len(inventories) == 0:
        print("ERROR: No available inventories!")
        sys.exit(1)  # os.exit() does not exist

    for i in inventories:
        if i.name == inventory:
            inventory_pkg = i.load()

    commands = get_commands()
    inventory = inventory_pkg.Inventory()
    bot = Bot(name, inventory, commands)
    connector = conn_pkg.Connector(bot)
    print("Listening for messages...")
    connector.listen()
Example #24
        def do_read_config(self):
                config_result_all={}
                try:
                        #file_fd_open=io.TextIOBase.open(self.config_file_name,"r")
                        file_fd_open=open(self.config_file_name,"r")
                except:
                        print("Error to open the config server list file %s .." % (self.config_file_name))
                        return None

                #while file_fd_open:
                for temp_read_line in file_fd_open:
                        #temp_read_line=file_fd_open.readline()
                        if(temp_read_line is None):
                                return None

                        if(len(temp_read_line) < 10):
                                continue

                        if(temp_read_line[0] == '#'):
                                continue

                        temp_split_array=()
                        temp_split_array=temp_read_line.strip().split('=')
                        if(len(temp_split_array) !=2):
                                print("Error: config line does not split into exactly two '=' fields")
                                sys.exit(1)  # os.exit() does not exist
                                #continue

                        #config_result_all.append(temp_split_array)
                        config_result_all[temp_split_array[0]]=temp_split_array[1]
                        continue

                file_fd_open.close()
                return config_result_all
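The same key=value parsing, compacted with str.partition. A sketch; it keeps the original's rule of skipping comments and short lines, but skips malformed lines instead of exiting:

def do_read_config(self):
    result = {}
    try:
        fh = open(self.config_file_name, "r")
    except IOError:
        print("Error opening the config server list file %s" % self.config_file_name)
        return None
    with fh:
        for raw in fh:
            line = raw.strip()
            if len(line) < 10 or line.startswith('#') or '=' not in line:
                continue
            key, _, value = line.partition('=')
            result[key] = value
    return result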
Example #25
def main(filepath, fileglob):
    sockpath = os.environ['UZBL_SOCKET']
    url = urlparse.urlparse(os.environ['UZBL_URI'])

    if not url.hostname:
        return

    if os.path.isdir(filepath):
        fin = tempfile.TemporaryFile()

        for sett in sorted(glob.glob(os.path.join(filepath, fileglob))):
            with open(sett, 'r') as sfin:
                fin.write(sfin.read())

        fin.seek(0)
    elif os.path.isfile(filepath):
        mode = os.stat(filepath)[stat.ST_MODE]

        if mode & stat.S_IEXEC:
            fin = tempfile.TemporaryFile()
            subprocess.Popen([filepath], stdout=fin).wait()
        else:
            fin = open(filepath, 'r')
    else:
        print('Error: The given path (%s) is neither a directory nor a file' % filepath)

        sys.exit(1)  # os.exit() does not exist

    commands = grep_url(url.hostname, url.path, fin)

    fin.close()

    write_to_socket(commands, sockpath)
Example #26
def release():
    if GITHUB_ACCESS_TOKEN == "":
        print >> sys.stderr, 'GITHUB_ACCESS_TOKEN is not set!'
        sys.exit(1)  # os.exit() does not exist

    auth = HTTPBasicAuth(GITHUB_USER, GITHUB_ACCESS_TOKEN)

    # create a new release
    url = 'https://api.github.com/repos/' + GITHUB_USER+ '/' + GITHUB_REPO +'/releases'
    payload = {
                'tag_name' : 'v2.0.6',
                'name' : 'v2.0.6',
                'body' : 'Is this now the latest?',
                'draft' : False,
                'prerelease' : False
              }
    r = requests.post(url, auth=auth, data=json.dumps(payload))
    releaseId = r.json()['id']
    uploadUrl = r.json()['upload_url'].split('{')[0]

    # upload the asset
    url = uploadUrl + "?name=ProgrammableFun.exe"
    headers = {
        'content-type': 'application/vnd.microsoft.portable-executable'
    }
    r = requests.post(url, auth=auth, headers=headers, data=open('../build/launch4j/ProgrammableFun.exe', 'rb'))
    print r.json()
Example #27
    def exec_cmd(self, cmd, role, pass_env):
        env = os.environ.copy()
        for k, v in pass_env.items():
            env[k] = str(v)

        env['DMLC_ROLE'] = role

        ntrial = 0
        while True:
            if os.name == 'nt':
                env['DMLC_NUM_ATTEMPT'] = str(ntrial)
                ret = subprocess.call(cmd, shell=True, env = env)
                if ret == 254:
                    ntrial += 1
                    continue
            else:
                bash = keepalive % (cmd)
                ret = subprocess.call(bash, shell=True, executable='bash', env = env)
            if ret == 0:
                logging.debug('Thread exited with 0')
                return
            else:
                if os.name == 'nt':
                    sys.exit(-1)  # os.exit() does not exist
                else:
                    raise Exception('Get nonzero return code=%d' % ret)
Example #28
    def handle(self, *args, **options):
        group = ContactGroup.all_groups.get(uuid=options["group_uuid"])

        search = {
            "_source": ["modified_on", "created_on", "uuid", "name"],
            "from": 0,
            "size": 10000,
            "query": {"bool": {"filter": [{"term": {"groups": options["group_uuid"]}}]}},
            "sort": [{"modified_on_mu": {"order": "desc"}}],
        }

        es_response = requests.get(settings.ELASTICSEARCH_URL + "/contacts/_search", json=search).json()
        if "hits" not in es_response:
            print(es_response)
            sys.exit(1)  # os.exit() does not exist

        es_contacts = es_response["hits"]["hits"]
        db_contacts = group.contacts.filter(is_test=False, is_active=True)

        es_map = {}
        for hit in es_contacts:
            es_map[hit["_source"]["uuid"]] = hit

        print("DB count: %d ES Count: %d" % (db_contacts.count(), len(es_contacts)))

        for contact in db_contacts:
            db_uuid = str(contact.uuid)
            if db_uuid not in es_map:
                print("Extra DB hit:", db_uuid, contact.created_on, contact.modified_on, contact.name)
            else:
                del es_map[db_uuid]

        for hit in es_map.values():
            c = hit["_source"]
            print("Extra ES hit:", c["uuid"], c["created_on"], c["modified_on"], c["name"])
Example #29
def main():
    usage = """

%prog [options] <path to source> <path to output>   # <input> -> <output>
"""
    parser = OptionParser(usage = usage)
    # Enables trace logging.  our callback needs 4 parameters, so we just use a
    # lambda function as a wrapper
    parser.add_option("-d", "--debug",
                      help = "print out debugging trace information",
                      action = "callback",
                      callback = lambda w, x, y, z: kelvin.enable_logging())
    (options, args) = parser.parse_args()
    dirname = os.path.dirname(__file__)
    if len(args) != 2:
        parser.error("expected both an input path and an output path")
        os.exit(1)       
    elif len(args) == 2:
        source_dir = args[0]
        dest_dir = args[1]

    maybe_extend_pythonpath(source_dir)
    
    site = kelvin.Site(source_dir, dest_dir)
    site.transform()
Example #30
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument("packname")
	args = parser.parse_args()
	packname = args.packname
	src_root = packname
	dst_root = "out/" + packname

	if not os.path.isdir(packname):
		print "packname not exist or not a dir"
		os.exit(0)

	print "start to packing %s" % packname

	# copy whole src tree to dst_root
	shutil.copytree(src_root, dst_root)

	# zip sample and full and then remove
	os.chdir(dst_root)
	shutil.make_archive("sample", "zip", ".", "sample")
	shutil.make_archive("full", "zip", ".", "full")
	shutil.rmtree("sample")
	shutil.rmtree("full")

	# zip all
	os.chdir("..")
	shutil.make_archive(packname, "zip", ".", packname)

	# copy the final zip out and remove the work dir
	os.chdir("..")
	shutil.move("out/"+packname+".zip", ".")
	shutil.rmtree(dst_root)
Example #31
    # make sure the working directory exists and is writeable
    if not os.path.exists(config.directory):
        sys.stderr.write("Creating directory %s\n" % config.directory)
        os.makedirs(config.directory)
    elif not os.access(config.directory, os.R_OK|os.W_OK|os.X_OK):
        sys.stderr.write("Don't have access to directory %s\n" 
                        % config.directory)
        sys.stderr.write("Will attempt to change permissions\n")
        try:
            os.chmod(config.directory, 
                     stat.S_IREAD|stat.S_IWRITE|stat.S_IEXEC)
        except OSError:
            sys.stderr.write(" Unable to change permissions on directory\n")
            sys.exit(-1)  # os.exit() does not exist

    # We need a model containing just the protein and lipid, so we'll
    # make a psfgen script, run it, use the psf to make the AtomicGroup,
    temporary_psfname = os.path.join(config.directory, config.psfname) 
    psfgen_script = config.generate_psf(True, False, True, True, temporary_psfname)
    psfgen = PSFGen.PSFGen(psfgen_script, config.psfgen_binary)
    psfgen.run()
    system = loos.createSystem(temporary_psfname)

    # If the "protein" is actually a bunch of independent molecules (e.g. a bunch of peptides),
    # we'll want to scale them in x & y to match the expanded box.
    if config.protein is not None and config.protein.scale:
        protein_molecules = config.protein.model.splitByMolecule()
        for m in protein_molecules:
            centroid = m.centroid()
Example #32
def start_daemon(proc, pid_fd=None):
    """
    start_daemon(proc, pid_fd = None) -> exit code
    Start a daemon process. Caller must pass a function, proc(), with
    prototype looks below:
        def proc():
            return <integer>
    Please make sure the return code of proc() follows Win32 system
    error code standard.

    If pid_fd is not None, it should be a valid file object. The
    file object should point to a lock file, so we can write real daemon
    PID there.
    """
    import resource
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD
    # Make sure stdin, stdout and stderr are closed.
    os.close(STDIN_FD)
    os.open(NULL_TO, os.O_RDWR)
    os.dup2(STDIN_FD, STDOUT_FD)
    os.dup2(STDIN_FD, STDERR_FD)

    try:
        pid = os.fork()
    except OSError:
        msg = "start_daemon(): Failed on fork()"
        write_log(STDERR_FD, None, ERROR_PROC_NOT_FOUND, msg)
        raise ICAException(ERROR_PROC_NOT_FOUND, msg)
    if pid == 0:
        os.setsid()
        # TODO Shall we ignore SIGHUP?
        # import signal
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
        # TODO Not sure if it should be added. Ignoring child exit
        # signal can take load off icadaemon. However it looks like it's
        # supported only on Linux.
        # signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        try:
            pid = os.fork()
        except OSError:
            msg = "start_daemon(): Failed on fork(), second time"
            write_log(STDERR_FD, None, ERROR_PROC_NOT_FOUND, msg)
            raise ICAException(ERROR_PROC_NOT_FOUND, msg)

        if pid == 0:
            os.chdir(WORKDIR)
            os.umask(UMASK)
            proc_params = "Daemon is running: pid:%d,uid:%d,euid:%d,gid:%d,egid:%d" % (
                os.getpid(), os.getuid(), os.geteuid(), os.getgid(),
                os.getegid())
            # Use ERR level to make sure the pid information is always
            # shown. In FreeBSD 8.2, the INFO level message does not go
            # to /var/log/message by default.
            syslog.syslog(syslog.LOG_ERR, proc_params)

            if pid_fd is not None:
                if type(pid_fd) is type(0):
                    os.write(pid_fd, "%d\n" % os.getpid())
                    os.fsync(pid_fd)
                else:
                    pid_fd.write("%d\n" % os.getpid())
                    pid_fd.flush()
                    os.fsync(pid_fd.fileno())

            # Start specific function.
            try:
                ret = proc()
            except Exception:
                import StringIO
                import traceback
                ret = ERROR_BAD_ENVIRONMENT
                exception_strfd = StringIO.StringIO()
                traceback.print_exc(file=exception_strfd)
                msg = "FATAL: Daemon got unhandled exception."
                write_log(STDERR_FD, None, ret, msg)
                for each_line in exception_strfd.getvalue().split("\n"):
                    write_log(STDERR_FD, None, ret, each_line)
                msg = "FATAL: Traceback printed. Exit gracefully."
                write_log(STDERR_FD, None, ret, msg)

            if ret != ERROR_SUCCESS:
                msg = "FATAL: proc() exit with code: %d" % ret
                write_log(STDERR_FD, None, ret, msg)
            os._exit(ret)  # os.exit() does not exist; we should do cleanup here.
        else:
            os._exit(ERROR_SUCCESS)
    else:
        os._exit(ERROR_SUCCESS)
Example #33
			if len(cmus_instances) == 0:
				exit(0)
			elif len(cmus_instances) > 1:
				call(["cmus-remote", "--raw", "echo Media key support disabled "
					"because more than one cmus instance is running."])
				exit(1)
			sleep(1)

	def stop(self):
		self.running = False		

def get_cmus_instances():
	pids = []
	try:
		pids = [int(pid) for pid in check_output(
			["pgrep", "-x", "cmus"]).decode().split("\n") if pid != ""]
	except CalledProcessError:
		pass
	return pids

cmus_instances = get_cmus_instances()

if len(cmus_instances) == 1:
	app = KeySocketApp.sharedApplication()
	app.setActivationPolicy_(NSApplicationActivationPolicyProhibited)
	single_instance_checker = SingleInstanceChecker()
	single_instance_checker.start()
	AppHelper.runEventLoop()
else:
	exit(1)
Example #34
            1)  # Project Record Feature class layer
        searchString = arcpy.GetParameterAsText(
            2)  # search string for NASIS project names
        selectedProjects = arcpy.GetParameter(
            3)  # selected project names in a list

        regionOwnership = os.path.join(
            os.path.join(os.path.dirname(sys.argv[0]),
                         "SSURGO_Soil_Survey_Area.gdb"),
            "Region_ownership_WGS84")

        if not arcpy.Exists(regionOwnership):
            AddMsgAndPrint(
                "Region ownership layer was not found under " +
                os.path.dirname(sys.argv[0]), 2)
            sys.exit()  # os.exit() does not exist

        searchString = searchString.replace("&", "?").replace(
            "*", "%"
        )  # replace '&' with '?'; an ampersand in a project name breaks the URL parameter

        # Hardcode NASIS-LIMS Report Webservice
        # Runs SDJR Status Report: Returns projects with similar name
        theURL = r"https://nasis.sc.egov.usda.gov/NasisReportsWebSite/limsreport.aspx?report_name=WEB-Projectmapunits"

        # Get the database and location of the SSURGO mapunit
        theDB = GetWorkspace(
            ssurgoInput)  # more than likely it will return a GDB or FD path
        theDir = os.path.dirname(theDB)

        prjRecordFCpath = arcpy.Describe(prjRecordFC).CatalogPath
Example #35
def main():
    """docstring for __main__"""
    parser = _argparse()
    cf = ConfigParser.ConfigParser(allow_no_value=True)
    cf.read(parser.config)
    excel_file = parser.data
    excel_file = os.path.abspath(excel_file)
    # check config file
    if not cf.has_section('Config'):
        os.exit("Error: your config file is not correct.")

    # read config file
    config_dict = {
        'python': cf.get('Config', 'python'),
        'software_path': cf.get('Config', 'software_path'),
        'database': cf.get('Config', 'database'),
        'project_name': cf.get("Config", "project_name"),
        'RNAfastqR1': cf.get('Config', "RNASeqFastqData").split(",")[0],
        'RNAfastqR2': cf.get('Config', "RNASeqFastqData").split(",")[1],
        'WESfastqR1': cf.get('Config', "WESSeqFastqData").split(",")[0],
        'WESfastqR2': cf.get('Config', "WESSeqFastqData").split(",")[1],

        #'WES_bam_data' : cf.get("Config", "WESBAMData"),
        'excel_file': excel_file,
    }
    #make directories:
    project_dir = os.path.abspath(".") + '/' + config_dict['project_name']
    make_dir(project_dir)
    print("# Create work directory")

    # generate shell
    shell_name = project_dir + '/work.' + config_dict['project_name'] + '.sh'
    #shell_name = shell_dir + '/work.' + config_dict['project_name'] + '.sh'
    # only open a file so use try:finally to close.
    # rawdata_dict = deal_rawdata(parser.data, data_dir)

    with open(shell_name, "w") as f:
        # deal with the standard input File format.
        f.write(
            "#{python} {software_path}/deal_input.STF.py {excel_file} > {project_name}.STF \n"
            .format(**config_dict))
        # replace mutation position in genome file.
        f.write(
            "#{python} {software_path}/replace_genome.novo.py {database}/hg19.fa {project_name}.STF indel.info.txt > genome_replaced.info.xls \n"
            .format(**config_dict))
        f.write("#echo \"finished step:genome_replace\"\n\n")
        f.write("#if [ -f genome_replaced_mutation.fa.fai ]; then\n")
        f.write(
            "#\t rm genome_replaced_mutation.fa.fai\n".format(**config_dict))
        f.write("#fi\n\n")
        f.write(
            "#{software_path}/gffread {database}/hg19.filtered.gtf -g genome_replaced_mutation.fa -x genome_replaced_mutation.cds.fa\n\n"
            .format(**config_dict))
        f.write("#echo \"finished step: gffread\"\n\n")

        # check the replaced fa.
        f.write(
            "#{python} {software_path}/compare_Two_Fasta.py --ref {database}/hg19.fa --treat genome_replaced_mutation.fa --flag long -o genome.VS.replaced.comparing.txt\n"
            .format(**config_dict))
        f.write(
            "#{python} {software_path}/compare_Two_Fasta.py --ref {database}/hg19.cds.fa --treat genome_replaced_mutation.cds.fa --flag short -o genome_CDS.VS.replaced.comparing.txt\n"
            .format(**config_dict))
        f.write(
            "#{python} {software_path}/check_replaced.report.py --ref genome.VS.replaced.comparing.txt --stf {project_name}.STF --flag genome -o genome_mutation.checked.out\n"
            .format(**config_dict))
        f.write(
            "#{python} {software_path}/check_replaced.report.py --ref genome_CDS.VS.replaced.comparing.txt --stf {project_name}.STF --flag CDS -o genome_CDS_mutation.checked.out\n"
            .format(**config_dict))

        f.write("#if [[ ! -s  indel.info.txt ]] ; then \n")  # no insert info.
        # remove N in cds
        f.write(
            "#\t {python} {software_path}/deal.CDS.insert.py -c genome_replaced_mutation.cds.fa --gtf {database}/hg19.filtered.gtf -o genome_replaced_mutation.rmN.cds.fa\n"
            .format(**config_dict))
        f.write("#else \n")  # insert info.
        f.write(
            "#\t {python} {software_path}/deal.CDS.insert.py -c genome_replaced_mutation.cds.fa --gtf {database}/hg19.filtered.gtf -l indel.info.txt -o genome_replaced_mutation.rmN.cds.fa\n"
            .format(**config_dict))
        f.write("#\t echo 'finished deal.cds.insert.py' \n")
        f.write("#fi\n\n")

        f.write(
            "#{software_path}/transeq genome_replaced_mutation.rmN.cds.fa genome_replaced_mutation.rmN.pep.fa\n\n"
            .format(**config_dict))
        f.write(
            "{python} {software_path}/compare_Two_Fasta.py --ref {database}/hg19.pep.fa --treat genome_replaced_mutation.rmN.pep.fa -o genome_pep.VS.replaced.comparing.txt\n"
            .format(**config_dict))
        f.write(
            "#{python} {software_path}/check_replaced.report.py --ref genome_pep.VS.replaced.comparing.txt --stf {project_name}.STF --flag PEP -o genome_PEP_mutation.checked.out\n"
            .format(**config_dict))

        f.write(
            "#{python} {software_path}/check.pep.py {database}/hg19.pep.fa {project_name}.STF genome_replaced_mutation.rmN.pep.fa >{project_name}.firstStep.Mutated.Tandem.Minigenes.xls\n"
            .format(**config_dict))
        f.write("#echo \"finished step: check pep\"\n\n")

        f.write(
            "{python} {software_path}/extract_bed.py {project_name}.firstStep.Mutated.Tandem.Minigenes.xls >{project_name}.snp.checked.bed\n"
            .format(**config_dict))
        f.write("echo \"finished step:extract bed\"\n\n")
        # step 1 deal RNAseq data.
        f.write(
            "# {software_path}/STAR --runThreadN 10 --genomeDir {database}/hg19_star2.7_index  --readFilesCommand zcat --readFilesIn {RNAfastqR1}  {RNAfastqR2}  --outFileNamePrefix {project_name}.RNA.  --outSAMtype BAM SortedByCoordinate \n"
            .format(**config_dict))
        f.write("# echo \"finished step: STAR RNA\"\n\n")
        f.write(
            "# {software_path}/samtools index {project_name}.RNA.Aligned.sortedByCoord.out.bam \n"
            .format(**config_dict))
        f.write("# echo \"finished step: samtools index\"\n\n")
        f.write(
            "{software_path}/samtools mpileup -l {project_name}.snp.checked.bed -f {database}/hg19.fa {project_name}.RNA.Aligned.sortedByCoord.out.bam  >{project_name}.RNAseq.mpileup.txt \n"
            .format(**config_dict))
        f.write("echo \"finished step: samtools mpileup\"\n\n")

        f.write(
            "# {software_path}/featureCounts -O -T 20 -t exon -g gene_id -a {database}/hg19.filtered.gtf -o {project_name}.gene.counts.txt  {project_name}.RNA.Aligned.sortedByCoord.out.bam\n"
            .format(**config_dict))
        f.write(
            "# {python} {software_path}/featureCounts2TPM.py -a {project_name}.gene.counts.txt -o {project_name}.RNAseq.gene.counts.TPM.txt\n"
            .format(**config_dict))
        f.write("# echo \"finished step: calculate TPM\"\n\n")

        # step 2 WES dat
        f.write(
            "# {software_path}/STAR --runThreadN 10 --genomeDir {database}/hg19_star2.7_index  --readFilesCommand zcat --readFilesIn {WESfastqR1}  {WESfastqR2}  --outFileNamePrefix {project_name}.WES.  --outSAMtype BAM SortedByCoordinate \n"
            .format(**config_dict))
        f.write(
            "# {software_path}/samtools index {project_name}.WES.Aligned.sortedByCoord.out.bam \n"
            .format(**config_dict))
        f.write(
            "{software_path}/samtools mpileup -l {project_name}.snp.checked.bed -f {database}/hg19.fa {project_name}.WES.Aligned.sortedByCoord.out.bam  >{project_name}.WES.mpileup.txt \n"
            .format(**config_dict))
        f.write("echo \"finished step: samtools mpileup WES\"\n\n")

        # step3 combin results:
        f.write(
            "{python} {software_path}/combine_WES_RNA_result.py {project_name}.firstStep.Mutated.Tandem.Minigenes.xls {project_name}.RNAseq.gene.counts.TPM.txt {project_name}.RNAseq.mpileup.txt {project_name}.WES.mpileup.txt > {project_name}.TableS4.Mutated.Tandem.Minigenes.V4.xls \n"
            .format(**config_dict))
        f.write("echo \"finished step: combine\"\n\n")
    print("all finished!")
Example #36
#
# Pearce Phanawong
# guess_the_number.py
# Description: This program will ask the user to input a number, then
#              ask again for another number. If the second is the
#              same as the first, the user wins. The user has two
#              attempts, and if both are failed, the user loses.
#
from os import _exit as exit

n = input('Enter number to be guessed between 10 and 100, inclusive:\n')
if n.isnumeric():
    n = int(n)
else:
    print(n, 'is not a number from 10-100, inclusive.')
    exit(0)
if n < 10 or n > 100:
    print(n, 'is not 10-100, inclusive.')
    exit(0)
guess1 = input('First guess:\n')
if not guess1.isnumeric():
    print('Guess is invalid.')
    exit(0)
else:
    guess1 = int(guess1)
if guess1 == n:
    print(guess1, 'is correct! Ending game.')
elif guess1 != n:
    print(guess1, 'is incorrect.')
    guess2 = input('Second guess:\n')
    if not guess2.isnumeric():
Example #37
    def do_quit(self, *args):
        """Exit the program"""
        self.debug("quit", *args)
        exit(0)
Example #38
def main():
    # parse arg and start experiment
    global args
    best_ap = -1.
    best_iter = 0

    args = parser.parse_args()
    args.config_of_data = config.datasets[args.data]
    # args.num_classes = config.datasets[args.data]['num_classes']
    if configure is None:
        args.tensorboard = False
        print(Fore.RED +
              'WARNING: you don\'t have tensorboard_logger installed' +
              Fore.RESET)

    # optionally resume from a checkpoint
    if args.resume:
        if args.resume and os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            old_args = checkpoint['args']
            print('Old args:')
            print(old_args)
            # set args based on checkpoint
            if args.start_iter <= 0:
                args.start_iter = checkpoint['iter'] + 1
            best_iter = args.start_iter - 1
            best_ap = checkpoint['best_ap']
            for name in arch_resume_names:
                if name in vars(args) and name in vars(old_args):
                    setattr(args, name, getattr(old_args, name))
            model = get_model(**vars(args))
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (iter {})"
                  .format(args.resume, checkpoint['iter']))
        else:
            print(
                "=> no checkpoint found at '{}'".format(
                    Fore.RED +
                    args.resume +
                    Fore.RESET),
                file=sys.stderr)
            return
    else:
        # create model
        print("=> creating model '{}'".format(args.arch))
        model = get_model(**vars(args))

    # cudnn.benchmark = True
    cudnn.enabled = False

    # create dataloader
    if args.evaluate == 'val':
        train_loader, val_loader, test_loader = getDataloaders(
            splits=('val',), **vars(args))  # ('val') alone is a plain string, not a tuple
        validate(val_loader, model, best_iter)
        return
    elif args.evaluate == 'test':
        train_loader, val_loader, test_loader = getDataloaders(
            splits=('test',), **vars(args))
        validate(test_loader, model, best_iter)
        return
    else:
        train_loader, val_loader, test_loader = getDataloaders(
            splits=('train', 'val'), **vars(args))

    # define optimizer
    optimizer = get_optimizer(model, args)

    # check if the folder exists
    if os.path.exists(args.save):
        print(Fore.RED + args.save + Fore.RESET
              + ' already exists!', file=sys.stderr)
        if not args.force:
            ans = input('Do you want to overwrite it? [y/N]:')
            if ans not in ('y', 'Y', 'yes', 'Yes'):
                sys.exit(1)  # os.exit() does not exist
        print('remove existing ' + args.save)
        shutil.rmtree(args.save)
    os.makedirs(args.save)
    print('create folder: ' + Fore.GREEN + args.save + Fore.RESET)

    # copy code to save folder
    if args.save.find('debug') < 0:
        shutil.copytree(
            '.',
            os.path.join(
                args.save,
                'src'),
            symlinks=True,
            ignore=shutil.ignore_patterns(
                '*.pyc',
                '__pycache__',
                '*.path.tar',
                '*.pth',
                '*.ipynb',
                '.*',
                'data',
                'save',
                'save_backup'))

    # set up logging
    global log_print, f_log
    f_log = open(os.path.join(args.save, 'log.txt'), 'w')

    def log_print(*args):
        print(*args)
        print(*args, file=f_log)
    log_print('args:')
    log_print(args)
    print('model:', file=f_log)
    print(model, file=f_log, flush=True)
    # log_print('model:')
    # log_print(model)
    # log_print('optimizer:')
    # log_print(vars(optimizer))
    log_print('# of params:',
              str(sum([p.numel() for p in model.parameters()])))
    torch.save(args, os.path.join(args.save, 'args.pth'))
    scores = ['iter\tlr\ttrain_loss\tval_ap']
    if args.tensorboard:
        configure(args.save, flush_secs=5)

    for i in range(args.start_iter, args.niters + 1, args.eval_freq):
        # print('iter {:3d} lr = {:.6e}'.format(i, lr))
        # if args.tensorboard:
        #     log_value('lr', lr, i)

        # train for args.eval_freq iterations
        train_loss = train(train_loader, model, optimizer,
                           i, args.eval_freq)
        i += args.eval_freq - 1

        # evaluate on validation set
        val_ap = validate(val_loader, model, i)

        # save scores to a tsv file, rewrite the whole file to prevent
        # accidental deletion
        lr = optimizer.param_groups[0]['lr']  # lr was undefined: the schedule code above is commented out
        scores.append(('{}\t{}' + '\t{:.4f}' * 2)
                      .format(i, lr, train_loss, val_ap))
        with open(os.path.join(args.save, 'scores.tsv'), 'w') as f:
            print('\n'.join(scores), file=f)

        # remember best err@1 and save checkpoint
        # TODO: change this
        is_best = val_ap > best_ap
        if is_best:
            best_ap = val_ap
            best_iter = i
            print(Fore.GREEN + 'Best val_ap {}'.format(best_ap) +
                  Fore.RESET)
        save_checkpoint({
            'args': args,
            'iter': i,
            'best_iter': best_iter,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_ap': best_ap,
        }, is_best, args.save)
        if not is_best and i - best_iter >= args.patience > 0:
            break
    print('Best val_ap: {:.4f} at iter {}'.format(best_ap, best_iter))
Example #39
def main():
    """docstring for __main__"""
    parser = _argparse()
    cf = configparser.ConfigParser(allow_no_value=True)
    cf.read(parser.config)
    # check config file
    if not cf.has_section('Config'):
        os.exit("Error: your config file is not correct.")

    # read config file
    config_dict = {
        'gatk':
        cf.get('GATK', 'gatk'),
        'java_mem':
        cf.get('GATK', 'java_mem'),
        'known_dbSNP_vcf':
        cf.get('GATK', 'known_dbSNP_vcf'),
        'known_indel_sites_VCF':
        cf.get('GATK', 'known_indel_sites_VCF'),
        'known_hapmap_vcf':
        cf.get('GATK', 'known_hapmap_vcf'),
        'known_omni_vcf':
        cf.get('GATK', 'known_omni_vcf'),
        'known_1000G_phase1_snps_vcf':
        cf.get('GATK', 'known_1000G_phase1_snps_vcf'),

        # Mutect 2
        'af_only_gnomad_vcf':
        cf.get('GATK', 'af_only_gnomad_vcf'),
        'panel_of_normals_vcf':
        cf.get('GATK', 'panel_of_normals_vcf'),
        'variants_for_contamination':
        cf.get('GATK', 'variants_for_contamination'),

        # Funcotator
        'funcotator_dataSources':
        cf.get("GATK", "funcotator_dataSources"),
        'ref_version':
        cf.get("GATK", "ref_version"),

        # 'python' : cf.get('Config', 'python'),
        'bwa':
        cf.get('Config', 'bwa'),
        'bwa_threads':
        cf.get("Config", "bwa_threads"),
        'samtools':
        cf.get('Config', 'samtools'),
        'samtools_threads':
        cf.get('Config', 'samtools_threads'),
        'scripts_dir':
        cf.get('Config', 'scripts_dir'),

        # database
        'ref_fasta':
        cf.get('Config', 'ref_fasta'),
        'ref_fasta_dict':
        cf.get('Config', 'ref_fasta_dict'),
        'cds_fasta':
        cf.get('Config', 'cds_fasta'),
        'intervals':
        cf.get('Config', "intervals"),
        'annotated_intervals':
        cf.get('Config', "annotated_intervals"),
        'project_name':
        cf.get("Config", "project_name"),
        'sample_name':
        cf.get("Config", "sample_name"),
        'tumor_fastq':
        cf.get('Config', "tumor_fastq"),
        'normal_fastq':
        cf.get('Config', "normal_fastq"),
    }
    # make directories:
    project_dir = os.path.abspath(".") + '/' + config_dict['project_name']
    make_dir(project_dir)
    print("# Create work directory")

    # generate shell
    shell_name = project_dir + '/work.' + \
        config_dict['project_name'] + '.WES.sh'
    #shell_name = shell_dir + '/work.' + config_dict['project_name'] + '.sh'
    # only open a file so use try:finally to close.
    # rawdata_dict = deal_rawdata(parser.data, data_dir)

    with open(shell_name, "w") as f:
        # bwa.
        f.write(
            "{bwa} mem -t {bwa_threads} -Y -H \"@HD\\tVN:1.5\\tGO:none\\tSO:coordinate\" -R \"@RG\\tID:{sample_name}_Normal\\tSM:Normal\\tLB:WES\\tPL:ILLumina\\tPU:HVW2MCCXX:6:none\" {ref_fasta} {normal_fastq} | {samtools} view -@ {samtools_threads} -buhS -t {ref_fasta}.fai - | {samtools} sort -@ {samtools_threads} -o {sample_name}.Normal.sortedByCoord.bam - \n"
            .format(**config_dict))
        f.write(
            "{bwa} mem -t {bwa_threads} -Y -H \"@HD\\tVN:1.5\\tGO:none\\tSO:coordinate\" -R \"@RG\\tID:{sample_name}_Tumor\\tSM:Tumor\\tLB:WES\\tPL:ILLumina\\tPU:HVW2MCCXX:6:none\" {ref_fasta} {tumor_fastq} | {samtools} view -@ {samtools_threads} -buhS -t {ref_fasta}.fai - | {samtools} sort -@ {samtools_threads} -o {sample_name}.Tumor.sortedByCoord.bam - \n"
            .format(**config_dict))
        # MarkDuplicates
        f.write("""{gatk} --java-options \"-Xms{java_mem}G\" \\
            MarkDuplicates \\
            --INPUT {sample_name}.Normal.sortedByCoord.bam \\
            --OUTPUT {sample_name}.Normal.duplicates_marked.bam \\
            --METRICS_FILE {sample_name}.Normal.duplicate_metrics \\
            --VALIDATION_STRINGENCY SILENT \\
            --OPTICAL_DUPLICATE_PIXEL_DISTANCE 2500 \\
            --ASSUME_SORT_ORDER \"queryname\" \\
            --CREATE_MD5_FILE true \n""".format(**config_dict))
        f.write("""{gatk} --java-options \"-Xms{java_mem}G\" \\
            MarkDuplicates \\
            --INPUT {sample_name}.Tumor.sortedByCoord.bam \\
            --OUTPUT {sample_name}.Tumor.duplicates_marked.bam \\
            --METRICS_FILE {sample_name}.Tumor.duplicate_metrics \\
            --VALIDATION_STRINGENCY SILENT \\
            --OPTICAL_DUPLICATE_PIXEL_DISTANCE 2500 \\
            --ASSUME_SORT_ORDER \"queryname\" \\
            --CREATE_MD5_FILE true \n""".format(**config_dict))
        f.write("""{gatk} SortSam\\
            --INPUT {sample_name}.Normal.duplicates_marked.bam \\
            --OUTPUT {sample_name}.Normal.duplicates_marked_sorted.bam \\
            --SORT_ORDER "coordinate" \\
            --CREATE_INDEX true \\
            --CREATE_MD5_FILE false \n""".format(**config_dict))
        f.write("""{gatk} SetNmMdAndUqTags \\
            --INPUT  {sample_name}.Normal.duplicates_marked_sorted.bam\\
            --OUTPUT {sample_name}.Normal.duplicates_marked_sorted_fixed.bam \\
            --CREATE_INDEX true \\
            --CREATE_MD5_FILE true \\
            --REFERENCE_SEQUENCE {ref_fasta} \n""".format(**config_dict))
        f.write("""{gatk} SortSam\\
            --INPUT {sample_name}.Tumor.duplicates_marked.bam \\
            --OUTPUT {sample_name}.Tumor.duplicates_marked_sorted.bam \\
            --SORT_ORDER "coordinate" \\
            --CREATE_INDEX true \\
            --CREATE_MD5_FILE false \n""".format(**config_dict))
        f.write("""{gatk} SetNmMdAndUqTags \\
            --INPUT  {sample_name}.Tumor.duplicates_marked_sorted.bam \\
            --OUTPUT {sample_name}.Tumor.duplicates_marked_sorted_fixed.bam \\
            --CREATE_INDEX true \\
            --CREATE_MD5_FILE true \\
            --REFERENCE_SEQUENCE {ref_fasta} \n""".format(**config_dict))
        # BaseRecalibrator
        f.write("""{gatk} --java-options \"-Xms{java_mem}G\" \\
            BaseRecalibrator \\
            -R {ref_fasta} \\
            -I {sample_name}.Normal.duplicates_marked_sorted_fixed.bam \\
            --use-original-qualities \\
            -O {sample_name}.Normal.recal_data.csv \\
            --known-sites {known_dbSNP_vcf} \\
            --known-sites {known_indel_sites_VCF} \\
            --known-sites {known_hapmap_vcf} \\
            --known-sites {known_omni_vcf} \\
            --known-sites {known_1000G_phase1_snps_vcf}\n""".format(
            **config_dict))
        f.write("""{gatk} --java-options \"-Xms{java_mem}G\" \\
            BaseRecalibrator \\
            -R {ref_fasta} \\
            -I {sample_name}.Tumor.duplicates_marked_sorted_fixed.bam \\
            --use-original-qualities \\
            -O {sample_name}.Tumor.recal_data.csv \\
            --known-sites {known_dbSNP_vcf} \\
            --known-sites {known_indel_sites_VCF} \\
            --known-sites {known_hapmap_vcf} \\
            --known-sites {known_omni_vcf} \\
            --known-sites {known_1000G_phase1_snps_vcf}\n""".format(
            **config_dict))
        # ApplyBQSR
        f.write("""{gatk} --java-options \"-Xms{java_mem}G\" \\
            ApplyBQSR \\
            -R {ref_fasta} \\
            -I  {sample_name}.Normal.duplicates_marked_sorted_fixed.bam \\
            -O  {sample_name}.Normal.duplicates_marked_sorted_fixed.BQSR.bam \\
            -bqsr {sample_name}.Normal.recal_data.csv \\
            --create-output-bam-index \\
            --static-quantized-quals 10 \\
            --static-quantized-quals 20 \\
            --static-quantized-quals 30 \\
            --add-output-sam-program-record \\
            --use-original-qualities \\
            --create-output-bam-md5 true\n""".format(**config_dict))
        f.write("""{gatk} --java-options \"-Xms{java_mem}G\"  \\
            ApplyBQSR \\
            -R {ref_fasta} \\
            -I  {sample_name}.Tumor.duplicates_marked_sorted_fixed.bam  \\
            -O  {sample_name}.Tumor.duplicates_marked_sorted_fixed.BQSR.bam \\
            -bqsr {sample_name}.Tumor.recal_data.csv  \\
            --static-quantized-quals 10 \\
            --static-quantized-quals 20 \\
            --static-quantized-quals 30 \\
            --add-output-sam-program-record \\
            --create-output-bam-index \\
            --use-original-qualities \\
            --create-output-bam-md5 true \n""".format(**config_dict))
        # BAM statistics
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\" \\
            DepthOfCoverage \\
            --input {sample_name}.Tumor.duplicates_marked_sorted_fixed.BQSR.bam \\
            -L /cygene/work/00.test/pipeline/WES_cnv_somatic_pair_pipeline/database/whole_exome_illumina_hg38.targets.interval_list \\
            -O {sample_name}.Tumor.bam.DepthOfCoverage.txt \\
            -R {ref_fasta}\n""".format(**config_dict))
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\" \\
            DepthOfCoverage \\
            --input {sample_name}.Normal.duplicates_marked_sorted_fixed.BQSR.bam  \\
            -L /cygene/work/00.test/pipeline/WES_cnv_somatic_pair_pipeline/database/whole_exome_illumina_hg38.targets.interval_list \\
            -O {sample_name}.Normal.bam.DepthOfCoverage.txt \\
            -R {ref_fasta}\n""".format(**config_dict))
        f.write(
            """Rscript {scripts_dir}/DepthOfCoverage.step1.deal.data.R {sample_name}.Tumor.bam.DepthOfCoverage.txt {sample_name}.Normal.bam.DepthOfCoverage.txt \n"""
            .format(**config_dict))
        f.write(
            """Rscript {scripts_dir}/DepthOfCoverage.step2.draw.plot.R {sample_name}.Tumor.coverage.depth.rate.xls {sample_name}.Normal.coverage.depth.rate.xls \n"""
            .format(**config_dict))

        # Mutect2
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            Mutect2 \\
            -R {ref_fasta} \\
            -I {sample_name}.Tumor.duplicates_marked_sorted_fixed.BQSR.bam \\
            -I {sample_name}.Normal.duplicates_marked_sorted_fixed.BQSR.bam \\
            -tumor Tumor \\
            -normal Normal \\
            -germline-resource {af_only_gnomad_vcf} \\
            -pon {panel_of_normals_vcf}   \\
            -O {sample_name}.unfiltered.vcf \\
            --f1r2-tar-gz {sample_name}.f1r2.tar.gz\n""".format(**config_dict))
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            LearnReadOrientationModel \\
            -I {sample_name}.f1r2.tar.gz \\
            -O {sample_name}.read-orientation-model.tar.gz \n""".format(
            **config_dict))
        # contamination.
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            GetPileupSummaries \\
            -I {sample_name}.Tumor.duplicates_marked_sorted_fixed.BQSR.bam  \\
            -V {variants_for_contamination} \\
            -L {variants_for_contamination} \\
            -O {sample_name}.Tumor.pileups.table \n""".format(**config_dict))
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            GetPileupSummaries \\
            -I {sample_name}.Normal.duplicates_marked_sorted_fixed.BQSR.bam  \\
            -V {variants_for_contamination} \\
            -L {variants_for_contamination} \\
            -O {sample_name}.Normal.pileups.table \n""".format(**config_dict))
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            CalculateContamination \\
            -I {sample_name}.Tumor.pileups.table  \\
            -matched {sample_name}.Normal.pileups.table \\
            -O {sample_name}.Tumor.contamination.table \\
            --tumor-segmentation {sample_name}.segments.table \n""".format(
            **config_dict))

        # filter mutation
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            FilterMutectCalls \\
            -R {ref_fasta} \\
            -V {sample_name}.unfiltered.vcf \\
            --contamination-table {sample_name}.Tumor.contamination.table \\
            --tumor-segmentation {sample_name}.segments.table \\
            --ob-priors {sample_name}.read-orientation-model.tar.gz \\
            -stats {sample_name}.unfiltered.vcf.stats \\
            --filtering-stats {sample_name}.filtering.stats\\
            -O {sample_name}.filtered.vcf \n""".format(**config_dict))

        # Functional annotation.
        f.write("""{gatk} --java-options \"-Xmx{java_mem}G\"  \\
            Funcotator \\
            --data-sources-path {funcotator_dataSources} \\
            --ref-version {ref_version} \\
            --output-file-format MAF \\
            --reference {ref_fasta} \\
            --variant {sample_name}.filtered.vcf \\
            --output {sample_name}.variants.funcotated.MAF.xls \\
            --remove-filtered-variants true \\
            --add-output-vcf-command-line false \\
            --annotation-default normal_barcode:Normal \\
            --annotation-default tumor_barcode:Tumor \\
            --annotation-default Center:RootPath \\
            --annotation-default Sequencer:Miseq \n""".format(**config_dict))
        f.write(
            """grep -v \"^#\" {sample_name}.variants.funcotated.MAF.xls > {sample_name}.variants.funcotated.without.header.MAF.xls\n"""
            .format(**config_dict))
        f.write(
            """python3 {scripts_dir}/extract_minigene.py {cds_fasta} {sample_name}.variants.funcotated.without.header.MAF.xls {sample_name}.variants.funcotated.with.minigene.MAF.xls\n"""
            .format(**config_dict))
        #f.write("""less {sample_name}.variants.funcotated.with.minigene.MAF.xls | grep -v "Hugo_Symbol" |awk '{{print$5"\\t"$6-1"\\t"$7}}' > {sample_name}.snp.checked.bed\n""".format(**config_dict))
        f.write(
            """less {sample_name}.variants.funcotated.with.minigene.MAF.xls | grep -v "Hugo_Symbol" |awk '{{if ($5 == "MT") {{print"chrM\\t"$6-1"\\t"$7}}else{{print$5"\\t"$6-1"\\t"$7}}}}' >{sample_name}.snp.checked.bed\n"""
            .format(**config_dict))

        ###########################################################
        # For CNVs
        ###########################################################

        # CNVTasks.PreprocessIntervals
        # AnnotateIntervals
        # FilterIntervals
        # CollectCountsTumor
        f.write("""{gatk} --java-options "-Xmx30G" CollectReadCounts \\
            -L {intervals} \\
            --input {sample_name}.Tumor.duplicates_marked_sorted_fixed.BQSR.bam \\
            --reference {ref_fasta} \\
            --format HDF5 \\
            --interval-merging-rule OVERLAPPING_ONLY \\
            --output {sample_name}.Tumor.counts \n""".format(**config_dict))
        # CollectCountsNormal
        f.write("""{gatk} --java-options "-Xmx30G" CollectReadCounts \\
            -L {intervals} \\
            --input {sample_name}.Normal.duplicates_marked_sorted_fixed.BQSR.bam \\
            --reference {ref_fasta} \\
            --format HDF5 \\
            --interval-merging-rule OVERLAPPING_ONLY \\
            --output {sample_name}.Normal.counts \n""".format(**config_dict))

        # CreateReadCountPanelOfNormals
        f.write(
            """{gatk} --java-options "-Xmx{java_mem}G" CreateReadCountPanelOfNormals \\
            --input {sample_name}.Normal.counts \\
            --minimum-interval-median-percentile 10.0 \\
            --maximum-zeros-in-sample-percentage 5.0 \\
            --maximum-zeros-in-interval-percentage 5.0 \\
            --extreme-sample-median-percentile 2.5 \\
            --do-impute-zeros true \\
            --extreme-outlier-truncation-percentile 0.1 \\
            --number-of-eigensamples 20 \\
            --maximum-chunk-size 16777216 \\
            --annotated-intervals {annotated_intervals} \\
            --output {sample_name}.pon_entity_id.hdf5\n""".format(
                **config_dict))

        # CollectAllelicCountsTumor
        f.write(
            """{gatk} --java-options "-Xmx{java_mem}G" CollectAllelicCounts \\
            -L {intervals} \\
            --input {sample_name}.Tumor.duplicates_marked_sorted_fixed.BQSR.bam \\
            --reference {ref_fasta} \\
            --minimum-base-quality 20 \\
            --output {sample_name}.Tumor.allelic_counts_file.txt \n""".format(
                **config_dict))
        # CollectAllelicCountsNormal
        f.write(
            """{gatk} --java-options "-Xmx{java_mem}G" CollectAllelicCounts \\
            -L {intervals} \\
            --input {sample_name}.Normal.duplicates_marked_sorted_fixed.BQSR.bam \\
            --reference {ref_fasta} \\
            --minimum-base-quality 20 \\
            --output {sample_name}.Normal.allelic_counts_file.txt \n""".format(
                **config_dict))

        # DenoiseReadCountsTumor
        # (input is the HDF5 counts file written by CollectReadCounts above;
        # the GATK option is lowercase --input)
        f.write("""{gatk} --java-options "-Xms{java_mem}G" DenoiseReadCounts \\
            --input {sample_name}.Tumor.counts \\
            --count-panel-of-normals {sample_name}.pon_entity_id.hdf5 \\
            --number-of-eigensamples 20 \\
            --standardized-copy-ratios {sample_name}.Tumor.standardizedCR.tsv \\
            --denoised-copy-ratios {sample_name}.Tumor.denoisedCR.tsv \n""".format(**config_dict))
        # DenoiseReadCountsNormal
        f.write("""{gatk} --java-options "-Xms{java_mem}G" DenoiseReadCounts \\
            --input {sample_name}.Normal.counts \\
            --count-panel-of-normals {sample_name}.pon_entity_id.hdf5 \\
            --number-of-eigensamples 20 \\
            --standardized-copy-ratios {sample_name}.Normal.standardizedCR.tsv \\
            --denoised-copy-ratios {sample_name}.Normal.denoisedCR.tsv \n""".format(**config_dict))

        # ModelSegmentsTumor
        f.write("""{gatk} --java-options "-Xmx30G" ModelSegments \\
            --denoised-copy-ratios {sample_name}.Tumor.denoisedCR.tsv \\
            --allelic-counts {sample_name}.Tumor.allelic_counts_file.txt  \\
            --normal-allelic-counts  {sample_name}.Normal.allelic_counts_file.txt \\
            --minimum-total-allele-count-case 10 \\
            --minimum-total-allele-count-normal 30 \\
            --genotyping-homozygous-log-ratio-threshold "-10.0" \\
            --genotyping-base-error-rate 0.05 \\
            --maximum-number-of-segments-per-chromosome 1000 \\
            --kernel-variance-copy-ratio  0.0 \\
            --kernel-variance-allele-fraction 0.025 \\
            --kernel-scaling-allele-fraction 1.0 \\
            --kernel-approximation-dimension 100 \\
            --window-size 256 \\
            --number-of-changepoints-penalty-factor 1.0 \\
            --minor-allele-fraction-prior-alpha 25.0 \\
            --number-of-samples-copy-ratio 100 \\
            --number-of-burn-in-samples-copy-ratio 50 \\
            --number-of-samples-allele-fraction 100 \\
            --number-of-burn-in-samples-allele-fraction 50 \\
            --smoothing-credible-interval-threshold-copy-ratio 2.0\\
            --smoothing-credible-interval-threshold-allele-fraction 2.0 \\
            --maximum-number-of-smoothing-iterations 10 \\
            --number-of-smoothing-iterations-per-fit 0 \\
            --output ModelSegmentsTumor \\
            --output-prefix {sample_name}.Tumor \n""".format(**config_dict))
        # ModelSegmentsNormal
        f.write("""{gatk} --java-options "-Xmx30G" ModelSegments \\
            --denoised-copy-ratios {sample_name}.Normal.denoisedCR.tsv \\
            --allelic-counts {sample_name}.Normal.allelic_counts_file.txt \\
            --minimum-total-allele-count-case 5 \\
            --minimum-total-allele-count-normal 30 \\
            --genotyping-homozygous-log-ratio-threshold -10.0 \\
            --genotyping-base-error-rate 0.05 \\
            --maximum-number-of-segments-per-chromosome 1000 \\
            --kernel-variance-copy-ratio 0.0 \\
            --kernel-variance-allele-fraction 0.025 \\
            --kernel-scaling-allele-fraction 1.0 \\
            --kernel-approximation-dimension 100 \\
            --window-size 256 \\
            --number-of-changepoints-penalty-factor 1.0\\
            --minor-allele-fraction-prior-alpha 25.0 \\
            --number-of-samples-copy-ratio 100 \\
            --number-of-burn-in-samples-copy-ratio 50 \\
            --number-of-samples-allele-fraction 100 \\
            --number-of-burn-in-samples-allele-fraction 50 \\
            --smoothing-credible-interval-threshold-copy-ratio 2.0 \\
            --smoothing-credible-interval-threshold-allele-fraction 2.0 \\
            --maximum-number-of-smoothing-iterations 10 \\
            --number-of-smoothing-iterations-per-fit 0 \\
            --output ModelSegmentsNormal \\
            --output-prefix {sample_name}.Normal \n""".format(**config_dict))

        # CallCopyRatioSegmentsTumor
        # (cr.seg is the copy-ratio segments file written by ModelSegments
        # above; tumor and normal outputs are named distinctly so the second
        # call does not overwrite the first)
        f.write("""{gatk} --java-options "-Xmx30G" CallCopyRatioSegments \\
            --input ModelSegmentsTumor/{sample_name}.Tumor.cr.seg \\
            --neutral-segment-copy-ratio-lower-bound 0.9 \\
            --neutral-segment-copy-ratio-upper-bound 1.1 \\
            --outlier-neutral-segment-copy-ratio-z-score-threshold 2.0 \\
            --calling-copy-ratio-z-score-threshold 2.0 \\
            --output {sample_name}.Tumor.called.seg \n""".format(**config_dict))
        # CallCopyRatioSegmentsNormal
        f.write("""{gatk} --java-options "-Xmx30G" CallCopyRatioSegments \\
            --input ModelSegmentsNormal/{sample_name}.Normal.cr.seg \\
            --neutral-segment-copy-ratio-lower-bound 0.9 \\
            --neutral-segment-copy-ratio-upper-bound 1.1 \\
            --outlier-neutral-segment-copy-ratio-z-score-threshold 2.0 \\
            --calling-copy-ratio-z-score-threshold 2.0 \\
            --output {sample_name}.Normal.called.seg \n""".format(**config_dict))
        # output {
        #        File called_copy_ratio_segments = "{sample_name}.<Tumor|Normal>.called.seg"
        #        File called_copy_ratio_legacy_segments = "{sample_name}.<Tumor|Normal>.called.igv.seg"
        #     }
        # PlotDenoisedCopyRatiosTumor
        # (plots the standardized/denoised copy ratios produced above, with
        # distinct tumor/normal output prefixes)
        f.write("""{gatk} --java-options "-Xmx30G" PlotDenoisedCopyRatios \\
            --standardized-copy-ratios {sample_name}.Tumor.standardizedCR.tsv \\
            --denoised-copy-ratios {sample_name}.Tumor.denoisedCR.tsv \\
            --sequence-dictionary {ref_fasta_dict} \\
            --minimum-contig-length 1000000 \\
            --output {output_dir_} \\
            --output-prefix {sample_name}.Tumor \n""".format(**config_dict))
        # PlotDenoisedCopyRatiosNormal
        f.write("""{gatk} --java-options "-Xmx{java_mem}G" PlotDenoisedCopyRatios \\
            --standardized-copy-ratios {sample_name}.Normal.standardizedCR.tsv \\
            --denoised-copy-ratios {sample_name}.Normal.denoisedCR.tsv \\
            --sequence-dictionary {ref_fasta_dict} \\
            --minimum-contig-length 1000000 \\
            --output {output_dir_} \\
            --output-prefix {sample_name}.Normal \n""".format(**config_dict))
        # PlotModeledSegmentsTumor
        # (hets.tsv and modelFinal.seg are the standard ModelSegments output
        # names under the output directories created above)
        f.write("""{gatk} --java-options "-Xmx{java_mem}G" PlotModeledSegments \\
            --denoised-copy-ratios {sample_name}.Tumor.denoisedCR.tsv \\
            --allelic-counts ModelSegmentsTumor/{sample_name}.Tumor.hets.tsv \\
            --segments ModelSegmentsTumor/{sample_name}.Tumor.modelFinal.seg \\
            --sequence-dictionary {ref_fasta_dict} \\
            --minimum-contig-length 1000000 \\
            --output {output_dir_} \\
            --output-prefix {sample_name}.Tumor \n""".format(**config_dict))
        # PlotModeledSegmentsNormal
        f.write("""{gatk} --java-options "-Xmx{java_mem}G" PlotModeledSegments \\
            --denoised-copy-ratios {sample_name}.Normal.denoisedCR.tsv \\
            --allelic-counts ModelSegmentsNormal/{sample_name}.Normal.hets.tsv \\
            --segments ModelSegmentsNormal/{sample_name}.Normal.modelFinal.seg \\
            --sequence-dictionary {ref_fasta_dict} \\
            --minimum-contig-length 1000000 \\
            --output {output_dir_} \\
            --output-prefix {sample_name}.Normal \n""".format(**config_dict))
        # CNVFuncotateSegments
        # Only the core arguments are generated here; the optional WDL-style
        # interpolations (interval list, transcript selection, annotation
        # defaults/overrides, excluded fields) are not valid Python format
        # fields, and the data-sources path reuses the funcotator_dataSources
        # key from the Funcotator step above.
        f.write("""{gatk} --java-options "-Xmx{java_mem}G" FuncotateSegments \\
            --data-sources-path {funcotator_dataSources} \\
            --ref-version {funcotator_ref_version} \\
            --output-file-format SEG \\
            -R {ref_fasta} \\
            --segments {input_seg_file} \\
            -O {basename_input_seg_file}.funcotated.tsv \n""".format(**config_dict))
Beispiel #40
0
                #restore default handler for child process so it doesn't try to do shutdown.
                for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
                    signal.signal(sig, signal.SIG_DFL)
                setproctitle.setproctitle(setproctitle.getproctitle() + " " +
                                          self.workername)
                consumer = event_consumer.EventConsumer(*self.args)
                q.application.appname = "../%s/eventconsumer/%s" % (
                    self.appname, self.workername)
                q.application.start()
                try:
                    consumer.consume()
                except Exception, e:
                    q.logger.log("Consumer process died: %s" % e)
                finally:
                    q.application.stop(0)
                os._exit(0)  # os.exit does not exist; os._exit is the call for a forked child

            self.pid = pid

    for workerPool in q.system.fs.listDirsInDir(workersPool):
        workerName = q.system.fs.getBaseName(workerPool)
        cfgFilePath = q.system.fs.joinPaths(workerPool, "consumer.cfg")
        if not q.system.fs.exists(cfgFilePath):
            continue

        cfgFile = q.tools.inifile.open(cfgFilePath)
        workers = cfgFile.getIntValue('main', 'workers')
        bindingKey = cfgFile.getValue('main', 'eventKey')
        enabled = True
        if cfgFile.checkParam('main', 'enabled'):
            enabled = cfgFile.getBooleanValue('main', 'enabled')
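A note that applies to this and several of the following examples: os.exit does not exist in the Python standard library, so a call to it raises AttributeError. The working equivalents are os._exit(code) inside a forked child and sys.exit(code) everywhere else, each assuming the corresponding import. A minimal sketch of the distinction:

import os
import sys

def exit_from_forked_child(code):
    # os._exit ends the process immediately, without running atexit
    # handlers or flushing stdio buffers inherited from the parent.
    os._exit(code)

def exit_normally(code):
    # sys.exit raises SystemExit, so cleanup handlers still run.
    sys.exit(code)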
Beispiel #41
0
def main():
    global args
    parser = argparse.ArgumentParser(
        description='update cloudflare dns records')

    parser.add_argument('Domain',
                        metavar='domain',
                        type=str,
                        help='domain to update')

    parser.add_argument('--dryrun', action='store_true', help='dry run')

    parser.add_argument('--kind',
                        action='store',
                        type=str,
                        default='A',
                        help='type of record, e.g. A')

    parser.add_argument('--zone', action='store', type=str, help='dns zone')

    parser.add_argument('--id',
                        action='store',
                        type=str,
                        help='ID of record to update')

    parser.add_argument('--email', action='store', type=str, help='email')

    parser.add_argument('--api-token',
                        action='store',
                        type=str,
                        help='API token')

    parser.add_argument('--api-key', action='store', type=str, help='API key')

    parser.add_argument('--a-name',
                        action='store',
                        type=str,
                        help='record A name')

    parser.add_argument('--a-ip',
                        action='store',
                        type=str,
                        help='record A IP')

    args = parser.parse_args()

    domain = args.Domain

    if args.api_token is None:
        atoken = os.getenv('CF_API_TOKEN')
        if atoken is None:
            print("can't find CF_API_TOKEN")
            sys.exit(1)
        args.api_token = atoken

    if args.api_key is None:
        akey = os.getenv('CF_API_KEY')
        if akey is None:
            print("can't find CF_API_KEY")
            sys.exit(1)
        args.api_key = akey

    if args.zone is None:
        azone = os.getenv('CF_ZONE_ID')
        if azone is None:
            print("can't find CF_ZONE_ID")
            sys.exit(1)
        args.zone = azone

    find_gcp_vms_ip(callback1)
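The three blocks above repeat the same argument/environment fallback. A condensed sketch of that pattern; require_env is a hypothetical helper, not part of the original script:

import os
import sys

def require_env(cli_value, var):
    # Prefer the command-line value, fall back to the environment,
    # and abort when neither is set.
    if cli_value is not None:
        return cli_value
    value = os.getenv(var)
    if value is None:
        print("can't find %s" % var)
        sys.exit(1)
    return value

With it, each block reduces to a single line, e.g. args.api_token = require_env(args.api_token, 'CF_API_TOKEN').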
Beispiel #42
0
 def game_quit(self):
     sys.exit(0)
Beispiel #43
0
def main(argv=sys.argv):
    if len(argv) < 1:
        print('Cannot reach this line')
        sys.exit(0)
    # Check binaries
    binaries = ['adb']
    check_binary(binaries)
    # Check dirs
    dirs = ['./output/mp4']
    check_dirs(dirs)
    # Clear env
    print('checked all binaries, dirs')

    # Get list of target apps
    if not os.path.exists('app_list.csv'):
        raise Exception('Need app_list.csv')
    app_list = list()
    with open('app_list.csv', 'r') as f:
        reader = csv.DictReader(f)
        for row in reader:
            print(row['package_name'])
            app_list.append(row['package_name'])
    # package_name = app_list[0]

    for package_name in app_list:
        pss = ['screenrecord']
        clear_env(pss)
        # clear cache without user data
        # adb shell pm clear APKNAME
        # adb shell run-as APKNAME rm -rf /data/data/APKNAME/cache/*
        # adb shell su - c rm - rf /data/data/com.amazon.mShop.android.shopping/cache/*
        command = 'adb shell su -c rm -rf /data/data/{0}/cache/*'.format(package_name)
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        command = 'adb shell su -c rm /sdcard/*.xml'
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        command = 'adb shell su -c rm /sdcard/*.mp4'
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        command = 'adb shell su -c rm /sdcard/*.pcap'
        try:
            command_check(command)
        except subprocess.CalledProcessError:
            pass
        print('removed cache')

        # time_list
        timing_list = list()

        # execute screenrecord
        command = 'adb shell screenrecord /sdcard/{0}.mp4'.format(package_name)
        screenrecord_proc = command_popen(command)

        # launch app
        start_time = datetime.datetime.now()
        command = 'adb shell monkey -p {0} -c android.intent.category.LAUNCHER 1'.format(package_name)
        command_check(command)

        # sleep
        time.sleep(10)

        # stop app
        for index in range(0, 1):
            command = 'adb shell input keyevent KEYCODE_HOME'
            command_check(command)

        command = 'adb shell monkey -p {0} -c android.intent.category.LAUNCHER 1'.format(package_name)
        command_check(command)

        # sleep
        time.sleep(10)

        # stop app
        for index in range(0, 5):
            command = 'adb shell input keyevent KEYCODE_BACK'
            command_check(command)

        command = 'adb shell monkey -p {0} -c android.intent.category.LAUNCHER 1'.format(package_name)
        command_check(command)

        # sleep
        time.sleep(10)

        command = 'adb shell am force-stop {0}'.format(package_name)
        command_check(command)

        # terminate screenrecord
        screenrecord_proc.terminate()
        screenrecord_proc.kill()

        # why?
        pss = ['screenrecord']
        terminate_env(pss)

        time.sleep(5)

        # pull mp4
        command = 'adb pull /sdcard/{0}.mp4 ./output/mp4/'.format(package_name)
        command_check(command)
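This example leans on command_check and command_popen helpers that are defined elsewhere. A minimal sketch of what the call sites imply; the names come from the snippet, the bodies are assumed:

import subprocess

def command_check(command):
    # Run a shell command and raise subprocess.CalledProcessError on a
    # nonzero exit status, matching the try/except usage above.
    subprocess.check_call(command, shell=True)

def command_popen(command):
    # Start a long-running command (here: screenrecord) without waiting.
    return subprocess.Popen(command, shell=True)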
Beispiel #44
0
        description='Execute binary with some inherited sockets')
    parser.add_argument('-f',
                        '--fdname',
                        default='hello-world-svc',
                        help='fdnames of socket')
    args = parser.parse_args(argv)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.set_inheritable(True)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    s.bind(('127.0.0.1', 0))
    s.listen(16)

    os.system('inet-tool register %s' % (args.fdname))

    p = select.poll()
    p.register(s, select.POLLIN)

    while True:
        sockets = p.poll()
        for fd, _ in sockets:
            sd, _ = s.accept()
            sd.send(b"Hello world!\r\n")
            sd.close()


if __name__ == '__main__':
    i = main(sys.argv[1:])
    sys.exit(i)
Beispiel #45
0
 def game_won(self):
     print(self.term.green('CONGRATULATIONS, YOU PASSED THE LEVEL\nPRESS ANY KEY + ENTER TO EXIT:'))
     input()
     sys.exit(0)
Beispiel #46
0
def hybrid_mc():
    #Initialization
    #===================================
    pi_0   = heatbath_pi()
    phi_0  = np.zeros(pi_0.shape)
    
    a_t = np.append(N_saves,np.array(phi_0.shape))
    Saves=np.zeros(a_t)
    dE=np.zeros(N_saves)
    #===================================
    
    H0=H_MD(phi_0,pi_0)
    print(H0, 'H0')    

    
    rej=0
    temprej=0
    i=0
    while (i<N_therm):
        phi_new,pi_new = leapfrog(phi_0,pi_0,Tmax_MD)
        H_new          = H_MD(phi_new,pi_new)  # evaluate H at the proposed state
        
        deltaH = H_new - H0
        P_acc = np.exp(-deltaH)
        if (np.random.rand()<=P_acc):
            #print(H_new,'H_new',P_acc,'exp dH','ACCEPTED SAVE %.3f'%(i/N_saves))
            H0 =  H_new
            phi_0 = phi_new
            temprej=0
            i+=1
        else:
            #print(H_new,'H_new',P_acc,'exp dH','REJECTED SAVE')
            temprej+=1
            if temprej>rejmax:
                sys.exit(1)
        pi_0 = heatbath_pi()
        #----------------------------------------------
    #print('saving', i)
    i=0
    while (i<N_saves):
        #Thermalizing
        phi_0,pi_0,H0=thermalize(phi_0,pi_0,H0,T_therm)
        #---------------------------------------
        #now saving
        #---------------------------------------------
        phi_new,pi_new = leapfrog(phi_0,pi_0,Tmax_MD)
        H_new          = H_MD(phi_new,pi_new)  # evaluate H at the proposed state
        
        deltaH = H_new - H0
        P_acc = np.exp(-deltaH)
        if (np.random.rand()<=P_acc):
            print(H_new,'H_new',P_acc,'exp dH','ACCEPTED SAVE %.3f'%(i/N_saves),i)
            H0 =  H_new
            phi_0 = phi_new
            Saves[i]=phi_new
            dE[i] = P_acc
            temprej=0
            i+=1
        else:
            print(H_new,'H_new',P_acc,'exp dH','REJECTED SAVE')
            temprej+=1
            rej +=1
            if temprej>rejmax:
                sys.exit(1)
        pi_0 = heatbath_pi()
        #----------------------------------------------
        
        
        
    rate = (N_saves/(rej+N_saves))
    return(Saves,rate,dE)
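The accept/reject branches above implement the standard Metropolis test for hybrid Monte Carlo, accepting a proposal with probability min(1, exp(-dH)). A standalone sketch of just that step, assuming numpy as np:

import numpy as np

def metropolis_accept(H_old, H_new):
    # Accept with probability min(1, exp(-(H_new - H_old)));
    # np.random.rand() draws uniformly from [0, 1), so the
    # comparison realizes exactly that probability.
    return np.random.rand() <= np.exp(-(H_new - H_old))

With H_old = H0 and H_new taken from the leapfrog proposal, this is the branch used in both loops above.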
Beispiel #47
0
def report_error(message):
    # "FEL" is Swedish for "ERROR"; messagebox comes from tkinter.
    messagebox.showinfo("FEL!", message)
    sys.exit(1)
Beispiel #48
0
def usage():
    print("gen.py fx|fx_fwd|eq_fwd")
    sys.exit(1)
Beispiel #49
0
def main():
  if sys.platform[:5] != 'linux': # Run this only on Linux
    print 'This script is supported only on Linux'
    sys.exit(1)

  # Command line parsing
  parser = optparse.OptionParser()
  parser.add_option('-p',
                    '--platform',
                    dest='platform',
                    default=None,
                    help=('Platform that the lcov file was generated on. Must '
                          'be one of {win32, linux2, linux3, macosx}'))
  parser.add_option('-s',
                    '--source',
                    dest='src_dir',
                    default=None,
                    help='Path to the source code and symbols')
  parser.add_option('-d',
                    '--dash_root',
                    dest='dash_root',
                    default=None,
                    help='Root directory for the dashboard')
  parser.add_option('-l',
                    '--lcov',
                    dest='lcov_path',
                    default=None,
                    help='Location of the LCOV file to process')
  parser.add_option('-u',
                    '--post_url',
                    dest='post_url',
                    default=None,
                    help='Base URL of the coverage dashboard')
  (options, args) = parser.parse_args()

  if options.platform is None:
    parser.error('Platform not specified')
  if options.lcov_path is None:
    parser.error('lcov file path not specified')
  if options.src_dir is None:
    parser.error('Source directory not specified')
  if options.dash_root is None:
    parser.error('Dashboard root not specified')
  if options.post_url is None:
    parser.error('Post URL not specified')
  if options.platform == 'win32':
    CleanWin32Lcov(options.lcov_path, options.src_dir)
    percent = GenerateHtml(options.lcov_path, options.dash_root)
    if percent is None:
      # TODO(niranjan): Add logging.
      print 'Failed to generate code coverage'
      sys.exit(1)
    else:
      # TODO(niranjan): Do something with the code coverage numbers
      pass
  else:
    print 'Unsupported platform'
    sys.exit(1)

  # Prep coverage results for dashboard and post new set.
  parsed_data = ParseCoverageDataForDashboard(options.lcov_path)
  PostResultsToDashboard(options.lcov_path, parsed_data, options.post_url)
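The five None checks above all follow the same required-option pattern for optparse. A compact equivalent, a sketch only, keeping the Python 2 style of this example and reusing its parser and options objects:

REQUIRED = [('platform', 'Platform'), ('lcov_path', 'lcov file path'),
            ('src_dir', 'Source directory'), ('dash_root', 'Dashboard root'),
            ('post_url', 'Post URL')]
for attr, label in REQUIRED:
    if getattr(options, attr) is None:
        parser.error('%s not specified' % label)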
Beispiel #50
0
def main():
    events = []

    try:
        opts, args = getopt.getopt(sys.argv[1:], "krwoa")
        opts = dict(opts)

        while args and args[0] != "--":
            event = args[0]
            parameter = None
            if ":" in event:
                event, parameter = event.split(":", 1)

            if event and event[0] == "~":
                negate = True
                event = event[1:]
            else:
                negate = False

            handler = None
            for cls in OnEvent.__subclasses__():
                if cls.PREFIX == event:
                    handler = cls
                    break

            if handler:
                events.append((handler, parameter, negate))
                args.pop(0)
            else:
                break

        if args and args[0] == "--":
            args.pop(0)

        action = args
        assert events
    except:
        print_help()
        sys.exit(1)

    if action and "sudo" in action[0]:
        init_sudo(action[0])

    global_condition = threading.Condition(threading.RLock())
    event_objects = []
    for handler, parameter, negate_condition in events:
        instance = handler(parameter,
                           negate_condition=negate_condition,
                           global_condition=global_condition)
        event_objects.append(instance)

    ptarget = subprocess.PIPE if "-o" in opts else None
    proc = None
    while True:
        global_condition.acquire()
        global_condition.wait(
            99999
        )  # timeout required for C-c to work in Py 2.x, see python issue 8844
        condition_met = (all if "-a" in opts else any)(
            (x.is_event_set() for x in event_objects))
        global_condition.release()
        if not condition_met:
            continue

        if "-k" in opts and proc:
            if proc.poll() is None:
                status(1, "on", "Killing old action instance %d" % proc.pid)
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
                proc.terminate()
                proc.wait()
        if action:
            if "-r" not in opts and "-o" not in opts:
                if not is_executable(action[0]):
                    action = ["/bin/sh", "-c"] + action
                os.execvp(action[0], action)
                status(2, "on", "Failed to execute command")
                os.exit(1)
                sys.exit(1)
            if is_executable(action[0]):
                proc = subprocess.Popen(action,
                                        stdout=ptarget,
                                        stderr=ptarget,
                                        preexec_fn=preexec_fn)
            else:
                proc = subprocess.Popen(action,
                                        shell=True,
                                        stdout=ptarget,
                                        stderr=ptarget,
                                        preexec_fn=preexec_fn)
            if ptarget:
                proc_thread = threading.Thread(target=format_output_thread,
                                               args=(proc, ))
                proc_thread.start()
                if "-w" in opts:
                    proc.wait()
            else:
                if "-w" in opts:
                    proc.communicate()
        if "-r" not in opts:
            break

        global_condition.acquire()
        for x in event_objects:
            x.reset()
        global_condition.release()
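The command-dispatch branch above assumes an is_executable helper. A minimal sketch consistent with how it is called; the name comes from the snippet, the body is assumed:

import os

def is_executable(path):
    # True when the path names an existing file the current user may execute.
    return os.path.isfile(path) and os.access(path, os.X_OK)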
Beispiel #51
0
def DECoNparser(PATHS, logger_name, q, txt):
    input_path = PATHS["DIR_TREE"][6]
    out_dir = PATHS["DIR_TREE"][5]
    main_out = PATHS["DIR_TREE"][0]
    anno_pkl = cfg.annotCNV
    targets_pkl = cfg.bedCNV
    targets_excel = cfg.cnvTargets
    fail_file = input_path + "/DECoN_Failures.txt"
    custom_file = input_path + "/DECoN_custom.txt"
    sex_path = input_path + "/sex.txt"

    cnv_logger = logging.getLogger(logger_name)
    cnv_logger.info('DECoNparser session started')
    cnv_logger.info('input path: {}'.format(input_path))

    def newPickle():
        annotation = pd.read_excel(targets_excel, sheet_name=cfg.CNVAnno)
        annotation['del_length'] = annotation['Stop'] - annotation['Start']

        targetsExon_raw = pd.read_excel(targets_excel,
                                        sheet_name=cfg.CNVtargets)
        targetsExon_raw.index = np.arange(1, len(targetsExon_raw) + 1)
        targetsExon = targetsExon_raw.dropna()

        annotation.to_pickle(anno_pkl)
        targetsExon.to_pickle(targets_pkl)
        cnv_logger.info('Created new pickles')
        return annotation, targetsExon

    def oldPickle():
        try:
            annotation = pd.read_pickle(anno_pkl)
            targetsExon = pd.read_pickle(targets_pkl)
            cnv_logger.info('Using pickles')
            return annotation, targetsExon
        except Exception as e:
            cnv_logger.error("Missing pickles in path\n{}".format(e))
            print("Missing pickles in path")
            sys.exit(1)

    if os.path.exists(targets_excel) and os.path.exists(targets_pkl):
        if os.path.getmtime(targets_excel) > os.path.getmtime(targets_pkl):
            cnv_logger.info('New targets excel file')
            annotation, targetsExon = newPickle()
        else:
            annotation, targetsExon = oldPickle()
    elif os.path.exists(targets_excel) and not os.path.exists(targets_pkl):
        cnv_logger.info('No pickles')
        annotation, targetsExon = newPickle()
    else:
        annotation, targetsExon = oldPickle()

    try:
        sex_file = open(sex_path, 'w')
    except Exception as e:
        cnv_logger.error("Unable to create gender file\n{}".format(e))
        print("Unable to create gender file")
        sys.exit(1)

    for filename in os.listdir(PATHS["BAM_PATH"]):
        if filename.endswith(".bam"):
            file_path = "{}/{}".format(PATHS["BAM_PATH"], filename)
            try:
                out = subprocess.check_output(sam_cmd.format(file_path),
                                              stderr=subprocess.STDOUT,
                                              shell=True)
            except:
                try:
                    out = subprocess.check_output(
                        sam_cmd_michal.format(file_path),
                        stderr=subprocess.STDOUT,
                        shell=True)
                except:
                    print("Unable to run samtools {}".format(filename))
                    cnv_logger.info(
                        "Unable to run samtools {}".format(filename))
                    sys.exit(1)

            out = out.decode('utf-8')
            sex_file.write("{}\n".format(filename.split('.bam')[0]))
            sex_file.write("{}".format(out))
        else:
            continue

    sex_file.close()

    try:
        sex_raw = pd.DataFrame(pd.read_csv(sex_path, sep=' ', header=None))
    except:
        print("Unable to open sex.txt...")
        cnv_logger.error("Unable to open sex.txt")
        sys.exit(1)

    sex_mask = sex_raw[0].str.contains("chr")
    depth = sex_raw[sex_mask]
    sample = sex_raw[~sex_mask]
    sample = sample.rename(columns={0: 'Sample'})
    depth = pd.DataFrame(depth[0].apply(SampleDepth), columns=[0])
    depth.loc[:, 0] = depth[0].apply(returnSex)
    depth = depth.rename(columns={0: 'Gender'})
    depth.index -= 1
    sampleSex = sample.join(depth, sort=False)
    cnv_frame = sampleSex.fillna("Female")
    cnv_logger.info('Cnv frame created')

    try:
        failures = pd.DataFrame(pd.read_csv(fail_file, sep='\t'))
    except:
        print("Unable to locate DECoN_Failures.txt...")
        cnv_logger.error("Unable to locate DECoN_Failures.txt")
        sys.exit(1)  # exit nonzero: the required input file is missing
    failures = failures[~failures.Gene.str.contains("gene")]
    failures.set_index('Exon', inplace=True)

    failed_exon_mask = failures.Type == "Whole exon"
    failures_exon = failures[failed_exon_mask]
    failures_sample = failures[~failed_exon_mask]

    # print(failures_sample)
    failed_samples = []
    if not failures_sample.empty:
        for index, row in failures_sample.iterrows():
            with open("{}/errors.log".format(main_out), 'a+') as f:
                f.write("Sample {} failed during CNV detection\n".format(
                    row.Sample))
                print("Sample {} failed during CNV detection".format(
                    row.Sample))
                tools.put_text(
                    "Sample {} failed during CNV detection".format(row.Sample),
                    q, txt)
                failed_samples.append(row.Sample)

    failures_exon.insert(len(failures_exon.columns), 'FPKM',
                         failures_exon.Info.apply(getFPKM))
    failures_exon = failures_exon.drop(columns=['Type', 'Info'])

    failures_sample.insert(0, 'Sample_short',
                           failures_sample.Sample.apply(SampleName))
    failures_sample.insert(len(failures_sample.columns), 'Info_list',
                           failures_sample.Info.apply(getInfo))
    tags = failures_sample.Info_list.apply(pd.Series)
    try:
        tags.rename(columns={0: 'Correlation', 1: 'median FPKM'}, inplace=True)
    except:
        pass
    failures_sample = pd.concat([failures_sample[:], tags[:]], axis=1)
    failures_sample.drop(columns=['Info', 'Info_list', 'Type', 'Sample'],
                         inplace=True)
    failures_sample.rename(columns={'Sample_short': 'Sample'}, inplace=True)
    cnv_logger.info('Processed failing samples and exons')

    custom_failed_exons = pd.DataFrame()
    custom_exons = targetsExon.index.tolist()
    for index, row in failures_exon.iterrows():
        index = int(index)
        if index in custom_exons:
            cust_ex = int(targetsExon.loc[index]['Custom.Exons'])
            chr = targetsExon.loc[index].Chr
            start = targetsExon.loc[index].Start
            stop = targetsExon.loc[index].End
            temp_df = pd.DataFrame({
                'Sample': [row.Sample],
                'Gene': [row.Gene],
                'Exon': [cust_ex],
                'FPKM': [row.FPKM],
                'Chr': [chr],
                'Start': [start],
                'Stop': [stop]
            })
            custom_failed_exons = custom_failed_exons.append(temp_df)

    try:
        custom = pd.DataFrame(pd.read_csv(custom_file, sep='\t'))
    except:
        print("Unable to locate DECoN_custom.txt...")
        cnv_logger.error("Unable to locate DECoN_custom.txt")
        sys.exit(1)
    custom.Sample = custom.Sample.apply(SampleName)
    custom.drop(columns=['CNV.ID', 'Start.b', 'End.b'], inplace=True)
    custom['cnv_length'] = custom.End - custom.Start

    cnv_frame_ext = pd.merge(custom, cnv_frame, on='Sample')
    cnv_frame_ext.insert(0, 'Genotype', np.nan, allow_duplicates=True)
    cnv_frame_ext.insert(0, 'AGID', np.nan, allow_duplicates=True)
    cnv_frame_ext.insert(0, 'Classification', np.nan, allow_duplicates=True)
    # cnv_frame_ext = cnv_frame_ext[(cnv_frame_ext['BF'] > bf_limit_min)]

    femaleDMD = cnv_frame_ext[(cnv_frame_ext.Gender == 'Female')
                              & (cnv_frame_ext.Gene == 'DMD')].copy(deep=True)
    femaleDMD.iloc[:, 1] = 'AG5062'
    femaleDMD.loc[femaleDMD['CNV.type'] == 'duplication', 'AGID'] = 'AG5100'
    femaleDMD.insert(len(femaleDMD.columns),
                     'Annotation1',
                     'DMD:Duchenne muscular dystrophy',
                     allow_duplicates=True)
    femaleDMD.reset_index(drop=True, inplace=True)
    femaleDMD = femaleDMD[(femaleDMD['BF'] > bf_limit_SMN_DMD)]

    del_notDMD = cnv_frame_ext[(cnv_frame_ext['CNV.type'] == 'deletion')
                               & (cnv_frame_ext.Gene != 'DMD')]

    del_cftr = del_notDMD[(del_notDMD.Gene == 'CFTR')]
    del_cftr = del_cftr[(del_cftr['BF'] > bf_limit_min)]
    del_notDMD = del_notDMD[~(del_notDMD.Gene == 'CFTR')]

    del_smn = del_notDMD[(del_notDMD.Gene == 'SMN1')].copy(deep=True)
    del_smn.iloc[:, 1] = 'AG3522'
    del_smn = del_smn[(del_smn['BF'] > bf_limit_SMN_DMD)]
    del_smn.insert(len(del_smn.columns),
                   'Annotation1',
                   'SMN1:Spinal muscular atrophy-1',
                   allow_duplicates=True)
    del_smn.reset_index(drop=True, inplace=True)
    del_notDMD = del_notDMD[~(del_notDMD.Gene == 'SMN1')]
    del_notDMD = del_notDMD[(del_notDMD['BF'] > bf_limit_min)]

    for index, row in del_notDMD.iterrows():
        row_first = row['Custom.first']
        row_last = row['Custom.last']
        exons = row_first, row_last
        subset = annotation.where(annotation.Gene == row.Gene)
        subset.dropna(inplace=True, how='all')
        subset.reset_index(drop=True, inplace=True)
        if subset.empty:
            del_notDMD.drop(index, inplace=True)
            continue
        if subset.shape[0] > 1:
            first = int(subset.loc[0, 'Custom.first']), int(
                subset.loc[0, 'Custom.last'])
            second = int(subset.loc[1, 'Custom.first']), int(
                subset.loc[1, 'Custom.last'])
            points = point.nearest(exons, [first, second])
            subset = subset.where(subset['Custom.first'] == points[0])
            subset = subset.where(subset['Custom.last'] == points[1])
            subset.dropna(inplace=True, how='all')
            subset.reset_index(drop=True, inplace=True)
        row.Gene = str(subset.loc[0, 'Annotation1']).split(':')[0]
        anno_first = int(subset.loc[0, 'Custom.first'])
        anno_last = int(subset.loc[0, 'Custom.last'])
        range_first = [anno_first - 1, anno_first + 2]
        range_last = [anno_last - 1, anno_last + 2]
        if row_first not in range(range_first[0],
                                  range_first[1]) and row_last not in range(
                                      range_last[0], range_last[1]):
            del_notDMD.loc[
                index,
                'Classification'] = 'Big Del Boundaries Different as Reported'
        if int(row.BF) < bf_limit_min:
            del_notDMD = del_notDMD.drop(index)
        else:
            del_notDMD.loc[index, 'AGID'] = subset.loc[0, 'AGID']
            del_notDMD.loc[index, 'Annotation1'] = subset.loc[0, 'Annotation1']
    del_notDMD.reset_index(drop=True, inplace=True)

    cftr_subset = annotation.where(annotation.Gene == 'CFTR')
    cftr_subset.dropna(inplace=True)
    for index, row in del_cftr.iterrows():
        try:
            subset = cftr_subset.where(
                cftr_subset['Custom.first'] == row['Custom.first'])
            # chain on the first filter instead of starting over from cftr_subset
            subset = subset.where(
                subset['Custom.last'] == row['Custom.last'])
            subset.dropna(inplace=True)
            subset.reset_index(drop=True, inplace=True)
            if int(row.BF) < bf_limit_min:
                del_cftr.drop(index, inplace=True)
            else:
                del_cftr.loc[index, 'AGID'] = subset.loc[0, 'AGID']
                del_cftr.loc[index, 'Annotation1'] = subset.loc[0,
                                                                'Annotation1']
        except:
            try:
                subset = cftr_subset.where(
                    cftr_subset['Custom.first'] == row['Custom.first'])
                subset.dropna(inplace=True)
                subset.reset_index(drop=True, inplace=True)
                if int(row.BF) < bf_limit_min:
                    del_cftr.drop(index, inplace=True)
                else:
                    del_cftr.loc[index, 'AGID'] = subset.loc[0, 'AGID']
                    del_cftr.loc[index,
                                 'Annotation1'] = subset.loc[0, 'Annotation1']
            except:
                subset = cftr_subset.where(
                    cftr_subset['Custom.last'] == row['Custom.last'])
                subset.dropna(inplace=True)
                subset.reset_index(drop=True, inplace=True)
                if int(row.BF) < bf_limit_min:
                    del_cftr.drop(index, inplace=True)
                else:
                    del_cftr.loc[index, 'AGID'] = subset.loc[0, 'AGID']
                    del_cftr.loc[index,
                                 'Annotation1'] = subset.loc[0, 'Annotation1']
            if (row['Custom.first'] != subset.loc[0,'Custom.first'] and row['Custom.first'] != (subset.loc[0,'Custom.first']+1)) \
             or (row['Custom.last'] != subset.loc[0,'Custom.last'] and row['Custom.last'] != (subset.loc[0,'Custom.last']-1)):
                del_cftr.loc[
                    index,
                    'Classification'] = 'Big Del Boundaries Different as Reported'
    del_cftr.reset_index(drop=True, inplace=True)

    cnv = pd.concat([del_notDMD, del_cftr, femaleDMD, del_smn], sort=False)
    cnv.reset_index(drop=True, inplace=True)
    for index, row in cnv.iterrows():
        if int(row.BF) < bf_limit_max:
            tag_clas(cnv, row, index, 'CNV-Problem')
        else:
            tag_clas(cnv, row, index, 'CNV')

        if row['Reads.ratio'] <= 0.08:
            cnv.loc[index, 'Genotype'] = 'hom'
        elif row['Reads.ratio'] > 0.29 and row['Reads.ratio'] < 0.71:
            cnv.loc[index, 'Genotype'] = 'het'
        elif row['Reads.ratio'] >= 0.71 and row['Reads.ratio'] < 1.3:
            cnv.loc[index, 'Genotype'] = 'het'
            tag_clas(cnv, row, index, 'CNV-Problem')
        elif row['Reads.ratio'] >= 1.3 and row['Reads.ratio'] < 1.71:
            cnv.loc[index, 'Genotype'] = 'het'
        else:
            cnv.loc[index, 'Genotype'] = 'het'
            tag_clas(cnv, row, index, 'CNV-Problem')

        if row.Sample in failures_sample.Sample.tolist():
            cnv.loc[index, 'Info'] = 'Failed Sample'
            tag_clas(cnv, row, index, 'CNV-Problem')

    cnv.reset_index(drop=True, inplace=True)
    cnv.rename(columns={
        'End': 'Observed Stop',
        'Start': 'Observed Start'
    },
               inplace=True)
    cnv_logger.info('Processed CNVs')

    cnv.insert(0, 'Annotation2', np.nan)
    cnv.insert(0, 'Annotation3', np.nan)
    cnv.insert(0, 'Annotation4', np.nan)
    cnv.insert(0, 'gdna', np.nan)
    cnv.insert(0, 'Clalit Disease Makat', np.nan)
    cnv.insert(0, 'Clalit Mutation Makat', np.nan)
    for index, row in cnv.iterrows():
        subset = annotation.where(annotation['AGID'] == row['AGID'])
        subset.dropna(inplace=True, how='all')
        subset.reset_index(drop=True, inplace=True)
        cnv.loc[index, 'Annotation2'] = subset.loc[0, 'Annotation2']
        cnv.loc[index, 'Annotation3'] = subset.loc[0, 'Annotation3']
        cnv.loc[index, 'Annotation4'] = subset.loc[0, 'Annotation4']
        cnv.loc[index, 'gdna'] = subset.loc[0, 'gdna']
        cnv.loc[index,
                'Clalit Disease Makat'] = subset.loc[0, 'Clalit Disease Makat']
        cnv.loc[index,
                'Clalit Mutation Makat'] = subset.loc[0,
                                                      'Clalit Mutation Makat']

    cnv_excel = cnv.reindex(columns=[
        'Sample', 'AGID', 'Annotation1', 'CNV.type', 'Gene', 'Genotype',
        'Custom.first', 'Custom.last', 'Chromosome', 'Observed Start',
        'Observed Stop', 'Correlation', 'N.comp', 'BF', 'Reads.expected',
        'Reads.observed', 'Reads.ratio', 'Gender', 'Classification', 'Info',
        'Annotation2', 'Annotation3', 'Annotation4', 'gdna',
        'Clalit Disease Makat', 'Clalit Mutation Makat'
    ])
    cnv_excel.sort_values(by=['Classification'], inplace=True)

    cnv_tsv = pd.merge(cnv, cnv_frame, on=['Sample', 'Gender'], how='right')
    cnv_tsv = cnv_tsv.reindex(columns=[
        'Sample', 'AGID', 'Annotation1', 'CNV.type', 'Gene', 'Genotype',
        'Custom.first', 'Custom.last', 'Chromosome', 'Observed Start',
        'Observed Stop', 'Correlation', 'N.comp', 'BF', 'Reads.expected',
        'Reads.observed', 'Reads.ratio', 'Gender', 'Classification', 'Info',
        'Annotation2', 'Annotation3', 'Annotation4', 'gdna',
        'Clalit Disease Makat', 'Clalit Mutation Makat'
    ])

    # cnv_tsv.to_csv(out_dir + 'DECoN_results.tsv', sep='\t', encoding='utf-8', index=False)
    # cnv_logger.info('Generated tsv')
    tools.compressed_pickle("{}/{}".format(out_dir, cfg.FullCNV), cnv_tsv)
    cnv_logger.info('Generated compressed DECoN results')

    pickles = cfg.CNVPickles
    dfs = {
        pickles[0]: cnv_excel,
        pickles[1]: failures_sample,
        pickles[2]: custom_failed_exons
    }
    for name, df in dfs.items():
        # df.to_pickle('{}/{}.pkl'.format(input_path, name))
        tools.compressed_pickle('{}/{}'.format(input_path, name), df)
        cnv_logger.info("Pickled compressed {}".format(name))

    # writer = pd.ExcelWriter(out_dir + 'CNV-DECoN.xlsx', engine='xlsxwriter')
    # cnv_excel.to_excel(writer, sheet_name='Calls', index=False)
    # failures_sample.to_excel(writer, sheet_name='Failures_samples', index=False)
    # custom_failed_exons.to_excel(writer, sheet_name='Failures_exons', index=False)
    # writer.save()
    # cnv_logger.info('Generated excel')
    cnv_logger.info('Completed DECoNParser')

    # print("CNV detection completed!")
    return (failed_samples)
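tag_clas is called throughout the classification loop above but defined elsewhere. Judging from its call sites, a plausible sketch; the body is assumed, not the original implementation:

def tag_clas(frame, row, index, label):
    # Record a classification label for the given row of the CNV frame.
    frame.loc[index, 'Classification'] = label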
Beispiel #52
0
    "names.c",
]
GENRULE_TEMPLATE = """
genrule(
    name = "{name}",
    outs = ["{filename}"],
    cmd = r\"\"\"
cat << 'EOF' > $@
{body}
EOF
\"\"\",
)
"""

if not os.path.exists('/dbxce'):
    sys.exit('must run in container')  # sys.exit prints the message and exits with status 1

subprocess.check_call(['tar', 'xf', NCURSES_VERSION + ".tar.gz"])
subprocess.check_call(['tar', 'xf', DRTE_BUILD_SYSROOT])
CC = os.path.realpath('root/bin/gcc')

if os.path.exists('build'):
    shutil.rmtree('build')
os.mkdir('build')

os.chdir('build')

subprocess.check_call([
    os.path.join(SRC_DIR, 'configure'),
    "--without-debug",
    "--disable-rpath",
Beispiel #53
0
def quit():
    sys.exit(0)
Beispiel #54
0
    def run_auto_java_DB_func(self):
        # This flag decides whether to update the flag table and insert the batch-run record
        auto_java_flag = False

        print("[%s] DB not auto java! will run ... " % (self.name))
        sys.stdout.flush()

        ## Record the batch start time
        start_date_time_stamp = (datetime.datetime.now() +
                                 datetime.timedelta(-0))
        ## Keep a datetime copy for computing the duration later
        start_date_time_stamp_second = start_date_time_stamp
        start_date_time_stamp = start_date_time_stamp.strftime(
            "%Y/%m/%d %H:%M:%S")

        # Invoke the Java batch program
        arg1 = "java -jar -Xms512m -Xmx1024m"
        arg2 = "/server/scripts/auto_java_DB_everyday/auto_java_properties/freight20161219.jar"
        arg3 = "/server/scripts/auto_java_DB_everyday/auto_java_properties/"
        arg3 = "%s%s.properties" % (arg3, self.name)
        arg4 = time_stamp

        cmd = "%s %s %s %s" % (arg1, arg2, arg3, arg4)

        print(cmd)
        sys.stdout.flush()

        try:
            a = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
            # CompletedProcess.stdout is a bytes object, so iterate over
            # splitlines(); bytes has no readlines() method.
            for l1 in a.stdout.splitlines():
                l1 = l1.strip()
                l1 = l1.decode()
            # mark success only after the run actually completed
            auto_java_flag = True
        except Exception:
            pass
            # print("can't run auto java, please check the code")

        # Compute the batch end time
        end_date_time_stamp = (datetime.datetime.now() +
                               datetime.timedelta(-0))
        end_date_time_stamp_second = end_date_time_stamp
        end_date_time_stamp = end_date_time_stamp.strftime("%Y/%m/%d %H:%M:%S")
        # print("[%s] auto java end time:%s" % (self.name, end_date_time_stamp))

        # Compute the batch duration
        cost_time = end_date_time_stamp_second - start_date_time_stamp_second
        cost_time = (cost_time.seconds / 60)
        cost_time = round(cost_time, 2)

        if auto_java_flag:
            # Write the batch flag table (local MySQL)
            print("Writing batch flag table (local MySQL)")
            self.sql.insert_into_sql_func(start_date_time_stamp,
                                          end_date_time_stamp, cost_time)

            # Write the batch flag table (Oracle, for Sun Zhen and others)
            print("Writing batch flag table (Oracle, for Sun Zhen and others)")
            self.sql.update_sql_func(time_stamp, end_date_time_stamp)
        else:
            print("auto java failed, exiting!")
            sys.exit(1)
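For reference, the capture loop above follows the CompletedProcess API of subprocess.run (Python 3.5+). A self-contained sketch of the same pattern:

import subprocess

result = subprocess.run('echo hello', shell=True, stdout=subprocess.PIPE)
for line in result.stdout.splitlines():  # stdout is bytes
    print(line.decode().strip())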
Beispiel #55
0
 def __init__(self, message=""):
     print(f"\033[31m\033[1m{self.__class__.__name__}:\033[0m {message}")
     sys.exit(1)  # exit nonzero, since this class signals an error
Beispiel #56
0
    def get_damage(self, node_dataset, link_dataset, tornado_dataset,
                   tornado_id):
        """

        Args:
            node_dataset (obj): Node dataset.
            link_dataset (obj): Link dataset.
            tornado_dataset (obj): Tornado dataset.
            tornado_id (str): Tornado id.

        """
        self.set_tornado_variables(tornado_dataset)
        self.set_node_variables(node_dataset)

        # get fragility curves set - tower for transmission, pole for distribution
        fragility_set_tower = FragilityCurveSet(
            self.fragilitysvc.get_dfr3_set(self.fragility_tower_id))
        assert fragility_set_tower.id == self.fragility_tower_id
        fragility_set_pole = FragilityCurveSet(
            self.fragilitysvc.get_dfr3_set(self.fragility_pole_id))
        assert fragility_set_pole.id == self.fragility_pole_id

        # network test
        node_id_validation = NetworkUtil.validate_network_node_ids(
            node_dataset, link_dataset, self.fromnode_fld_name,
            self.tonode_fld_name, self.nodenwid_fld_name)
        if node_id_validation is False:
            print(
                "ID in from or to node field doesn't exist in the node dataset"
            )
            sys.exit(0)

        # getting network graph and node coordinates
        is_directed_graph = True

        graph, node_coords = NetworkUtil.create_network_graph_from_field(
            link_dataset, self.fromnode_fld_name, self.tonode_fld_name,
            is_directed_graph)

        # reverse the graph to accumulate the damage toward the next node
        graph = nx.DiGraph.reverse(graph, copy=True)

        # check the connection as a list
        connection_sets = []
        if is_directed_graph:
            connection_sets = list(nx.weakly_connected_components(graph))
        else:
            connection_sets = list(nx.connected_components(graph))

        # check the first node of each network line; this first node should lead each separated network
        # also convert connection set to list
        first_node_list = []
        connection_list = []
        for c in connection_sets:
            connection_list.append(list(c))
            first_node_list.append(list(c)[0])

        intersection_list = []
        poly_list = []
        totalcost2repair = []
        totalpoles2repair = []
        totaltime2repair = []

        # construct guid field
        guid_list = []
        nodenwid_list = []
        for node_feature in node_dataset:
            # get guid column
            guid_fld_val = ''
            if self.guid_fldname.lower() in node_feature['properties']:
                guid_fld_val = node_feature['properties'][
                    self.guid_fldname.lower()]
            elif self.guid_fldname in node_feature['properties']:
                guid_fld_val = node_feature['properties'][self.guid_fldname]
            guid_list.append(guid_fld_val)

            # get nodenwid column
            nodenwid_fld_val = ''
            if self.nodenwid_fld_name.lower() in node_feature['properties']:
                nodenwid_fld_val = int(
                    node_feature['properties'][self.nodenwid_fld_name.lower()])
            elif self.nodenwid_fld_name in node_feature['properties']:
                nodenwid_fld_val = int(
                    node_feature['properties'][self.nodenwid_fld_name])
            nodenwid_list.append(nodenwid_fld_val)

        for z in range(self.nmcs):
            # placeholders, one entry per node
            nodedam = [0] * self.nnode  # number of damaged poles for each node
            noderepair = [0] * self.nnode  # repair cost for each node
            poles2repair = [0] * self.nnode  # total number of poles to repair
            cost2repairpath = [0] * self.nnode  # total repair cost for the network
            time2repairpath = [0] * self.nnode  # total repair time for the network
            nodetimerep = [0] * self.nnode
            hazardval = [[0]] * self.nnode  # hazard values
            demandtypes = [[""]] * self.nnode  # demand types
            demandunits = [[""]] * self.nnode  # demand units

            # iterate link
            for line_feature in link_dataset:
                ndamage = 0  # number of damaged poles in each link
                repaircost = 0  # repair cost value
                repairtime = 0  # repair time value
                to_node_val = ""
                linetype_val = ""
                tor_hazard_values = [0]  # random wind speed in EF
                demand_types = [""]
                demand_units = [""]

                if self.tonode_fld_name.lower() in line_feature['properties']:
                    to_node_val = line_feature['properties'][
                        self.tonode_fld_name.lower()]
                elif self.tonode_fld_name in line_feature['properties']:
                    to_node_val = line_feature['properties'][
                        self.tonode_fld_name]

                if self.linetype_fld_name in line_feature['properties']:
                    linetype_val = line_feature['properties'][
                        self.linetype_fld_name]
                elif self.linetype_fld_name.lower(
                ) in line_feature['properties']:
                    linetype_val = line_feature['properties'][
                        self.linetype_fld_name.lower()]

                line = shape(line_feature['geometry'])

                # iterate tornado
                for tornado_feature in tornado_dataset:
                    resistivity_probability = 0  # resistivity value at the point of windSpeed
                    random_resistivity = 0  # random resistivity value between 0 and one

                    sim_fld_val = ""
                    ef_fld_val = ""

                    # get EF rating and simulation number column
                    if self.tornado_sim_field_name.lower(
                    ) in tornado_feature['properties']:
                        sim_fld_val = int(tornado_feature['properties'][
                            self.tornado_sim_field_name.lower()])
                    elif self.tornado_sim_field_name in tornado_feature[
                            'properties']:
                        sim_fld_val = int(tornado_feature['properties'][
                            self.tornado_sim_field_name])

                    if self.tornado_ef_field_name.lower(
                    ) in tornado_feature['properties']:
                        ef_fld_val = tornado_feature['properties'][
                            self.tornado_ef_field_name.lower()]
                    elif self.tornado_ef_field_name in tornado_feature[
                            'properties']:
                        ef_fld_val = tornado_feature['properties'][
                            self.tornado_ef_field_name]

                    if sim_fld_val == "" or ef_fld_val == "":
                        print(
                            "unable to convert tornado simulation field value to integer"
                        )
                        sys.exit(0)

                    # get Tornado EF polygon
                    # assumes that the polygon is not a multipolygon
                    poly = shape(tornado_feature['geometry'])
                    poly_list.append(poly)

                    # loop for ef ranges
                    for f in range(self.tornado_ef_rate):
                        npoles = 0  # number of poles in tornado ef box
                        poleresist = 0  # pole's resistance value
                        # setting EF rate value string to match in the tornado dataset's attribute table
                        ef_content = "EF" + str(f)

                        # compute the intersections between link line and ef polygon
                        # also figure out the length of the line that overlapped with the EF box

                        # compute the intersection between tornado polygon and line
                        if sim_fld_val == z and ef_fld_val.lower(
                        ) == ef_content.lower():
                            if poly is not None and line is not None:
                                if poly.intersects(line):
                                    intersection = poly.intersection(line)
                                    any_point = None
                                    intersection_length = intersection.length
                                    if intersection.length > 0:
                                        # print(intersection.__class__.__name__)
                                        # calculate the length of the intersected line
                                        # since this is geographic, it has to be projected to meters to be calculated
                                        inter_length_meter = GeoUtil.calc_geog_distance_from_linestring(
                                            intersection)
                                        if isinstance(intersection,
                                                      MultiLineString):
                                            intersection_list.append(
                                                intersection)
                                            for inter_line in intersection.geoms:
                                                any_point = inter_line.centroid
                                                break
                                        elif isinstance(
                                                intersection, LineString):
                                            intersection_list.append(
                                                intersection)
                                            any_point = intersection.centroid

                                            # also, random point can be possible
                                            # by changing the following lines value 0.5
                                            # any_point = intersection.interpolate(0.5, normalized=True)

                                    if any_point is not None:
                                        # check if any_point is in the polygon
                                        if poly.contains(any_point) is False:
                                            # this rarely happens, but is handled just in case
                                            any_point = poly.centroid

                                    # check if the line is transmission (tower) or distribution (pole)
                                    if linetype_val.lower(
                                    ) == self.line_transmission:
                                        fragility_set_used = fragility_set_tower
                                    else:
                                        fragility_set_used = fragility_set_pole

                                    values_payload = [{
                                        "demands": [
                                            x.lower() for x in
                                            fragility_set_used.demand_types
                                        ],
                                        "units": [
                                            x.lower() for x in
                                            fragility_set_used.demand_units
                                        ],
                                        "loc":
                                        str(any_point.coords[0][1]) + "," +
                                        str(any_point.coords[0][0])
                                    }]

                                    h_vals = self.hazardsvc.post_tornado_hazard_values(
                                        tornado_id, values_payload,
                                        self.get_parameter('seed'))
                                    tor_hazard_values = AnalysisUtil.update_precision_of_lists(
                                        h_vals[0]["hazardValues"])
                                    demand_types = h_vals[0]["demands"]
                                    demand_units = h_vals[0]["units"]
                                    hval_dict = dict()
                                    j = 0
                                    for d in h_vals[0]["demands"]:
                                        hval_dict[d] = tor_hazard_values[j]
                                        j += 1
                                    if isinstance(
                                            fragility_set_used.
                                            fragility_curves[0], DFR3Curve):
                                        inventory_args = fragility_set_used.construct_expression_args_from_inventory(
                                            tornado_feature)
                                        resistivity_probability = \
                                            fragility_set_used.calculate_limit_state(
                                                hval_dict,
                                                inventory_type=fragility_set_used.inventory_type, **inventory_args)
                                    else:
                                        raise ValueError(
                                            "One of the fragilities is in deprecated format. This should not happen. "
                                            "If you are seeing this please report the issue."
                                        )

                                    # randomly generated capacity of each pole; 1 m/s is 2.23694 mph
                                    poleresist = resistivity_probability.get(
                                        'LS_0') * 2.23694
                                    npoles = int(
                                        round(inter_length_meter /
                                              self.pole_distance))
                                    repairtime_list = []

                                    for k in range(npoles):
                                        repair_time = 0
                                        random_resistivity = random.uniform(
                                            0, 1)

                                        if random_resistivity <= poleresist:
                                            ndamage += 1
                                            # the following code couldn't be converted from MATLAB to Python;
                                            # however, the cross product <= 3 or == 24 almost never happens
                                            # since the time and cost differs when it is pole or tower,
                                            # this could be changed by see if it is tower or pole
                                            # if numpy.cross(k, z) <= 3 or numpy.cross(k, z) == 24:
                                            if linetype_val.lower(
                                            ) == self.line_transmission:
                                                mu = self.mut
                                                sigma = self.sigmat
                                                tmu = self.tmut
                                                tsigma = self.tsigmat
                                            else:
                                                mu = self.mud
                                                sigma = self.sigmad
                                                tmu = self.tmud
                                                tsigma = self.tsigmad

                                            repairtime_list.append(
                                                numpy.random.normal(
                                                    tmu, tsigma))

                                    for k in range(ndamage):
                                        repaircost += numpy.random.lognormal(
                                            mu, sigma)

                                    # max of the repair time among different poles is taken
                                    # as the repair time for that line
                                    if len(repairtime_list) > 0:
                                        repairtime = max(repairtime_list)
                noderepair[to_node_val - 1] = repaircost
                nodedam[to_node_val - 1] = ndamage
                nodetimerep[to_node_val - 1] = repairtime
                hazardval[to_node_val - 1] = tor_hazard_values
                demandtypes[to_node_val - 1] = demand_types
                demandunits[to_node_val - 1] = demand_units

            # Calculate damage and repair cost based on network
            for i in range(len(first_node_list)):
                for j in range(len(connection_list[i])):
                    # print(connection_list[i][j], first_node_list[i])
                    pathij = list(
                        nx.all_simple_paths(graph, connection_list[i][j],
                                            first_node_list[i]))
                    poler = 0
                    coster = 0
                    timer = []
                    # print(pathij)
                    if len(pathij) > 0:
                        for k in range(len(pathij)):
                            for var1 in range(len(pathij[k])):
                                poler = poler + nodedam[pathij[k][var1]]
                                coster = coster + noderepair[pathij[k][var1]]
                                # max of the time for different lines is taken as the repair time for that path.
                                # -- path is constituted of different lines.
                                timer.append(nodetimerep[pathij[k][var1]])
                    poles2repair[connection_list[i][j]] = poler
                    cost2repairpath[connection_list[i][j]] = coster
                    if len(timer) > 0:
                        time2repairpath[connection_list[i][j]] = max(timer)
                    else:
                        time2repairpath[connection_list[i][j]] = 0
            totalcost2repair.append(cost2repairpath)
            totalpoles2repair.append(poles2repair)
            totaltime2repair.append(time2repairpath)

        # calculate mean and standard deviation
        meanpoles = numpy.mean(numpy.asarray(totalpoles2repair), axis=0)
        stdpoles = numpy.std(numpy.asarray(totalpoles2repair), axis=0)
        meancost = numpy.mean(numpy.asarray(totalcost2repair), axis=0)
        stdcost = numpy.std(numpy.asarray(totalcost2repair), axis=0)
        meantime = numpy.mean(numpy.asarray(totaltime2repair), axis=0)
        stdtime = numpy.std(numpy.asarray(totaltime2repair), axis=0)

        # create result
        ds_results = []
        damage_results = []

        for i in range(len(meanpoles)):
            ds_result = dict()
            damage_result = dict()

            ds_result['guid'] = guid_list[i]
            ds_result["meanpoles"] = meanpoles[i]
            ds_result["stdpoles"] = stdpoles[i]
            ds_result["meancost"] = meancost[i]
            ds_result["stdcost"] = stdcost[i]
            ds_result["meantime"] = meantime[i]
            ds_result["stdtime"] = stdtime[i]
            ds_result[
                'haz_expose'] = AnalysisUtil.get_exposure_from_hazard_values(
                    hazardval[i], "tornado")

            damage_result['guid'] = guid_list[i]
            damage_result["fragility_tower_id"] = self.fragility_tower_id
            damage_result["fragility_pole_id"] = self.fragility_pole_id
            damage_result["hazardtype"] = "Tornado"
            damage_result['hazardvals'] = hazardval[i]
            damage_result['demandtypes'] = demandtypes[i]
            damage_result['demandunits'] = demandunits[i]

            ds_results.append(ds_result)
            damage_results.append(damage_result)

        return ds_results, damage_results
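The heart of the Monte Carlo loop in Beispiel #56 — draw a uniform number per pole and count a failure when it falls at or below the pole's limit-state probability — can be isolated as follows. A minimal sketch with made-up parameter values, not the analysis' actual fragilities:

import random

import numpy

def sample_line_damage(n_poles, failure_prob, cost_mu, cost_sigma,
                       time_mu, time_sigma):
    """Sample damaged-pole count, repair cost, and repair time for one line."""
    n_damaged = 0
    repair_times = []
    for _ in range(n_poles):
        if random.uniform(0, 1) <= failure_prob:
            n_damaged += 1
            repair_times.append(numpy.random.normal(time_mu, time_sigma))
    repair_cost = sum(numpy.random.lognormal(cost_mu, cost_sigma)
                      for _ in range(n_damaged))
    # the max over poles is taken as the repair time for the whole line
    repair_time = max(repair_times) if repair_times else 0
    return n_damaged, repair_cost, repair_time

# illustrative values only
print(sample_line_damage(20, 0.3, cost_mu=5.0, cost_sigma=0.5,
                         time_mu=4.0, time_sigma=1.0))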
Beispiel #57
0
    nworkers = comm.Get_size()
    rank = comm.Get_rank()
    print('assigning the rank and nworkers', nworkers, rank)
    return "child"

if __name__ == "__main__":
  parser = argparse.ArgumentParser(description=('Train policy on OpenAI Gym environment '
                                                'using pepg, ses, openes, ga, cma'))

  parser.add_argument('-o', '--optimizer', type=str, help='ses, pepg, openes, ga, cma.', default='cma')
  parser.add_argument('--num_episode', type=int, default=1, help='num episodes per trial')  # was 16
  parser.add_argument('--eval_steps', type=int, default=1, help='evaluate every eval_steps step')  # was 25, each eval correspond to each worker doing num_episodes
  parser.add_argument('-n', '--num_worker', type=int, default=12)  # was 64
  parser.add_argument('-t', '--num_worker_trial', type=int, help='trials per worker', default=1)  # was 1
  parser.add_argument('--antithetic', type=int, default=1, help='set to 0 to disable antithetic sampling')
  parser.add_argument('--cap_time', type=int, default=0, help='set to 0 to disable capping timesteps to 2x of average.')
  parser.add_argument('--retrain', type=int, default=0, help='set to 0 to disable retraining every eval_steps if results suck.\n only works w/ ses, openes, pepg.')
  parser.add_argument('-s', '--seed_start', type=int, default=0, help='initial seed')
  parser.add_argument('--sigma_init', type=float, default=0.1, help='sigma_init')
  parser.add_argument('--sigma_decay', type=float, default=0.999, help='sigma_decay')
  parser.add_argument('--name', type=str, required=True, help='model name')
  parser.add_argument('--novelty_search', default=False, action='store_true', help='novelty fitness')
  parser.add_argument('--unique_id', type=str, required=True)
  parser.add_argument('--novelty_mode', type=str, default='', help='either h, z or h_concat')
  parser.add_argument('--ns_mode', type=str, default='', help='either NS, NSR or NSRA') # NSRA supposes that eval_step is set to 1

  args = parser.parse_args()
  if "parent" == mpi_fork(args.num_worker+1): os.exit()
  print('Training with fixed map:', FIXED_MAP)
  main(args)
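Beispiel #57 depends on an mpi_fork helper that relaunches the script under mpirun and reports "parent" in the launching process; the snippet does not show it. A common sketch of that pattern (the IN_MPI guard variable is an assumption, not taken from the original):

import os
import subprocess
import sys

def mpi_fork(n):
    """Relaunch this script under `mpirun -np n`; return "parent" in the
    launching process and "child" in each MPI worker."""
    if n <= 1:
        return "child"
    if os.getenv("IN_MPI") is None:
        # parent process: spawn the MPI job, then tell the caller to exit
        env = os.environ.copy()
        env["IN_MPI"] = "1"
        subprocess.check_call(
            ["mpirun", "-np", str(n), sys.executable] + sys.argv, env=env)
        return "parent"
    return "child"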
Beispiel #58
0
import judge
import os
import sys

basedir = os.path.split(os.path.realpath(__file__))[0]

rundir = basedir + '/demo_judge_py'
exe_path = rundir + '/main.py'

if os.path.exists(rundir) == False:
    os.mkdir(rundir)

if os.system(" ".join(["python3", "-m", "py_compile", "aplusb.py"])) != 0:
    print('compilation failed')
    sys.exit(1)

os.system("cp in ./demo_judge_py")
os.system("cp aplusb.py ./demo_judge_py/main.py")

res = judge.run_program(
    tl=1,  # time limit, in seconds
    ml=128,  # memory limit, in MB
    ol=128,  # output limit, in MB
    sl=1024,  # stack limit, in MB
    _in="in",  # input file
    out="out",  # output file
    err="stderr",  # error output file
    work_path=rundir,  # working directory
    _type="python3.5",  # type: default or python3.5
    show_trace_details=False,  # show detailed trace information
    allow_proc=False,  # allow fork/exec
    unsafe=False,  # unsafe mode
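The compile check in Beispiel #58 shells out to python3 -m py_compile; the standard-library py_compile module can do the same check in-process. A minimal sketch, assuming the same aplusb.py source file:

import sys
import py_compile

try:
    py_compile.compile("aplusb.py", doraise=True)
except py_compile.PyCompileError as e:
    print("compilation failed:", e.msg)
    sys.exit(1)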
Beispiel #59
0
def format_gold(component,
                raw_gold_file,
                formatted_gold_file,
                seen,
                append=False):
    delim = ";"
    with open(raw_gold_file, "r") as csvinput, open(
            formatted_gold_file, "a") if append else open(
                formatted_gold_file, "w") as csvoutput:
        writer = csv.writer(csvoutput, lineterminator="\n")
        reader = csv.reader(csvinput)
        next(reader, None)  # Skip header row
        for line in reader:
            if component == "transistor":
                # This reads in our "Hardware Gold" raw gold CSV where a line
                # is:
                (
                    doc_name,
                    part_family,
                    part_num,
                    manufacturer,
                    polarity,
                    ce_v_max,  # Collector-Emitter Voltage (MAX) (Vceo)
                    cb_v_max,  # Collector-Base Voltage (MAX)
                    eb_v_max,  # Emitter-Base Voltage (MAX)
                    c_current_max,  # Collector Current Continuous (MAX)
                    dev_dissipation,  # Total Device Dissipation
                    stg_temp_min,
                    stg_temp_max,
                    dc_gain_min,  # DC Current Gain (MIN)
                    notes,
                    annotator,
                ) = line

                # Map each attribute to its corresponding normalizer
                name_attr_norm = [
                    ("part_family", part_family, part_family_normalizer),
                    ("polarity", polarity, polarity_normalizer),
                    ("ce_v_max", ce_v_max, voltage_normalizer),
                    ("cb_v_max", cb_v_max, voltage_normalizer),
                    ("eb_v_max", eb_v_max, voltage_normalizer),
                    ("c_current_max", c_current_max, current_normalizer),
                    ("dev_dissipation", dev_dissipation,
                     dissipation_normalizer),
                    ("stg_temp_min", stg_temp_min, temperature_normalizer),
                    ("stg_temp_max", stg_temp_max, temperature_normalizer),
                    ("dc_gain_min", dc_gain_min, gain_normalizer),
                ]

                doc_name = doc_normalizer(doc_name)
                manuf = manuf_normalizer(manufacturer)
                part_num = transistor_part_normalizer(part_num)

                # Output tuples of each normalized attribute
                for name, attr, normalizer in name_attr_norm:
                    if "N/A" not in attr:
                        for a in attr.split(delim):
                            if len(a.strip()) > 0:
                                output = [
                                    doc_name,
                                    manuf,
                                    part_num,
                                    name,
                                    normalizer(a),
                                ]
                                if tuple(output) not in seen:
                                    writer.writerow(output)
                                    seen.add(tuple(output))

            elif component == "transistor2":
                # This reads in our "Small-signal Bipolar Transistors Gold Data"
                # raw gold CSV where a line is:
                delim = " "
                (
                    doc_name,
                    part_num,
                    manufacturer,
                    polarity,
                    pin_count,
                    ce_v_max,  # Collector-Emitter Voltage (MAX) (Vceo)
                    cb_v_max,  # Collector-Base Voltage (MAX)
                    eb_v_max,  # Emitter-Base Voltage (MAX)
                    c_current_max,  # Collector Current Continuous (MAX)
                    dev_dissipation,  # Total Device Dissipation
                    stg_temp_min,
                    stg_temp_max,
                    stg_temp_unit,
                    dc_gain_min,  # DC Current Gain (MIN)
                    max_freq,
                    output_resistance,
                    cb_capacitance,
                    base_resistance,
                    done,
                    _,
                ) = line

                part_family = "N/A"

                # Map each attribute to its corresponding normalizer
                name_attr_norm = [
                    ("part_family", part_family, part_family_normalizer),
                    ("polarity", polarity, polarity_normalizer),
                    ("ce_v_max", ce_v_max, voltage_normalizer),
                    ("cb_v_max", cb_v_max, voltage_normalizer),
                    ("eb_v_max", eb_v_max, voltage_normalizer),
                    ("c_current_max", c_current_max, current_normalizer),
                    ("dev_dissipation", dev_dissipation,
                     dissipation_normalizer),
                    ("stg_temp_min", stg_temp_min, transistor_temp_normalizer),
                    ("stg_temp_max", stg_temp_max, transistor_temp_normalizer),
                    ("dc_gain_min", dc_gain_min, gain_normalizer),
                ]

                doc_name = doc_normalizer(doc_name)
                manuf = manuf_normalizer(manufacturer)
                part_num = transistor_part_normalizer(part_num)

                # Output tuples of each normalized attribute
                for name, attr, normalizer in name_attr_norm:
                    if "N/A" not in attr:
                        # Only dc_gain_min has multiple values for our
                        # "Small-signal Bipolar Transistors Gold Data" CSV
                        # (where we unwisely used a space as the delimiter)
                        if name == "dc_gain_min":
                            for a in attr.split(delim):
                                if len(a.strip()) > 0:
                                    output = [
                                        doc_name,
                                        manuf,
                                        part_num,
                                        name,
                                        normalizer(a),
                                    ]
                                    if tuple(output) not in seen:
                                        writer.writerow(output)
                                        seen.add(tuple(output))
                        else:
                            if len(attr.strip()) > 0:
                                output = [
                                    doc_name,
                                    manuf,
                                    part_num,
                                    name,
                                    normalizer(attr),
                                ]
                                if tuple(output) not in seen:
                                    writer.writerow(output)
                                    seen.add(tuple(output))

            else:
                print(f"[ERROR]: Invalid hardware component {component}")
                sys.exit(1)
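The write-once bookkeeping in format_gold — a seen set of tuples guarding csv.writer — is a reusable deduplication pattern. A minimal standalone sketch (the file name is illustrative):

import csv

def write_unique_rows(rows, path):
    """Write rows to a CSV file, skipping exact duplicates."""
    seen = set()
    with open(path, "w") as f:
        writer = csv.writer(f, lineterminator="\n")
        for row in rows:
            key = tuple(row)
            if key not in seen:
                writer.writerow(row)
                seen.add(key)

write_unique_rows([["doc1", "a"], ["doc1", "a"], ["doc2", "b"]],
                  "unique_gold.csv")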
Beispiel #60
0
import os
import signal
from flask import Flask
from buzz import generator

app = Flask(__name__)

signal.signal(signal.SIGINT, lambda s, f: os._exit(0))


@app.route("/")
def generate_buzz():
    page = '<html><body><h1  style="color:red;">'
    page += generator.generate_buzz()
    page += '</h1></body></html>'
    return page


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=os.getenv('PORT'))