Code Example #1
File: PlotCOHP.py  Project: krytosmfk/ScriptsForVASP
def plot_helper_figure_assert(args, ISPIN):
    if ISPIN == 2:
        assert args.figure is None or (isinstance(args.figure, list) and len(args.figure) == 2), \
            'The number of figures should be 2!'
    elif ISPIN == 1:
        assert args.figure is None or (isinstance(args.figure, list) and len(args.figure) == 1), \
            'The number of figures should be 1!'
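A quick usage sketch (hedged: the argparse.Namespace stand-in below is only illustrative; the real args object comes from the script's own argument parser):

import argparse

args = argparse.Namespace(figure=['spin_up.png', 'spin_down.png'])   # hypothetical figure names
plot_helper_figure_assert(args, ISPIN=2)                             # passes: exactly two figures for a spin-polarized run
plot_helper_figure_assert(argparse.Namespace(figure=None), ISPIN=1)  # None is always accepted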
Code Example #2
File: word_search_2.py  Project: RobLeggett/codes
def search_in_board(words, board):
    trie = Trie.create(words+words[::-1])
    acc_hash = {}
    handled_paths = []
    pos_list = [(i,j) for i in range(len(board)) for j in range(len(board[0]))]
    while len(pos_list) > 0:
        i,j = pos_list.pop(0)
        cur_char = board[i][j]
        # e.g. ([(0, 0)], 'o')
        cur_word_point = ([(i,j)], cur_char)
        # [((1,0),'e'),((0,1),'a')]
        neighbors = find_neighbors((i,j),board)
        cur_words = acc_hash.get((i,j), [])
        # remove all the paths which have been handled
        cur_words = filter(lambda x: x[0] not in handled_paths, cur_words)
        filtered_prefixs = filter_by_prefix(
                cur_words+[cur_word_point], neighbors, trie)
        # [((0,1),'oa',[(0,0)])]
        update_acc_hash(acc_hash, filtered_prefixs)
        # add all the paths which have been handled
        map(lambda x: handled_paths.append(x[0]), cur_words)
        # add some position for new path
        for cur_word_point in filtered_prefixs:
            cur_pos = cur_word_point[0][-1]
            if cur_pos not in pos_list:
                pos_list.append(cur_pos)


    # return acc_hash
    word_points = filter_words(acc_hash)
    return map(lambda x: (x[1], x[0]), word_points)
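Note that this snippet relies on Python 2 semantics, where filter() returns a list and map() runs eagerly; under Python 3 the side-effecting map(...) above would never execute. A hedged Python 3 equivalent of those two lines, with illustrative values that are not taken from the source:

handled_paths = []
cur_words = [([(0, 0)], 'o'), ([(0, 1)], 'a')]                     # hypothetical data
cur_words = [w for w in cur_words if w[0] not in handled_paths]    # replaces filter(...)
for w in cur_words:                                                # replaces map(lambda ...)
    handled_paths.append(w[0])
print(handled_paths)   # [[(0, 0)], [(0, 1)]]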
Code Example #3
File: word_search_2.py  Project: RobLeggett/codes
def find_neighbors(point, board):
    i, j = point
    m, n = len(board), len(board[0])
    neighbor_points = [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]
    neighbor_points = filter(lambda p: check_in_board_range(p, (m,n)), 
            neighbor_points)
    return [(p, board[p[0]][p[1]]) for p in neighbor_points]
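A minimal standalone sketch of the same neighbor lookup with the bounds check inlined (check_in_board_range is not shown in the snippet, so it is folded into a comprehension here):

def find_neighbors_demo(point, board):
    i, j = point
    m, n = len(board), len(board[0])
    candidates = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
    # keep only positions that fall inside the board
    in_range = [(r, c) for r, c in candidates if 0 <= r < m and 0 <= c < n]
    return [((r, c), board[r][c]) for r, c in in_range]

board = [['o', 'a'],
         ['e', 't']]
print(find_neighbors_demo((0, 0), board))   # [((1, 0), 'e'), ((0, 1), 'a')]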
Code Example #4
File: console_pager.py  Project: pdbradley/dotfiles
  def _GetSearchCommand(self, c):
    """Consumes a search command and returns the equivalent pager command.

    The search pattern is an RE that is pre-compiled and cached for subsequent
    /<newline>, ?<newline>, n, or N commands.

    Args:
      c: The search command char.

    Returns:
      The pager command char.
    """
    self._out.write(c)
    buf = ''
    while True:
      p = self._attr.GetRawKey()
      if p in (None, '\n', '\r') or len(p) != 1:
        break
      self._out.write(p)
      buf += p
    self._out.write('\r' + ' ' * len(buf) + '\r')
    if buf:
      try:
        self._search_pattern = re.compile(buf)
      except re.error:
        # Silently ignore pattern errors.
        self._search_pattern = None
        return ''
    self._search_direction = 'n' if c == '/' else 'N'
    return 'n'
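The compile-or-fall-back behavior described in the docstring, isolated as a hedged standalone sketch (compile_or_none is a hypothetical helper, not part of the pager class):

import re

def compile_or_none(buf):
    # mirror the snippet: return a compiled pattern, or None on a bad RE
    try:
        return re.compile(buf)
    except re.error:
        return None

print(compile_or_none('foo.*'))   # a compiled pattern object
print(compile_or_none('foo('))    # None -- unbalanced parenthesis raises re.error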
Code Example #5
File: browse_norb.py  Project: 123fengye741/pylearn2
        def incr_index(step):
            assert step in (0, -1, 1), ("Step was %d" % step)

            image_index = (blank_image_index
                           if is_blank(grid_indices)
                           else object_image_index)

            if grid_dimension[0] == 5:  # i.e. the image index
                row_indices = get_row_indices(grid_indices)
                if row_indices is None:
                    image_index[0] = 0
                else:
                    # increment the image index
                    image_index[0] = add_mod(image_index[0],
                                             step,
                                             len(row_indices))
            else:
                # increment one of the grid indices
                gd = grid_dimension[0]
                grid_indices[gd] = add_mod(grid_indices[gd],
                                           step,
                                           len(grid_to_short_label[gd]))

                row_indices = get_row_indices(grid_indices)
                if row_indices is None:
                    image_index[0] = 0
                else:
                    # some grid indices have 2 images instead of 3.
                    image_index[0] = min(image_index[0], len(row_indices))
Code Example #6
File: dataset.py  Project: milkpku/BetaElephant
 def __f2tpos(self, fen, frdpos, emypos):
     self.__init_clayer()
     poslist = fen.split()[0].split('/')
     player = fen.split()[1]
     for i in range(len(poslist)):
         item = poslist[9 - i]
         index = 0
         for j in range(len(item)):
             if item[j].isupper():
                 if player == 'w':
                     frdpos[index][i][self.__chesslayer[item[j]]] = 1
                 else:
                     emypos[index][i][self.__chesslayer[item[j]]] = 1
                 self.__chesslayer[item[j]] += 1
                 index += 1
             elif item[j].islower():
                 if player == 'w':
                     emypos[index][i][self.__chesslayer[item[j]]] = 1
                 else:
                     frdpos[index][i][self.__chesslayer[item[j]]] = 1
                 self.__chesslayer[item[j]] += 1
                 index += 1
             else:
                 index += int(item[j])
     return frdpos, emypos
Code Example #7
File: GtkHelp.py  Project: AlexaVillaume/ginga
def build_info(captions):
    vbox = gtk.VBox(spacing=2)

    numrows = len(captions)
    numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
    table = gtk.Table(rows=numrows, columns=numcols)
    table.set_row_spacings(2)
    table.set_col_spacings(4)
    vbox.pack_start(table, expand=False)

    wb = Bunch.Bunch()
    row = 0
    for tup in captions:
        col = 0
        while col < numcols:
            if col < len(tup):
                tup1 = tup[col:col+2]
                w1, w2 = _make_widget(tup1, wb)
                table.attach(w1, col, col+1, row, row+1,
                             xoptions=gtk.FILL, yoptions=gtk.FILL,
                             xpadding=1, ypadding=1)
                table.attach(w2, col+1, col+2, row, row+1,
                             xoptions=gtk.FILL, yoptions=gtk.FILL,
                             xpadding=1, ypadding=1)
            col += 2
        row += 1

    vbox.show_all()

    return vbox, wb
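A hedged sketch of what a captions argument might look like: each tuple is consumed two items at a time as (title, widget-type) pairs handed to _make_widget, which is not shown, so the exact widget-type strings below are assumptions:

# Hypothetical captions list; running build_info needs a GTK session plus _make_widget/Bunch.
captions = [
    ('Name', 'label',  'Value', 'entry'),   # two widget pairs on one row
    ('Zoom', 'spinbutton'),                 # shorter rows simply leave the remaining columns empty
]
# vbox, widgets = build_info(captions)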
Code Example #8
def show_cmaps(names):
    matplotlib.rc('text', usetex=False)
    a=np.outer(np.arange(0,1,0.01),np.ones(10))   # pseudo image data
    f=figure(figsize=(10,5))
    f.subplots_adjust(top=0.8,bottom=0.05,left=0.01,right=0.99)
    # get list of all colormap names
    # this only obtains names of built-in colormaps:
    maps=[m for m in cm.datad if not m.endswith("_r")]
    # use undocumented cmap_d dictionary instead
    maps = [m for m in cm.cmap_d if not m.endswith("_r")]
    maps.sort()
    # determine number of subplots to make
    l=len(maps)+1
    if names is not None: l=len(names)  # assume all names are correct!
    # loop over maps and plot the selected ones
    i=0
    for m in maps:
        if names is None or m in names:
            i+=1
            ax = subplot(1,l,i)
            ax.axis("off")
            imshow(a,aspect='auto',cmap=cm.get_cmap(m),origin="lower")
            title(m,rotation=90,fontsize=10,verticalalignment='bottom')
#    savefig("colormaps.png",dpi=100,facecolor='gray')
    show()
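The snippet assumes pyplot-style names (figure, subplot, imshow, title, show) are already in scope, e.g. via a star import from pylab. A hedged set of explicit imports plus a usage sketch (note that cm.cmap_d is an older, undocumented matplotlib attribute and may be absent in recent releases):

import numpy as np
import matplotlib
from matplotlib import cm
from matplotlib.pyplot import figure, subplot, imshow, title, show

# show_cmaps(None)                     # plot every colormap found in cm.cmap_d
# show_cmaps(['viridis', 'plasma'])    # or only the named ones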
Code Example #9
    def read(self, filename):
        observation = []
        MPC = open(filename, "r")
        n = 0
        for ast in MPC.readlines():
            if len(ast) != 81 and len(ast) != 82:
                print "FAIL", len(ast)
                continue
            print ast[:-1]
            n += 1
            MPnumber = ast[0:5]
            Provisional = ast[5:12]
            Discovery = ast[12]
            Note1 = ast[13]
            Note2 = ast[14]
            Date = ast[15:32]
            RA = ast[32:44]
            DEC = ast[44:56]
            Mag = float(ast[65:70])
            Band = ast[70]
            Observatory = ast[77:80]

            obs = [MPnumber, Provisional, Discovery, Note1, Note2, Date,
                   RA, DEC, Mag, Band, Observatory]

            observation.append(obs)
        print "\n Numero de observaciones:", n

        self.observations.extend(observation)
Code Example #10
def testStandingsBeforeMatches():
    """
    Test to ensure players are properly represented in standings prior
    to any matches being reported.
    """
    deleteMatches()
    deletePlayers()
    registerPlayer("Melpomene Murray")
    registerPlayer("Randy Schwartz")
    standings = playerStandings()
    if len(standings) < 2:
        raise ValueError("Players should appear in playerStandings even before "
                         "they have played any matches.")
    elif len(standings) > 2:
        raise ValueError("Only registered players should appear in standings.")
    if len(standings[0]) != 4:
        raise ValueError("Each playerStandings row should have four columns.")
    [(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
    if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
        raise ValueError(
            "Newly registered players should have no matches or wins.")
    if set([name1, name2]) != set(["Melpomene Murray", "Randy Schwartz"]):
        raise ValueError("Registered players' names should appear in standings, "
                         "even if they have no matches played.")
    print "6. Newly registered players appear in the standings with no matches."
Code Example #11
File: extractabc.py  Project: haibocheng/fusion
def main():
    if len(sys.argv) == 2:
        filename = sys.argv[1]
    else:
        error('no filename passed')

    filename = os.path.abspath(filename)
    nex, ext = os.path.splitext(filename)

    if not os.path.exists(filename):
        error('cannot find file %s' % (filename,))

    if ext == ".swf":
        abcs = SwfData.from_filename(filename).read_tags((DoABC, DoABCDefine))
    else:
        error('cannot parse a %s file' % (ext,))

    for i, abc in enumerate(abcs):
        name = getattr(abc, "name", None) or "%s_%d" % (nex, i)
        abc  = getattr(abc, "abc", abc)
        data = abc.serialize(optimize=False)
        f = open(name+".abc", "w")
        f.write(data)
        f.close()
        print "wrote %s.abc, %s" % (name, sizeof_fmt(len(data)))
Code Example #12
File: popular_url.py  Project: kuhaku/atango
 def run(self, hour_range=HOUR_RANGE):
     date_range = kuzuha.build_date_filter_by_range({'hours': hour_range})
     posts = kuzuha.search('http', _filter=date_range, sort=[])
     tweet = ''
     for (url, count) in self._count_url(posts).most_common():
         if url.startswith('https://twitter.com/'):
             tweet_id = self.extract_tweet_id(url)
             if tweet_id:
                 logger.info('RT: id=%s (%s)' % (tweet_id, url))
                 if not self.debug:
                     try:
                         self.twitter.api.statuses.retweet(id=tweet_id)
                     except TwitterHTTPError as e:
                         logger.warn('%s %s' % (type(e), str(e)))
                 continue
         title = self._get_title(url)
         new_url_info = TWEET_FORMAT % (title, url, count)
         expected_length = self.calc_tweet_length(tweet, title, count)
         if expected_length < (MAX_TWEET_LENGTH - len(HASH_TAG)):
             tweet += new_url_info
         else:
             tweet = tweet[:-len(DELIMITER)] + HASH_TAG
             if tweet != HASH_TAG:
                 tweet = tweet.replace('\n', '').replace('\r', '')
                 yield tweet
             tweet = new_url_info
     if tweet:
         if tweet.endswith(DELIMITER):
             tweet = tweet[:-len(DELIMITER)]
         tweet = tweet.replace('\n', '').replace('\r', '')
         yield tweet + HASH_TAG
Code Example #13
File: 200.py  Project: chenpengcheng/leetcode
    def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int
        """

        num_rows = len( grid )
        num_cols = len( grid[ 0 ] ) if grid else 0
        num_islands = 0

        neighbors = [ ( -1, 0 ), ( 0, -1 ), ( 0, 1 ), ( 1, 0 ) ]


        def on_map( r, c ):
            return 0 <= r < num_rows and 0 <= c < num_cols

        def bfs( r, c ):
            grid[ r ][ c ] = '*'
            for dr, dc in neighbors:
                r1, c1 = r + dr, c + dc
                if on_map( r1, c1 ) and grid[ r1 ][ c1 ] == '1':
                    bfs( r1, c1 )

        for r in xrange( num_rows ):
            for c in xrange( num_cols ):
                if grid[ r ][ c ] == '1':
                    bfs( r, c )
                    num_islands += 1

        for r in xrange( num_rows ):
            for c in xrange( num_cols ):
                if grid[ r ][ c ] == '*':
                    grid[ r ][ c ] = '1'

        return num_islands
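A hedged driver, assuming the method above sits on the usual LeetCode-style Solution class (the class header is not shown) and a Python 2 interpreter because of xrange:

grid = [list("11000"),
        list("11000"),
        list("00100"),
        list("00011")]
print(Solution().numIslands(grid))   # expected: 3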
Code Example #14
File: faceAverage.py  Project: vovanmozg/average-face
def calcPoints(path):
    predictor_path = '/Users/vovanmozg/Downloads/bigdata/shape_predictor_68_face_landmarks.dat'
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    print("Processing file: {}".format(path))
    img = io.imread(path)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    if len(dets) != 1:
        return False

    print("Number of faces detected: {}".format(len(dets)))
    points = []
    for k, d in enumerate(dets):
        #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        #    k, d.left(), d.top(), d.right(), d.bottom()))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        #print(numpy.matrix([[p.x, p.y] for p in shape.parts()]))
        for p in shape.parts():
            points.append((p.x, p.y))

    return points
Code Example #15
File: netcdf.py  Project: ChadFulton/scipy
    def _write_var_metadata(self, name):
        var = self.variables[name]

        self._pack_string(name)
        self._pack_int(len(var.dimensions))
        for dimname in var.dimensions:
            dimid = self._dims.index(dimname)
            self._pack_int(dimid)

        self._write_att_array(var._attributes)

        nc_type = REVERSE[var.typecode(), var.itemsize()]
        self.fp.write(asbytes(nc_type))

        if not var.isrec:
            vsize = var.data.size * var.data.itemsize
            vsize += -vsize % 4
        else:  # record variable
            try:
                vsize = var.data[0].size * var.data.itemsize
            except IndexError:
                vsize = 0
            rec_vars = len([v for v in self.variables.values()
                            if v.isrec])
            if rec_vars > 1:
                vsize += -vsize % 4
        self.variables[name].__dict__['_vsize'] = vsize
        self._pack_int(vsize)

        # Pack a bogus begin, and set the real value later.
        self.variables[name].__dict__['_begin'] = self.fp.tell()
        self._pack_begin(0)
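The vsize += -vsize % 4 idiom above rounds the variable size up to the next multiple of four bytes; a quick standalone check (not part of the scipy code):

for vsize in (0, 1, 4, 5, 7, 8):
    padded = vsize + (-vsize % 4)
    print(vsize, '->', padded)   # 0->0, 1->4, 4->4, 5->8, 7->8, 8->8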
Code Example #16
File: hermes_run_script.py  Project: fototo/hermes
    def consolidate_results(self):

        dicts = []
        for file in os.listdir(self.results_directory):
            if file.startswith(self.data_name + '_results_'):
                f1 = open(self.results_directory+ file, 'r')
                my_dict = eval(f1.read())
                dicts.append(my_dict)

        run_nums = [' ']
        run_nums.extend([str(r) for r in range(0,len(dicts))])

        print 'Found ' + str(len(dicts)) + ' result sets'

        full_results_loc = self.results_directory + self.data_name + '_full_results_transpose.csv'

        with open(full_results_loc, 'wb') as ofile:
            writer = csv.writer(ofile, delimiter=',')
            writer.writerow(run_nums)
            for key in dicts[0].iterkeys():
                writer.writerow([key] + [d[key] for d in dicts])

        #this file has all the info - but to bring into pandas we want to transpose the data
        df = pd.read_csv(full_results_loc, index_col=0)
        df2 = df.transpose()
        #save off the results file
        full_results_loc2 = self.results_directory + self.data_name + '_full_results.csv'
        print 'Saving: ' + full_results_loc2
        df2.to_csv(full_results_loc2, delimiter=',')
Code Example #17
File: utils.py  Project: hennyere/youtube-dl
 def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace'):
     qs, _coerce_result = qs, unicode
     pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
     r = []
     for name_value in pairs:
         if not name_value and not strict_parsing:
             continue
         nv = name_value.split('=', 1)
         if len(nv) != 2:
             if strict_parsing:
                 raise ValueError("bad query field: %r" % (name_value,))
             # Handle case of a control-name with no equal sign
             if keep_blank_values:
                 nv.append('')
             else:
                 continue
         if len(nv[1]) or keep_blank_values:
             name = nv[0].replace('+', ' ')
             name = _unquote(name, encoding=encoding, errors=errors)
             name = _coerce_result(name)
             value = nv[1].replace('+', ' ')
             value = _unquote(value, encoding=encoding, errors=errors)
             value = _coerce_result(value)
             r.append((name, value))
     return r
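This appears to be a compatibility backport of Python 3's query-string parsing; on Python 3 the standard library provides the same behavior directly, which makes for a quick sanity check of the expected output (hedged, not part of the original module):

from urllib.parse import parse_qsl

print(parse_qsl('a=1&b=&c=3', keep_blank_values=True))
# [('a', '1'), ('b', ''), ('c', '3')]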
Code Example #18
File: peakPicker.py  Project: blackw1ng/pyFAI
    def contour(self, data):
        """
        Overlay a contour-plot

        @param data: 2darray with the 2theta values in radians...
        """
        if self.fig is None:
            logging.warning("No diffraction image available => not showing the contour")
        else:
            while len(self.msp.images) > 1:
                self.msp.images.pop()
            while len(self.ct.images) > 1:
                self.ct.images.pop()
            while len(self.ct.collections) > 0:
                self.ct.collections.pop()

            if self.points.dSpacing and  self.points._wavelength:
                angles = list(2.0 * numpy.arcsin(5e9 * self.points._wavelength / numpy.array(self.points.dSpacing)))
            else:
                angles = None
            try:
                xlim, ylim = self.ax.get_xlim(), self.ax.get_ylim()
                self.ct.contour(data, levels=angles)
                self.ax.set_xlim(xlim)
                self.ax.set_ylim(ylim)
                print("Visually check that the curve overlays with the Debye-Scherrer rings of the image")
                print("Check also for correct indexing of rings")
            except MemoryError:
                logging.error("Sorry but your computer does NOT have enough memory to display the 2-theta contour plot")
            self.fig.show()
Code Example #19
File: peakPicker.py  Project: blackw1ng/pyFAI
 def check(self):
     """
     check internal consistency of the class
     """
     if len(self._angles) != len(self._points):
         logger.error("in ControlPoints: length of the two arrays are not consistent!!! angle: %i points: %s ",
                        len(self._angles), len(self._points))
Code Example #20
File: run_RmpRiTau.py  Project: Evanedyr/UANeurons
    init_simulation()

    current_amplitude = -0.01
    stim_start = 1000
    stim_end = 2000

    time, soma_voltage, stim_start, stim_end = run_RmpRiTau_step(
        stim_start, stim_end, current_amplitude, plot_traces=plot_traces)

    analyse_RmpRiTau_trace(
        time,
        soma_voltage,
        stim_start,
        stim_end,
        current_amplitude)

    if plot_traces:
        import pylab
        pylab.show()

if __name__ == '__main__':
    if len(sys.argv) == 1:
        main(plot_traces=True)
    elif len(sys.argv) == 2 and sys.argv[1] == '--no-plots':
        main(plot_traces=False)
    else:
        raise Exception(
            "Script only accepts one argument: --no-plots, not %s" %
            str(sys.argv))
	def set_Tags(self, Tags):
		for depth1 in range(len(Tags)):
			if Tags[depth1].get('Key') is not None:
				self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
			if Tags[depth1].get('Value') is not None:
				self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
	def set_NetworkInterfaceIds(self, NetworkInterfaceIds):
		for depth1 in range(len(NetworkInterfaceIds)):
			if NetworkInterfaceIds[depth1] is not None:
				self.add_query_param('NetworkInterfaceId.' + str(depth1 + 1) , NetworkInterfaceIds[depth1])
	def set_PrivateIpAddresss(self, PrivateIpAddresss):
		for depth1 in range(len(PrivateIpAddresss)):
			if PrivateIpAddresss[depth1] is not None:
				self.add_query_param('PrivateIpAddress.' + str(depth1 + 1) , PrivateIpAddresss[depth1])
Code Example #24
def ScanRobots(ev):
    global panelFrame, socket, robot_adres, video_show

    ip_adress_s = sc.gethostbyname(sc.gethostname())
    print(ip_adress_s)
    ip_adress = ip_adress_s.split(".")
    ip_adress[0] = "192"
    ip_adress[1] = "168"
    ip_adress[2] = "88"
    if robot_adres != "-1":
        Stop(ev)
        print("drop robot")
        socket = context.socket(zmq.REQ)
        print(robot_adres)
        socket.connect("tcp://" + robot_adres + ":%s" % port)
        print("send", "tcp://" + robot_adres + ":%s" % port)
        try:
            socket.send_string("drop")
            print(socket.recv_string())
        except:
            pass

        robot_adres = "0"
        video_show = 0

    list_combobox = ["none"]
    dropVar = StringVar()
    dropVar.set(list_combobox[0])

    for i in range(20, 30):

        socket = context.socket(zmq.REQ)
        ip_adress_ping = str(ip_adress[0] + "." + ip_adress[1] + "." +
                             ip_adress[2] + "." + str(i))
        # socket.connect("tcp://"+ip_adress[0]+"."+ip_adress[1]+"."+ip_adress[2]+"."+str(i)+":%s" % port)
        socket.connect("tcp://" + ip_adress_ping + ":%s" % port)
        print("ping", ip_adress_ping)
        # print("send")
        try:
            socket.send_string("ping")
        except:
            pass
        time.sleep(0.7)

        s = ""
        try:
            # print("recv...")
            s = socket.recv_string(zmq.NOBLOCK)
            # print("....ok")
        except zmq.ZMQError as e:
            if e.errno == zmq.ETERM:
                return  # shutting down, quit
            print("no server")

        data = s.split("|")
        if len(data) > 1:
            s = data[0] + " " + data[1] + " " + str(ip_adress_ping) + "\n"
            if len(s) > 2:
                print(FgMagenta + s + Reset)

            if data[1] == ip_adress_s:
                dropVar.set(ip_adress_ping)
                robot_adres = ip_adress_ping
                socket = context.socket(zmq.REQ)
                socket.connect("tcp://" + robot_adres + ":%s" % port)
                # data[1] = "Connected"
                list_combobox.append(data[1])
                connect_keyboard(robot_adres)
                print(FgBlue + "Connected to robot: " + BgGreen + data[0] +
                      Reset)
                # don't search any further
                break

            if data[1] == "0":
                data[1] = ip_adress_ping
                list_combobox.append(data)

    # combobox = OptionMenu(panelFrame, dropVar, *list)
    # combobox.place(x=250, y=10, width=250, height=40)  # position the Combobox on the form

    # var = StringVar()
    # combobox = OptionMenu(panelFrame, dropVar, *(list), command=OptionMenu_SelectionEvent)
    combobox = OptionMenu(panelFrame,
                          dropVar,
                          *(list_combobox),
                          command=OptionMenu_SelectionEvent)
    combobox.place(x=260, y=10, width=150,
                   height=40)  # position the Combobox on the form

    # fn = tkFileDialog.SaveAs(root, filetypes=[('*.py files', '.py')]).show()
    # if fn == '':
    #     return
    # if not fn.endswith(".txt"):
    #     fn += ".txt"
    # open(fn, 'wt').write(textbox.get('1.0', 'end'))
    pass
Code Example #25
File: campaign.py  Project: delta/mailer
def CampaignFactory(from_addr, subject, mailing_list,
                    template_file, global_vars_file=""):
    '''
    Factory to construct BulkMailCampaign or TransactionMailCampaign object

    Creates the appropriate Campaign object based on the input parameters.

    Args:
        from_addr (str): The "From" address for the mail being sent.
        subject (str): The subject string for the mails being sent (can be template).
        mailing_list (list/str): A list of strings or a string (filename).
            If it's a list of strings, the strings are taken as email-ids.
            Otherwise, if a string is passed, it is taken as the filename of
            the mailing-list file (.ml file). In the latter case, TransactionMailCampaign
            is returned; in the former case, BulkMailCampaign is returned.
        template_file (str): The filename that contains the template that is to
            be used to send the mails.
        global_vars_file (Optional[str]): The filename that contains the global variables.

    Returns:
        Campaign: BulkMailCampaign or TransactionMailCampaign based on whether
            mailing_list is a list of email-ids, or is the filename of the .ml
            file.

    Raises:
        Exception: If template file doesn't exist.
            If global_vars_file is given and the file doesn't exist.
            If the mailing-list file doesn't exist (if mailing_list is filename).
            If the first field of the mailing-list file isn't "email".

    TODO (thakkarparth007): Use better exception-names.
    '''
    # The following variables are used to initialise the appropriate class
    #
    #   template_str : The template string read from the template file
    #   global_vars  : The dictionary containing the global variables
    #   mailing_list : The list of dictionaries that contains the
    #                 mailing list

    # read the template file

    if os.path.exists(template_file) is False:
        raise Exception("Template list file ({0}) doesn't exist"
                        .format(os.path.abspath(template_file)))

    ftmpl = open(template_file, "r")
    template_str = ftmpl.read()
    ftmpl.close()

    # read the global vars file
    if global_vars_file != "" and os.path.exists(global_vars_file) is False:
        raise Exception("Global variables file ({0}) doesn't exist"
                        .format(os.path.abspath(global_vars_file)))

    contents = ""
    if global_vars_file != "":
        fglbl = open(global_vars_file, "r")
        contents = fglbl.read()
        fglbl.close()

    global_vars = dict([
        (line.split("=")[0].strip(), line.split("=")[1])
        for line in contents.splitlines() if line.rstrip() != ""
    ])

    # read the mailing_list if it is a string
    # in any case, convert it to a list of
    # dictionaries of form:
    #   { "email": "...", "variables": "" }
    if isinstance(mailing_list, str):
        mailing_list_file = mailing_list

        if os.path.exists(mailing_list_file) is False:
            raise Exception("Mailing list file ({0}) doesn't exist"
                            .format(os.path.abspath(mailing_list_file)))

        fml = open(mailing_list_file, "r")
        contents = fml.read().splitlines()
        fml.close()

        headers, rows = contents[0].split("\t"), contents[1:]

        if headers[0] != "email":
            raise Exception("First field of mailing list file is "
                            "required to be `email`")

        mailing_list = []
        for row in rows:
            if row.strip() == "":
                continue
            row = row.split("\t")
            if len(row) != len(headers):
                raise Exception("%s: Mismatch in number of columns." % (mailing_list_file,))
            mailing_list.append({
                "email": row[0],
                "variables": dict([
                    (headers[i], row[i])
                    for i in range(1, len(headers))
                ])
            })

        mail_constructor = TransactionMailCampaign

    else:
        mail_constructor = BulkMailCampaign

    return mail_constructor(from_addr, subject, mailing_list, template_str, global_vars)
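A hedged usage sketch; the file names and subject below are illustrative only, and the .ml file is assumed to be tab-separated with "email" as its first header field, as the docstring requires:

# Hypothetical call: a filename for mailing_list yields a TransactionMailCampaign,
# while a plain list of addresses would yield a BulkMailCampaign.
campaign = CampaignFactory(
    from_addr='noreply@example.com',
    subject='Hello',
    mailing_list='recipients.ml',     # or: ['a@example.com', 'b@example.com']
    template_file='welcome.tmpl',
)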
Code Example #26
def robot_recive_work():
    global socket, video_show2, recive_flag, started_flag, flag_inet_work, ic, selected_file_no_dir, selected_file
    color_log = FgBlack
    # ic = InetConnection.InetConnect(sc.gethostname() + "_r", "client")
    # ic.connect()
    time_recive = time.time()
    while 1:
        # print("recive_flag", recive_flag)
        # time.sleep(1)
        if recive_flag == 1:
            message_s = ""
            if flag_inet_work == True:
                message_s = ic.send_and_wait_answer(robot_adres_inet, "d")
                time_recive = time.time()
                pass
            else:
                # try:
                #     #print("send..")
                #     socket.send_string("data")
                #     message_s = str(socket.recv_string())
                #     #print("recive ok")
                # except:
                #     pass

                t = time.time()
                while 1:
                    f = 0
                    try:
                        socket.send_string("data", zmq.NOBLOCK)  # zmq.NOBLOCK
                        f = 1
                    except zmq.ZMQError as e:
                        if e.errno == zmq.ETERM:
                            # print("error", e)
                            pass
                    if f == 1:
                        break
                    if time.time() - t > 1:
                        break
                message_s = ""
                t = time.time()
                while 1:
                    f = 0
                    try:
                        message_s = socket.recv_string(zmq.NOBLOCK)
                        time_recive = time.time()
                        f = 1
                    except zmq.ZMQError as e:
                        if e.errno == zmq.ETERM:
                            pass
                            # print("error", e)
                    if message_s != "" or f == 1:
                        break

                    if time.time() - t > 1:
                        break
            # print(message_s.encode('utf-8'))
            # message_s=message_s.replace("/n", "")

            if message_s == None:
                time.sleep(0.01)
                continue

            if time.time() - time_recive > 10:
                print("lost connect ..", time.time() - time_recive)
                if flag_inet_work == True:
                    ic.disconnect()
                    ic.connect()
                    pass
                else:
                    time_recive = time.time()
                    socket.close()
                    socket = context.socket(zmq.REQ)
                    socket.connect("tcp://" + robot_adres + ":%s" % port)
                    socket.send_string("take|" + robot_adres)
                    print("Connected to robot: " + BgGreen +
                          socket.recv_string() + Reset)
                print("reconected")
                if started_flag:
                    recive_flag = 1
                else:
                    recive_flag = 0

            if message_s.find("stoping") >= 0:
                recive_flag = -1

            if message_s.find("STOPED") >= 0 or message_s.find("stoping") >= 0:

                if started_flag == 1:
                    message_s = message_s.replace("STOPED",
                                                  FgRed + "STOPED" + Reset)
                    print(color_log + message_s.rstrip())

                # print("reciv1_stope")
                # message_s = ""
                # video_show2 = -1
                color_log = FgBlack

                # if video_show2 != 3:
                #     video_show2 = -1
                video_show2 = -1
                time.sleep(0.3)
                # while video_show2 != 3 or video_show2 != 0:
                #     print("stop_wideo", video_show2 )
                #     time.sleep(0.3)
                cv2.destroyAllWindows()

                recive_flag = 0
                started_flag = 0
                time.sleep(0.01)
                continue

            if message_s != "" and len(message_s) > 0:
                # trim the end of the message (special character)
                if message_s.find("Traceback") >= 0 or message_s.find(
                        "Error:") >= 0:
                    color_log = FgRed
                    video_show2 = -1

                print(color_log + message_s.rstrip())

            time.sleep(0.01)

        if recive_flag == -1:
            # print("reciv-1")
            color_log = FgBlack
            ret = ""
            if flag_inet_work == True:
                ret = ic.send_and_wait_answer(robot_adres_inet, "stop")
                ic.send_and_wait_answer(robot_adres_inet, "stopvideo")
                pass
            else:
                try:
                    socket.send_string("stop")
                    ret = socket.recv_string()
                except:
                    pass
            # if started_flag == 1:
            #     print(ret.replace("STOPED", FgRed + "STOPED" + Reset))
            # recive_flag = 0
            recive_flag = 1
            time.sleep(0.01)

        if recive_flag == 3:
            if flag_inet_work:
                time.sleep(0.5)
                ic.send_and_wait_answer(robot_adres_inet,
                                        "start|" + selected_file_no_dir)
                time.sleep(0.5)
                ic.send_and_wait_answer(robot_adres_inet, "startvideo")
            else:
                res = 0
                try:
                    socket.send_string("start|" + selected_file_no_dir)
                    res = socket.recv_string()
                except:
                    pass
                if res == 0:
                    print(FgRed + "Start fail... try again" + Reset)
            recive_flag = 0

        if recive_flag == 4:
            if flag_inet_work == True:
                with open(selected_file, 'rb') as myfile:
                    data = myfile.read()
                    # print(ic.send_and_wait_answer(robot_adres_inet, "file|" + selected_file_no_dir + "|" + data.decode("utf-8")))
                #        z = zlib.compress(data, 1).decode("utf-8")
                ic.send_and_wait_answer(
                    robot_adres_inet,
                    "file|" + selected_file_no_dir + "|" + str(
                        base64.b64encode(zlib.compress(data,
                                                       1)).decode("utf-8")))
            else:
                res = 0
                try:
                    socket.send_string("file|" + selected_file_no_dir)
                    res = socket.recv_string()
                except:
                    pass
                if res == 0:
                    print(FgRed + "Fail send name file.. try again" + Reset)
                    return
            recive_flag = 0

        if recive_flag == 5:
            if flag_inet_work == True:
                pass
            else:
                with open(selected_file, 'rb') as myfile:
                    data = myfile.read()
                # print(data)
                # s1 = fastlz.compress(data)
                # s2 = fastlz.decompress(s1)
                # print(len(data), len(s1), len(s2))

                # data = zlib.compress(data, 1)
                data = zlib.compress(data, 1)
                res = 0
                try:
                    socket.send(data)
                    res = socket.recv_string()
                except:
                    pass

                if res == 0:
                    print(FgRed + "Fail send file.. try again" + Reset)
                    return
            recive_flag = 0

        # if recive_flag == 0:
        #     #print("recive flag=0")
        #     time.sleep(0.05)

        time.sleep(0.05)
Code Example #27
print(aList[::])  # returns a new list with every element of the original; with two colons, the third parameter is the step
print('---------1----------')
print(aList[::-1])  # returns a new list with all elements in reverse order, i.e. the list reversed
print('---------2----------')
print(aList[::2])  # take every other element: the elements at even indices
print('---------3--------')
print(aList[1::2])  # take every other element: the elements at odd indices
print('---------4---------')
print(aList[3:6])  # specify the start and end positions of the slice
print('---------5--------')
print(aList[0:100])  # when the end position exceeds the list length, the slice is truncated at the tail, effectively aList[0:len(aList)]
print('---------6---------')
print(aList[100:])  # when the start position exceeds the list length, an empty list is returned


aList[len(aList):] = [9]  # append an element at the tail of the list
print('------7------------')
aList[:0] = [1, 2]  # insert elements at the head of the list
print(aList)
aList = [1, 2] + aList  # these two approaches are equivalent
print(aList)
print('-----------8---------')

aList[3:3] = [4]  # insert an element in the middle of the list
aList[:3] = [1, 2]  # replace list elements; here both sides of the assignment have the same length
aList[3:] = [4, 5, 6]  # for a plain slice the two sides may differ in length
aList[::2] = [0] * 3  # modify every other element
print(aList)
aList[::2] = ['a', 'b', 'c']  # modify every other element
print(aList)
# aList[::2] = [1, 2]  # the left-hand slice is non-contiguous (extended), so both sides must have the same length
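A short, self-contained demonstration of the extended-slice rule noted in the last two comments (the snippet assumes an aList initialized earlier in the original tutorial; the demo below uses its own list):

demo = [1, 2, 3, 4, 5, 6]
demo[::2] = ['a', 'b', 'c']    # 3 slots on the left, 3 values on the right -> OK
print(demo)                    # ['a', 2, 'b', 4, 'c', 6]
try:
    demo[::2] = [0, 0]         # 3 slots, 2 values -> ValueError
except ValueError as exc:
    print(exc)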
Code Example #28
    def testLoadSettingsFromDict(self):
        """
        Tests the OneLogin_Saml2_Settings Constructor.
        Case load setting from dict
        """
        settings_info = self.loadSettingsJSON()
        settings = OneLogin_Saml2_Settings(settings_info)
        self.assertEqual(len(settings.get_errors()), 0)

        del settings_info['contactPerson']
        del settings_info['organization']
        settings = OneLogin_Saml2_Settings(settings_info)
        self.assertEqual(len(settings.get_errors()), 0)

        del settings_info['security']
        settings = OneLogin_Saml2_Settings(settings_info)
        self.assertEqual(len(settings.get_errors()), 0)

        del settings_info['sp']['NameIDFormat']
        del settings_info['idp']['x509cert']
        settings_info['idp'][
            'certFingerprint'] = 'afe71c28ef740bc87425be13a2263d37971daA1f9'
        settings = OneLogin_Saml2_Settings(settings_info)
        self.assertEqual(len(settings.get_errors()), 0)

        settings_info['idp']['singleSignOnService']['url'] = 'invalid_url'
        try:
            settings_2 = OneLogin_Saml2_Settings(settings_info)
            self.assertNotEqual(len(settings_2.get_errors()), 0)
        except Exception as e:
            self.assertIn('Invalid dict settings: idp_sso_url_invalid', str(e))

        settings_info['idp']['singleSignOnService'][
            'url'] = 'http://invalid_domain'
        try:
            settings_3 = OneLogin_Saml2_Settings(settings_info)
            self.assertNotEqual(len(settings_3.get_errors()), 0)
        except Exception as e:
            self.assertIn('Invalid dict settings: idp_sso_url_invalid', str(e))

        del settings_info['sp']
        del settings_info['idp']
        try:
            settings_4 = OneLogin_Saml2_Settings(settings_info)
            self.assertNotEqual(len(settings_4.get_errors()), 0)
        except Exception as e:
            self.assertIn('Invalid dict settings', str(e))
            self.assertIn('idp_not_found', str(e))
            self.assertIn('sp_not_found', str(e))

        settings_info = self.loadSettingsJSON()
        settings_info['security']['authnRequestsSigned'] = True
        settings_info['custom_base_path'] = dirname(__file__)
        try:
            settings_5 = OneLogin_Saml2_Settings(settings_info)
            self.assertNotEqual(len(settings_5.get_errors()), 0)
        except Exception as e:
            self.assertIn(
                'Invalid dict settings: sp_cert_not_found_and_required',
                str(e))

        settings_info = self.loadSettingsJSON()
        settings_info['security']['nameIdEncrypted'] = True
        del settings_info['idp']['x509cert']
        try:
            settings_6 = OneLogin_Saml2_Settings(settings_info)
            self.assertNotEqual(len(settings_6.get_errors()), 0)
        except Exception as e:
            self.assertIn(
                'Invalid dict settings: idp_cert_not_found_and_required',
                str(e))
Code Example #29
File: qos.py  Project: AbhishekUoR/AxBench
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

def printUsage():
	print "Usage: python qos.py <original file> <nn file>"
	exit(1)
pass


if(len(sys.argv) != 3):
	printUsage()

origFilename 	= sys.argv[1]
nnFilename		= sys.argv[2]

origLines 		= open(origFilename).readlines()
nnLines			= open(nnFilename).readlines()


e = 0.0
absError = 0.0
MaxAbsoulteErrorVar1=0.0
#MaxAbsoulteErrorVar2=0.0
AbsoulteError=0.0
index1=1
Code Example #30
File: run_classifier.py  Project: HuXiangkun/dgl-bert
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--overwrite_output_dir',
                        action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    args.device = device

    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    output_mode = output_modes[task_name]

    label_list = processor.get_labels()
    num_labels = len(label_list)

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
    if args.local_rank == 0:
        torch.distributed.barrier()

    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0

    if args.do_train:
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()

        # Prepare data loader
        train_examples = processor.get_train_examples(args.data_dir)
        cached_train_features_file = os.path.join(args.data_dir, 'train_{0}_{1}_{2}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(),
                        str(args.max_seq_length),
                        str(task_name)))
        try:
            with open(cached_train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        except:
            train_features = convert_examples_to_features(
                train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                logger.info("  Saving train features into cached file %s", cached_train_features_file)
                with open(cached_train_features_file, "wb") as writer:
                    pickle.dump(train_features, writer)

        label_dtype = None
        if output_mode == "classification":
            label_dtype = torch.long
        elif output_mode == "regression":
            label_dtype = torch.float

        if args.local_rank == -1:
            train_sampler = RandomSampler
        else:
            train_sampler = DistributedSampler

        all_input_ids = [f.input_ids for f in train_features]
        all_segment_ids = [f.segment_ids for f in train_features]
        all_label_ids = [f.label_id for f in train_features]

        train_dataloader = SentPairClsDataLoader(all_input_ids,
                                                 all_segment_ids,
                                                 all_label_ids,
                                                 args.train_batch_size,
                                                 train_sampler,
                                                 device,
                                                 label_dtype)

        num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        # Prepare optimizer

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
        if args.fp16:
            try:
                from apex.optimizers import FP16_Optimizer
                from apex.optimizers import FusedAdam
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

            optimizer = FusedAdam(optimizer_grouped_parameters,
                                  lr=args.learning_rate,
                                  bias_correction=False,
                                  max_grad_norm=1.0)
            if args.loss_scale == 0:
                optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
            else:
                optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                                 t_total=num_train_optimization_steps)

        else:
            optimizer = BertAdam(optimizer_grouped_parameters,
                                 lr=args.learning_rate,
                                 warmup=args.warmup_proportion,
                                 t_total=num_train_optimization_steps)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)

        model.train()
        for epoch in range(int(args.num_train_epochs)):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(train_dataloader):
                # graph = batch[0]
                # batch = tuple(t.to(device) for t in batch[1:])
                graph, label_ids = batch

                # define a new function to compute loss values for both output_modes
                logits = model(graph)

                if output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
                elif output_mode == "regression":
                    loss_fct = MSELoss()
                    loss = loss_fct(logits.view(-1), label_ids.view(-1))

                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                if step % 20 == 0:
                    print(f"Epoch {epoch}, step {step}, loss {loss.cpu().data.numpy()}, lr {optimizer.get_lr()[0]}")

                tr_loss += loss.item()
                nb_tr_examples += label_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
                    if args.local_rank in [-1, 0]:
                        tb_writer.add_scalar('lr', optimizer.get_lr()[0], global_step)
                        tb_writer.add_scalar('loss', loss.item(), global_step)

    ### Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    ### Example:
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)

        # Good practice: save your training arguments together with the trained model
        output_args_file = os.path.join(args.output_dir, 'training_args.bin')
        torch.save(args, output_args_file)

    ### Evaluation
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        if os.path.exists(model_file):
            model.load_state_dict(torch.load(model_file, map_location=device))

        eval_examples = processor.get_dev_examples(args.data_dir)
        cached_eval_features_file = os.path.join(args.data_dir, 'dev_{0}_{1}_{2}'.format(
            list(filter(None, args.bert_model.split('/'))).pop(),
            str(args.max_seq_length),
            str(task_name)))
        try:
            with open(cached_eval_features_file, "rb") as reader:
                eval_features = pickle.load(reader)
        except Exception:
            # Cache missing or unreadable: rebuild the eval features from the raw examples.
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            if args.local_rank == -1 or torch.distributed.get_rank() == 0:
                logger.info("  Saving eval features into cached file %s", cached_eval_features_file)
                with open(cached_eval_features_file, "wb") as writer:
                    pickle.dump(eval_features, writer)

        all_input_ids = [f.input_ids for f in eval_features]
        all_segment_ids = [f.segment_ids for f in eval_features]
        all_label_ids = [f.label_id for f in eval_features]

        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        label_dtype = None
        if output_mode == "classification":
            label_dtype = torch.long
        elif output_mode == "regression":
            label_dtype = torch.float

        # Run prediction for full data
        if args.local_rank == -1:
            eval_sampler = SequentialSampler
        else:
            eval_sampler = DistributedSampler  # Note that this sampler samples randomly

        eval_dataloader = SentPairClsDataLoader(all_input_ids,
                                                all_segment_ids,
                                                all_label_ids,
                                                args.eval_batch_size,
                                                eval_sampler,
                                                device,
                                                label_dtype)

        model.eval()
        eval_loss = 0
        nb_eval_steps = 0
        preds = []
        out_label_ids = None

        for graph, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
            with torch.no_grad():
                logits = model(graph)

            # compute eval loss and other metrics required by the task
            if output_mode == "classification":
                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
            elif output_mode == "regression":
                loss_fct = MSELoss()
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))

            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
                out_label_ids = label_ids.detach().cpu().numpy()
            else:
                preds[0] = np.append(
                    preds[0], logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(
                    out_label_ids, label_ids.detach().cpu().numpy(), axis=0)

        eval_loss = eval_loss / nb_eval_steps
        preds = preds[0]
        if output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(task_name, preds, out_label_ids)

        loss = tr_loss/global_step if args.do_train else None

        result['eval_loss'] = eval_loss
        result['global_step'] = global_step
        result['loss'] = loss

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        # hack for MNLI-MM
        if task_name == "mnli":
            task_name = "mnli-mm"
            processor = processors[task_name]()

            if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:
                raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir + '-MM'))
            if not os.path.exists(args.output_dir + '-MM'):
                os.makedirs(args.output_dir + '-MM')

            eval_examples = processor.get_dev_examples(args.data_dir)
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
            logger.info("***** Running evaluation *****")
            logger.info("  Num examples = %d", len(eval_examples))
            logger.info("  Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)

            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

            model.eval()
            eval_loss = 0
            nb_eval_steps = 0
            preds = []
            out_label_ids = None

            for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                label_ids = label_ids.to(device)

                with torch.no_grad():
                    logits = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=None)

                loss_fct = CrossEntropyLoss()
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))

                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if len(preds) == 0:
                    preds.append(logits.detach().cpu().numpy())
                    out_label_ids = label_ids.detach().cpu().numpy()
                else:
                    preds[0] = np.append(
                        preds[0], logits.detach().cpu().numpy(), axis=0)
                    out_label_ids = np.append(
                        out_label_ids, label_ids.detach().cpu().numpy(), axis=0)

            eval_loss = eval_loss / nb_eval_steps
            preds = preds[0]
            preds = np.argmax(preds, axis=1)
            result = compute_metrics(task_name, preds, out_label_ids)

            loss = tr_loss/global_step if args.do_train else None

            result['eval_loss'] = eval_loss
            result['global_step'] = global_step
            result['loss'] = loss

            output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt")
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
コード例 #31
0
def main():
    def print_stacktrace_if_debug():
        debug_flag = False
        if 'args' in vars() and 'debug' in args:
            debug_flag = args.debug

        if debug_flag:
            traceback.print_exc(file=sys.stdout)
            error(traceback.format_exc())
    try:
        description = ['~~~CRISPRessoWGS~~~','-Analysis of CRISPR/Cas9 outcomes from WGS data-']
        wgs_string = r'''
 ____________
|     __  __ |
||  |/ _ (_  |
||/\|\__)__) |
|____________|
        '''
        print(CRISPRessoShared.get_crispresso_header(description,wgs_string))

        parser = CRISPRessoShared.getCRISPRessoArgParser(parserTitle = 'CRISPRessoWGS Parameters',requiredParams={})

        #tool specific optional
        parser.add_argument('-b','--bam_file', type=str,  help='WGS aligned bam file', required=True,default='bam filename' )
        parser.add_argument('-f','--region_file', type=str,  help='Regions description file. A BED format file containing the regions to analyze, one per line. The REQUIRED\
        columns are: chr_id (chromosome name), bpstart (start position), bpend (end position); the optional columns are: name (a unique identifier for the region), guide_seq, expected_hdr_amplicon_seq, coding_seq. See CRISPResso help for more details on these last 3 parameters.', required=True)
        parser.add_argument('-r','--reference_file', type=str, help='A FASTA format reference file (for example hg19.fa for the human genome)', default='',required=True)
        parser.add_argument('--min_reads_to_use_region',  type=float, help='Minimum number of reads that align to a region to perform the CRISPResso analysis', default=10)
        parser.add_argument('--skip_failed',  help='Continue with pooled analysis even if one sample fails',action='store_true')
        parser.add_argument('--gene_annotations', type=str, help='Gene Annotation Table from UCSC Genome Browser Tables (http://genome.ucsc.edu/cgi-bin/hgTables?command=start), \
        please select as table "knownGene", as output format "all fields from selected table" and as file returned "gzip compressed"', default='')
        parser.add_argument('-p','--n_processes',type=int, help='Specify the number of processes to use for the quantification.\
        Please use with caution since increasing this parameter will increase the memory required to run CRISPResso.',default=1)
        parser.add_argument('--crispresso_command', help='CRISPResso command to call',default='CRISPResso')

        args = parser.parse_args()

        crispresso_options = CRISPRessoShared.get_crispresso_options()
        options_to_ignore = set(['fastq_r1','fastq_r2','amplicon_seq','amplicon_name','output_folder','name'])
        crispresso_options_for_wgs = list(crispresso_options-options_to_ignore)

        info('Checking dependencies...')

        if check_samtools() and check_bowtie2():
            info('\n All the required dependencies are present!')
        else:
            sys.exit(1)

        #check files
        check_file(args.bam_file)

        check_file(args.reference_file)

        check_file(args.region_file)

        if args.gene_annotations:
            check_file(args.gene_annotations)


        #INIT
        get_name_from_bam=lambda  x: os.path.basename(x).replace('.bam','')

        if not args.name:
            database_id='%s' % get_name_from_bam(args.bam_file)
        else:
            database_id=args.name


        OUTPUT_DIRECTORY='CRISPRessoWGS_on_%s' % database_id

        if args.output_folder:
            OUTPUT_DIRECTORY = os.path.join(os.path.abspath(args.output_folder), OUTPUT_DIRECTORY)

        _jp = lambda filename: os.path.join(OUTPUT_DIRECTORY, filename) #handy function to put a file in the output directory

        try:
            info('Creating Folder %s' % OUTPUT_DIRECTORY)
            os.makedirs(OUTPUT_DIRECTORY)
            info('Done!')
        except OSError:
            warn('Folder %s already exists.' % OUTPUT_DIRECTORY)

        log_filename = _jp('CRISPRessoWGS_RUNNING_LOG.txt')
        logging.getLogger().addHandler(logging.FileHandler(log_filename))

        with open(log_filename, 'w+') as outfile:
            outfile.write('[Command used]:\n%s\n\n[Execution log]:\n' % ' '.join(sys.argv))

        crispresso2WGS_info_file = os.path.join(OUTPUT_DIRECTORY,'CRISPResso2WGS_info.pickle')
        crispresso2_info = {} #keep track of all information for this run to be pickled and saved at the end of the run
        crispresso2_info['version'] = CRISPRessoShared.__version__
        crispresso2_info['args'] = deepcopy(args)

        crispresso2_info['log_filename'] = os.path.basename(log_filename)

        def rreplace(s, old, new):
            # Replace only the last occurrence of `old` in `s`.
            li = s.rsplit(old, 1)
            return new.join(li)

        bam_index = ''
        #check if bam has the index already
        if os.path.exists(rreplace(args.bam_file,".bam",".bai")):
            info('Index file for input .bam file exists, skipping generation.')
            bam_index = rreplace(args.bam_file, ".bam", ".bai")
        elif os.path.exists(args.bam_file+'.bai'):
            info('Index file for input .bam file exists, skipping generation.')
            bam_index = args.bam_file+'.bai'
        else:
            info('Creating index file for input .bam file...')
            sb.call('samtools index %s ' % (args.bam_file),shell=True)
            bam_index = args.bam_file+'.bai'


        #load gene annotation
        if args.gene_annotations:
            info('Loading gene coordinates from annotation file: %s...' % args.gene_annotations)
            try:
                df_genes=pd.read_table(args.gene_annotations,compression='gzip')
                df_genes.txEnd=df_genes.txEnd.astype(int)
                df_genes.txStart=df_genes.txStart.astype(int)
                df_genes.head()
            except Exception:
                info('Failed to load the gene annotations file.')


        #Load and validate the REGION FILE
        df_regions=pd.read_csv(args.region_file,names=[
                'chr_id','bpstart','bpend','Name','sgRNA',
                'Expected_HDR','Coding_sequence'],comment='#',sep='\t',dtype={'Name':str})


        #remove empty amplicons/lines
        df_regions.dropna(subset=['chr_id','bpstart','bpend'],inplace=True)

        df_regions.Expected_HDR=df_regions.Expected_HDR.apply(capitalize_sequence)
        df_regions.sgRNA=df_regions.sgRNA.apply(capitalize_sequence)
        df_regions.Coding_sequence=df_regions.Coding_sequence.apply(capitalize_sequence)


        #check or create names
        for idx,row in df_regions.iterrows():
            if pd.isnull(row.Name):
                df_regions.loc[idx,'Name']='_'.join(map(str,[row['chr_id'],row['bpstart'],row['bpend']]))


        if not len(df_regions.Name.unique())==df_regions.shape[0]:
            raise Exception('The amplicon names should be all distinct!')

        df_regions=df_regions.set_index('Name')
        #df_regions.index=df_regions.index.str.replace(' ','_')
        df_regions.index=df_regions.index.to_series().str.replace(' ','_')

        #extract sequence for each region
        uncompressed_reference=args.reference_file

        if os.path.exists(uncompressed_reference+'.fai'):
            info('The index for the reference fasta file is already present! Skipping generation.')
        else:
            info('Indexing reference file... Please be patient!')
            sb.call('samtools faidx %s >>%s 2>&1' % (uncompressed_reference,log_filename),shell=True)

        df_regions['sequence']=df_regions.apply(lambda row: get_region_from_fa(row.chr_id,row.bpstart,row.bpend,uncompressed_reference),axis=1)

        for idx,row in df_regions.iterrows():

            if not pd.isnull(row.sgRNA):

                cut_points=[]

                for current_guide_seq in row.sgRNA.strip().upper().split(','):

                    wrong_nt=find_wrong_nt(current_guide_seq)
                    if wrong_nt:
                        raise NTException('The sgRNA sequence %s contains wrong characters:%s'  % (current_guide_seq, ' '.join(wrong_nt)))

                    offset_fw=args.quantification_window_center+len(current_guide_seq)-1
                    offset_rc=(-args.quantification_window_center)-1
                    cut_points+=[m.start() + offset_fw for \
                                m in re.finditer(current_guide_seq,  row.sequence)]+[m.start() + offset_rc for m in re.finditer(CRISPRessoShared.reverse_complement(current_guide_seq),  row.sequence)]

                if not cut_points:
                    df_regions.loc[idx,'sgRNA']=''

        df_regions['bpstart'] = pd.to_numeric(df_regions['bpstart'])
        df_regions['bpend'] = pd.to_numeric(df_regions['bpend'])

        df_regions.bpstart=df_regions.bpstart.astype(int)
        df_regions.bpend=df_regions.bpend.astype(int)

        if args.gene_annotations:
            df_regions=df_regions.apply(lambda row: find_overlapping_genes(row, df_genes),axis=1)


        #extract reads with samtools in that region and create a bam
        #create a fasta file with all the trimmed reads
        info('\nProcessing each region...')

        ANALYZED_REGIONS=_jp('ANALYZED_REGIONS/')
        if not os.path.exists(ANALYZED_REGIONS):
            os.mkdir(ANALYZED_REGIONS)

        df_regions['n_reads']=0
        df_regions['bam_file_with_reads_in_region']=''
        df_regions['fastq.gz_file_trimmed_reads_in_region']=''

        for idx,row in df_regions.iterrows():

            if row['sequence']:

                fastq_gz_filename=os.path.join(ANALYZED_REGIONS,'%s.fastq.gz' % clean_filename('REGION_'+str(idx)))
                bam_region_filename=os.path.join(ANALYZED_REGIONS,'%s.bam' % clean_filename('REGION_'+str(idx)))

                #create place-holder fastq files
                open(fastq_gz_filename, 'w+').close()

                region='%s:%d-%d' % (row.chr_id,row.bpstart,row.bpend-1)
                info('\nExtracting reads in %s and creating the .bam file: %s' % (region,bam_region_filename))

                #extract reads in region
                cmd=r'''samtools view -b -F 4 %s %s > %s ''' % (args.bam_file, region, bam_region_filename)
                #print cmd
                sb.call(cmd,shell=True)


                #index bam file
                cmd=r'''samtools index %s ''' % (bam_region_filename)
                #print cmd
                sb.call(cmd,shell=True)

                info('Trim reads and create a fastq.gz file in: %s' % fastq_gz_filename)
                #trim reads in bam and convert in fastq
                n_reads=write_trimmed_fastq(bam_region_filename,row['bpstart'],row['bpend'],fastq_gz_filename)
                df_regions.loc[idx,'n_reads']=n_reads
                df_regions.loc[idx,'bam_file_with_reads_in_region']=bam_region_filename
                df_regions.loc[idx,'fastq.gz_file_trimmed_reads_in_region']=fastq_gz_filename


        df_regions.fillna('NA').to_csv(_jp('REPORT_READS_ALIGNED_TO_SELECTED_REGIONS_WGS.txt'),sep='\t')

        #Run Crispresso
        info('\nRunning CRISPResso on each region...')
        crispresso_cmds = []
        for idx, row in df_regions.iterrows():

            if row['n_reads'] >= args.min_reads_to_use_region:
                info('\nThe region [%s] has enough reads (%d) mapped to it!' % (idx, row['n_reads']))

                crispresso_cmd = args.crispresso_command + ' -r1 %s -a %s -o %s --name %s' % \
                    (row['fastq.gz_file_trimmed_reads_in_region'], row['sequence'], OUTPUT_DIRECTORY, idx)

                if row['sgRNA'] and not pd.isnull(row['sgRNA']):
                    crispresso_cmd += ' -g %s' % row['sgRNA']

                if row['Expected_HDR'] and not pd.isnull(row['Expected_HDR']):
                    crispresso_cmd += ' -e %s' % row['Expected_HDR']

                if row['Coding_sequence'] and not pd.isnull(row['Coding_sequence']):
                    crispresso_cmd += ' -c %s' % row['Coding_sequence']

                crispresso_cmd = CRISPRessoShared.propagate_crispresso_options(crispresso_cmd, crispresso_options_for_wgs, args)
                crispresso_cmds.append(crispresso_cmd)
#               info('Running CRISPResso:%s' % crispresso_cmd)
#               sb.call(crispresso_cmd,shell=True)

            else:
                info('\nThe region [%s] has too few reads mapped to it (%d)! Not running CRISPResso!' % (idx, row['n_reads']))

        CRISPRessoMultiProcessing.run_crispresso_cmds(crispresso_cmds,args.n_processes,'region',args.skip_failed)

        quantification_summary=[]
        all_region_names = []
        all_region_read_counts = {}
        good_region_names = []
        good_region_folders = {}
        header = 'Name\tUnmodified%\tModified%\tReads_aligned\tReads_total\tUnmodified\tModified\tDiscarded\tInsertions\tDeletions\tSubstitutions\tOnly Insertions\tOnly Deletions\tOnly Substitutions\tInsertions and Deletions\tInsertions and Substitutions\tDeletions and Substitutions\tInsertions Deletions and Substitutions'
        header_els = header.split("\t")
        header_el_count = len(header_els)
        empty_line_els = [np.nan]*(header_el_count-1)
        n_reads_index = header_els.index('Reads_total') - 1
        for idx,row in df_regions.iterrows():
            folder_name='CRISPResso_on_%s' % idx
            run_name = idx

            all_region_names.append(run_name)
            all_region_read_counts[run_name] = row.n_reads

            run_file = os.path.join(_jp(folder_name),'CRISPResso2_info.pickle')
            if not os.path.exists(run_file):
                warn('Skipping the folder %s: not enough reads, incomplete, or empty folder.'% folder_name)
                this_els = empty_line_els[:]
                this_els[n_reads_index] = row.n_reads
                to_add = [run_name]
                to_add.extend(this_els)
                quantification_summary.append(to_add)
            else:
                run_data = cp.load(open(run_file,'rb'))
                ref_name = run_data['ref_names'][0] #only expect one amplicon sequence
                n_tot = row.n_reads
                n_aligned = run_data['counts_total'][ref_name]
                n_unmod = run_data['counts_unmodified'][ref_name]
                n_mod = run_data['counts_modified'][ref_name]
                n_discarded = run_data['counts_discarded'][ref_name]

                n_insertion = run_data['counts_insertion'][ref_name]
                n_deletion = run_data['counts_deletion'][ref_name]
                n_substitution = run_data['counts_substitution'][ref_name]
                n_only_insertion = run_data['counts_only_insertion'][ref_name]
                n_only_deletion = run_data['counts_only_deletion'][ref_name]
                n_only_substitution = run_data['counts_only_substitution'][ref_name]
                n_insertion_and_deletion = run_data['counts_insertion_and_deletion'][ref_name]
                n_insertion_and_substitution = run_data['counts_insertion_and_substitution'][ref_name]
                n_deletion_and_substitution = run_data['counts_deletion_and_substitution'][ref_name]
                n_insertion_and_deletion_and_substitution = run_data['counts_insertion_and_deletion_and_substitution'][ref_name]

                unmod_pct = "NA"
                mod_pct = "NA"
                if n_aligned > 0:
                    unmod_pct = 100*n_unmod/float(n_aligned)
                    mod_pct = 100*n_mod/float(n_aligned)

                vals = [run_name]
                vals.extend([round(unmod_pct,8),round(mod_pct,8),n_aligned,n_tot,n_unmod,n_mod,n_discarded,n_insertion,n_deletion,n_substitution,n_only_insertion,n_only_deletion,n_only_substitution,n_insertion_and_deletion,n_insertion_and_substitution,n_deletion_and_substitution,n_insertion_and_deletion_and_substitution])
                quantification_summary.append(vals)

                good_region_names.append(idx)
                good_region_folders[idx] = folder_name
        samples_quantification_summary_filename = _jp('SAMPLES_QUANTIFICATION_SUMMARY.txt')

        df_summary_quantification=pd.DataFrame(quantification_summary,columns=header_els)
        if args.crispresso1_mode:
            crispresso1_columns=['Name','Unmodified%','Modified%','Reads_aligned','Reads_total']
            df_summary_quantification.fillna('NA').to_csv(samples_quantification_summary_filename,sep='\t',index=None,columns=crispresso1_columns)
        else:
            df_summary_quantification.fillna('NA').to_csv(samples_quantification_summary_filename,sep='\t',index=None)

        crispresso2_info['samples_quantification_summary_filename'] = os.path.basename(samples_quantification_summary_filename)
        crispresso2_info['regions'] = df_regions
        crispresso2_info['all_region_names'] = all_region_names
        crispresso2_info['all_region_read_counts'] = all_region_read_counts
        crispresso2_info['good_region_names'] = good_region_names
        crispresso2_info['good_region_folders'] = good_region_folders

        crispresso2_info['summary_plot_names'] = []
        crispresso2_info['summary_plot_titles'] = {}
        crispresso2_info['summary_plot_labels'] = {}
        crispresso2_info['summary_plot_datas'] = {}

        df_summary_quantification.set_index('Name')

        save_png = True
        if args.suppress_report:
            save_png = False

        plot_root = _jp("CRISPRessoWGS_reads_summary")
        CRISPRessoPlot.plot_reads_total(plot_root,df_summary_quantification,save_png,args.min_reads_to_use_region)
        plot_name = os.path.basename(plot_root)
        crispresso2_info['reads_summary_plot'] = plot_name
        crispresso2_info['summary_plot_names'].append(plot_name)
        crispresso2_info['summary_plot_titles'][plot_name] = 'CRISPRessoWGS Read Allocation Summary'
        crispresso2_info['summary_plot_labels'][plot_name] = 'Each bar shows the total number of reads allocated to each amplicon. The vertical line shows the cutoff for analysis, set using the --min_reads_to_use_region parameter.'
        crispresso2_info['summary_plot_datas'][plot_name] = [('CRISPRessoWGS summary',os.path.basename(samples_quantification_summary_filename))]

        plot_root = _jp("CRISPRessoWGS_modification_summary")
        CRISPRessoPlot.plot_unmod_mod_pcts(plot_root,df_summary_quantification,save_png,args.min_reads_to_use_region)
        plot_name = os.path.basename(plot_root)
        crispresso2_info['modification_summary_plot'] = plot_name
        crispresso2_info['summary_plot_names'].append(plot_name)
        crispresso2_info['summary_plot_titles'][plot_name] = 'CRISPRessoWGS Modification Summary'
        crispresso2_info['summary_plot_labels'][plot_name] = 'Each bar shows the total number of reads aligned to each amplicon, divided into the reads that are modified and unmodified. The vertical line shows the cutoff for analysis, set using the --min_reads_to_use_region parameter.'
        crispresso2_info['summary_plot_datas'][plot_name] = [('CRISPRessoWGS summary',os.path.basename(samples_quantification_summary_filename))]

        if not args.suppress_report:
            if (args.place_report_in_output_folder):
                report_name = _jp("CRISPResso2WGS_report.html")
            else:
                report_name = OUTPUT_DIRECTORY+'.html'
            CRISPRessoReport.make_wgs_report_from_folder(report_name,crispresso2_info,OUTPUT_DIRECTORY,_ROOT)
            crispresso2_info['report_location'] = report_name
            crispresso2_info['report_filename'] = os.path.basename(report_name)

        cp.dump(crispresso2_info, open(crispresso2WGS_info_file, 'wb'))

        info('Analysis Complete!')
        print(CRISPRessoShared.get_crispresso_footer())
        sys.exit(0)

    except Exception as e:
        print_stacktrace_if_debug()
        error('\n\nERROR: %s' % e)
        sys.exit(-1)
コード例 #32
0
def to_bin(x):
    s = bin(x)[2:]
    return (4 - len(s)) * '0' + s
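A quick sanity check of to_bin (a minimal sketch, not part of the original snippet): the function left-pads the binary string to 4 characters, and for values of 16 or more the pad length goes negative and contributes nothing, so the string is returned unpadded. format(x, '04b') behaves the same way for the padded cases.

# Illustrative usage, assuming the to_bin definition above.
assert to_bin(0) == '0000'
assert to_bin(5) == '0101'
assert to_bin(18) == '10010'            # wider than 4 bits: no pad, no truncation
assert to_bin(5) == format(5, '04b')    # equivalent built-in formatting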
コード例 #33
0
    def obtain_samples(self, itr, log=True, log_prefix=''):
        logger.log("Obtaining samples for iteration %d..." % itr)
        paths = []
        n_samples = 0
        obses = self.vec_env.reset()
        dones = np.asarray([True] * self.vec_env.num_envs)
        running_paths = [None] * self.vec_env.num_envs

        pbar = ProgBarCounter(self.algo.batch_size)
        policy_time = 0
        env_time = 0
        process_time = 0

        policy = self.algo.policy
        import time
        while n_samples < self.algo.batch_size:
            t = time.time()
            policy.reset(dones)
            actions, agent_infos = policy.get_actions(obses)

            policy_time += time.time() - t
            t = time.time()
            next_obses, rewards, dones, env_infos = self.vec_env.step(actions)
            env_time += time.time() - t

            t = time.time()

            agent_infos = tensor_utils.split_tensor_dict_list(agent_infos)
            env_infos = tensor_utils.split_tensor_dict_list(env_infos)
            if env_infos is None:
                env_infos = [dict() for _ in range(self.vec_env.num_envs)]
            if agent_infos is None:
                agent_infos = [dict() for _ in range(self.vec_env.num_envs)]
            for idx, observation, action, reward, env_info, agent_info, done in zip(itertools.count(), obses, actions,
                                                                                    rewards, env_infos, agent_infos,
                                                                                    dones):
                if running_paths[idx] is None:
                    running_paths[idx] = dict(
                        observations=[],
                        actions=[],
                        rewards=[],
                        env_infos=[],
                        agent_infos=[],
                    )
                running_paths[idx]["observations"].append(observation)
                running_paths[idx]["actions"].append(action)
                running_paths[idx]["rewards"].append(reward)
                running_paths[idx]["env_infos"].append(env_info)
                running_paths[idx]["agent_infos"].append(agent_info)
                if done:
                    paths.append(dict(
                        observations=self.env_spec.observation_space.flatten_n(running_paths[idx]["observations"]),
                        actions=self.env_spec.action_space.flatten_n(running_paths[idx]["actions"]),
                        rewards=tensor_utils.stack_tensor_list(running_paths[idx]["rewards"]),
                        env_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["env_infos"]),
                        agent_infos=tensor_utils.stack_tensor_dict_list(running_paths[idx]["agent_infos"]),
                    ))
                    n_samples += len(running_paths[idx]["rewards"])
                    running_paths[idx] = None
            process_time += time.time() - t
            pbar.inc(len(obses))
            obses = next_obses

        pbar.stop()

        if log:
            logger.record_tabular(log_prefix + "PolicyExecTime", policy_time)
            logger.record_tabular(log_prefix + "EnvExecTime", env_time)
            logger.record_tabular(log_prefix + "ProcessExecTime", process_time)

        return paths
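The sampler above relies on rllab's tensor_utils helpers to regroup per-step dictionaries into per-trajectory arrays when a path finishes. As a rough illustration of the stacking idea only (an assumed stand-in, not rllab's actual implementation):

import numpy as np

def stack_tensor_dict_list_sketch(dict_list):
    # Turn a list of {key: value} dicts (one per timestep) into one {key: stacked array} dict.
    return {k: np.asarray([d[k] for d in dict_list]) for k in dict_list[0]}

# Example: env_info dicts collected over three timesteps of one path.
infos = [{"dist": 0.1}, {"dist": 0.3}, {"dist": 0.2}]
stacked = stack_tensor_dict_list_sketch(infos)
print(stacked["dist"].shape)  # (3,)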
コード例 #34
0
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    out, rc = run_command(
        GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS, [
            "describe", "--tags", "--dirty", "--always", "--long", "--match",
            "%s*" % tag_prefix
        ],
        cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = (
                "unable to parse git-describe output: '%s'" % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" %
                               (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(
            GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(
        GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
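For reference, a small standalone sketch (hypothetical describe output, same regex as above) of how a git-describe string decomposes into the pieces fields:

import re

describe_out = "v1.2.3-4-gabcdef0-dirty"   # hypothetical `git describe` output
tag_prefix = "v"

git_describe = describe_out
dirty = git_describe.endswith("-dirty")
if dirty:
    git_describe = git_describe[:git_describe.rindex("-dirty")]

mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
pieces = {
    "closest-tag": mo.group(1)[len(tag_prefix):],  # '1.2.3'
    "distance": int(mo.group(2)),                  # 4 commits since the tag
    "short": mo.group(3),                          # 'abcdef0'
    "dirty": dirty,                                # True
}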
コード例 #35
0
def find_last(mylist,myvalue):
    return len(mylist) - mylist[::-1].index(myvalue) -1
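A brief usage sketch: the index found in the reversed list is mapped back to the original indexing, and, like list.index(), a missing value raises ValueError.

values = [1, 2, 3, 2, 5]
assert find_last(values, 2) == 3   # last occurrence of 2 sits at index 3
assert find_last(values, 5) == 4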
コード例 #36
0
 def do_export(self, filename, used_for_merging_dbs=False):
     self.orig_dir = os.getcwd()
     if not os.path.isabs(filename):
         filename = os.path.join(self.config()["export_dir"], filename)
     os.chdir(os.path.dirname(filename))
     if used_for_merging_dbs is True:
         metadata = {}
     else:
         metadata = self.main_widget().show_export_metadata_dialog()
     if metadata is None:  # Cancelled.
         os.chdir(self.orig_dir)
         return -1
     metadata_file = open("METADATA", "w", encoding="utf-8")
     for key, value in metadata.items():
         print(key + ":" + value.strip().replace("\n", "<br>"),
               file=metadata_file)
     metadata_file.close()
     db = self.database()
     w = self.main_widget()
     # Generate log entries.
     if used_for_merging_dbs:
         w.set_progress_text(_("Extracting cards..."))
     else:
         w.set_progress_text(_("Exporting cards..."))
     active_objects = db.active_objects_to_export()
     number_of_entries = len(active_objects["tags"]) + \
         len(active_objects["fact_view_ids"]) + \
         len(active_objects["card_type_ids"]) + \
         len(active_objects["media_filenames"]) + \
         len(active_objects["_card_ids"]) + \
         len(active_objects["_fact_ids"])
     xml_file = open("cards.xml", "w", encoding="utf-8")
     xml_format = XMLFormat()
     xml_file.write(xml_format.log_entries_header(number_of_entries))
     w.set_progress_range(number_of_entries)
     w.set_progress_update_interval(number_of_entries / 20)
     for tag in active_objects["tags"]:
         log_entry = LogEntry()
         log_entry["type"] = EventTypes.ADDED_TAG
         log_entry["o_id"] = tag.id
         log_entry["name"] = tag.name
         xml_file.write(xml_format.repr_log_entry(log_entry))
         w.increase_progress(1)
     for fact_view_id in active_objects["fact_view_ids"]:
         fact_view = db.fact_view(fact_view_id, is_id_internal=False)
         log_entry = LogEntry()
         log_entry["type"] = EventTypes.ADDED_FACT_VIEW
         log_entry["o_id"] = fact_view.id
         log_entry["name"] = fact_view.name
         log_entry["q_fact_keys"] = repr(fact_view.q_fact_keys)
         log_entry["a_fact_keys"] = repr(fact_view.a_fact_keys)
         log_entry["q_fact_key_decorators"] = \
             repr(fact_view.q_fact_key_decorators)
         log_entry["a_fact_key_decorators"] = \
             repr(fact_view.a_fact_key_decorators)
         log_entry["a_on_top_of_q"] = repr(fact_view.a_on_top_of_q)
         log_entry["type_answer"] = repr(fact_view.type_answer)
         if fact_view.extra_data:
             log_entry["extra"] = repr(fact_view.extra_data)
         xml_file.write(xml_format.repr_log_entry(log_entry))
         w.increase_progress(1)
     for card_type_id in active_objects["card_type_ids"]:
         card_type = db.card_type(card_type_id, is_id_internal=False)
         log_entry = LogEntry()
         log_entry["type"] = EventTypes.ADDED_CARD_TYPE
         log_entry["o_id"] = card_type.id
         log_entry["name"] = card_type.name
         log_entry["fact_keys_and_names"] = \
             repr(card_type.fact_keys_and_names)
         log_entry["fact_views"] = repr([fact_view.id for fact_view \
             in card_type.fact_views])
         log_entry["unique_fact_keys"] = \
             repr(card_type.unique_fact_keys)
         log_entry["required_fact_keys"] = \
             repr(card_type.required_fact_keys)
         log_entry["keyboard_shortcuts"] = \
             repr(card_type.keyboard_shortcuts)
         if card_type.extra_data:
             log_entry["extra"] = repr(card_type.extra_data)
         xml_file.write(xml_format.repr_log_entry(log_entry))
         w.increase_progress(1)
     for media_filename in active_objects["media_filenames"]:
         log_entry = LogEntry()
         log_entry["type"] = EventTypes.ADDED_MEDIA_FILE
         log_entry["fname"] = media_filename
         xml_file.write(str(xml_format.repr_log_entry(log_entry)))
         w.increase_progress(1)
     for _fact_id in active_objects["_fact_ids"]:
         fact = db.fact(_fact_id, is_id_internal=True)
         log_entry = LogEntry()
         log_entry["type"] = EventTypes.ADDED_FACT
         log_entry["o_id"] = fact.id
         for fact_key, value in fact.data.items():
             log_entry[fact_key] = value
         xml_file.write(xml_format.repr_log_entry(log_entry))
         w.increase_progress(1)
     for _card_id in active_objects["_card_ids"]:
         card = db.card(_card_id, is_id_internal=True)
         log_entry = LogEntry()
         log_entry["type"] = EventTypes.ADDED_CARD
         log_entry["o_id"] = card.id
         log_entry["card_t"] = card.card_type.id
         log_entry["fact"] = card.fact.id
         log_entry["fact_v"] = card.fact_view.id
         log_entry["tags"] = ",".join([tag.id for tag in card.tags])
         if used_for_merging_dbs:
             log_entry["c_time"] = card.creation_time
             log_entry["m_time"] = card.modification_time
             log_entry["gr"] = card.grade
             log_entry["e"] = card.easiness
             log_entry["ac_rp"] = card.acq_reps
             log_entry["rt_rp"] = card.ret_reps
             log_entry["lps"] = card.lapses
             log_entry["ac_rp_l"] = card.acq_reps_since_lapse
             log_entry["rt_rp_l"] = card.ret_reps_since_lapse
             log_entry["l_rp"] = card.last_rep
             log_entry["n_rp"] = card.next_rep
         else:
             log_entry["gr"] = -1
             log_entry["e"] = 2.5
             log_entry["ac_rp"] = 0
             log_entry["rt_rp"] = 0
             log_entry["lps"] = 0
             log_entry["ac_rp_l"] = 0
             log_entry["rt_rp_l"] = 0
             log_entry["l_rp"] = -1
             log_entry["n_rp"] = -1
         if card.extra_data:
             log_entry["extra"] = repr(card.extra_data)
         xml_file.write(xml_format.repr_log_entry(log_entry))
         w.increase_progress(1)
     xml_file.write(xml_format.log_entries_footer())
     xml_file.close()
     # Make archive (Zipfile requires a .zip extension).
     zip_file = zipfile.ZipFile(filename + ".zip",
                                "w",
                                compression=zipfile.ZIP_DEFLATED)
     zip_file.write("cards.xml")
     zip_file.write("METADATA")
     w.close_progress()
     if used_for_merging_dbs:
         w.set_progress_text(_("Extracting media files..."))
     else:
         w.set_progress_text(_("Bundling media files..."))
     number_of_media_files = len(active_objects["media_filenames"])
     w.set_progress_range(number_of_media_files)
     w.set_progress_update_interval(number_of_media_files / 100)
     for media_filename in active_objects["media_filenames"]:
         full_path = os.path.normpath(\
             os.path.join(self.database().media_dir(), media_filename))
         if not os.path.exists(full_path):
             self.main_widget().show_error(\
             _("Missing filename: " + full_path))
             continue
         zip_file.write(full_path,
                        media_filename,
                        compress_type=zipfile.ZIP_STORED)
         w.increase_progress(1)
     zip_file.close()
     if os.path.exists(filename):
         os.remove(filename)
     os.rename(filename + ".zip", filename)
     os.remove("cards.xml")
     os.remove("METADATA")
     os.chdir(self.orig_dir)
     w.close_progress()
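The export writes the archive under a temporary '.zip' suffix and then renames it onto the requested filename; a minimal standalone sketch of that pattern (hypothetical filenames, not mnemosyne's API):

import os
import zipfile

open("cards.xml", "w").close()   # placeholder payload for the sketch
target = "export_example.db"
with zipfile.ZipFile(target + ".zip", "w", compression=zipfile.ZIP_DEFLATED) as zf:
    zf.write("cards.xml")                                      # log entries: deflated
    zf.write("cards.xml", "media/dummy.png",
             compress_type=zipfile.ZIP_STORED)                 # media files: stored uncompressed
if os.path.exists(target):
    os.remove(target)
os.rename(target + ".zip", target)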
コード例 #37
0
ファイル: digitemp.py プロジェクト: borand/digitemPy
 def SerialNumberToDec(self, hex_str):
     dec_vector = []
     for i in range(0, len(hex_str), 2):
         dec_vector.append(int(float.fromhex(hex_str[i:i + 2])))
     return dec_vector
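A short usage sketch: the method walks the hex string two characters at a time and converts each byte to its decimal value; int(pair, 16) gives the same result more directly.

hex_serial = "28A1B2"   # hypothetical 1-Wire ROM code fragment
dec_vector = [int(float.fromhex(hex_serial[i:i + 2])) for i in range(0, len(hex_serial), 2)]
print(dec_vector)       # [40, 161, 178]
assert dec_vector == [int(hex_serial[i:i + 2], 16) for i in range(0, len(hex_serial), 2)]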
コード例 #38
0
count = rpg.charactercreator.character.find({}, {
    '_id': 0,
    'name': 1,
    'inventory': 1
})
for doc in count:
    print(f'Character Name: {doc["name"]} == '
          f'Item Count: {len(doc["inventory"])}')

# # Weapons per character
# count = rpg.charactercreator.character.find(
#     {}, {'_id': 0, 'name': 1, 'inventory': 1}
# )
# for doc in count:
#     print(f'Character Name: {doc["name"]} == '
#           f'Item Count: {len(doc["inventory"])}')

# Average items per character
count = rpg.charactercreator.character.find({}, {'_id': 0, 'inventory': 1})
inventories = []
for doc in count:
    inventories.append(len(doc['inventory']))
count2 = rpg.charactercreator.character.count_documents({})
print(f'Average Items Per Character: {sum(inventories)/count2}')

# # Average Weapons Per Character
# count = rpg.charactercreator.character.find(
#     {}, {'_id': 0, 'inventory': 1})
# inventories = []
# for doc in count:
#     inventories.append(len(doc['inventory']))
# count2 = rpg.charactercreator.character.count_documents({})
# print(f'Average Items Per Character: {sum(inventories)/count2}')
コード例 #39
0
    def __init__(self, config, debug=False):
        """
        :param config:
            If a string, then treat it as a filename of a YAML config file; if
            a dictionary then treat it as the config dictionary itself.

            For each dictionary in `config['data']`, a new matrix, colorbar,
            and slider will be created using the filename and colormap
            specified.  The matrices for the files will be plotted on the same
            Axes.

            There is no limit, but colors get complicated quickly
            with, say, >3 files.

            Example config dict::

                {
                 'dim': 128,
                 'genome': 'hg19',
                 'chrom': 'chr10',
                 'data': [
                       {'filename': '../data/cpg-islands.hg19.chr10.bed',
                        'colormap': 'Blues'},

                       {'filename': '../data/refseq.chr10.exons.bed',
                        'colormap': 'Reds'}

                         ]
                }

            Example YAML file::

                dim: 128
                chrom: chr10
                genome: hg19
                data:
                    -
                        filename: ../data/cpg-islands.hg19.chr10.bed
                        colormap: Blues

                    -
                        filename: ../data/refseq.chr10.exons.bed
                        colormap: Reds


        :param debug:
            If True, then print some extra debugging info

        :param kwargs:
            Additional keyword arguments are passed to HilbertMatrix (e.g.,
            m_dim, genome, chrom)
        """
        self.config = self._parse_config(config)
        self.matrix_dim = self.config['dim']

        kwargs = dict(matrix_dim=self.config['dim'],
                      genome=self.config['genome'],
                      chrom=self.config['chrom'])

        self.hilberts = []
        self.colormaps = []

        for chunk in self.config['data']:
            self.hilberts.append(HilbertMatrix(chunk['filename'], **kwargs))
            self.colormaps.append(getattr(matplotlib.cm, chunk['colormap']))

        for h in self.hilberts:
            h.mask_low_values()

        self.debug = debug
        self.n = len(self.config['data'])
        self.fig = plt.figure(figsize=(8, 8))
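The docstring above accepts either a YAML filename or an already-built dict for config; _parse_config itself is not shown here, so the following is only an assumed sketch of what it presumably does:

import yaml

def _parse_config_sketch(config):
    # Accept either a path to a YAML config file or a ready-made config dictionary.
    if isinstance(config, str):
        with open(config) as fh:
            return yaml.safe_load(fh)
    return dict(config)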
コード例 #40
0
    def __init__(self,
                 level: LevelSelection,
                 frame_skip: int,
                 visualization_parameters: VisualizationParameters,
                 seed: Union[None, int] = None,
                 human_control: bool = False,
                 custom_reward_threshold: Union[int, float] = None,
                 screen_size: int = 84,
                 minimap_size: int = 64,
                 feature_minimap_maps_to_use: List = range(7),
                 feature_screen_maps_to_use: List = range(17),
                 observation_type:
                 StarcraftObservationType = StarcraftObservationType.Features,
                 disable_fog: bool = False,
                 auto_select_all_army: bool = True,
                 use_full_action_space: bool = False,
                 **kwargs):
        super().__init__(level, seed, frame_skip, human_control,
                         custom_reward_threshold, visualization_parameters)

        self.screen_size = screen_size
        self.minimap_size = minimap_size
        self.feature_minimap_maps_to_use = feature_minimap_maps_to_use
        self.feature_screen_maps_to_use = feature_screen_maps_to_use
        self.observation_type = observation_type
        self.features_screen_size = None
        self.feature_minimap_size = None
        self.rgb_screen_size = None
        self.rgb_minimap_size = None
        if self.observation_type == StarcraftObservationType.Features:
            self.features_screen_size = screen_size
            self.feature_minimap_size = minimap_size
        elif self.observation_type == StarcraftObservationType.RGB:
            self.rgb_screen_size = screen_size
            self.rgb_minimap_size = minimap_size
        self.disable_fog = disable_fog
        self.auto_select_all_army = auto_select_all_army
        self.use_full_action_space = use_full_action_space

        # step_mul is the equivalent of frame skipping. Not sure if it repeats actions in between or not though.
        self.env = sc2_env.SC2Env(
            map_name=self.env_id,
            step_mul=frame_skip,
            visualize=self.is_rendered,
            agent_interface_format=sc2_env.AgentInterfaceFormat(
                feature_dimensions=sc2_env.Dimensions(
                    screen=self.features_screen_size,
                    minimap=self.feature_minimap_size)
                # rgb_dimensions=sc2_env.Dimensions(
                #     screen=self.rgb_screen_size,
                #     minimap=self.rgb_screen_size
                # )
            ),
            # feature_screen_size=self.features_screen_size,
            # feature_minimap_size=self.feature_minimap_size,
            # rgb_screen_size=self.rgb_screen_size,
            # rgb_minimap_size=self.rgb_screen_size,
            disable_fog=disable_fog,
            random_seed=self.seed)

        # print all the available actions
        # self.env = available_actions_printer.AvailableActionsPrinter(self.env)

        self.reset_internal_state(True)
        """
        feature_screen:  [height_map, visibility_map, creep, power, player_id, player_relative, unit_type, selected,
                          unit_hit_points, unit_hit_points_ratio, unit_energy, unit_energy_ratio, unit_shields, 
                          unit_shields_ratio, unit_density, unit_density_aa, effects]
        feature_minimap: [height_map, visibility_map, creep, camera, player_id, player_relative, selected]
        player:          [player_id, minerals, vespene, food_cap, food_army, food_workers, idle_worker_count, 
                          army_count, warp_gate_count, larva_count]
        """
        self.screen_shape = np.array(
            self.env.observation_spec()[0]['feature_screen'])
        self.screen_shape[0] = len(self.feature_screen_maps_to_use)
        self.minimap_shape = np.array(
            self.env.observation_spec()[0]['feature_minimap'])
        self.minimap_shape[0] = len(self.feature_minimap_maps_to_use)
        self.state_space = StateSpace({
            "screen":
            PlanarMapsObservationSpace(shape=self.screen_shape,
                                       low=0,
                                       high=255,
                                       channels_axis=0),
            "minimap":
            PlanarMapsObservationSpace(shape=self.minimap_shape,
                                       low=0,
                                       high=255,
                                       channels_axis=0),
            "measurements":
            VectorObservationSpace(self.env.observation_spec()[0]["player"][0])
        })
        if self.use_full_action_space:
            action_identifiers = list(self.env.action_spec()[0].functions)
            num_action_identifiers = len(action_identifiers)
            action_arguments = [(arg.name, arg.sizes)
                                for arg in self.env.action_spec()[0].types]
            sub_action_spaces = [DiscreteActionSpace(num_action_identifiers)]
            for argument in action_arguments:
                for dimension in argument[1]:
                    sub_action_spaces.append(DiscreteActionSpace(dimension))
            self.action_space = CompoundActionSpace(sub_action_spaces)
        else:
            self.action_space = BoxActionSpace(2,
                                               0,
                                               self.screen_size - 1,
                                               ["X-Axis, Y-Axis"],
                                               default_action=np.array([
                                                   self.screen_size / 2,
                                                   self.screen_size / 2
                                               ]))
コード例 #41
0
ファイル: four_digital_led.py プロジェクト: aska912/pi_study
        try:
            fn = open('/sys/class/thermal/thermal_zone0/temp', 'r') 
            cpu_temp = int(float(fn.read()) /1000)
            fn.close()
        except:
            pass
        #print("CPU Tempture: ", cpu_temp, '\'C')
        if not cpu_temp == -1:
            cpu_temp_shi  = cpu_temp / 10
            cpu_temp_ge   = cpu_temp % 10
            data_queue    = [ LED_FONTS['blank'], LED_FONTS['C'], LED_FONTS['P'], LED_FONTS['U'], LED_FONTS['-'], \
                              LED_FONTS['%d'%cpu_temp_shi], LED_FONTS['%d'%cpu_temp_ge], LED_FONTS['o'], LED_FONTS['C'] ]
            display_queue = [LED_FONTS['blank'], LED_FONTS['blank'], LED_FONTS['blank'], LED_FONTS['blank']]

            # Marquee mode: scroll the characters to the left.
            for i in range( len(data_queue) ):
                display_queue.pop(0)
                display_queue.append(data_queue[i])
                led.write_data(display_queue[0], display_queue[1], display_queue[2], display_queue[3])
                led.display()
                time.sleep(0.6)
        else:
            led.write_data(LED_FONTS['E'], LED_FONTS['r'], LED_FONTS['r'], LED_FONTS['1'])
            led.display()
        time.sleep(3)

        dht_data = dht11.get()
        if dht_data[0]:
            temp = int(dht_data[1])
            humi = int(dht_data[2])
            temp_ge  = temp % 10
コード例 #42
0
ファイル: test_path.py プロジェクト: ajyoon/brown
 def test_move_to_with_no_parent(self):
     path = Path((Unit(5), Unit(6)))
     path.move_to(Unit(10), Unit(11))
     assert len(path.elements) == 1
     assert_path_els_equal(path.elements[0], MoveTo(Point(Unit(10), Unit(11)), path))
コード例 #43
0
ignore_str = '-----'

for sub_exp_dir in os.listdir(exp_path):
    sub_exp_path = os.path.join(exp_path, sub_exp_dir)
    if not os.path.isdir(sub_exp_path): continue
    try:
        with open(os.path.join(sub_exp_path, 'debug.log'), 'r') as f:
            value_dict = defaultdict(list)
            for line in f:
                line = line.split()
                if ignore_str not in line[0]:
                    k, v = line[0], float(line[1])
                    value_dict[k].append(v)

            # find the shortest list length
            min_len = min(map(lambda k: len(value_dict[k]), value_dict))
            new_dict = {k: v[:min_len] for k, v in value_dict.items()}

            # convert to numpy array
            arr = np.zeros((min_len, len(new_dict.keys())))
            for i, k in enumerate(sorted(new_dict.keys())):
                v = np.array(new_dict[k])
                arr[:, i] = v
            header = ','.join(sorted(new_dict.keys()))

            np.savetxt(os.path.join(sub_exp_path, 'progress.csv'),
                       arr,
                       delimiter=",",
                       header=header)
            # print(os.path.join(sub_exp_path, 'progress.csv'))
    except (IOError, IndexError, ValueError):
        # Skip sub-experiments whose debug.log is missing or malformed.
        continue
コード例 #44
0
ファイル: tests_file.py プロジェクト: tblakex01/ara
 def test_get_no_files(self):
     request = self.client.get("/api/v1/files")
     self.assertEqual(0, len(request.data["results"]))
コード例 #45
0
ファイル: test_path.py プロジェクト: ajyoon/brown
 def test_straight_line(self):
     path = Path.straight_line((Unit(5), Unit(6)), (Unit(10), Unit(11)))
     assert path.pos == Point(Unit(5), Unit(6))
     assert len(path.elements) == 2
     assert_path_els_equal(path.elements[0], MoveTo(ORIGIN, path))
     assert_path_els_equal(path.elements[1], LineTo(Point(Unit(10), Unit(11)), path))
コード例 #46
0
def OptionMenu_SelectionEvent(
        event):  # I'm not sure on the arguments here, it works though
    ## do something
    global robot_adres, socket, recive_flag, flag_inet_work, robot_adres_inet
    print(FgBlue, event)

    # if event == "none" or robot_adres != "-1":
    #     print("return")
    #     return

    if event[0] == "scan":
        ScanRobots(event)
        return

    if event[0] == "scan_inet":
        ip_adress_s = sc.gethostbyname(sc.gethostname())
        print(ip_adress_s)
        print("connect to server...")
        ic.connect()
        print("take list")
        list = ic.take_list()
        # print(list)
        # print(ic.take_list())
        # list_combobox_inet = []
        # list_combobox_inet.append(["scan_inet", " "])
        for r in list:
            print(r)
            if r[2] == "robot":
                list_combobox.append(r)
        if len(list) == 0:
            print("no robots in server list")

        dropVar = StringVar()
        dropVar.set(list_combobox_inet[0])

        combobox_inet = OptionMenu(panelFrame,
                                   dropVar,
                                   *(list_combobox),
                                   command=OptionMenu_SelectionEvent)
        combobox_inet.place(x=260, y=10, width=150,
                            height=40)  # Позиционируем Combobox на форме
        # print("end take")
        return

    if event[3] == "l":
        robot_adres = event[1]
        robot_adres_inet = event[0]
        # socket = context.socket(zmq.REP)
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://" + robot_adres + ":%s" % port)

        ip_adress = sc.gethostbyname(sc.gethostname())

        # s = socket.recv_string(zmq.NOBLOCK)

        print("Taking robot..", robot_adres)
        try:
            socket.send_string("take|" + ip_adress)
            print("Connected to robot: " + BgGreen + socket.recv_string() +
                  Reset)
        except:
            pass

        # recive_flag = 1
        flag_inet_work = False

    if event[3] == "i":
        robot_adres_inet = event[0]
        robot_adres = event[0]
        print(robot_adres_inet)
        flag_inet_work = True
        print("Connected to robot: " + BgGreen + event[1] + Reset)
        pass
    connect_keyboard(robot_adres)
    pass
コード例 #47
0
ファイル: tests_file.py プロジェクト: tblakex01/ara
 def test_get_files(self):
     file = factories.FileFactory()
     request = self.client.get("/api/v1/files")
     self.assertEqual(1, len(request.data["results"]))
     self.assertEqual(file.path, request.data["results"][0]["path"])
コード例 #48
0
def camera_work():
    global root, video_show2, socket2, video_show2_global, image, started_flag, flag_inet_work, socket_2_connected
    ic_v = InetConnection.InetConnect(sc.gethostname() + "_v", "client")
    ic_v.connect()
    image = np.zeros((480, 640, 3), np.uint8)
    time_frame = time.time()
    frames = 0
    frames_time = time.time()

    while 1:
        # try:
        # print("s",started_flag)
        # print("video status", video_show2_global, video_show2)
        if video_show2_global == 1:
            if video_show2 == 1:  # and started_flag == 1:
                # print("vid1", flag_inet_work)
                if flag_inet_work == True:
                    ic_v.send_and_wait_answer(robot_adres_inet, "p")
                    while 1:
                        j_mesg, jpg_bytes = ic_v.take_answer_bytes()
                        if len(jpg_bytes) > 1:
                            try:
                                A = np.frombuffer(jpg_bytes,
                                                  dtype=j_mesg['dtype'])
                                # arrayname = md['arrayname']

                                # image = A.reshape(j_mesg['shape'])
                                image = A.reshape(j_mesg['shape'])
                                image = cv2.imdecode(image, 1)
                                time_frame = time.time()
                                frames += 1

                            except:
                                pass

                        else:
                            # time.sleep(0.01)
                            break
                            # continue
                else:

                    try:
                        socket2.send_string("1", zmq.NOBLOCK)  # zmq.NOBLOCK
                    except:
                        # print("error", e)
                        pass
                    md = ""
                    t = time.time()
                    while 1:
                        try:
                            md = socket2.recv_json(zmq.NOBLOCK)
                        except:
                            pass
                        if md != "":
                            break
                        if time.time() - t > 1:
                            # print("break video")
                            break

                    if md != "" and video_show2 == 1:
                        msg = 0
                        t = time.time()
                        while 1:
                            try:
                                msg = socket2.recv(zmq.NOBLOCK)
                            except:
                                pass
                                # print("error", e)
                            if msg != 0:
                                break
                            if time.time() - t > 1:
                                # print("break video")
                                break

                        try:

                            A = np.frombuffer(msg, dtype=md['dtype'])
                            # arrayname = md['arrayname']
                            image = A.reshape(md['shape'])
                            image = cv2.imdecode(image, 1)
                            time_frame = time.time()
                            # print("frame", md['shape'])
                            # cv2.imshow("Robot frame", image)
                            # cv2.waitKey(1)
                            frames += 1

                        except:
                            pass

                # draw the picture; overlay "video lost" if no frame arrives for 2 s
                if time.time() - time_frame > 2:

                    cv2.putText(image, "video lost",
                                (10, int(image.shape[0] - 10)),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                (255, 255, 255))
                    for i in range(int(time.time() - time_frame)):
                        cv2.putText(image, ".",
                                    (140 + (i * 10), int(image.shape[0] - 10)),
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                    (255, 255, 255))

                    # auto-reconnect the video after 5 s without frames
                    if time.time() - time_frame > 5:
                        # print("reconnect video")
                        if flag_inet_work == True:
                            ic_v.disconnect()

                        else:
                            if socket_2_connected:
                                socket2.close()

                        time_frame = time.time()
                        video_show2 = 0

                        continue

                if frames_time < time.time():
                    fps = frames
                    # print("fps:",fps)
                    frames_time = int(time.time()) + 1
                    # print(frames_time)
                    frames = 0
                if fps_show == 1:
                    cv2.putText(
                        image, "fps:" + str(fps),
                        (int(image.shape[1] - 80), int(image.shape[0] - 10)),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255))
                cv2.imshow("Robot frame", image)
                cv2.waitKey(1)
                continue

            if video_show2 == 0:

                if flag_inet_work == True:
                    video_show2 = 1
                    ic_v.connect()
                    continue
                else:
                    # print("Connecting to soft...", robot_adres)
                    cv2.destroyAllWindows()
                    for i in range(1, 5):
                        cv2.waitKey(1)
                    context = zmq.Context()
                    socket2 = context.socket(zmq.REQ)
                    socket2.connect("tcp://" + robot_adres + ":5555")
                    socket_2_connected = True
                    # print("connect ok")
                    # context = zmq.Context()
                    # socket2 = context.socket(zmq.REQ)
                    # socket2.setsockopt(zmq.LINGER, 0)
                    # socket2.connect("tcp://" + robot_adres + ":5555")
                    # socket2.send_string("1")  # send can block on other socket types, so keep track
                    # # use poll for timeouts:
                    # poller = zmq.Poller()
                    # poller.register(socket, zmq.POLLIN)
                    # if poller.poll(1 * 1000):  # 10s timeout in milliseconds
                    #     #msg = socket2.recv_json()
                    #     pass
                    # else:
                    #     print("Timeout processing auth request")

                    # these are not necessary, but still good practice:
                    pass

                image = np.zeros((480, 640, 3), np.uint8)
                cv2.putText(image, "Connect to robot...", (180, 240),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
                time_frame = time.time()
                video_show2 = 1
                cv2.namedWindow("Robot frame")
                cv2.startWindowThread()
                # print("connected")

                continue
            if video_show2 == -1:
                # print("vid-1")
                # print("close socket2")

                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)

                if flag_inet_work == True:
                    video_show2 = 3
                    continue

                if socket_2_connected:
                    socket2.close()
                    socket_2_connected = False

                time.sleep(0.1)
                video_show2 = 3
                ic_v.disconnect()
                time.sleep(0.05)
                # print("video_show2", video_show2 )

                continue
            if video_show2 == 3:
                # print("vid3")
                # cv2.imshow("Robot frame", image)
                # cv2.destroyWindow("Robot frame")
                cv2.destroyAllWindows()
                for i in range(1, 5):
                    cv2.waitKey(1)

                time.sleep(0.05)
                continue
                # print("vid??", video_show2, "started_flag==", started_flag)

        else:

            cv2.destroyAllWindows()
            cv2.waitKey(1)
            video_show2 = 3
            time.sleep(0.1)
コード例 #49
0
3. The client then receives the data in a loop

'''

import socket

'''
Steps:
1. Create a socket client
2. Connect to the server
3. Send a message
4. Receive the reply returned by the server
'''
client = socket.socket()
client.connect(('47.97.165.75', 9000))
while True:
    cmd = input(">>:").strip()
    if len(cmd) == 0: continue
    client.send(cmd.encode("utf-8"))
    cmd_size = client.recv(1024)
    content = b''
    size = 0
    total_size = int(cmd_size.decode())
    while size < total_size:
        data = client.recv(1024)
        size += len(data)
        content += data
    print(content.decode())

client.close()
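
The client above implements a simple length-prefixed exchange: send a command, read the payload size reported by the server, then keep calling recv() until that many bytes have arrived. For context, a minimal sketch of a matching server is shown below; the bind address, the use of subprocess.getoutput to execute the command, and every name in it are assumptions for illustration, not code from the original project.

# Minimal sketch of a matching server, assuming the same length-prefixed
# protocol as the client above: reply with the payload size, then the payload.
import socket
import subprocess

server = socket.socket()
server.bind(('0.0.0.0', 9000))  # port assumed to match the client
server.listen(5)

while True:
    conn, addr = server.accept()
    while True:
        cmd = conn.recv(1024)
        if not cmd:  # client closed the connection
            break
        # run the received command and capture its output (illustrative only)
        output = subprocess.getoutput(cmd.decode('utf-8')).encode('utf-8')
        if not output:
            output = b'(no output)'
        conn.send(str(len(output)).encode('utf-8'))  # 1) send the size
        conn.sendall(output)                         # 2) send the payload
    conn.close()

Note that sending the size and the payload as two separate send() calls does not guarantee the client reads them in two separate recv() calls; the original client ignores this, and so does the sketch above.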