Esempio n. 1
0
async def on_message(message):
  """Discord message handler: dispatches chat commands and feeds the
  markov chain with everything else.

  Side effects only (sends messages through the global `client`);
  returns nothing.
  """
  start = time.time()
  # Ignore messages from any bot account (including ourselves).
  if message.author.bot:
    print("DISCARDING BOT MESSAGE FROM ", message.author)
    return
  # Ignore DMs: commands are only meaningful inside a server channel.
  if type(message.channel) == discord.channel.PrivateChannel:
    print("DISCARDING PRIVATE MESSAGE FROM", message.author)
    return
  # Extra guard against our own messages in case the bot flag is missing.
  if "markov-bot" in str(message.author) or "MikuBot" in str(message.author):
    print("Discarding self message")
    return
  print("Got message on channel ", message.channel, "from author", message.author, ":", message.content)
  split = message.content.split()
  if len(split) == 0:
    return
  if split[0] in ["?femboy", "?tomboy"]:
    # Greet users reacting in the welcome channel by pinging them in a
    # hard-coded destination channel.
    if "welcome-center" in str(message.channel):
      await client.send_message(message.server.get_channel('308342435430400012'), "Welcome <@" + str(message.author.id) + ">!")
  elif split[0] == "!help":
    await client.send_message(message.channel, "Commands: `!markov` - Generates random text based on collected probabilities\n`!markov <starting word>` - Generates starting from a particular word\n`!markov <limit>` - Generates random text with the given length\n`!percents <word>` - Shows statistics on the given word\n`!mask <message>` - Misspells some text\n`!mask10 <message>` - Misspells some text 10 times")
  elif split[0] == "!markov":
    await client.send_typing(message.channel)
    # Optional first argument: starting word or length limit.
    arg = split[1] if len(split) > 1 else False
    print("Sending")
    await client.send_message(message.channel, make_message(arg))
  elif split[0] == "!percents" and len(split) > 1:
    percents = get_percents(split[1])
    await client.send_message(message.channel, percents)
  elif split[0] == "!mask":
    await client.send_message(message.channel, mask.mask(" ".join(split[1:])))
  elif split[0] == "!mask10":
    # Apply the misspelling filter repeatedly, showing each stage.
    msg = []
    curr = mask.mask(" ".join(split[1:]))
    for _ in range(10):
      msg.append(curr)
      curr = mask.mask(curr)
    await client.send_message(message.channel, "\n".join(msg))
  elif split[0] == "!poll" and message.author.id == "158673755105787904":
    # Poll creation is restricted to a single hard-coded admin user id.
    options = [x.strip() for x in message.content.split("\n") if len(x.strip()) > 0 and x != "!poll"]
    users = [x.id for x in message.server.members]
    if len(users) == 0 or len(options) == 0:
      await client.send_message(message.channel, "Error - zero options or zero users")
      return  # BUG FIX: previously fell through and started an empty poll anyway
    start_poll(users, options)
    await client.send_message(message.channel, "Poll started with " + str(len(options)) + " and " + str(len(users)) + " authorized users")
  elif split[0] == "!vote" and message.channel.id == org_channel:
    # cast_vote returns True on success, or an error string to relay.
    result = cast_vote(message, split)
    if result is True:
      await client.add_reaction(message, "✅")
    else:
      await client.send_message(message.channel, result)
  elif split[0] == "!votes" and message.channel.id == org_channel:
    msg = get_votes()
    if msg:
      await client.send_message(message.channel, msg)
  else:
    # Not a command: feed the text into the markov model.
    markov_add(message.content)
  print("Took " + str(time.time() - start) + " seconds to process message of " + str(len(split)) + " words")
Esempio n. 2
0
def Open_Mask(window):
    """Close *window* and open a fresh, fixed-size Tk window for the
    mask screen, then populate it via ShowUI and mask.mask.
    """
    window.destroy()
    new_Frame = Tk()
    new_Frame.geometry("800x500+500+200")  # window size and screen position
    new_Frame.configure(background='aquamarine')  # background colour
    new_Frame.title("Corona")  # window title
    new_Frame.resizable(False, False)  # disallow resizing
    ShowUI(new_Frame)
    mask.mask(new_Frame)
def tau(modelname):
    """Compute yearly vegetation-carbon turnover times and their change
    relative to the 1901-1930 baseline mean.

    Parameters:
        modelname: NOTE(review) — unused; the global `mn` is passed to
            mask() instead. Confirm whether `mn` should be `modelname`.

    Returns:
        (tau, tau_change): list of 116 yearly turnover times, and an
        ndarray of their deviation from the baseline mean.
    """
    NPP = mask('annual', 'S3', 'npp', total_mask, mn)
    CVeg = mask('annual', 'S3', 'cVeg', total_mask, mn)

    time = np.arange(1901, 2018)

    tau = []

    for i in range(0, 116):
        # turnover time = carbon stock / (input flux - stock change rate)
        tau_prel = CVeg[i + 1] / (NPP[i + 1] - ((CVeg[i + 1] - CVeg[i]) /
                                                (time[i + 1] - time[i])))
        tau.append(tau_prel)

    # BUG FIX: `tau` is a plain Python list, so `tau - scalar` raised a
    # TypeError; go through an ndarray for the elementwise subtraction.
    tau_change = np.asarray(tau) - (sum(tau[:30]) / len(tau[:30]))
    return (tau, tau_change)
Esempio n. 4
0
    def __init__(self,trainingfeatures,traininglabels):
        '''
        Input:
            trainingfeatures: numpy array of features with one row per sample
            traininglabels: list of character labels with each entry corresponding
                            to its row in trainingfeatures
        Output:
            object of type ml

        Initializes class of type ml.  Builds multiple machine learning modules.
        '''

        # Map each distinct label to a small integer id (labeldict) and
        # keep the inverse mapping (intdict) so predictions can be decoded.
        self.labeldict={}
        self.intdict={}
        self.traininglabels = np.zeros_like(traininglabels,dtype=int)
        n=0
        for i in xrange(len(traininglabels)):
            val= self.labeldict.get(traininglabels[i])
            if val==None:
                # First occurrence of this label: assign the next integer id.
                val = n
                self.labeldict[traininglabels[i]]=n
                self.intdict[n]=traininglabels[i]
                n+=1
            self.traininglabels[i]=val

        # The last 23 feature columns feed the mask generator.
        # NOTE(review): the -23 split is assumed from this slice alone —
        # confirm against the feature extractor.
        colorFeatures=trainingfeatures[:,-23:];
        print "labeldict: ", self.labeldict
        print"-------\ninverted dict: ", self.intdict
        self.maskGen= mask.mask(colorFeatures, self.traininglabels, self.intdict)

        # Normalize features, remembering the statistics for later inputs.
        self.trainingfeatures,self.means,self.stds=normalizearray(trainingfeatures)

        # Split into training/testing subsets and fit the SVM.
        testfeatures,testlabels,tfeatures,tlabels = splitTrainingTesting(self.trainingfeatures,self.traininglabels)
        self.svm = buildSVM(tlabels,tfeatures,testlabels,testfeatures)
Esempio n. 5
0
def compute(dataframe, summarize=0, keywords_position=0):
    """Run the anonymisation pipeline over *dataframe*.

    Parameters:
        dataframe: input data handed to refine_label/vectorize
        summarize: 0 = no summarization, 1 = summarize.  Previously a
            hard-coded constant; now a backward-compatible keyword.
        keywords_position: 0 = keyword mode, 1 = position mode; only
            consulted when *summarize* is falsy.

    Returns:
        The result of Obfuscate for the selected mode.
    """
    #nltk.download('words')

    # Pre-processing: relabel, clean, mask and impute the rows.
    rows = refine_label(dataframe)
    rows = process(rows)
    text, label = mask(rows)
    # NOTE(review): the imputed frame is never used afterwards —
    # presumably run for side effects; confirm against the caller.
    first_dataframe = impute(text, label)
    print("ready to swap!")

    if summarize:
        # SUMMARIZE FUNC
        tfidf_result = vectorize(dataframe, True)
        result = Obfuscate(tfidf_result, "summary")
    else:
        # Keyword and position modes share the same vectorization; only
        # the Obfuscate mode string differs.
        tfidf_result = vectorize(dataframe, False)
        mode = "pos" if keywords_position else "keyword"
        result = Obfuscate(tfidf_result, mode)
    return result
Esempio n. 6
0
File: model.py Progetto: pf4d/issm
    def __init__(self):  #{{{
        """Populate a new model with default instances of every ISSM
        sub-component (mesh, physics, solver settings, outputs)."""

        # classtype=model.properties

        # for classe in dict.keys(classtype):
        # 	print classe
        # 	self.__dict__[classe] = classtype[str(classe)]

        # Mesh, masks, geometry and forcings
        self.mesh = mesh2d()
        self.mask = mask()
        self.geometry = geometry()
        self.constants = constants()
        self.smb = SMBforcing()
        self.basalforcings = basalforcings()
        self.materials = matice()
        self.damage = damage()
        self.friction = friction()
        self.flowequation = flowequation()
        self.timestepping = timestepping()
        self.initialization = initialization()
        self.rifts = rifts()
        self.slr = slr()

        # Diagnostics and run configuration
        self.debug = debug()
        self.verbose = verbose()
        self.settings = settings()
        self.toolkits = toolkits()
        self.cluster = generic()

        # Physics / analysis modules
        self.balancethickness = balancethickness()
        self.stressbalance = stressbalance()
        self.groundingline = groundingline()
        self.hydrology = hydrologyshreve()
        self.masstransport = masstransport()
        self.thermal = thermal()
        self.steadystate = steadystate()
        self.transient = transient()
        self.levelset = levelset()
        self.calving = calving()
        self.gia = giaivins()

        # Inversion and advanced capabilities
        self.autodiff = autodiff()
        self.inversion = inversion()
        self.qmu = qmu()
        self.amr = amr()

        # Outputs and bookkeeping
        self.results = results()
        self.outputdefinition = outputdefinition()
        self.radaroverlay = radaroverlay()
        self.miscellaneous = miscellaneous()
        self.private = private()
Esempio n. 7
0
async def on_message(message):
  """Discord message handler (variant): dispatches chat commands; any
  other text feeds the markov model.  Emotes are parsed at the end for
  every message that reaches the dispatch chain."""
  print("Got message on channel ", message.channel, "from author", message.author, ":", message.content)
  #print("Author: ", message.author, type(message.author))
  #print("Channel: ", message.channel, type(message.channel))
  # Ignore messages from any bot account.
  if (message.author.bot):
    print("DISCARDING BOT MESSAGE FROM ", message.author)
    return
  # Ignore direct messages: commands only work inside a server channel.
  if type(message.channel) == discord.channel.PrivateChannel:
    print("DISCARDING PRIVATE MESSAGE FROM", message.author)
    return
  # Extra guard against our own messages in case the bot flag is missing.
  if "markov-bot" in str(message.author) or "MikuBot" in str(message.author):
    print("Discarding self message")
    return
  split = message.content.split()
  if len(split) == 0: return
  if split[0] == "!down":
    # Register a downtime-notification preference.
    await notify_pref(message, split)
    pass
  elif split[0] == "!help":
    await client.send_message(message.channel, "Commands: `!markov` - Generates random text based on collected probabilities\n`!markov <starting word>` - Generates starting from a particular word\n`!markov <limit>` - Generates random text with the given length\n`!percents <word>` - Shows statistics on the given word")
  elif split[0] == "!markov":
    # Optional argument: starting word or length limit.
    args = message.content.split()
    arg = False
    if len(args) > 1:
      arg = args[1]
    print("Sending")
    await client.send_message(message.channel, make_message(arg))
  elif split[0] == "!percents" and len(split) > 1:
    percents = get_percents(split[1])
    await client.send_message(message.channel, percents)
  elif split[0] == "!emotes":
    # List every stored emote value from the database.
    await client.send_message(message.channel, " ".join([x[0] for x in con.execute("SELECT value FROM emotes;").fetchall()]))
  elif split[0] == "!mask":
    await client.send_message(message.channel, mask.mask(" ".join(split[1:])))
  elif len(split) == 1:
    # NOTE(review): this early return disables the emote-reply feature —
    # everything below it in this branch is unreachable, and one-word
    # messages also skip the final parse_emotes call.  Confirm this is
    # an intentional kill switch before removing the dead code.
    return;
    if split[0] in ["no", "yes", "yeah", "rip", "oh", "hehe", "xd", "xD", "ayy", "lol", "what", "y", "n", "s", "waitwhat", "oof", "ok", "lmao", "Yes", "Nice"]: return;
    #res = db.Select("emotes", key = split[0])
    res = con.execute("SELECT value FROM emotes WHERE key = ?", [split[0]]).fetchall()
    if len(res) != 0:
      #await client.send_message(message.channel, res[0]["value"])
      await client.send_message(message.channel, res[0][0])
  else:
    # Not a command: feed the text into the markov model.
    markov_add(message.content);
  parse_emotes(message.content);
Esempio n. 8
0
def mask_merge_cog(jp2s, nodata=None, geojson_mask=None, clean=False, **kwargs):
    """Mask, merge, and cog a list of Sentinel jp2s.

    Arguments:
        jp2s: List of paths to jp2 files
        nodata: An override nodata value for the source imagery
        geojson_mask: Path to a GeoJSON to apply as mask
        clean: bool: To delete input and intermediate files after processing
        Optional **kwargs to pass to mask.mask and cog.build_local

    Returns: Path to a COG
    """
    # Convert every jp2 into an intermediate GeoTIFF.
    geotiffs = []
    for jp2 in jp2s:
        geotiffs.append(jp2_to_geotiff(jp2, clean=clean))

    # Without a GeoJSON mask the GeoTIFFs pass straight through.
    masked = geotiffs
    if geojson_mask:
        masked = [
            mask.mask(tif, geojson_mask, nodata=nodata, clean=clean, **kwargs)
            for tif in geotiffs
        ]

    # Merge the (possibly masked) tiles into a single cloud-optimized GeoTIFF.
    return cog.build_local(masked, nodata=nodata, clean=clean, **kwargs)
ax5 = fig.add_subplot(3,2,5)
ax6 = fig.add_subplot(3,2,6)

titles = ['Tropics', 'Warm temperate', 'Cool temperate', 'Savanna',
          'Mediterranean', 'Desert']
veg_masks = [tropics_mask, warm_temperate_mask, cool_temperate_mask,
             savanna_mask, mediterranean_mask, desert_mask]

axes = [ax1, ax2, ax3, ax5, ax4, ax6]

time = np.arange(1,13)
idx = [6,7,8,9,10,11,0,1,2,3,4,5]

for vegm, ax, t in zip(veg_masks, axes, titles):
    print(t)
    annual_gosif = mask('seasonal', 'S3', 'GPP', vegm, 'GOSIF-GPP')
    annual_gosif_sd = mask('seasonal', 'S3', 'GPP_SD', vegm, 'GOSIF-GPP_SD')

    annual_upper = np.array(annual_gosif) + np.array(annual_gosif_sd)
    annual_lower = np.array(annual_gosif) - np.array(annual_gosif_sd)

    ax.plot(time, np.array(annual_gosif)[idx], color = 'k', lw = 4.0,
            label = 'GOSIF-GPP')
    ax.fill_between(time, annual_upper[idx], annual_lower[idx], color = 'k',
                    alpha = 0.3)
    ax.set_xticks(time)
    ax.set_title(t)

    for mn, c in zip(model_names, colours):
        annual = mask('seasonal', 'S3', 'gpp', vegm, mn)
        if mn in ('CABLE-POP', 'ISAM', 'JULES-ES', 'ORCHIDEE', 'VISIT'):
def sky_spec(filename):
    '''
    Locate the sky region around the target spectrum in a 2D FITS frame
    and display diagnostic mask plots.

    Parameter
    -------------
    filename: path to the FITS file holding the 2D spectral data

    Output
    -------------
    The visible code only shows diagnostic plots and returns nothing.
    NOTE(review): this docstring originally promised a median sky
    spectrum and documented a `data` parameter that does not exist —
    confirm against the full source file.
    '''

    #getting the data from the filename
    data = fits.getdata(filename)

    #print('filename: ' + filename+ ', shape =  '+ str(data.shape))

    #getting the header for the file as we will need it to get transformation from pixels to wavelength.
    hdr = fits.getheader(filename)

    #making an array with pixel-coordinates
    pix_x, pix_y = np.meshgrid(range(len(data[0, :])), range(len(data[:, 0])))

    #getting the transormation for the file from the header
    transformation = get_wcs_solution(hdr)

    #this gives me a 2D wavelength array for each (x, y) pixel coordinates in the array
    wvln_array = transformation(pix_x, pix_y)

    #making my guess to the row center into its own variable
    row_center_guess = 1690

    #box half-widths around the guess.  NOTE(review): "minimum" (200) is
    #subtracted and "maximum" (125) is added, so the window is asymmetric
    #around the guess — confirm this is intended.
    minimum = 200
    maximum = 125

    #making the actual row_min variables and row_max variables
    row_min = row_center_guess - minimum
    row_max = row_center_guess + maximum

    #getting the mask that will filter out the bad data
    filt_mask = mask(row_min, row_max, data)

    #applying the mask to the data
    reduced_data = filt_mask * data

    #plt.imshow(reduced_data, origin='lower', cmap ='gray', norm=LogNorm())
    #plt.show()

    #getting the index of the row for the center of our spectrum
    spec_row_center = finding_row_center(reduced_data)

    #rounding the index to the nearest 5 or 0 so that vsplit can work properly in the sum_sky function
    row_center = myround(spec_row_center)

    #applying mask to wavelength array
    wvln_reduced = wvln_array * filt_mask

    #making a window to not include the target spectrum in this case we go 50 pixels above and below the center of our spectrum
    window = 50

    #two candidate sky windows below the spectrum, displayed side by side
    test_mask = mask(row_center - 150, row_center - 50, data)
    test_mask1 = mask(row_center - 200, row_center - 50, data)

    plt.figure(figsize=(10, 10))
    gs = gridspec.GridSpec(2, 2)

    ax = plt.subplot(gs[:, 0])
    ax.imshow(test_mask * data, origin='lower', cmap='gray', norm=LogNorm())

    ax1 = plt.subplot(gs[:, -1])
    ax1.imshow(test_mask1 * data, origin='lower', cmap='gray', norm=LogNorm())

    plt.show()
    '''
Esempio n. 11
0
from preprocess_0821 import process
from mask import mask
from predict_tsv_1_seq import impute
from refine_label import refine_label
from csv_to_tsv import to_tsv

# Pipeline driver: relabel, preprocess, mask, impute, then convert the
# result to TSV.  Each step presumably reads/writes files on disk since
# no values are passed between the calls — TODO confirm.
refine_label()
process("0")
mask()
impute()
to_tsv()
Esempio n. 12
0
def getmatrix(version, finalmessage, ecl):
    """Assemble the full QR-code module matrix.

    Arguments:
        version: QR version number (1-40); fixes matrix size and the
            alignment/version-information placement
        finalmessage: sequence of data bits (0/1) to place in the matrix
        ecl: error-correction level, forwarded to utils.genformatinfo

    Returns: the masked module matrix with format information applied.

    Fixes over the original: the body mixed tabs and spaces (a
    SyntaxError/TabError under Python 3) and used a Python-2-only
    `print` statement; indentation is normalized to spaces (matching the
    other copy of this function in the file) and the debug print is
    parenthesized.  `next` is renamed to avoid shadowing the builtin.
    """
    size = matrixsizedict[version]
    remainderbits = remnumdict[version]  # NOTE(review): unused here — confirm
    matrix = [[-1 for i in range(size)] for j in range(size)]

    # 1. Finder patterns and separators: concentric 8/7/5/3 squares in
    # three corners.
    # upper left
    for i in range(8):
        for j in range(8):
            matrix[i][j] = 0
    for i in range(7):
        for j in range(7):
            matrix[i][j] = 1
    for i in range(1, 6):
        for j in range(1, 6):
            matrix[i][j] = 0
    for i in range(2, 5):
        for j in range(2, 5):
            matrix[i][j] = 1

    # upper right
    for i in range(size - 8, size):
        for j in range(8):
            matrix[i][j] = 0
    for i in range(size - 7, size):
        for j in range(7):
            matrix[i][j] = 1
    for i in range(size - 6, size - 1):
        for j in range(1, 6):
            matrix[i][j] = 0
    for i in range(size - 5, size - 2):
        for j in range(2, 5):
            matrix[i][j] = 1

    # lower left
    for i in range(8):
        for j in range(size - 8, size):
            matrix[i][j] = 0
    for i in range(7):
        for j in range(size - 7, size):
            matrix[i][j] = 1
    for i in range(1, 6):
        for j in range(size - 6, size - 1):
            matrix[i][j] = 0
    for i in range(2, 5):
        for j in range(size - 5, size - 2):
            matrix[i][j] = 1

    # 2. Timing patterns: alternating modules along row/column 6.
    for i in range(8, size - 8, 2):
        matrix[i][6] = 1
        matrix[i + 1][6] = 0
    for j in range(8, size - 8, 2):
        matrix[6][j] = 1
        matrix[6][j + 1] = 0

    # 3. Alignment patterns (version 1 has none); skip any that would
    # overlap a finder-pattern corner.
    if version != 1:
        cents = aligndict[version]
        for centx in cents:
            for centy in cents:
                if not ((centx in range(8) and centy in range(8)) or
                        (centx in range(size - 8, size) and centy in range(8)) or
                        (centx in range(8) and centy in range(size - 8, size))):
                    for i in range(centx - 2, centx + 3):
                        for j in range(centy - 2, centy + 3):
                            matrix[i][j] = 1
                    for i in range(centx - 1, centx + 2):
                        for j in range(centy - 1, centy + 2):
                            matrix[i][j] = 0
                    matrix[centx][centy] = 1

    # 4. Reserve the format-information modules with a dummy '000' mask
    # pattern; the real values are rewritten after masking below.
    formatinfo = utils.genformatinfo(ecl, '000')
    print(len(formatinfo))  # debug output
    formatinfo = [int(i) for i in formatinfo]
    for j in range(6):
        matrix[8][j] = formatinfo[j]
    matrix[7][8] = formatinfo[6]
    for j in range(size - 8, size):
        matrix[8][j] = formatinfo[j - size + 15]

    for i in range(size - 1, size - 8, -1):
        matrix[i][8] = formatinfo[size - 1 - i]
    matrix[size - 8][8] = 1  # this position should always be dark
    matrix[8][8] = formatinfo[7]
    # NOTE(review): matrix[7][8] was already set to formatinfo[6] above and
    # is overwritten here — verify the intended module coordinates.
    matrix[7][8] = formatinfo[8]
    for i in range(6):
        matrix[i][8] = formatinfo[-(i + 1)]

    # 5. Version information (versions 7-40 only), written in the
    # upper-right and lower-left 6x3 areas.
    if version > 6:
        versioninfo = versioninfodict[version]
        versioninfo = [int(i) for i in versioninfo]
        # upper right
        n = len(versioninfo) - 1
        for i in range(6):
            for j in range(size - 11, size - 8):
                matrix[i][j] = versioninfo[n]
                n -= 1

        # lower left
        n = len(versioninfo) - 1
        for j in range(6):
            for i in range(size - 11, size - 8):
                matrix[i][j] = versioninfo[n]
                n -= 1

    # 6. Place the data bits in the walking order given by utils.findnext.
    # 2 encodes a data 0 and 3 a data 1, so masking can tell data modules
    # apart from function modules.
    # CAUTION!!! what x and y mean
    x = size - 1
    y = size - 1

    lflag = 1
    # vflag 0 means walk up
    vflag = 0
    position = (x, y)
    k = 0

    while k < len(finalmessage):
        while True:
            nxt, position, lflag, vflag = utils.findnext(
                matrix, position, lflag, vflag, size)
            if nxt < 0:
                break
        x, y = position
        if finalmessage[k] == 0:
            matrix[x][y] = 2
        else:
            matrix[x][y] = 3
        k += 1

    # Any module still unset defaults to 0.
    for i in range(size):
        for j in range(size):
            if matrix[i][j] == -1:
                matrix[i][j] = 0

    # 7. Apply the best mask pattern.
    matrix, maskpattern = mask.mask(matrix)

    # Rewrite the format information with the mask pattern actually chosen.
    formatinfo = utils.genformatinfo(ecl, maskpattern)
    formatinfo = [int(i) for i in formatinfo]
    for j in range(6):
        matrix[8][j] = formatinfo[j]
    matrix[7][8] = formatinfo[6]
    for j in range(size - 8, size):
        matrix[8][j] = formatinfo[j - size + 15]

    for i in range(size - 1, size - 8, -1):
        matrix[i][8] = formatinfo[size - 1 - i]
    matrix[size - 8][8] = 1  # this position should always be dark
    matrix[8][8] = formatinfo[7]
    matrix[7][8] = formatinfo[8]
    for i in range(6):
        matrix[i][8] = formatinfo[-(i + 1)]

    return matrix
Esempio n. 13
0
def getmatrix(version, finalmessage, ecl):
    """Assemble the full QR-code module matrix.

    Arguments:
        version: QR version number (1-40); fixes matrix size and the
            alignment/version-information placement
        finalmessage: sequence of data bits (0/1) to place in the matrix
        ecl: error-correction level, forwarded to utils.genformatinfo

    Returns: the masked module matrix with format information applied.

    Fixes over the original: the Python-2-only `print` statement is
    parenthesized so the module also parses under Python 3, and `next`
    is renamed to avoid shadowing the builtin.
    """
    size = matrixsizedict[version]
    remainderbits = remnumdict[version]  # NOTE(review): unused here — confirm
    matrix = [[-1 for i in range(size)] for j in range(size)]

    # 1. Finder patterns and separators: concentric 8/7/5/3 squares in
    # three corners.
    # upper left
    for i in range(8):
        for j in range(8):
            matrix[i][j] = 0
    for i in range(7):
        for j in range(7):
            matrix[i][j] = 1
    for i in range(1, 6):
        for j in range(1, 6):
            matrix[i][j] = 0
    for i in range(2, 5):
        for j in range(2, 5):
            matrix[i][j] = 1

    # upper right
    for i in range(size - 8, size):
        for j in range(8):
            matrix[i][j] = 0
    for i in range(size - 7, size):
        for j in range(7):
            matrix[i][j] = 1
    for i in range(size - 6, size - 1):
        for j in range(1, 6):
            matrix[i][j] = 0
    for i in range(size - 5, size - 2):
        for j in range(2, 5):
            matrix[i][j] = 1

    # lower left
    for i in range(8):
        for j in range(size - 8, size):
            matrix[i][j] = 0
    for i in range(7):
        for j in range(size - 7, size):
            matrix[i][j] = 1
    for i in range(1, 6):
        for j in range(size - 6, size - 1):
            matrix[i][j] = 0
    for i in range(2, 5):
        for j in range(size - 5, size - 2):
            matrix[i][j] = 1

    # 2. Timing patterns: alternating modules along row/column 6.
    for i in range(8, size - 8, 2):
        matrix[i][6] = 1
        matrix[i + 1][6] = 0
    for j in range(8, size - 8, 2):
        matrix[6][j] = 1
        matrix[6][j + 1] = 0

    # 3. Alignment patterns (version 1 has none); skip any that would
    # overlap a finder-pattern corner.
    if version != 1:
        cents = aligndict[version]
        for centx in cents:
            for centy in cents:
                if not (
                    (centx in range(8) and centy in range(8)) or
                    (centx in range(size - 8, size) and centy in range(8)) or
                    (centx in range(8) and centy in range(size - 8, size))):
                    for i in range(centx - 2, centx + 3):
                        for j in range(centy - 2, centy + 3):
                            matrix[i][j] = 1
                    for i in range(centx - 1, centx + 2):
                        for j in range(centy - 1, centy + 2):
                            matrix[i][j] = 0
                    matrix[centx][centy] = 1

    # 4. Reserve the format-information modules with a dummy '000' mask
    # pattern; the real values are rewritten after masking below.
    formatinfo = utils.genformatinfo(ecl, '000')
    print(len(formatinfo))  # debug output
    formatinfo = [int(i) for i in formatinfo]
    for j in range(6):
        matrix[8][j] = formatinfo[j]
    matrix[7][8] = formatinfo[6]
    for j in range(size - 8, size):
        matrix[8][j] = formatinfo[j - size + 15]

    for i in range(size - 1, size - 8, -1):
        matrix[i][8] = formatinfo[size - 1 - i]
    matrix[size - 8][8] = 1  # this position should always be dark
    matrix[8][8] = formatinfo[7]
    # NOTE(review): matrix[7][8] was already set to formatinfo[6] above and
    # is overwritten here — verify the intended module coordinates.
    matrix[7][8] = formatinfo[8]
    for i in range(6):
        matrix[i][8] = formatinfo[-(i + 1)]

    # 5. Version information (versions 7-40 only), written in the
    # upper-right and lower-left 6x3 areas.
    if version > 6:
        versioninfo = versioninfodict[version]
        versioninfo = [int(i) for i in versioninfo]
        # upper right
        n = len(versioninfo) - 1
        for i in range(6):
            for j in range(size - 11, size - 8):
                matrix[i][j] = versioninfo[n]
                n -= 1

        # lower left
        n = len(versioninfo) - 1
        for j in range(6):
            for i in range(size - 11, size - 8):
                matrix[i][j] = versioninfo[n]
                n -= 1

    # 6. Place the data bits in the walking order given by utils.findnext.
    # 2 encodes a data 0 and 3 a data 1, so masking can tell data modules
    # apart from function modules.
    # CAUTION!!! what x and y mean
    x = size - 1
    y = size - 1

    lflag = 1
    # vflag 0 means walk up
    vflag = 0
    position = (x, y)
    k = 0

    while k < len(finalmessage):
        while True:
            nxt, position, lflag, vflag = utils.findnext(
                matrix, position, lflag, vflag, size)
            if nxt < 0:
                break
        x, y = position
        if finalmessage[k] == 0:
            matrix[x][y] = 2
        else:
            matrix[x][y] = 3
        k += 1

    # Any module still unset defaults to 0.
    for i in range(size):
        for j in range(size):
            if matrix[i][j] == -1:
                matrix[i][j] = 0

    # 7. Apply the best mask pattern.
    matrix, maskpattern = mask.mask(matrix)

    # Rewrite the format information with the mask pattern actually chosen.
    formatinfo = utils.genformatinfo(ecl, maskpattern)
    formatinfo = [int(i) for i in formatinfo]
    for j in range(6):
        matrix[8][j] = formatinfo[j]
    matrix[7][8] = formatinfo[6]
    for j in range(size - 8, size):
        matrix[8][j] = formatinfo[j - size + 15]

    for i in range(size - 1, size - 8, -1):
        matrix[i][8] = formatinfo[size - 1 - i]
    matrix[size - 8][8] = 1  # this position should always be dark
    matrix[8][8] = formatinfo[7]
    matrix[7][8] = formatinfo[8]
    for i in range(6):
        matrix[i][8] = formatinfo[-(i + 1)]

    return matrix
Esempio n. 14
0
def train():
    cfg = opt.cfg
    data = opt.data
    img_size, img_size_test = opt.img_size if len(
        opt.img_size) == 2 else opt.img_size * 2  # train, test sizes
    epochs = opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs
    batch_size = opt.batch_size
    accumulate = opt.accumulate  # effective bs = batch_size * accumulate = 16 * 4 = 64
    weights = opt.weights  # initial training weights

    # Initialize
    init_seeds()
    if opt.multi_scale:
        img_sz_min = round(img_size / 32 / 1.5)
        img_sz_max = round(img_size / 32 * 1.5)
        img_size = img_sz_max * 32  # initiate with maximum multi_scale size
        print('Using multi-scale %g - %g' % (img_sz_min * 32, img_size))

    # Configure run
    data_dict = parse_data_cfg(data)
    train_path = data_dict['train']
    test_path = data_dict['valid']
    nc = 1 if opt.single_cls else int(
        data_dict['classes'])  # number of classes

    # Remove previous results
    for f in glob.glob('*_batch*.png') + glob.glob(results_file):
        os.remove(f)

    # Initialize model
    model = Darknet(cfg, arc=opt.arc).to(device)

    # Optimizer
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in dict(model.named_parameters()).items():
        if '.bias' in k:
            pg2 += [v]  # biases
        elif 'Conv2d.weight' in k:
            pg1 += [v]  # apply weight_decay
        else:
            pg0 += [v]  # all else

    if opt.adam:
        # hyp['lr0'] *= 0.1  # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
        optimizer = optim.Adam(pg0, lr=hyp['lr0'])
        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
    else:
        optimizer = optim.SGD(pg0,
                              lr=hyp['lr0'],
                              momentum=hyp['momentum'],
                              nesterov=True)
    optimizer.add_param_group({
        'params': pg1,
        'weight_decay': hyp['weight_decay']
    })  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    del pg0, pg1, pg2

    # https://github.com/alphadl/lookahead.pytorch
    # optimizer = torch_utils.Lookahead(optimizer, k=5, alpha=0.5)

    start_epoch = 0
    best_fitness = 0.0
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
        chkpt = torch.load(weights, map_location=device)

        # load model
        try:
            chkpt['model'] = {
                k: v
                for k, v in chkpt['model'].items()
                if model.state_dict()[k].numel() == v.numel()
            }
            model.load_state_dict(chkpt['model'], strict=False)
        except KeyError as e:
            s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
            raise KeyError(s) from e

        # load optimizer
        if chkpt['optimizer'] is not None:
            optimizer.load_state_dict(chkpt['optimizer'])
            best_fitness = chkpt['best_fitness']

        # load results
        if chkpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(chkpt['training_results'])  # write results.txt

        start_epoch = chkpt['epoch'] + 1
        del chkpt

    elif len(weights) > 0:  # darknet format
        # possible weights are '*.weights', 'yolov3-tiny.conv.15',  'darknet53.conv.74' etc.
        load_darknet_weights(model, weights)

    # Scheduler https://github.com/ultralytics/yolov3/issues/238
    # lf = lambda x: 1 - x / epochs  # linear ramp to zero
    # lf = lambda x: 10 ** (hyp['lrf'] * x / epochs)  # exp ramp
    # lf = lambda x: 1 - 10 ** (hyp['lrf'] * (1 - x / epochs))  # inverse exp ramp
    # scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=range(59, 70, 1), gamma=0.8)  # gradual fall to 0.1*lr0
    scheduler = lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[round(opt.epochs * x) for x in [0.8, 0.9]],
        gamma=0.1)
    scheduler.last_epoch = start_epoch - 1

    # # Plot lr schedule
    # y = []
    # for _ in range(epochs):
    #     scheduler.step()
    #     y.append(optimizer.param_groups[0]['lr'])
    # plt.plot(y, label='LambdaLR')
    # plt.xlabel('epoch')
    # plt.ylabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level='O1',
                                          verbosity=0)

    # Initialize distributed training
    if device.type != 'cpu' and torch.cuda.device_count() > 1:
        dist.init_process_group(
            backend='nccl',  # 'distributed backend'
            init_method=
            'tcp://127.0.0.1:9999',  # distributed training init method
            world_size=1,  # number of nodes for distributed training
            rank=0)  # distributed training node rank
        model = torch.nn.parallel.DistributedDataParallel(
            model, find_unused_parameters=True)
        model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

    # Dataset
    dataset = LoadImagesAndLabels(
        train_path,
        img_size,
        batch_size,
        augment=True,
        hyp=hyp,  # augmentation hyperparameters
        rect=opt.rect,  # rectangular training
        cache_labels=True,
        cache_images=opt.cache_images,
        single_cls=opt.single_cls)

    # Dataloader
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=nw,
        shuffle=not opt.
        rect,  # Shuffle=True unless rectangular training is used
        pin_memory=True,
        collate_fn=dataset.collate_fn)

    # Testloader
    testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(
        test_path,
        img_size_test,
        batch_size * 2,
        hyp=hyp,
        rect=True,
        cache_labels=True,
        cache_images=opt.cache_images,
        single_cls=opt.single_cls),
                                             batch_size=batch_size * 2,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # Start training
    nb = len(dataloader)
    prebias = start_epoch == 0
    model.nc = nc  # attach number of classes to model
    model.arc = opt.arc  # attach yolo architecture
    model.hyp = hyp  # attach hyperparameters to model
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(
        device)  # attach class weights
    maps = np.zeros(nc)  # mAP per class
    # torch.autograd.set_detect_anomaly(True)
    results = (
        0, 0, 0, 0, 0, 0, 0
    )  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    t0 = time.time()
    torch_utils.model_info(model, report='summary')  # 'full' or 'summary'
    print('Using %g dataloader workers' % nw)
    print('Starting training for %g epochs...' % epochs)
    for epoch in range(start_epoch,
                       epochs):  # epoch ------------------------------
        model.train()

        # Prebias
        if prebias:
            if epoch < 3:  # prebias
                ps = 0.1, 0.9  # prebias settings (lr=0.1, momentum=0.9)
            else:  # normal training
                ps = hyp['lr0'], hyp['momentum']  # normal training settings
                print_model_biases(model)
                prebias = False

            # Bias optimizer settings
            optimizer.param_groups[2]['lr'] = ps[0]
            if optimizer.param_groups[2].get(
                    'momentum') is not None:  # for SGD but not Adam
                optimizer.param_groups[2]['momentum'] = ps[1]

        # Update image weights (optional)
        if dataset.image_weights:
            w = model.class_weights.cpu().numpy() * (1 -
                                                     maps)**2  # class weights
            image_weights = labels_to_image_weights(dataset.labels,
                                                    nc=nc,
                                                    class_weights=w)
            dataset.indices = random.choices(range(dataset.n),
                                             weights=image_weights,
                                             k=dataset.n)  # rand weighted idx

        mloss = torch.zeros(4).to(device)  # mean losses
        print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls',
                                     'total', 'targets', 'img_size'))
        pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
        for i, (
                imgs, targets, paths, _
        ) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device).float(
            ) / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
            targets = targets.to(device)

            # Hyperparameter burn-in
            # n_burn = nb - 1  # min(nb // 5 + 1, 1000)  # number of burn-in batches
            # if ni <= n_burn:
            #     for m in model.named_modules():
            #         if m[0].endswith('BatchNorm2d'):
            #             m[1].momentum = 1 - i / n_burn * 0.99  # BatchNorm2d momentum falls from 1 - 0.01
            #     g = (i / n_burn) ** 4  # gain rises from 0 - 1
            #     for x in optimizer.param_groups:
            #         x['lr'] = hyp['lr0'] * g
            #         x['weight_decay'] = hyp['weight_decay'] * g

            # Plot images with bounding boxes
            if ni == 0:
                fname = 'train_batch%g.png' % i
                plot_images(imgs=imgs,
                            targets=targets,
                            paths=paths,
                            fname=fname)
                if tb_writer:
                    tb_writer.add_image(fname,
                                        cv2.imread(fname)[:, :, ::-1],
                                        dataformats='HWC')

            # Multi-Scale training
            if opt.multi_scale:
                if ni / accumulate % 1 == 0:  #  adjust img_size (67% - 150%) every 1 batch
                    img_size = random.randrange(img_sz_min,
                                                img_sz_max + 1) * 32
                sf = img_size / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [
                        math.ceil(x * sf / 32.) * 32 for x in imgs.shape[2:]
                    ]  # new shape (stretched to 32-multiple)
                    imgs = F.interpolate(imgs,
                                         size=ns,
                                         mode='bilinear',
                                         align_corners=False)

            # Run model
            pred = model(imgs)

            # Compute loss
            loss, loss_items = compute_loss(pred, targets, model, not prebias)
            if not torch.isfinite(loss):
                print('WARNING: non-finite loss, ending training ', loss_items)
                return results

            # Scale loss by nominal batch_size of 64
            loss *= batch_size / 64

            # Compute gradient
            if mixed_precision:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # Accumulate gradient for x batches before optimizing
            if ni % accumulate == 0:
                optimizer.step()
                optimizer.zero_grad()

            # Print batch results
            mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
            mem = '%.3gG' % (torch.cuda.memory_cached() /
                             1E9 if torch.cuda.is_available() else 0)  # (GB)
            s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' %
                                               (epoch, epochs - 1), mem,
                                               *mloss, len(targets), img_size)
            pbar.set_description(s)

            # end batch ------------------------------------------------------------------------------------------------

        # Process epoch results
        final_epoch = epoch + 1 == epochs
        if not opt.notest or final_epoch:  # Calculate mAP
            is_coco = any([
                x in data
                for x in ['fruit.data', 'coco2014.data', 'coco2017.data']
            ]) and model.nc == 80
            results, maps = mask.mask(
                cfg,
                data,
                batch_size=batch_size * 2,
                img_size=img_size_test,
                model=model,
                conf_thres=1E-3 if opt.evolve or
                (final_epoch and is_coco) else 0.1,  # 0.1 faster
                iou_thres=0.6,
                save_json=final_epoch and is_coco,
                single_cls=opt.single_cls,
                dataloader=testloader)

        # Update scheduler
        scheduler.step()

        # Write epoch results
        with open(results_file, 'a') as f:
            f.write(s + '%10.3g' * 7 % results +
                    '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
        if len(opt.name) and opt.bucket:
            os.system('gsutil cp results.txt gs://%s/results/results%s.txt' %
                      (opt.bucket, opt.name))

        # Write Tensorboard results
        if tb_writer:
            x = list(mloss) + list(results)
            titles = [
                'GIoU', 'Objectness', 'Classification', 'Train loss',
                'Precision', 'Recall', 'mAP', 'F1', 'val GIoU',
                'val Objectness', 'val Classification'
            ]
            for xi, title in zip(x, titles):
                tb_writer.add_scalar(title, xi, epoch)

        # Update best mAP
        fi = fitness(np.array(results).reshape(
            1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
        if fi > best_fitness:
            best_fitness = fi

        # Save training results
        save = (not opt.nosave) or (final_epoch and not opt.evolve)
        if save:
            with open(results_file, 'r') as f:
                # Create checkpoint
                chkpt = {
                    'epoch':
                    epoch,
                    'best_fitness':
                    best_fitness,
                    'training_results':
                    f.read(),
                    'model':
                    model.module.state_dict()
                    if type(model) is nn.parallel.DistributedDataParallel else
                    model.state_dict(),
                    'optimizer':
                    None if final_epoch else optimizer.state_dict()
                }

            # Save last checkpoint
            torch.save(chkpt, last)

            # Save best checkpoint
            if best_fitness == fi:
                torch.save(chkpt, best)

            # Save backup every 10 epochs (optional)
            # if epoch > 0 and epoch % 10 == 0:
            #     torch.save(chkpt, wdir + 'backup%g.pt' % epoch)

            # Delete checkpoint
            del chkpt

        # end epoch ----------------------------------------------------------------------------------------------------

    # end training
    n = opt.name
    if len(n):
        n = '_' + n if not n.isnumeric() else n
        fresults, flast, fbest = 'results%s.txt' % n, 'last%s.pt' % n, 'best%s.pt' % n
        os.rename('results.txt', fresults)
        os.rename(wdir + 'last.pt', wdir +
                  flast) if os.path.exists(wdir + 'last.pt') else None
        os.rename(wdir + 'best.pt', wdir +
                  fbest) if os.path.exists(wdir + 'best.pt') else None
        if opt.bucket:  # save to cloud
            os.system('gsutil cp %s gs://%s/results' % (fresults, opt.bucket))
            os.system('gsutil cp %s gs://%s/weights' %
                      (wdir + flast, opt.bucket))
            # os.system('gsutil cp %s gs://%s/weights' % (wdir + fbest, opt.bucket))

    if not opt.evolve:
        plot_results()  # save as results.png
    print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1,
                                                    (time.time() - t0) / 3600))
    dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
    torch.cuda.empty_cache()

    return results
Esempio n. 15
0
# Demo driver: run SiamMask tracking to produce object masks, then remove
# the tracked object from the video by inpainting the masked regions.
import argparse
from mask import mask
from inpaint import inpaint

arg_parser = argparse.ArgumentParser(description='Demo')
arg_parser.add_argument('--resume',
                        default='cp/SiamMask_DAVIS.pth',
                        type=str,
                        metavar='PATH',
                        help='path to latest checkpoint (default: none)')
arg_parser.add_argument('--data',
                        default='data/Human6',
                        help='videos or image files')
arg_parser.add_argument('--mask-dilation',
                        default=32,
                        type=int,
                        help='mask dilation when inpainting')
cli_args = arg_parser.parse_args()

# Stage 1: generate masks; stage 2: inpaint the masked regions away.
mask(cli_args)
inpaint(cli_args)

Esempio n. 16
0
def main():
    """Command-line driver: destagger and rotate U/V pairs in MOM6 output.

    Reads ``infile`` and, for each comma-separated U/V variable pair given
    via ``-u``/``-v``, optionally applies a mask (``--mask``), destaggers
    the fields (unless ``--nodestag``) and rotates them (unless
    ``--norotate``), writing the result to ``outfile``.  Depends on
    module-level helpers ``mask``, ``destagger``, ``uvRot``, ``getLatLon``
    and the netCDF4 module bound as ``nc``.
    """
    import argparse
    global _geomFile
    global _anglFile

    parser = argparse.ArgumentParser(
        description=("UV destaggering and rotation for MOM6 output"))
    parser.add_argument('infile')
    parser.add_argument('outfile')
    # comma-separated variable-name lists; u[i] is paired with v[i]
    parser.add_argument('-u', required=True)
    parser.add_argument('-v', required=True)
    parser.add_argument('-geomFile', default=_geomFile)
    parser.add_argument('-anglFile', default=_anglFile)
    parser.add_argument('--mask', action='store_true')
    parser.add_argument('--nodestag', action='store_true')
    parser.add_argument('--norotate', action='store_true')

    args = parser.parse_args()

    # publish the chosen geometry/angle files to the module-level globals
    # that the helper functions read
    _geomFile = args.geomFile
    _anglFile = args.anglFile

    args.u = args.u.split(',')
    args.v = args.v.split(',')
    print(args)

    # read input file, create output file
    ncd_in = nc.Dataset(args.infile, 'r')
    ncd_out = nc.Dataset(args.outfile, 'w')

    dims_out = set([])  # names of dimensions already created in the output

    # For each u/v pair we need to process
    #------------------------------------------------------------
    for i in range(len(args.u)):

        u_var = args.u[i]
        v_var = args.v[i]
        u_in = ncd_in.variables[u_var]
        v_in = ncd_in.variables[v_var]
        u = u_in[:]
        v = v_in[:]

        # create output dimensions if not already created
        dims = []
        hasXYZ = set([])  # which of the X/Y/Z axis types this variable has
        for d in u_in.dimensions:
            d0 = d  # keep the input dimension name; d may be renamed below
            dim_var = None
            if d in ncd_in.variables:
                dim_var = ncd_in.variables[d]
                # locate the axis-type attribute (either spelling is used)
                ax = None
                for a in 'cartesian_axis', 'axis':
                    if a in dim_var.ncattrs():
                        ax = a
                if ax is not None:
                    # map staggered axes onto common lon/lat/depth names
                    ct = dim_var.getncattr(ax)
                    if ct == 'X':
                        d = 'lon'
                    elif ct == 'Y':
                        d = 'lat'
                    elif ct == 'Z':
                        d = 'depth'
                    hasXYZ.add(ct)
            if d not in dims_out:
                dims_out.add(d)
                ncd_out.createDimension(d, ncd_in.dimensions[d0].size)
                if dim_var is not None:
                    # copy the coordinate variable and all its attributes
                    var = ncd_out.createVariable(d, dim_var.dtype, d)
                    for a in dim_var.ncattrs():
                        var.setncattr(a, dim_var.getncattr(a))
                    # lat/lon coordinate values come from the geometry file
                    if d == "lat":
                        var[:], _ = getLatLon()
                    elif d == "lon":
                        _, var[:] = getLatLon()
                    else:
                        var[:] = dim_var[:]
            dims.append(d)
        # True only when the variable carries all three of X, Y and Z
        hasXYZ = len(hasXYZ & set(('X', 'Y', 'Z'))) == 3

        # mask the variables
        if args.mask and hasXYZ:
            u = mask.mask(u)
            v = mask.mask(v)

        # destagger the variables
        if not args.nodestag:
            u = destagger(u, 'U')
            v = destagger(v, 'V')

        # rotate the variables
        if not args.norotate:
            u, v = uvRot(u, v)

        # create variable
        u_out = ncd_out.createVariable(u_var, u_in.dtype, dims)
        v_out = ncd_out.createVariable(v_var, v_in.dtype, dims)
        for var in ((u_out, u_in), (v_out, v_in)):
            for a in var[1].ncattrs():
                var[0].setncattr(a, var[1].getncattr(a))
        u_out[:] = u
        v_out[:] = v

    ncd_in.close()
    ncd_out.close()
Esempio n. 17
0
def boxplot(var, season, ylabel, vegetation, veg_mask):
    """Draw per-model boxplots of *var* for eight year subsets.

    Subsets: positive/negative IOD years, El Nino/La Nina years, remaining
    negative/positive years, and the 10 driest/wettest years from the
    precipitation record.

    Parameters
    ----------
    var : variable name forwarded to the module-level ``mask`` helper.
    ylabel : y-axis label placed on the left-hand column of axes.
    season, vegetation, veg_mask :
        NOTE(review): currently unused in this function — confirm whether
        they were meant to feed into the masking.

    Uses module-level globals: mask, total_mask, data_prec, pIOD, nIOD,
    nino_years, nina_years, model_names, colours, ax1..ax8.
    """
    # The 13 TRENDY models, in plotting order.
    models = [
        'CABLE-POP', 'CLASS-CTEM', 'CLM5.0', 'ISAM', 'ISBA-CTRIP', 'JSBACH',
        'JULES-ES', 'LPX-Bern', 'OCN', 'ORCHIDEE', 'ORCHIDEE-CNP', 'SDGVM',
        'VISIT'
    ]

    # One masked S3 series per model (replaces 13 hand-written assignments).
    df = pd.DataFrame()
    for m in models:
        df[m] = mask('S3', var, total_mask, m)
    df['year'] = np.arange(1901, 2018)

    # 10 driest / wettest years from the precipitation record.
    df_prec = pd.DataFrame(data_prec.variables['prec'][:, 0, 0],
                           columns=['prec'])
    df_prec['year'] = np.arange(1901, 2018)
    dry_years = df_prec.nsmallest(10, 'prec')['year'].tolist()
    wet_years = df_prec.nlargest(10, 'prec')['year'].tolist()

    # Event-based subsets.
    df_pIOD = df[df.year.isin(pIOD)]
    df_nIOD = df[df.year.isin(nIOD)]
    df_nino = df[df.year.isin(nino_years)]
    df_nina = df[df.year.isin(nina_years)]
    df_dry = df[df.year.isin(dry_years)]
    df_wet = df[df.year.isin(wet_years)]

    # Sign-only frames; the "other" groups exclude the event years above.
    df_positive = df.mask(df <= 0, np.nan)
    df_negative = df.mask(df >= 0, np.nan)
    df_neg = df_negative[~df_negative.year.isin(pIOD)]
    df_neg = df_neg[~df_neg.year.isin(nino_years)]
    df_pos = df_positive[~df_positive.year.isin(nIOD)]
    df_pos = df_pos[~df_pos.year.isin(nina_years)]

    axes = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]
    subsets = [
        df_pIOD, df_nIOD, df_nino, df_nina, df_neg, df_pos, df_dry, df_wet
    ]
    titles = [
        'pIOD', 'nIOD', r'El Ni$\mathrm{\tilde{n}}$o',
        r'La Ni$\mathrm{\tilde{n}}$a', 'other negative', 'other positive',
        'Driest years', 'Wettest years'
    ]

    for a, d, t in zip(axes, subsets, titles):
        bplots = a.boxplot([d[m].dropna() for m in models],
                           labels=model_names,
                           widths=.7,
                           patch_artist=True,
                           medianprops=dict(linestyle='-',
                                            linewidth=2,
                                            color='Yellow'),
                           boxprops=dict(linestyle='--',
                                         linewidth=2,
                                         color='Black',
                                         facecolor='green',
                                         alpha=.8))

        print(t)
        if t in ('pIOD', r'El Ni$\mathrm{\tilde{n}}$o', 'other positive'):
            print(d.quantile(.75) - d.quantile(.25))  # per-model IQR

        a.set_title(t)
        a.axhline(linewidth=1, color='k', alpha=0.5)

        # Colour each model's box (replaces manual boxplot1..boxplot13
        # unpacking of bplots['boxes']).
        for box, c in zip(bplots['boxes'], colours):
            box.set_facecolor(c)

        # x tick labels only on the bottom row; y label on the left column.
        if a in (ax1, ax2, ax3, ax4, ax5, ax6):
            a.set_xticklabels([])
        else:
            a.set_xticklabels(labels=model_names, rotation=90, ha='center')
        if a in (ax1, ax3, ax5, ax7):
            a.set_ylabel(ylabel)
Esempio n. 18
0
async def on_message(message):
    """Handle one incoming Discord message.

    Drops bot/private/self messages, dispatches the known bot commands,
    and otherwise feeds the message into the Markov trainer when the
    channel is a training channel.
    """
    t_begin = time.time()

    # Guard clauses: messages we never process.
    if message.author.bot:
        print("DISCARDING BOT MESSAGE FROM ", message.author)
        return
    if type(message.channel) == discord.channel.PrivateChannel:
        print("DISCARDING PRIVATE MESSAGE FROM", message.author)
        return
    author_name = str(message.author)
    if "markov-bot" in author_name or "MikuBot" in author_name:
        print("Discarding self message")
        return

    print("Got message on channel ", message.channel, "from author",
          message.author, ":", message.content)

    words = message.content.split()
    if not words:
        return
    command = words[0]

    if command in ["?femboy", "?tomboy"]:
        # Role-pick reactions in the welcome channel get a greeting.
        if "welcome-center" in str(message.channel):
            await client.send_message(
                message.server.get_channel('308342435430400012'),
                f"Welcome <@{message.author.id}>!")
    elif command == bot_prefix + "help":
        help_text = (
            f"Commands: `{bot_prefix}{bot_name}` - Generates random text based on collected probabilities\n"
            f"`{bot_prefix}{bot_name}<starting word>` - Generates starting from a particular word\n"
            f"`{bot_prefix}{bot_name} <limit>` - Generates random text with the given length\n"
            f"`{bot_prefix}percents <word>` - Shows statistics on the given word\n"
            f"`{bot_prefix}mask <message>` - Misspells some text\n"
            f"`{bot_prefix}mask10 <message>` - Misspells some text 10 times\n"
            f"`{bot_prefix} <message>` - Comment that will not be processed by the bot\n`")
        await client.send_message(message.channel, help_text)
    elif command == bot_prefix + bot_name:
        await client.send_typing(message.channel)
        pieces = message.content.split()
        seed = False  # False means "no starting word"
        if len(pieces) > 1:
            seed = pieces[1]
        print("Sending")
        await client.send_message(message.channel, make_message(seed))
    elif command == bot_prefix + "percents" and len(words) > 1:
        await client.send_message(message.channel, get_percents(words[1]))
    elif command == bot_prefix + "top":
        # Optional numeric argument; defaults to the top 10.
        if len(words) > 1 and str.isdigit(words[1]):
            reply = top(int(words[1]))
        else:
            reply = top(10)
        await client.send_message(message.channel, reply)
    elif command == bot_prefix + "mask":
        await client.send_message(message.channel,
                                  mask.mask(" ".join(words[1:])))
    elif command == bot_prefix + "mask10":
        # Apply the misspeller repeatedly, collecting each stage.
        stages = []
        text = mask.mask(" ".join(words[1:]))
        for _ in range(10):
            stages.append(text)
            text = mask.mask(text)
        await client.send_message(message.channel, "\n".join(stages))
    elif message.content.startswith(bot_prefix):
        print("DISCARDING COMMENT MESSAGE FROM ", message.author)
        return
    elif train_in_all or message.channel.id in bot_trainning_channels:
        markov_add(message.content)
    else:
        print("DISCARDING MESSAGE OUTSIDE TRAINNING CHANNELS FROM",
              message.author)

    print(f"Took {time.time() - t_begin} seconds to process message of "
          f"{len(words)} words")
Esempio n. 19
0
ax2 = fig.add_subplot(2, 2, 3)
ax3 = fig.add_subplot(2, 2, 4)

### Modelspread

model_names = ['CLASS-CTEM', 'CLM5.0', 'ISBA-CTRIP', 'JSBACH', 'LPX-Bern',
               'SDGVM', 'VISIT', 'CAMS_GFAS', 'GFED4s']
colours = ['tab:orange', 'tab:green', 'tab:purple', 'tab:brown', 'tab:olive',
           'gold', 'purple', 'k', 'tab:grey']

df_monthly = pd.DataFrame()
df_annual = pd.DataFrame()

dataframes = [df_monthly, df_annual]
time_res = ['monthly', 'annual']
# NOTE(review): `test` is assigned but never used below — confirm it can go.
test = mask('monthly', 'S3', 'fFire', total_mask, 'CLASS-CTEM')

# One fire-emission series per model at each time resolution; CAMS_GFAS is
# the odd one out (no experiment tag, variable 'co2fire').
fire_sources = [('CLASS-CTEM', 'S3', 'fFire'),
                ('CLM5.0', 'S3', 'fFire'),
                ('ISBA-CTRIP', 'S3', 'fFire'),
                ('JSBACH', 'S3', 'fFire'),
                ('LPX-Bern', 'S3', 'fFire'),
                ('SDGVM', 'S3', 'fFire'),
                ('VISIT', 'S3', 'fFire'),
                ('CAMS_GFAS', '', 'co2fire')]
for df, tr in zip(dataframes, time_res):
    for model, exp, varname in fire_sources:
        df[model] = mask(tr, exp, varname, total_mask, model)

# GFED4s observations from CSV, flattened to a monthly series.
df_gfed = pd.read_csv('../../GFED4s/GFED4s_monthly.csv')
GFED4s_monthly = df_gfed.stack().to_numpy()
# Sum 12-month blocks into annual totals; the 6/-18 trim presumably aligns
# the record to whole calendar years — confirm against the CSV layout.
GFED4s_annual = np.sum(GFED4s_monthly[6:-18].reshape(-1, 12), axis=1)
Esempio n. 20
0
    df_NBP_S1 = pd.DataFrame()
    df_NBP_S2 = pd.DataFrame()
    df_NBP_S3 = pd.DataFrame()

    dataframes = [df_NBP_S0, df_NBP_S1, df_NBP_S2, df_NBP_S3]
    experiments = ['S0', 'S1', 'S2', 'S3']
    effect_list = [
        cable_pop_list_effect, class_ctem_list_effect, clm_list_effect,
        isam_list_effect, isba_ctrip_list_effect, jsbach_list_effect,
        jules_es_list_effect, lpx_list_effect, ocn_list_effect,
        orchidee_list_effect, orchidee_cnp_list_effect, sdgvm_list_effect,
        visit_list_effect
    ]

    for exp, df in zip(experiments, dataframes):
        df['CABLE-POP'] = mask(exp, 'nbp', vegm, 'CABLE-POP')
        df['CLASS-CTEM'] = mask(exp, 'nbp', vegm, 'CLASS-CTEM')
        df['CLM5.0'] = mask(exp, 'nbp', vegm, 'CLM5.0')
        df['ISAM'] = mask(exp, 'nbp', vegm, 'ISAM')
        df['ISBA-CTRIP'] = mask(exp, 'nbp', vegm, 'ISBA-CTRIP')
        df['JSBACH'] = mask(exp, 'nbp', vegm, 'JSBACH')
        df['JULES-ES'] = mask(exp, 'nbp', vegm, 'JULES-ES')
        df['LPX-Bern'] = mask(exp, 'nbp', vegm, 'LPX-Bern')
        df['OCN'] = mask(exp, 'nbp', vegm, 'OCN')
        df['ORCHIDEE'] = mask(exp, 'nbp', vegm, 'ORCHIDEE')
        df['ORCHIDEE-CNP'] = mask(exp, 'nbp', vegm, 'ORCHIDEE-CNP')
        df['SDGVM'] = mask(exp, 'nbp', vegm, 'SDGVM')
        df['VISIT'] = mask(exp, 'nbp', vegm, 'VISIT')

    for el, mn in zip(effect_list, model_names):
        el.append(df_NBP_S1[mn].sum() - df_NBP_S0[mn].sum())
from mask import mask
import cv2

# Load the test image.  cv2.imread does NOT raise on a missing/unreadable
# file — it silently returns None — so fail fast with a clear error instead
# of letting mask() crash on a None input.
img = cv2.imread('0004.jpg')
if img is None:
    raise FileNotFoundError("could not read image '0004.jpg'")
out = mask(img)
print(out)
Esempio n. 22
0
def analysisphoto(img, filtre):
    """Scan every pixel of *img* and build a mask from the pixels whose
    RGB values fall inside the band selected by *filtre*.

    filtre == 0 selects one colour band, filtre == 1 the "hot" band
    (lower bounds only), anything else the dark band.  Returns the mask
    produced by the module-level ``mask``/``pixelMask`` helpers.
    """
    # Channel bands (r/g/b) plus the grey "w" band for each filter.
    if filtre == 0:
        rmin, rmax = 150, 255
        gmin, gmax = 0, 207
        bmin, bmax = 0, 100
        wmin, wmax = 150, 240
    elif filtre == 1:
        rmin, rmax = 230, 250
        gmin, gmax = 230, 255
        bmin, bmax = 230, 255
        wmin, wmax = 0, 0
    else:
        rmin, rmax = 0, 20
        gmin, gmax = 0, 20
        bmin, bmax = 0, 20
        wmin, wmax = 0, 0

    # Image dimensions and raw pixel matrix.
    width, height = img.size
    pixels = np.asarray(img)

    hits = []  # pixelInteret objects for every matching pixel
    for row in range(height):
        for col in range(width):
            px = pixels[row][col]
            if filtre == 1:
                # "hot" filter: only the lower bounds are checked
                selected = px[0] > rmin and px[1] > gmin and px[2] > bmin
            elif rmin < px[0] < rmax:
                # red channel in band: require green and blue in band too
                selected = gmin < px[1] < gmax and bmin < px[2] < bmax
            else:
                # red channel out of band: accept grey pixels in the w band
                selected = (wmin < px[0] < wmax
                            and px[0] == px[1] == px[2])
            if selected:
                point = pixelInteret(0, 0)
                point.setpoint(row, col, px)
                hits.append(point)
    print(len(hits))

    # Build the mask from the collected points of interest.
    masque = mask(height, width)
    mask1 = masque.setMask(hits)

    # Refine the mask with the pixel-level pass.
    mask1 = pixelMask(mask1, width, height)
    print('done')

    return mask1
def segmentation(filenames_1, filenames_2, filenames_3):
    """Segment each image listed in *filenames_1*; save the binary mask to
    the matching path in *filenames_2* and the masked ROI image to the
    matching path in *filenames_3*.

    Per image, the pipeline is run twice: Canny edges -> patch polygon
    (``main``) -> Hough straight-line detection -> edge interpolation ->
    filled-area mask; the second pass operates on the first-pass ROI.
    Relies on module-level helpers ``canny_img``, ``main``,
    ``StraightLineDetection``, ``roi_edge_2``, ``fill_area`` and ``mask``.
    """

    for files_1, files_2, files_3 in zip(filenames_1, filenames_2,
                                         filenames_3):
        print(files_1)

        ### obtaining the 1st region

        img = cv2.imread(files_1, 0)  # greyscale copy for edge detection
        img_original = cv2.imread(files_1)

        ### EDGE
        edges = canny_img(img)
        # cv2.imshow("i1",img)
        # cv2.waitKey(0)

        ### POLYGON

        PATCH_SIZE = 10  #### 10
        img_edge, pts2, a = main(PATCH_SIZE, edges)

        ### HOUGH TRANSFORMATION

        lines = StraightLineDetection(img, img_edge)  #original?

        ### INTERPOLATION

        interpolated = roi_edge_2(lines, img_original)

        mask_binary = fill_area(interpolated)
        # cv2.imshow("i1",im)
        # cv2.waitKey(0)
        ## cv2.imwrite(files_2, mask_binary)

        # First-pass ROI: original image restricted to the filled mask.
        new_img = mask(mask_binary, img_original)
        # cv2.imwrite(files_2, new_img)

        ### obtaining the 2nd region
        # new_img = cv2.imread(files_2,0)

        edges_2 = canny_img(new_img)  #edge_tumor

        PATCH_SIZE = 10  #### 10
        img_edge_2, pts2, a = main(PATCH_SIZE, edges_2)

        # NOTE(review): img_edge_2 is computed above but the call below
        # reuses first-pass img_edge for BOTH arguments — confirm whether
        # StraightLineDetection(new_img, img_edge_2) was intended.
        lines_2 = StraightLineDetection(img_edge, img_edge)
        # # cv2.imshow("i", lines_2)
        # # cv2.waitKey(0)

        interpolated_2 = roi_edge_2(lines_2, img_original)

        mask_binary_2 = fill_area(interpolated_2)
        ### SAVE MASK ###
        cv2.imwrite(files_2, mask_binary_2)

        ########### DILATION ########
        # mask_binary_2 = dilation_func(mask_binary_2)

        # cv2.imwrite(files_3, mask_binary_2)
        # new_img_2 = mask(mask_binary_2, im)

        # mask_binary_2 = cv2.imread(files_3,0)
        # Second-pass ROI saved as the final output.
        new_img_2 = mask(mask_binary_2, img_original)
        cv2.imwrite(files_3, new_img_2)

    return
Esempio n. 24
0
import sys
import nltk
nltk.download('words')

# receive parameteres from users
SUMMARIZE = 0  # 0: no summarize, 1: summarize
KEYWORDS_POSITION = 0 # 0: keywords, 1: position

SUMMARIZE = int(sys.argv[1])
KEYWORDS_POSITION = int(sys.argv[2])
FILE_NAME = sys.argv[3]

dataframe = pd.read_csv(FILE_NAME)
rows = refine_label(dataframe)
rows = process(rows)
text, label = mask(rows)
first_dataframe = impute(text, label)


if SUMMARIZE:
	# SUMMARIZE FUNC
	tfidf_result = vectorize(first_dataframe, True)
	Obfuscate(tfidf_result, "summary")
else:
	# NO SUMMARIZE FUNC
	if KEYWORDS_POSITION:
		# Position FUNC
		tfidf_result = vectorize(first_dataframe, False)
		Obfuscate(tfidf_result, "pos")
	else:
		# Keywords Func
    time = np.arange(1901, 2018)

    tau = []

    for i in range(0, 116):
        tau_prel = CVeg[i + 1] / (NPP[i + 1] - ((CVeg[i + 1] - CVeg[i]) /
                                                (time[i + 1] - time[i])))
        tau.append(tau_prel)

    tau_change = tau - (sum(tau[:30]) / len(tau[:30]))
    return (tau, tau_change)


for mn, c in zip(model_names, colours):
    NPP = mask('annual', 'S3', 'npp', total_mask, mn)
    NPP_change = NPP - (sum(NPP[:30]) / len(NPP[:30]))

    if mn in ('CABLE-POP', 'ISAM', 'JULES-ES', 'ORCHIDEE', 'VISIT'):
        ls = '--'
    elif mn in ('CLASS-CTEM', 'ISBA-CTRIP', 'LPX-Bern', 'ORCHIDEE-CNP'):
        ls = '-'
    else:
        ls = '-.'

    ax1.plot(time, NPP_change, color=c, lw=2.0, linestyle=ls, label=mn)

    TAU, TAU_change = tau(mn)
    df_TAU = pd.DataFrame(TAU, columns=[mn])
    ax2.plot(time[1:],
             df_TAU[mn].rolling(window=5, center=True).mean(),
# Two fixes relative to the original:
#  * the variable-name list was called `vars`, shadowing the builtin;
#  * df_NBP['std'] was computed AFTER the 'mean' column had been inserted,
#    so the multi-model mean leaked into the std — both statistics now use
#    only the 13 model columns.
plt.rcParams['ytick.labelsize'] = 11

ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)

df_NBP = pd.DataFrame()
df_CVeg = pd.DataFrame()
df_CSoil = pd.DataFrame()

dataframes = [df_NBP, df_CVeg, df_CSoil]
var_names = ['nbp', 'cVeg', 'cSoil']  # one TRENDY variable per dataframe

# The 13 TRENDY models, in column order.
trendy_models = [
    'CABLE-POP', 'CLASS-CTEM', 'CLM5.0', 'ISAM', 'ISBA-CTRIP', 'JSBACH',
    'JULES-ES', 'LPX-Bern', 'OCN', 'ORCHIDEE', 'ORCHIDEE-CNP', 'SDGVM',
    'VISIT'
]

# One masked annual S3 series per model per variable (replaces 13
# hand-written assignments in each dataframe).
for df, v in zip(dataframes, var_names):
    for m in trendy_models:
        df[m] = mask('annual', 'S3', v, total_mask, m)

# Multi-model mean and spread over the model columns only.
df_NBP['mean'] = df_NBP[trendy_models].mean(axis=1)
df_NBP['std'] = df_NBP[trendy_models].std(axis=1)
Esempio n. 27
0
data_path = './faces/'  # root folder for captured face crops
count = 0  # captures saved so far for the current emoji
emoji = 0  # emoji index; NOTE(review): never changes in this loop — confirm
face_m = None  # last aligned face crop; stays None until a face is seen
while True:
    # cap.read() returns (success_flag, frame); the original ignored the
    # flag and would crash in cv2.resize on a failed read.
    ret, img = cap.read()
    if not ret:
        break  # camera stream ended or read failed

    img = cv2.resize(img, (600, 400))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    faces = detector(gray)
    if len(faces) > 0:
        # Use the first detected face: landmark it, mask it, align, mirror
        # and normalise to 110x110 for saving.
        face = faces[0]
        shape_68 = shape_predictor_68(gray, face)
        shape = face_utils.shape_to_np(shape_68)

        masked_img = mask(shape, gray)
        masked = cv2.bitwise_and(gray, masked_img)
        face_m = fa.align(masked, gray, face)
        face_m = cv2.flip(face_m, 1)
        face_m = cv2.resize(face_m, (110, 110))
        cv2.imshow('face', face_m)

    msg = ('press c to capture face for emoji no. ' + str(emoji) +
           ' count ' + str(count))
    cv2.putText(img, msg, (0, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.5,
                (255, 255, 0))
    cv2.imshow('real', img)
    # Fix: the original raised NameError if 'c' was pressed before any face
    # had ever been detected (face_m unbound); now the capture is ignored.
    if cv2.waitKey(1) == ord('c') and face_m is not None:
        file_name_path = data_path + str(emoji) + '/' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face_m)
        count = count + 1
Esempio n. 28
0
from mask import mask
import matplotlib.pyplot as plt
import scipy.misc as sci
import cv2
from time import time
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force CPU execution
Mask = mask()

inp_path = 'images/inputs/korean.jpg'
# Fix: the original rebound the imported name `mask` to the loaded image,
# shadowing the class it had just instantiated; use distinct names.
mask_path = 'images/masks/dmask2.JPG'

# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — this script
# needs an old SciPy, or a switch to imageio.imread.
inp = sci.imread(inp_path)
# NOTE(review): the mask image is loaded but never passed to apply_mask —
# confirm whether Mask.apply_mask was meant to receive it.
mask_img = sci.imread(mask_path)
tic = time()
print('==>start<==')
print(inp.shape)
out = Mask.apply_mask(inp)

print(f'time=> {round(time() - tic, 2)}s')

#sci.imsave('out.jpg', out)

plt.imshow(out)

plt.show()
Esempio n. 29
0
plt.rcParams['ytick.labelsize'] = 11

ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)

model_names = [
    'CLASS-CTEM', 'CLM5.0', 'ISBA-CTRIP', 'JSBACH', 'LPX-Bern', 'SDGVM',
    'VISIT'
]
colours = [
    'tab:orange', 'tab:green', 'tab:purple', 'tab:brown', 'tab:olive', 'gold',
    'purple'
]

for mn, c in zip(model_names, colours):
    monthly_s2 = mask('monthly', 'S2', 'fFire', total_mask, mn)
    monthly_s3 = mask('monthly', 'S3', 'fFire', total_mask, mn)
    annual_s2 = mask('annual', 'S2', 'fFire', total_mask, mn)
    annual_s3 = mask('annual', 'S3', 'fFire', total_mask, mn)

    diff_monthly = (np.array(monthly_s3) - np.array(monthly_s2))
    diff_annual = (np.array(annual_s3) - np.array(annual_s2))

    if mn in ('CABLE-POP', 'ISAM', 'JULES-ES', 'ORCHIDEE', 'VISIT'):
        ls = '--'
    elif mn in ('CLASS-CTEM', 'ISBA-CTRIP', 'LPX-Bern', 'ORCHIDEE-CNP',
                'CAMS_GFAS', 'GFED4s'):
        ls = '-'
    else:
        ls = '-.'