def test_fudge_dice(self, mock_random_randint):
    """Fudge dice ('5dF') render -1/0/+1 faces with IRC colour codes."""
    mock_random_randint.side_effect = iter([-1, 0, 1, 0, -1])
    result = dice('5dF')
    assert result == '-1 (5dF=\x034-\x0f, 0, \x033+\x0f, 0, \x034-\x0f)'
def get_class_hit_die(self, creature_class=None):
    """Return the rolled hit die for the given creature class.

    Looks up the class record, then rolls its hit die the configured
    number of times via dice().
    """
    class_info = self.get_class_information(creature_class)
    return dice(class_info.get_hit_die_value(),
                class_info.get_number_of_hit_dice())
def test_one_d20(self, mock_random_randint):
    """A bare 'd20' spec rolls exactly one die."""
    mock_random_randint.return_value = 5
    result = dice('d20')
    assert result == '5 (d20=5)'
def test_complex_roll(self, mock_random_randint):
    """A mixed spec sums positive and negative dice plus a constant."""
    mock_random_randint.side_effect = iter([1, 2, 3])
    result = dice('2d20-d5+4')
    assert result == u'4 (2d20-d5+4=1, 2, -3)'
def roll_treasure(self):
    """Roll this table's die and generate that many magic items.

    Returns a comma-separated string of generated treasure entries.
    """
    count = int(dice.dice(self.die).roll())
    items = [self.magic.generate_treasure(self.table) for _ in range(count)]
    return ", ".join(items)
# Author: David Ackerson
#
# Rank word collocations by three association measures (PMI, LLR, Dice).
# Input file format (argv[1]): lines "count word" give unigram counts,
# lines "count w1 w2" give collocation counts.
import sys
from llr import llr2
from pmi import pmi
from dice import dice

with open(sys.argv[1], 'r') as f:
    freqs = {}
    collocs = []
    N = 0  # number of unigram entries seen
    for line in f:
        tokens = line.strip().split()
        if len(tokens) == 2:
            freqs[tokens[1]] = int(tokens[0])
            N += 1
        else:
            collocs.append({'freq': int(tokens[0]), 'words': tokens[1:]})

# Score every collocation with all three measures.
for colloc in collocs:
    f12 = colloc['freq']
    f1 = freqs[colloc['words'][0]]
    f2 = freqs[colloc['words'][1]]
    colloc['pmi_rank'] = pmi(f12, f1, f2, N)
    colloc['llr_rank'] = llr2(f12, f1, f2, N)
    colloc['dice_rank'] = dice(f12, f1, f2, N)

# BUG FIX: the original sorted and printed colloc['rank'], a key that is
# never set (only pmi_rank/llr_rank/dice_rank exist), so it always raised
# KeyError. Print the ranking for each measure instead. Also converted the
# Python-2 print statement to the print() function.
for rank_key in ('pmi_rank', 'llr_rank', 'dice_rank'):
    for colloc in sorted(collocs, key=lambda x: x[rank_key]):
        print(rank_key, colloc[rank_key], colloc['words'])
def __init__(self, data):
    """Parse each 'coins' entry ('die_spec;denomination') into a
    [dice, denomination] pair."""
    Treasure.__init__(self, data)
    self.coins = []
    for entry in self.data["coins"]:
        parts = entry.split(';')
        self.coins.append([dice.dice(parts[0]), parts[1]])
async def on_message(message):  # Basically my Main
    # Central command dispatcher for the discord bot: every incoming message
    # is logged, then checked against each "!command" prefix in turn.
    # NOTE(review): reconstructed from a collapsed source — verify nesting.
    print(f"{message.channel.guild.name[:4]} {message.channel}: {message.author}: {message.content}")
    global source_path
    if message.mention_everyone:
        # NOTE(review): this send() is a coroutine and is not awaited — it
        # never actually sends; confirm whether `await` is missing.
        message.channel.send("<:notification:544011898941734922>")
        print("Everyone just got pinged")
    if message.author == client.user:
        # Ignore the bot's own messages.
        return
    if "!join" in message.content.lower():  # bot joins voice channel
        await join(message)
    if "!leave" in message.content.lower():  # bot leaves voice channel
        await leave(message)
    if "!drip" in message.content.lower():  # simple ping
        await message.channel.send("drop!")
    if "!flip" in message.content.lower():  # simple ping alt
        await message.channel.send("flop!")
    if "!echo" in message.content.lower():
        # Owner-only: relay text to a channel given by guild/channel id,
        # a known alias, or a raw channel id.
        if message.author.id == authorId:
            channelDict = {"do": 200852342667476992, "valet": 675182191520776214, "general": 592217976061689866}
            # NOTE(review): lstrip("!echo ") strips any of the characters
            # !echo<space> from the left, not the prefix "!echo " — a target
            # starting with e/c/h/o gets mangled; str.removeprefix is likely
            # what was intended. Left untouched here.
            msg = message.content.lstrip("!echo ").split()
            id_or_name = msg[0]
            if "/" in id_or_name:
                await client.get_guild(int(id_or_name.split("/")[0])).get_channel(int(id_or_name.split("/")[1])).send(message.content.lstrip("!echo ").split(" ", 1)[1])
            elif id_or_name in channelDict:
                try:
                    await message.guild.get_channel(channelDict[id_or_name]).send(message.content.lstrip("!echo ").split(" ", 1)[1])
                except Exception as e:
                    print(e)
            else:
                await message.guild.get_channel(int(id_or_name)).send(message.content.lstrip("!echo ").split(" ", 1)[1])
    if "!retrofit" in message.content.lower():
        # Owner-only: persist the triggering message's ids, run retrofit(),
        # then shut the client down.
        if (message.author.id == authorId):
            await message.add_reaction("🔄")
            with open(os.path.join(source_path, "msg.txt"), 'w') as file:
                file.write((str(message.channel.guild.id) + " " + str(message.channel.id) + " " + str(message.id)))
            retrofit()
            await client.close()
    if "!ss" in message.content.lower().strip():
        # Build a jump URL to the author's current voice channel.
        guildId = message.channel.guild.id
        try:
            vcId = message.author.voice.channel.id
            URLmessage = "https://discordapp.com/channels/" + str(guildId) + "/" + str(vcId)
            await message.channel.send(URLmessage)
        except Exception as e:
            # author.voice is None when not connected -> AttributeError.
            await message.channel.send("You aren't in a Voice Channel")
            print(e)
    if "!volume" in message.content.lower():
        # NOTE(review): strip("!volume") strips those characters, not the
        # prefix — works only because the remaining text is digits; verify.
        msg = int(message.content.strip("!volume").strip())
        await setVol(msg, message)
    if "!bye" in message.content.lower():  # makes bot go offline
        if message.author.id == authorId:
            await message.add_reaction("👋")
            await client.close()
    if "!r " in message.content.lower():
        # Dice roll: forwards the spec to dice(); same strip() caveat as above.
        await message.add_reaction("🎲")
        await message.channel.send(dice(message.content.strip("!r")))
    if "!domt" in message.content.lower():
        async with message.channel.typing():  # added this line
            await message.add_reaction("🃏")
            card_file = discord.File(os.path.join(source_path, 'images', 'avdol.jpg'), filename="avdol.jpg")
            await message.channel.send(file=card_file, embed=drawCard())
    if "!pasta" in message.content.lower():
        async with message.channel.typing():  # added this line
            await message.add_reaction("🍜")
            if "tts" in message.content.lower():
                await message.channel.send(getPasta(), tts=True)
            else:
                await message.channel.send(getPasta())
    if "!forecast " in message.content.lower():
        async with message.channel.typing():  # added this line
            pruned = message.content.lower().split(" ", 2)[1].strip()
            await message.add_reaction("🌀")
            icon_file = discord.File(os.path.join(source_path, 'images', 'icon.png'), filename="icon.png")
            await message.channel.send(file=icon_file, embed=getWeather(pruned))
    if "!thanos" in message.content.lower():
        checkWHID(message)
        async with message.channel.typing():
            # Only members holding the specific role may trigger the snap.
            for role in message.author.roles:
                if role.id == 374095810868019200:
                    await snap(message)
                    # await message.add_reaction("<a:snap:583370125592494081>")
                    await message.channel.send("<a:snap:583370125592494081>")
    if "!weather" in message.content.lower():  # weather commands (plays into an internal function for simplicity)
        await weather(message)
    elif "!toggledownfall" in message.content.lower():  # toggles between 'clear' and 'rain'
        if currentWeather == 'clear':
            await setWeather('rain', message)
            await message.add_reaction("🌧")
        else:
            await setWeather('clear', message)
            await message.add_reaction("☀")
def main(argv):
    # Parameter sweep for blob detection on a page image: for each of 30
    # parameter combinations, detect blobs, write a mask image, compare it
    # with a ground-truth mask via the Dice coefficient, and record the score
    # in an Excel summary sheet.
    # parameters:
    #   argv[1]: COfile (input image)   argv[2]: method (0..3)
    #   argv[3]: KPfile (mask prefix)   argv[4]: KPfileAN (ground truth)
    #   argv[5]: pageCount (Excel row)
    # NOTE(review): reconstructed from a collapsed source — verify nesting,
    # in particular whether book.save/close belong inside the sweep loop.
    # excel summary
    book = openpyxl.load_workbook("/home/toya/Research/Wasan/data/summary/evaluate.xlsx")
    sheet = book['パラメータ評価']
    sheet.cell(row=int(argv[5]), column=1).value = str(argv[1])
    # parameter set (each list is repeated so 30 sweep indices stay in range)
    min_sList = [i for i in range(38, 43)] * 12  # int
    min_sList2 = [i for i in range(35, 45)] * 6  # int
    min_sList3 = [i for i in range(50, 65)] * 4  # int
    max_sList = [i for i in range(35, 65)] * 2  # int
    max_sList2 = [i for i in range(40, 50)] * 6  # int
    max_sList3 = [i for i in range(55, 70)] * 4  # int
    num_sList = [i for i in range(1, 4)] * 20  # int
    num_sList3 = [i for i in range(1, 6)] * 12  # int
    sigma_List = [i * 0.1 for i in range(15, 66, 10)] * 12  # float
    thresList = [i * 0.01 for i in range(1, 21)] * 3  # float
    thresList2 = [i * 0.1 for i in range(6, 16)] * 6  # float
    thresList3 = [i * 0.001 for i in range(5, 15)] * 6  # float
    overList = [i * 0.1 for i in range(1, 11)] * 6  # float
    overList2 = [i * 0.1 for i in range(1, 5)] * 15  # float
    overList3 = [i * 0.1 for i in range(3, 8)] * 12  # float
    for p in range(30):
        image_gray = cv2.imread(argv[1], cv2.IMREAD_GRAYSCALE)
        image = image_gray
        image_black = (255 - image_gray)  # inverted: blobs become bright
        maskOutputFile = str(argv[3]) + "_" + str(min_sList[p]) + "-" + str(max_sList[p]) + "-" + str(num_sList[p]) + "-" + str(thresList[p]) + "-" + str(overList[p]) + ".tif"
        # switch between the different types of blob detector
        if int(argv[2]) == 0:
            keypoints = SimpleBlobDetector(argv, image)
            # print ("number of keypoints outside "+str(len(keypoints)))
        elif int(argv[2]) == 1:
            # parameters: http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_log
            min_s = min_sList[p]
            max_s = max_sList[p]
            num_s = num_sList[p]
            thres = thresList[p]
            over = overList[p]
            print(min_s, max_s, num_s, thres, over)
            blob_List = blob_log(image_black, min_sigma=min_s, max_sigma=max_s, num_sigma=num_s, threshold=thres, overlap=over)
            # Compute radii in the 3rd column.
            blob_List[:, 2] = blob_List[:, 2] * sqrt(2)
        elif int(argv[2]) == 2:
            # parameters: http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_dog
            min_s = min_sList2[p]
            max_s = max_sList2[p]
            sigma = sigma_List[p]
            thres = thresList2[p]
            over = overList2[p]
            print(min_s, max_s, sigma, thres, over)
            blob_List = blob_dog(image_black, min_sigma=min_s, max_sigma=max_s, sigma_ratio=sigma, threshold=thres, overlap=over)
            # blob_List = blob_dog(image_black, min_sigma=35, max_sigma=40, sigma_ratio=1.5, threshold=1.0, overlap=0.3)
            blob_List[:, 2] = blob_List[:, 2] * sqrt(2)
        elif int(argv[2]) == 3:
            # parameters: http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.blob_doh
            min_s = min_sList3[p]
            max_s = max_sList3[p]
            num_s = num_sList3[p]
            thres = thresList3[p]
            over = overList3[p]
            print(min_s, max_s, num_s, thres, over)
            blob_List = blob_doh(image_gray, min_sigma=min_s, max_sigma=max_s, num_sigma=5, threshold=thres, overlap=over)
            # blob_List = blob_doh(image_gray, min_sigma=65, max_sigma=70, num_sigma=5, threshold=0.005, overlap=0.7)
        else:
            # NOTE(review): the unsupported value is argv[2], but argv[3] is
            # printed — looks like an off-by-one in the message; verify.
            print("Blob detector type not supported: " + argv[3])
            sys.exit(-1)
        # Now write the results to file
        if int(argv[2]) == 1 or int(argv[2]) == 2 or int(argv[2]) == 3:
            # Draw a rectangle per blob on the working image.
            for blob in blob_List:
                y, x, r = blob
                cv2.rectangle(image, (int(x - r), int(y - r)), (int(x + r), int(y + r)), (0, 0, 255), 1)
            # Build a binary mask: blob bounding boxes set to 255.
            outputMask = np.ones((image.shape[0], image.shape[1]), np.uint8)
            i = 0
            for blob in blob_List:
                y, x, r = blob
                outputMask[int(y - r):int(y + r), int(x - r):int(x + r)] = 255
                i = i + 1
            invertOutputMask = 255 - outputMask
            cv2.imwrite("/home/toya/Research/Wasan/data/summary/AllKanjiPositionTest/" + str(maskOutputFile.split("/")[-1]), invertOutputMask)
        else:
            # SimpleBlobDetector path: keypoints carry center + size.
            for kp in keypoints:
                x = kp.pt[0]
                y = kp.pt[1]
                r = kp.size
                cv2.rectangle(image, (int(x - r), int(y - r)), (int(x + r), int(y + r)), (0, 0, 255), 1)
            # NOTE(review): argv[2] is the method id, not a filename — this
            # imwrite target looks wrong; verify.
            cv2.imwrite(argv[2], image)
        ##############################################################################################################################################################
        # Dice-coefficient comparison of the generated mask vs ground truth.
        warnings.simplefilter('ignore', Image.DecompressionBombWarning)
        im1 = Image.open("/home/toya/Research/Wasan/data/summary/AllKanjiPositionTest/" + str(maskOutputFile.split("/")[-1]))
        im2 = Image.open(argv[4])
        # Error control if only one parameter for inverting image is given.
        if len(sys.argv) == 7:
            print("Invert options must be set for both images")
            sys.exit()
        # Inverting input images if is wanted.
        if len(sys.argv) > 6:
            if int(sys.argv[6]) == 1:
                im1 = ImageOps.invert(im1)
            if int(sys.argv[7]) == 1:
                im2 = ImageOps.invert(im2)
        # Image resize for squared images.
        size = 300, 300  # NOTE(review): never used — resize seems disabled.
        # Converting to b/w (threshold at 128 into 1-bit images).
        gray1 = im1.convert('L')
        im1 = gray1.point(lambda x: 0 if x < 128 else 255, '1')
        gray2 = im2.convert('L')
        im2 = gray2.point(lambda x: 0 if x < 128 else 255, '1')
        # Dice coeficinet computation
        res = dice(im1, im2)
        #################################################################################################################################################################
        # Record parameter label (row 1) and score (row argv[5]) per column.
        sheet.cell(row=1, column=p + 2).value = str(min_sList[p]) + "-" + str(max_sList[p]) + "-" + str(num_sList[p]) + "-" + str(thresList[p]) + "-" + str(overList[p])
        sheet.cell(row=int(argv[5]), column=p + 2).value = res
        print("/home/toya/Research/Wasan/data/summary/AllKanjiPositionTest/" + str(maskOutputFile.split("/")[-1]))
        print(res)
        # exit()
        book.save('/home/toya/Research/Wasan/data/summary/evaluate.xlsx')
        book.close()
# NOTE(review): fragment of a larger per-image loop — `mask`, `img`,
# `filename`, `gt_path`, `dice_array`, `metrics`, `image_rgb` and
# `max_mean_dice` come from the enclosing (out-of-view) scope.
# Keep only the largest contour of the segmentation mask, rebuild the mask
# from it, then score it against the ground-truth segmentation with Dice.
# NOTE(review): cv2.RECURS_FILTER is an edgePreservingFilter flag, not a
# contour-retrieval mode — cv2.RETR_LIST / RETR_EXTERNAL was probably
# intended (it only works because the flag's value coincides); verify.
contours, hierarchy = cv2.findContours(mask, cv2.RECURS_FILTER, cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
h, w = img.shape[:2]
mask = np.zeros((h, w), np.uint8)
# Draw the contour on the new mask and perform the bitwise operation
mask = cv2.drawContours(mask, [cnt], -1, 255, -1)
path = os.path.join(gt_path)
gt_filename = str(filename.split('.')[0]) + '_Segmentation.png'
image_gt = cv2.imread(os.path.join(path, gt_filename), cv2.IMREAD_GRAYSCALE)
dice_score = dice(mask, image_gt)
dice_array.append(dice_score)
dice_mean = np.mean(dice_array)
# NOTE(review): DataFrame.append is removed in pandas >= 2.0; pd.concat is
# the modern equivalent.
temp_metrics = pd.Series([filename, dice_score, dice_mean], index=['filename', 'dice', 'mean_dice'])
metrics = metrics.append(temp_metrics, ignore_index=True)
print(metrics.head(60))
print("_" * 20)
plot_results_k(image_rgb, image_gt, mask, dice_score, filename, True)
# Track the best running mean across parameter settings.
if dice_mean > max_mean_dice:
    max_mean_dice = dice_mean
__author__ = 'Kris'
import Parsers, random, json, Treasure, dice
from resources.data import *

# Shared d100 die used by the tier tables below.
d100 = dice.dice("1d100")


class Table():
    """Wraps a list of (name, sub-table) pairs and renders their treasure."""

    def __init__(self, treasure_list):
        self.treasure_list = treasure_list

    def generate_treasure(self):
        treasure_string = ""
        for k, v in self.treasure_list:
            treasure_string = treasure_string + ("-" * 10) + str(k) + ("-" * 10) + "\n"
            # NOTE(review): `v.roll_treasure` is not called — this appends the
            # bound-method repr; `v.roll_treasure()` was probably intended.
            treasure_string = treasure_string + str(v.roll_treasure) + "\n"
        return treasure_string

    def __str__(self):
        return str(self.generate_treasure())


class tier_table():
    """Rolls d100 and returns the treasure of the matching coin range."""

    def __init__(self, coins_list):
        self.lines = []
        for c in coins_list:
            self.lines.append(Treasure.Coins(c))

    def generate_treasure(self):
        roll = d100.roll()
        # First line whose range contains the roll wins; returns None if
        # no range matches.
        for c in self.lines:
            if c.in_range(int(roll)):
                return c.roll_treasure()

    def __str__(self):
        return self.generate_treasure()


class individual_treasure_table():
    # NOTE(review): class body truncated at the edge of this source chunk.
def main(argv):
    # Take a mosaic, a csv file containing predictions for its labels and the patch size used for the annotations
    # 1) Create trentative automatic mask images (all affected patches are black)
    # 2) Find a clear background and clear foreground part, find unknown part, find the connected components of the foregroung
    # 3) Accumulate labels for all cathegories, carefull to keep the unkwnon updated as it is where the segmentation can grow
    # 4) run watershed
    # NOTE(review): reconstructed from a collapsed source — verify nesting.
    # hardcoded number of layers and names
    layerNames = ["river", "decidious", "uncovered", "evergreen", "manmade"]
    layerDict = {}  # dictionary so that we know what number corresponds to each layer
    for i in range(len(layerNames)): layerDict[layerNames[i]] = i
    # Read parameters
    # NOTE(review): parameters are read twice — first from sys.argv, then
    # overwritten from argv; the sys.argv reads look redundant. Likewise the
    # two prefix loops below append from BOTH sys.argv and argv, which would
    # duplicate every prefix when main is called with sys.argv — verify.
    patch_size = int(sys.argv[1])
    csvFile = sys.argv[2]
    patch_size = int(argv[1])
    csvFile = argv[2]
    # imageDir, full path!
    imageDir = argv[3]
    # read also all the prefixes of all the images that we have
    imagePrefixes = []
    for x in range(4, len(sys.argv)): imagePrefixes.append(sys.argv[x])
    for x in range(4, len(argv)): imagePrefixes.append(argv[x])
    imageDict = {}
    for i in range(len(imagePrefixes)): imageDict[imagePrefixes[i]] = i
    # hardcoded output dir
    outputDir = "./outputIm/"
    # print("AnnotationMask creator main, parameters: csv files: "+str(csvFile)+" image directory"+str(imageDir)+" image prefixes "+str(imagePrefixes))
    f = open(csvFile, "r")
    # Load every mosaic and remember its shape.
    shapeX = {}
    shapeY = {}
    image = {}
    for pref in imagePrefixes:
        image[pref] = cv2.imread(imageDir + pref + ".jpg", cv2.IMREAD_COLOR)
        print("Image " + imageDir + pref + ".jpg")
        shapeX[pref] = image[pref].shape[0]
        shapeY[pref] = image[pref].shape[1]
    # create a blank image for each layers
    layerList = []
    i = 0
    for pref in imagePrefixes:
        layerList.append([])
        for x in range(len(layerNames)):
            layerList[i].append(np.zeros((shapeX[pref], shapeY[pref]), dtype=np.uint8))
        i += 1
    # go over the csv file, for every line
    #   extract the image prefixes
    #   extract the lables
    #   for every label found, paint a black patch in the correspoding image layer
    for line in f:
        # process every line
        # print(line)
        pref = line.split("p")[0]
        patchNumber = int(line.split("h")[1].split(" ")[0])
        labelList = line.split(" ")[1].strip().split(";")
        numStepsX = int(shapeX[pref] / patch_size)
        numStepsY = int(shapeY[pref] / patch_size)
        for x in labelList:
            if x == "": break
            # now, paint the information of each patch in the layer where it belongs
            xJump = patchNumber // numStepsY
            yJump = patchNumber % numStepsY
            # now find the proper layer (once in the im)
            currentLayerIm = layerList[imageDict[pref]][layerDict[x]]
            impa.paintImagePatch(currentLayerIm, xJump * patch_size, yJump * patch_size, patch_size, 255)
    # Dump the coarse per-layer masks.
    i = 0
    for pref in imagePrefixes:
        for x in range(len(layerNames)):
            # print("shape of current layer image "+repr(layerList[i][x].shape))
            cv2.imwrite(outputDir + pref + "layer" + str(x) + ".jpg", layerList[i][x])
        i += 1
    i = 0
    kernel = np.zeros((3, 3), np.uint8)
    kernel[:] = 255
    for pref in imagePrefixes:
        print("starting with prefix " + pref)
        # NOTE(review): this append extends layerList past the per-mosaic
        # entries built above and the new list is never used — verify intent.
        layerList.append([])
        # mask accumulator image
        maskImage = np.ones((shapeX[pref], shapeY[pref]), dtype=np.uint8)
        firstLabel = 0  # counter so labels from different masks have different labels
        firstLabelList = [0]
        for x in range(len(layerNames)):
            if layerNames[x] in ["river", "decidious", "uncovered", "evergreen"]:
                # print("starting "+layerNames[x])
                # in the case of decidious trees, filter out the snow
                # if layerNames[x]=="decidious":
                if False:  # snow filtering disabled
                    snowMask = ut.makeSnowMask(cv2.cvtColor(image[pref], cv2.COLOR_BGR2GRAY))
                    coarseMask = ut.getSnowOutOfGeneratedMask(snowMask, layerList[i][x])
                else:
                    coarseMask = layerList[i][x]
                # Try to refine the segmenation
                opening = cv2.morphologyEx(coarseMask, cv2.MORPH_OPEN, kernel, iterations=2)
                # sure background area iterations was 10
                sure_bg = cv2.dilate(opening, kernel, iterations=1)
                # Finding sure foreground area dist_transform multiplier was 0.17
                dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
                ret, sure_fg = cv2.threshold(dist_transform, 0.15 * dist_transform.max(), 255, 0)
                cv2.imwrite(outputDir + pref + "SUREFG" + str(layerNames[x]) + ".jpg", sure_fg)
                # Finding unknown region
                sure_fg = np.uint8(sure_fg)
                unknown = cv2.subtract(sure_bg, sure_fg)
                # Marker labelling
                ret, markers = cv2.connectedComponents(sure_fg)
                # Add one to all labels so that sure background is not 0, but 1, also add firstLabel so label numbers are different
                markers = markers + firstLabel + 1
                # remark sure background as 1
                markers[markers == (firstLabel + 1)] = 1
                firstLabel += ret
                firstLabelList.append(firstLabel)
                # Now, mark the region of unknown with zero
                markers[unknown == 255] = 0
                important = layerNames[x] in ["decidious", "evergreen"]
                maskImage = addNewMaskLayer(markers, maskImage, important)
                cv2.imwrite(outputDir + pref + "CoarseMaskLayer" + str(layerNames[x]) + ".jpg", layerList[i][x])
                # cv2.imwrite(outputDir+str(x)+"layerMask.jpg",cv2.applyColorMap(np.uint8(markers*50),cv2.COLORMAP_JET))
                # cv2.imwrite(outputDir+pref+str(x)+"AccumMask.jpg",cv2.applyColorMap(np.uint8(maskImage*50),cv2.COLORMAP_JET))
            else:
                pass
                # print("skypping layer "+layerNames[x])
        cv2.imwrite(outputDir + "finalMask.jpg", cv2.applyColorMap(np.uint8(maskImage * 50), cv2.COLORMAP_JET))
        # print("starting watershed ")
        markers = cv2.watershed(image[pref], maskImage)
        # markers = seg.random_walker(image[pref],maskImage)
        image[pref][markers == -1] = [0, 0, 255]
        cv2.imwrite(outputDir + pref + str(i) + "watershed.jpg", image[pref])
        cv2.imwrite(outputDir + pref + str(i) + "markers.jpg", cv2.applyColorMap(np.uint8(markers * 50), cv2.COLORMAP_JET))
        # now we should reconstruct the individual mask segmenations from the final marker
        # print(" list Of first labels"+str(firstLabelList))
        # now, make layer images, for every interval of layers, only include markers inside of it
        # while we are doing it, we can also compute the DICE coefficient
        for j in range(1, len(firstLabelList)):
            refinedLayer = buildBinaryMask(markers, firstLabelList[j - 1], firstLabelList[j])
            refinedLayer = np.uint8(refinedLayer)
            coarseLayer = layerList[i][j - 1]
            manualLayer = np.invert(cv2.imread(imageDir + pref + "layer" + str(j - 1) + ".jpg", cv2.IMREAD_GRAYSCALE))
            # cv2.imwrite(outputDir+pref+str(i)+str(j-1)+"manual.jpg",manualLayer)
            # cv2.imwrite(outputDir+pref+str(i)+str(j-1)+"coarse.jpg",coarseLayer)
            cv2.imwrite(outputDir + pref + str(layerNames[j - 1]) + "refined.jpg", refinedLayer)
            print(" LAYER " + layerNames[j - 1])
            currentDice = dice.dice(coarseLayer, manualLayer)
            print("*******************************************dice coarse mask " + str(currentDice))
            currentDice = dice.dice(refinedLayer, manualLayer)
            print("*******************************************dice refined mask " + str(currentDice))
            # experiments with taking out the snow mask, not used at the moment.
            # if layerNames[j-1]=="decidious":
            #    snowMask=ut.makeSnowMask(cv2.cvtColor(image[pref], cv2.COLOR_BGR2GRAY))
            #    newRefinedLayer=ut.getSnowOutOfGeneratedMask(snowMask,refinedLayer)
            #    newManualLayer=ut.getSnowOutOfMask(snowMask,manualLayer)
            #    currentDice=dice.dice(refinedLayer,newManualLayer )
            # print("*******************************************dice refined mask no snow "+str(currentDice))
        i += 1
# Casino main menu: show the game list, read a choice, dispatch to the
# matching game, then clear the screen after a short pause.
# NOTE(review): fragment — later menu branches (Nvuti, RGBY, Quit, Help) are
# truncated at the edge of this chunk; presumably this sits inside a loop.
print("1. Dice")
print("2. Slots")
print("3. Roulette")
print("4. 21 points")
print("5. Nvuti")
print("6. RGBY")
print("7. Quit")
print()
print("#. Help")
print()
user_input = input(">>> ")
if user_input == 'Dice' or user_input == 'dice':
    clear()
    dice()
    sleep(5)
    clear()
elif user_input == 'Slots' or user_input == 'slots':
    clear()
    slots()
    sleep(5)
    clear()
elif user_input == 'Roulette' or user_input == 'roulette':
    clear()
    roulette()
    sleep(5)
    clear()
elif user_input == "21 points":
    game21()
    sleep(5)
# Made by Lucien Hammond # Using Matt Galants PyDB import sys, time, random, os from dice import dice import database die = dice() # Main Loop while True: ''' diePreference = input('What die do you to roll?: ') scorePreference = input('Enter ability score want: ') die.roll(int(diePreference), int(scorePreference)) ''' ''' database.set('test', 'This is a test\n') database.set('test', 'to see if we can do multiple lines') break ''' ''' test = database.getlist('test', ' ') # Stuff in your inventory inventory = database.getlist('inventory', ' ') # Stuff on the top bar topBar = database.getlist('topBar', ' ') # Main ability scores abilityScores = database.getlist('abilityScores', ' ')
def test_dice(self):
    """dice() maps each packed integer to its (die1, die2) pair."""
    cases = {98: (3, 5), 112: (4, 6)}
    for arg, expected in cases.items():
        self.assertEqual(expected, dice(arg))
# Invoke the project's dice module once.
# NOTE(review): the meaning of the 0 argument is not visible from here
# (seed? die selector?) — check dice.dice's signature.
import dice

dice.dice(0)
def activate():
    # Activation mini-game for "god construct" parts: list the parts the
    # player owns but has not activated, let them pick one, then play up to
    # two rounds of a dice-placement game; >= 4 energy activates the part.
    # (Original comments were in Chinese; translated. Runtime strings kept.)
    flag = False  # whether any owned-but-unactivated part was found
    # Scan all six parts for any that are owned and not yet activated.
    for i in range(6):
        if vars.Construct[vars.ConstructMap[i + 1]] and not vars.Activate[vars.ConstructMap[i + 1]]:
            # Found one.
            if not flag:
                # First hit: print the prompt header before the options.
                print("你想激活哪个神之部件?")
                flag = True
            print("{}. {}".format(i + 1, vars.ConstructMap[i + 1]))
    if flag == 0:
        # Nothing activatable was found.
        print("没有找到能激活的部件。")
        return
    else:
        choice = int(input())  # the player's selection
        print("即将激活 {} 。".format(vars.ConstructMap[choice]))
    # Play the game twice (two activation channels).
    for i in range(2):
        # Explain the slot numbering.
        print("第 {} 个激活通道的位置编号:".format(i + 1))
        print(" 1 | 2 | 3 \n"
              " 4 | 5 | 6 \n"
              "-----------\n"
              " * | * | * \n")
        # Start the game.
        field = ['_', '_', '_', '_', '_', '_']
        result = ['*', '*', '*']
        tot = 0
        energy = 0
        while tot < 6:
            tot += 2
            # Roll two dice.
            nums = dice.dice(2)
            place_choice = input("掷得骰子:{} 和 {} \n"
                                 "你想放在什么地方?(输入两个数字 以空格隔开)".format(nums[0], nums[1])).split(" ")
            field[int(place_choice[0]) - 1] = nums[0]
            field[int(place_choice[1]) - 1] = nums[1]
            # Update the per-column results (top minus bottom).
            if field[0] != '_' and field[3] != "_":
                result[0] = field[0] - field[3]
            if field[1] != '_' and field[4] != "_":
                result[1] = field[1] - field[4]
            if field[2] != '_' and field[5] != "_":
                result[2] = field[2] - field[5]
            # Print the current board state.
            print("当前激活通道的状况:\n" +
                  " {} | {} | {} \n".format(field[0], field[1], field[2]) +
                  " {} | {} | {} \n".format(field[3], field[4], field[5]) +
                  "-----------\n" +
                  " {} | {} | {} \n".format(result[0], result[1], result[2]))
            # Check for special outcomes per column.
            flag = False  # whether anything special happened this turn
            for j in range(3):
                if result[j] == 0:
                    # Zero result: the column is cleared and the turn refunded.
                    flag = True
                    print("第 {} 列出现零解。".format(j + 1))
                    tot -= 2
                    field[j] = field[j + 3] = '_'
                    result[j] = '*'
                elif type(result[j]) == int and result[j] < 0:
                    # Negative result: backfire, costs one life point.
                    flag = True
                    print("第 {} 列出现负解。零件走火,伤害一点生命值。".format(j + 1))
                    life.life_minus(1)
            # If something special happened, print the board again.
            if flag:
                print(
                    "当前激活通道的状况:\n" +
                    " {} | {} | {} \n".format(field[0], field[1], field[2]) +
                    " {} | {} | {} \n".format(field[3], field[4], field[5]) +
                    "-----------\n" +
                    " {} | {} | {} \n".format(result[0], result[1], result[2]))
            # Tally energy: a column result of 4 gives 1, of 5 gives 2.
            for j in range(3):
                if result[j] == 4:
                    energy += 1
                elif result[j] == 5:
                    energy += 2
        if energy >= 4:
            # Activation succeeds; surplus energy goes to "God's Hand".
            print("成功激活 {} 。另外获得 {} 点上帝之手能量。".format(vars.ConstructMap[choice], energy - 4))
            vars.Activate[vars.ConstructMap[choice]] = True
            vars.GodsHand += energy - 4
            break
        else:
            print("能量总量 {} ,能量不足,激活失败。".format(energy))
            if i == 0:
                print("耗费一天开启第二个激活通道。")
            elif i == 1:
                # Second failure: force activation anyway at a time cost.
                print("未成功激活。耗费一天时间强行激活。")
                vars.Activate[vars.ConstructMap[choice]] = True
    # Either path consumed one in-game day.
    utime.time_add(1)
def main(argv):
    # Leave-one-mosaic-out Unet training sweep over learning rates: for each
    # LR and each held-out mosaic, train, predict every validation patch,
    # repaint per-label layer images from the predictions, and score them
    # against the manual annotation with the Dice coefficient.
    # NOTE(review): `imagePrefixes`, `layerNames`, `layerDict`, `imageDict`,
    # `codes` and `outputDir` are not defined in this function — presumably
    # module globals defined outside this chunk; verify. Nesting reconstructed
    # from a collapsed source.
    # Read parameters
    # mosaic and layer prefix
    imagePrefix = argv[1]
    patch_size = int(argv[2])
    dataDir = argv[3]
    # outputFileDir should contain a file with the names of all images called "allFilesList.txt"
    allImagesFile = "allFilesList.txt"
    seasonPrefix = "wm"
    # this can be used to make the patches here, not active, not tested
    numMosaics = 7
    # for i in range(1,numMosaics+1):
    #     Break mosaics into patches, join all patches in the same folder
    #     patch.main(["",imagePrefix+str(i),patch_size,outputDir,outputPrefix+str(i),str(i==1)],csvFileName)
    # now,we have all the patches and have one single file with all the names of the files
    # now break into files with the files of one mosaic each
    # Store the names of the files in a list
    validFileNameList = []
    for i in range(1, numMosaics + 1):
        validFileName = "valid" + seasonPrefix + str(i) + ".txt"
        outF = open(dataDir + validFileName, "w")
        validFileNameList.append(validFileName)
        createValidationFile(dataDir + allImagesFile, seasonPrefix + str(i), outF)
        outF.close()
    # initialize the list of mosaics and make a list with their shapes
    shapeX = {}
    shapeY = {}
    image = {}
    for pref in imagePrefixes:
        # Derive the directory part of imagePrefix for loading the mosaic.
        imageDir = ""
        for x in imagePrefix.split("/")[:-1]:
            imageDir = imageDir + x + "/"
        image[pref] = cv2.imread(imageDir + pref + ".jpg", cv2.IMREAD_COLOR)
        print("Image " + imageDir + pref + ".jpg")
        shapeX[pref] = image[pref].shape[0]
        shapeY[pref] = image[pref].shape[1]
    # create a blank image for each layer of each mosaic
    layerList = []
    i = 0
    for pref in imagePrefixes:
        layerList.append([])
        for x in range(len(layerNames)):
            layerList[i].append(
                np.zeros((shapeX[pref], shapeY[pref]), dtype=np.uint8))
        i += 1
    # now, take every mosaic, exclude it and train a Unet model with the images of all other mosaics
    # to do this, simply use the corresponding validationFile as the validation file in the unet
    compute = True
    lrValues = [
        0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.01, 0.02, 0.03, 0.04,
        0.05, 0.06, 0.07, 0.08, 0.09, 0.001, 0.002, 0.003, 0.004,
        0.005, 0.006, 0.007, 0.008, 0.009, 0.0001, 0.0002, 0.0003, 0.0004,
        0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.00001, 0.00002, 0.00003,
        0.00004, 0.00005, 0.00006, 0.00007, 0.00008, 0.00009
    ]
    for lr in lrValues:
        print("Starting with LR " + str(lr))
        for i in range(1, numMosaics + 1):
            mosaicName = seasonPrefix + str(i)
            print("Starting Unet computation with mosaic " + mosaicName)
            learn = trainUnet(validFileNameList[i - 1], mosaicName, dataDir,
                              compute, lr)
            # now, for the trained model,
            #   take all the files in the validation list
            #   Predict each file
            pref = imagePrefixes[i - 1]
            f = open(dataDir + validFileNameList[i - 1], "r")
            for line in f:
                # predict image
                imName = line.strip()
                img = open_image(dataDir + "images/" + imName)
                patchNumber = int(line.split("h")[1].split("R")[0])
                # print("patch "+str(patchNumber))
                # predicted: 1) class, 2) id (in this case pixel labels), 3) probabilities
                pred_class, pred_idx, outputs = learn.predict(img)
                # print("predicate shape "+str(pred_idx.shape)+" "+str(pred_idx))
                predicate = pred_idx.reshape(
                    (pred_idx.shape[1], pred_idx.shape[2]))
                # print("predicate shape "+str(predicate.shape))
                numStepsX = int(shapeX[pref] / patch_size)
                numStepsY = int(shapeY[pref] / patch_size)
                for l in codes:
                    if l not in ["Void"]:
                        # print("Code "+str(l))
                        # now, paint the information of each patch in the layer where it belongs
                        xJump = patchNumber // numStepsY
                        yJump = patchNumber % numStepsY
                        # print("imageDict[pref]"+str(imageDict[pref]))
                        # print("layerDict[l]"+str(layerDict[l]))
                        currentLayerIm = layerList[imageDict[pref]][
                            layerDict[l]]
                        # Build a 0/255 patch marking pixels predicted as label l.
                        patch = np.zeros(
                            (predicate.shape[0], predicate.shape[1]),
                            dtype=np.uint8)
                        patch.fill(0)
                        for a in range(predicate.shape[0]):
                            for b in range(predicate.shape[1]):
                                if predicate[a][b] == layerDict[l]:
                                    # print("found a pixel "+str(a)+" "+str(b)+" "+str(l))
                                    patch[a][b] = 255
                        # patch[predicate==layerDict[l]]=255
                        # cv2.imwrite("./outImg/REALPATCH"+str(patchNumber)+".jpg",patch)
                        # cv2.imwrite(outputDir+pref+"patch"+str(patchNumber)+l+".jpg",patch)
                        replaceImagePatch(currentLayerIm, xJump * patch_size,
                                          yJump * patch_size, patch_size,
                                          patch)
            for l in codes:
                if l not in ["Void"]:
                    # We are finished, store all layer images
                    predictedLayer = layerList[imageDict[pref]][layerDict[l]]
                    cv2.imwrite(
                        outputDir + pref + "UnetMaskLayer" + str(l) + ".jpg",
                        predictedLayer)
                    # Also output DICE
                    # load manual annotation
                    manualLayer = cv2.imread(
                        imageDir + pref + "layer" + str(layerDict[l]) + ".jpg",
                        cv2.IMREAD_GRAYSCALE)
                    if manualLayer is not None:
                        manualLayer = np.invert(manualLayer)
                        currentDice = dice.dice(predictedLayer, manualLayer)
                        print(
                            "*******************************************dice Unet "
                            + l + " " + str(currentDice))
                    else:
                        print(
                            "*******************************************dice Unet "
                            + l + " NO LAYER ")
def test_unknown_gkey(self):
    """A groupby gkey that the data does not contain raises KeyError."""
    schema = {'groupby': {'gkey': 'submissions'}}
    self.assertRaises(KeyError, dice, data, schema)
# use the dice module to simulate throwing 2 dice and return the values you get from the dice.
import dice

# Two throws, one printed per line.
for _ in range(2):
    print(dice.dice())
def __init__(self):
    """Create five six-sided dice and give each an initial roll."""
    # BUG FIX: the original appended to self.dice without ever creating the
    # list in __init__, which raises AttributeError (or, if `dice` existed
    # as a class attribute, silently shared one list across all instances).
    self.dice = []
    for _ in range(5):
        self.dice.append(dice(6))
    for d in self.dice:
        d.roll()
def test_constant_roll(self):
    """A bare constant with no die notation parses to None."""
    # This fails so it is "none"
    assert dice('1234') is None
# NOTE(review): fragment — the first two statements belong to an out-of-view
# enclosing function, and the trailing menu_options list is truncated at the
# edge of this chunk.
parse_down(dmg_menu_user_input, all_dice)
all_dice.pick_your_poison("dmg", current_player)


# Main Start of Program
if __name__ == '__main__':
    # sets window size of terminal (Windows-only `mode` command)
    cmd = 'mode 66,40'
    os.system(cmd)
    # logging.basicConfig(filename='logfile.log', level=logging.DEBUG)
    chosen_character = pick_your_character()
    os.system("cls")
    # logging.debug('pick_your_character() has finished.')
    current_player = player.player(chosen_character)
    # logging.debug('player.player() initiated.')
    all_dice = dice.dice()
    # logging.debug('dice.dice() initiated.')
    # intro_banner()
    os.system("cls")
    # logging.debug('intro_banner has finished.')
    # list of all the traits and skills
    traits_ls = [
        'agility', 'smarts', 'spirit', 'strength', 'vigor', 'athletics',
        'battle', 'boating', 'common_knowledge', 'driving', 'electronics',
        'faith', 'fighting', 'focus', 'gambling', 'hacking', 'healing',
        'intimidation', 'language', 'notice', 'occult', 'performance',
        'persuasion', 'piloting', 'psionics', 'repair', 'research', 'riding',
        'science', 'shooting', 'spellcasting', 'stealth', 'survival', 'taunt',
        'thievery', 'weird_science'
    ]
    menu_options = [
def test_error_non_select_on_dictionary(self):
    """Applying a non-select schema to a dictionary raises TypeError."""
    schema = {'keys': ['title', 'author']}
    self.assertRaises(TypeError, dice, select_data, schema)
#!/usr/bin/env python
"""CLI: apply a JSON schema to a JSON data file via dice() and pretty-print."""
import argparse
import json
from dice import dice

arg_parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
arg_parser.add_argument('data_file')
arg_parser.add_argument('--schema', default='schema.json')
opts = arg_parser.parse_args()

with open(opts.data_file) as data_fh:
    data = json.load(data_fh)
with open(opts.schema, 'r') as schema_fh:
    schema = json.load(schema_fh)

print(json.dumps(dice(data, schema), indent=2))
def test_where_gt(self):
    """'where: difficulty>5' keeps only rows above the threshold."""
    schema = {'where': 'difficulty>5', 'keys': ['slug']}
    self.assertEqual(dice(data, schema),
                     ['reverse-string', 'word-search', 'wordy'])
def initiative(self):
    """Roll initiative: one d20 plus a fixed +6 modifier."""
    modifier = 6
    return modifier + dice(1, 20)
def test_error_select_on_list(self):
    """Applying a select schema to list data raises TypeError."""
    schema = {'select': 'slug'}
    self.assertRaises(TypeError, dice, data, schema)
def main():
    """Create a hand of four dice."""
    dices = [dice() for _ in range(4)]
def __init__(self, data):
    """Parse each 'coins' entry ('die_spec;denomination') into a
    [dice, denomination] pair."""
    Treasure.__init__(self, data)
    self.coins = []
    for entry in self.data["coins"]:
        parts = entry.split(';')
        self.coins.append([dice.dice(parts[0]), parts[1]])
# Two-player dice betting game: each round, roll the dice, ask which player
# lost, and move the bet amount from the loser to the winner.
from player import player
from dice import dice

name = input("p1 name: ")
money = int(input("p1 money:"))
print("\n")
name2 = input("p2 name: ")
money2 = int(input("p2 money: "))
p1 = player(name, money)
p2 = player(name2, money2)
dices = dice()
while True:
    print("\n")
    amount = int(input("amount to bet: "))
    dices.roll()
    print("\n")
    # BUG FIX: the original bound this input to `player`, shadowing the
    # imported `player` class after the first round.
    loser = int(input("which player lost? "))
    if loser == 1:
        p1.loss(amount)
        p2.won(amount)
    else:
        p2.loss(amount)
        p1.won(amount)
def roll_treasure(self):
    """Roll this table's die and pick that many random mundane objects.

    Returns the comma-separated items followed by their gp value.
    """
    count = int(dice.dice(self.die).roll())
    items = [random.choice(self.objects[self.num]) for _ in range(count)]
    return ", ".join(items) + " (" + self.num + " gp each)"
from dice import dice

# Roll two dice and report both results on separate lines.
dice_1 = dice()
dice_2 = dice()
message = f"Dice 1: {dice_1}\nDice 2: {dice_2}"
print(message)
def roll_treasure(self):
    """Roll this table's die and generate that many magic items.

    Returns a comma-separated string of generated treasure entries.
    """
    count = int(dice.dice(self.die).roll())
    items = [self.magic.generate_treasure(self.table) for _ in range(count)]
    return ", ".join(items)
# Number-guessing game: dice.dice() picks the target; the player has guesses
# until correct ('q' quits).
# NOTE(review): truncated view — the loop body continues past this chunk.
# NOTE(review): raw_input is Python 2 while the print() calls work in both —
# confirm which interpreter this targets.
import dice
import compare

print('Instructions:')
print('You have 10 tries to guess what the number I am thinking of is')
print('My number is between 1 and 100')
print('Type q to quit')
print()
print()
while True:
    # initialize answer, try counter, and guess
    answer = dice.dice()
    tries = 0
    guess = None
    while guess != answer:
        guess = raw_input('What number am I thinking of: ')
        # checks value if can be converted into an integer
        # and converts it if possible
        if guess.isdigit():
            guess = int(guess)
        # checks value if it contains ANY letters in it
        if type(guess) == str:
            if guess.isalpha():
                # NOTE(review): str(guess) on a str is a no-op; this branch
                # only distinguishes 'q' from other words.
                guess = str(guess)
                if guess == 'q':
                    lol = 'lmfao'
                else:
                    print('This command is invalid')
def __init__(self, user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num, reuse):
    """Build the DIN (Deep Interest Network) TF1 graph: placeholders,
    item/category embeddings, attention over history, a pairwise (i vs j)
    scoring head, a sub-graph scoring the first `predict_ads_num` ads,
    and the training loss/optimizer.

    Args:
        user_count: number of users (not referenced by the visible graph).
        item_count: item vocabulary size for the item embedding/bias.
        cate_count: category vocabulary size for the category embedding.
        cate_list: per-item category ids; converted to a tensor below.
        predict_batch_size: batch size assumed by the prediction sub-graph.
        predict_ads_num: how many leading ads to score in logits_sub.
        reuse: variable-scope reuse flag for 'DinNet'.
    """
    with tf.variable_scope('DinNet', reuse=reuse):
        # Feed placeholders. [B] = batch, [B, T] = batch x history length.
        self.u = tf.placeholder(tf.int32, [None, ])  # [B] user ids
        self.i = tf.placeholder(tf.int32, [None, ])  # [B] positive item ids
        self.j = tf.placeholder(tf.int32, [None, ])  # [B] negative item ids
        self.y = tf.placeholder(tf.int32, [None, ])  # [B] labels
        self.hist_i = tf.placeholder(tf.int32, [None, None])  # [B, T] history item ids
        self.sl = tf.placeholder(tf.int32, [None, ])  # [B] true history lengths
        self.lr = tf.placeholder(tf.float64, [])  # learning rate
        hidden_units = 128
        # Item and category embeddings each take half of hidden_units, so a
        # concatenated [item; category] embedding is hidden_units wide.
        item_emb_w = tf.get_variable("item_emb_w", [item_count, hidden_units // 2])
        item_b = tf.get_variable("item_b", [item_count], initializer=tf.constant_initializer(0.0))
        cate_emb_w = tf.get_variable("cate_emb_w", [cate_count, hidden_units // 2])
        cate_list = tf.convert_to_tensor(cate_list, dtype=tf.int64)
        # Embedding for the positive item i: item embedding ++ its category embedding.
        ic = tf.gather(cate_list, self.i)
        i_emb = tf.concat(values=[
            tf.nn.embedding_lookup(item_emb_w, self.i),
            tf.nn.embedding_lookup(cate_emb_w, ic),
        ], axis=1)
        i_b = tf.gather(item_b, self.i)
        # Same construction for the negative item j.
        jc = tf.gather(cate_list, self.j)
        j_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.j),
            tf.nn.embedding_lookup(cate_emb_w, jc),
        ], axis=1)
        j_b = tf.gather(item_b, self.j)
        # History embeddings, [B, T, hidden_units].
        hc = tf.gather(cate_list, self.hist_i)
        h_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, self.hist_i),
            tf.nn.embedding_lookup(cate_emb_w, hc),
        ], axis=2)
        # Optionally run the history through a GRU before attention.
        if USE_RNN:
            rnn_outputs, _ = dynamic_rnn(GRUCell(hidden_units), inputs=h_emb, sequence_length=self.sl, dtype=tf.float32, scope='gru1')
            hist_i = attention(i_emb, rnn_outputs, self.sl)
        else:
            hist_i = attention(i_emb, h_emb, self.sl)
        #-- attention end ---
        hist_i = tf.layers.batch_normalization(inputs=hist_i)
        hist_i = tf.reshape(hist_i, [-1, hidden_units], name='hist_bn')
        hist_i = tf.layers.dense(hist_i, hidden_units, name='hist_fcn')
        u_emb_i = hist_i  # user representation attended w.r.t. item i
        if USE_RNN:
            hist_j = attention(j_emb, rnn_outputs, self.sl)
        else:
            hist_j = attention(j_emb, h_emb, self.sl)
        #-- attention end ---
        # reuse=True shares the BN/dense parameters with the hist_i branch.
        hist_j = tf.layers.batch_normalization(inputs=hist_j, reuse=True)
        hist_j = tf.reshape(hist_j, [-1, hidden_units], name='hist_bn')
        hist_j = tf.layers.dense(hist_j, hidden_units, name='hist_fcn', reuse=True)
        u_emb_j = hist_j  # user representation attended w.r.t. item j
        print('shapes:')
        print(
            f'(u_emb_i, u_emb_j, i_emb, j_emb) -> ({u_emb_i.get_shape().as_list()}, {u_emb_j.get_shape().as_list()}, {i_emb.get_shape().as_list()}, {j_emb.get_shape().as_list()})'
        )
        #-- fcn begin -------
        # Fully-connected scoring tower for item i: 80 -> 40 -> 1.
        din_i = tf.concat([u_emb_i, i_emb], axis=-1)
        din_i = tf.layers.batch_normalization(inputs=din_i, name='b1')
        if USE_DICE:
            # Dice activation variant: dense layers stay linear, dice follows.
            d_layer_1_i = tf.layers.dense(din_i, 80, activation=None, name='f1')
            d_layer_1_i = dice(d_layer_1_i, name='dice_1')
            d_layer_2_i = tf.layers.dense(d_layer_1_i, 40, activation=None, name='f2')
            d_layer_2_i = dice(d_layer_2_i, name='dice_2')
        else:
            d_layer_1_i = tf.layers.dense(din_i, 80, activation=tf.nn.sigmoid, name='f1')
            d_layer_2_i = tf.layers.dense(d_layer_1_i, 40, activation=tf.nn.sigmoid, name='f2')
            # To try the Dice activation, set activation=None above and add
            # dice(...) layers as in the USE_DICE branch (see model_dice.py).
        d_layer_3_i = tf.layers.dense(d_layer_2_i, 1, activation=None, name='f3')
        # Same tower for item j, with reuse=True to share all weights.
        din_j = tf.concat([u_emb_j, j_emb], axis=-1)
        din_j = tf.layers.batch_normalization(inputs=din_j, name='b1', reuse=True)
        if USE_DICE:
            d_layer_1_j = tf.layers.dense(din_j, 80, activation=None, name='f1', reuse=True)
            d_layer_1_j = dice(d_layer_1_j, name='dice_1')
            d_layer_2_j = tf.layers.dense(d_layer_1_j, 40, activation=None, name='f2', reuse=True)
            d_layer_2_j = dice(d_layer_2_j, name='dice_2')
        else:
            d_layer_1_j = tf.layers.dense(din_j, 80, activation=tf.nn.sigmoid, name='f1', reuse=True)
            d_layer_2_j = tf.layers.dense(d_layer_1_j, 40, activation=tf.nn.sigmoid, name='f2', reuse=True)
        d_layer_3_j = tf.layers.dense(d_layer_2_j, 1, activation=None, name='f3', reuse=True)
        d_layer_3_i = tf.reshape(d_layer_3_i, [-1])
        d_layer_3_j = tf.reshape(d_layer_3_j, [-1])
        # Pairwise score difference; positive x means i outranks j. [B]
        x = i_b - j_b + d_layer_3_i - d_layer_3_j  # [B]
        self.logits = i_b + d_layer_3_i  # prediction logits for selected items
        # Scoring sub-graph for the first predict_ads_num candidate ads:
        item_emb_all = tf.concat(
            [item_emb_w, tf.nn.embedding_lookup(cate_emb_w, cate_list)], axis=1)
        item_emb_sub = item_emb_all[:predict_ads_num, :]
        item_emb_sub = tf.expand_dims(item_emb_sub, 0)
        # Tile candidates across the batch; assumes batch == predict_batch_size.
        item_emb_sub = tf.tile(item_emb_sub, [predict_batch_size, 1, 1])
        hist_sub = attention_multi_items(item_emb_sub, h_emb, self.sl)
        #-- attention end ---
        hist_sub = tf.layers.batch_normalization(inputs=hist_sub, name='hist_bn', reuse=tf.AUTO_REUSE)
        hist_sub = tf.reshape(hist_sub, [-1, hidden_units])
        hist_sub = tf.layers.dense(hist_sub, hidden_units, name='hist_fcn', reuse=tf.AUTO_REUSE)
        u_emb_sub = hist_sub
        item_emb_sub = tf.reshape(item_emb_sub, [-1, hidden_units])
        din_sub = tf.concat([u_emb_sub, item_emb_sub], axis=-1)
        # reuse=True: the sub-graph shares the f1/f2/f3 tower weights.
        # NOTE(review): this branch always uses sigmoid activations even when
        # USE_DICE is set for the main towers — confirm this is intentional.
        din_sub = tf.layers.batch_normalization(inputs=din_sub, name='b1', reuse=True)
        d_layer_1_sub = tf.layers.dense(din_sub, 80, activation=tf.nn.sigmoid, name='f1', reuse=True)
        d_layer_2_sub = tf.layers.dense(d_layer_1_sub, 40, activation=tf.nn.sigmoid, name='f2', reuse=True)
        d_layer_3_sub = tf.layers.dense(d_layer_2_sub, 1, activation=None, name='f3', reuse=True)
        d_layer_3_sub = tf.reshape(d_layer_3_sub, [-1, predict_ads_num])
        self.logits_sub = tf.sigmoid(item_b[:predict_ads_num] + d_layer_3_sub)
        self.logits_sub = tf.reshape(self.logits_sub, [-1, predict_ads_num, 1])
        #-- fcn end -------
        # Fraction of pairs where the positive item outscores the negative.
        self.mf_auc = tf.reduce_mean(tf.to_float(x > 0))
        self.score_i = tf.sigmoid(i_b + d_layer_3_i)
        self.score_j = tf.sigmoid(j_b + d_layer_3_j)
        self.score_i = tf.reshape(self.score_i, [-1, 1])
        self.score_j = tf.reshape(self.score_j, [-1, 1])
        self.p_and_n = tf.concat([self.score_i, self.score_j], axis=-1)
        print(f'p_and_n -> {self.p_and_n.get_shape().as_list()}')
        # Step variables: global_step advances per optimizer step,
        # global_epoch_step via the explicit assign op.
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.global_epoch_step = tf.Variable(0, trainable=False, name='global_epoch_step')
        self.global_epoch_step_op = tf.assign(self.global_epoch_step, self.global_epoch_step + 1)
        # Pointwise sigmoid cross-entropy on the positive-item logits;
        # int labels are cast to float as the op requires.
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.logits,
                labels=tf.cast(self.y, tf.float32)))
        # SGD with global-norm gradient clipping at 5.
        self.trainable_params = tf.trainable_variables()
        self.opt = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
        self.gradients = tf.gradients(self.loss, self.trainable_params)
        self.clip_gradients, _ = tf.clip_by_global_norm(self.gradients, 5)
        self.train_op = self.opt.apply_gradients(
            zip(self.clip_gradients, self.trainable_params),
            global_step=self.global_step)
def roll_treasure(self):
    """Roll self.die for how many objects drop, draw them at random from
    self.objects[self.num], and return them with a per-item gp value."""
    quantity = int(dice.dice(self.die).roll())
    loot = ", ".join(random.choice(self.objects[self.num]) for _ in range(quantity))
    return loot + " (" + self.num + " gp each)"
def DIN(i, j, y, hist_i, sl, item_count, cate_count, cate_list, reuse, is_training):
    """Build a functional-style DIN graph and return logits for item i.

    Args:
        i, j: [B] positive / negative item id tensors.
        y: [B] label tensor (not referenced by the visible graph).
        hist_i: [B, T] history item ids.
        sl: [B] true history lengths.
        item_count, cate_count: vocabulary sizes for the embeddings.
        cate_list: per-item category ids; converted to a tensor below.
        reuse: variable-scope reuse flag for 'DinNet'.
        is_training: when False, the returned logits are passed through sigmoid.

    Returns:
        [B] logits for item i (sigmoid-activated when not training).
    """
    with tf.variable_scope('DinNet', reuse=reuse):
        hidden_units = 128
        # Item and category embeddings each take half of hidden_units.
        item_emb_w = tf.get_variable("item_emb_w", [item_count, hidden_units // 2])
        item_b = tf.get_variable("item_b", [item_count], initializer=tf.constant_initializer(0.0))
        cate_emb_w = tf.get_variable("cate_emb_w", [cate_count, hidden_units // 2])
        cate_list = tf.convert_to_tensor(cate_list, dtype=tf.int64)
        # Embedding for item i: item embedding ++ its category embedding.
        ic = tf.gather(cate_list, i)
        i_emb = tf.concat(values=[
            tf.nn.embedding_lookup(item_emb_w, i),
            tf.nn.embedding_lookup(cate_emb_w, ic),
        ], axis=1)
        i_b = tf.gather(item_b, i)
        # Same construction for item j.
        jc = tf.gather(cate_list, j)
        j_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, j),
            tf.nn.embedding_lookup(cate_emb_w, jc),
        ], axis=1)
        j_b = tf.gather(item_b, j)
        # History embeddings, [B, T, hidden_units].
        hc = tf.gather(cate_list, hist_i)
        h_emb = tf.concat([
            tf.nn.embedding_lookup(item_emb_w, hist_i),
            tf.nn.embedding_lookup(cate_emb_w, hc),
        ], axis=2)
        # Optionally run the history through a GRU before attention.
        if USE_RNN:
            rnn_outputs, _ = dynamic_rnn(GRUCell(hidden_units), inputs=h_emb, sequence_length=sl, dtype=tf.float32, scope='gru1')
            hist_i = attention(i_emb, rnn_outputs, sl)
        else:
            hist_i = attention(i_emb, h_emb, sl)
        #-- attention end ---
        hist_i = tf.layers.batch_normalization(inputs=hist_i)
        hist_i = tf.reshape(hist_i, [-1, hidden_units], name='hist_bn')
        hist_i = tf.layers.dense(hist_i, hidden_units, name='hist_fcn')
        u_emb_i = hist_i  # user representation attended w.r.t. item i
        if USE_RNN:
            hist_j = attention(j_emb, rnn_outputs, sl)
        else:
            hist_j = attention(j_emb, h_emb, sl)
        #-- attention end ---
        # reuse=True shares the BN/dense parameters with the hist_i branch.
        hist_j = tf.layers.batch_normalization(inputs=hist_j, reuse=True)
        hist_j = tf.reshape(hist_j, [-1, hidden_units], name='hist_bn')
        hist_j = tf.layers.dense(hist_j, hidden_units, name='hist_fcn', reuse=True)
        u_emb_j = hist_j  # user representation attended w.r.t. item j
        print('shapes:')
        print(
            f'(u_emb_i, u_emb_j, i_emb, j_emb) -> ({u_emb_i.get_shape().as_list()}, {u_emb_j.get_shape().as_list()}, {i_emb.get_shape().as_list()}, {j_emb.get_shape().as_list()})'
        )
        #-- fcn begin -------
        # Fully-connected scoring tower for item i: 80 -> 40 -> 1.
        din_i = tf.concat([u_emb_i, i_emb], axis=-1)
        din_i = tf.layers.batch_normalization(inputs=din_i, name='b1')
        if USE_DICE:
            # Dice activation variant: dense layers stay linear, dice follows.
            d_layer_1_i = tf.layers.dense(din_i, 80, activation=None, name='f1')
            d_layer_1_i = dice(d_layer_1_i, name='dice_1')
            d_layer_2_i = tf.layers.dense(d_layer_1_i, 40, activation=None, name='f2')
            d_layer_2_i = dice(d_layer_2_i, name='dice_2')
        else:
            d_layer_1_i = tf.layers.dense(din_i, 80, activation=tf.nn.sigmoid, name='f1')
            d_layer_2_i = tf.layers.dense(d_layer_1_i, 40, activation=tf.nn.sigmoid, name='f2')
            # To try the Dice activation, set activation=None above and add
            # dice(...) layers as in the USE_DICE branch (see model_dice.py).
        d_layer_3_i = tf.layers.dense(d_layer_2_i, 1, activation=None, name='f3')
        # Same tower for item j, with reuse=True to share all weights.
        din_j = tf.concat([u_emb_j, j_emb], axis=-1)
        din_j = tf.layers.batch_normalization(inputs=din_j, name='b1', reuse=True)
        if USE_DICE:
            d_layer_1_j = tf.layers.dense(din_j, 80, activation=None, name='f1', reuse=True)
            d_layer_1_j = dice(d_layer_1_j, name='dice_1')
            d_layer_2_j = tf.layers.dense(d_layer_1_j, 40, activation=None, name='f2', reuse=True)
            d_layer_2_j = dice(d_layer_2_j, name='dice_2')
        else:
            d_layer_1_j = tf.layers.dense(din_j, 80, activation=tf.nn.sigmoid, name='f1', reuse=True)
            d_layer_2_j = tf.layers.dense(d_layer_1_j, 40, activation=tf.nn.sigmoid, name='f2', reuse=True)
        d_layer_3_j = tf.layers.dense(d_layer_2_j, 1, activation=None, name='f3', reuse=True)
        d_layer_3_i = tf.reshape(d_layer_3_i, [-1])
        d_layer_3_j = tf.reshape(d_layer_3_j, [-1])
        # Pairwise score difference; computed but not returned here. [B]
        x = i_b - j_b + d_layer_3_i - d_layer_3_j  # [B]
        logits = i_b + d_layer_3_i
        # At inference time return probabilities; raw logits while training.
        logits = tf.sigmoid(logits) if not is_training else logits
        return logits
def diceig(self):
    """Trigger a roll via dice.dice(); the result is intentionally discarded."""
    dice.dice()
def throw():
    """Roll once via dice.dice() and print the result."""
    result = dice.dice()
    print(result)