def run_full():
    splits = get_crossval_data()
    X = splits[0][0] + splits[1][0]
    Y1 = splits[0][1] + splits[1][1]
    Y2 = splits[0][2] + splits[1][2]
    test_data = get_test_data()
    remove_features_rfc = [19, 20, 34]
    remove_features_lr = [19, 20, 21, 22, 23, 24, 25, 26, 29, 30, 31, 32, 34]
    not_useful_rfc = [8, 11, 22, 24, 28, 33, 30, 31, 32]  # 9,21 # 30,31,32
    remove_features_rfc.extend(not_useful_rfc)
    not_useful_lr = [3, 4, 9, 11, 14, 15, 16, 17, 27, 28, 30, 31, 32]
    remove_features_lr.extend(not_useful_lr)
    z = [True] * len(X[0])
    w = [True] * len(X[0])
    for i in remove_features_rfc:
        z[i] = False
    for i in remove_features_lr:
        w[i] = False
    C = 0.03
    #C = 0.3
    m1 = Model(compress=z, has_none=w, C=C)
    m1.fit(X, Y1)
    final = False
    results = run_model(m1, None, test_data, is_final=final)
    if not final:
        print evaluate_test_results(results)
def __init__(self, name, updateRateHz, messagingbus, sendmessagesto):
    print "Instantiating Force & Moment Test Model ", name
    # Call superclass constructor
    Model.__init__(self, name, updateRateHz, messagingbus, sendmessagesto)
    # Member variables ----------------------------------------------------
    # Inputs
    self.timeOn = 0.0
    self.timeOff = 0.0
    self.forceStationInput = mat('0.0;0.0;0.0')
    self.momentStationInput = mat('0.0;0.0;0.0')
    self.forceStation = mat('0.0;0.0;0.0')
    self.momentStation = mat('0.0;0.0;0.0')
    # Register Input Parameters -------------------------------------------
    # (Input Parameter Name, Member Variable Name, Example of Type)
    self.registerInputParam('forceStation', 'forceStationInput', self.forceStationInput)
    self.registerInputParam('momentStation', 'momentStationInput', self.momentStationInput)
    self.registerInputParam('timeOn', 'timeOn', self.timeOn)
    self.registerInputParam('timeOff', 'timeOff', self.timeOff)
def train(training_list, model_path, format, is_crf=True, grid=False):
    # Read the data into Note objects
    notes = []
    for txt, con in training_list:
        note_tmp = Note(format)   # Create Note
        note_tmp.read(txt, con)   # Read data into Note
        notes.append(note_tmp)    # Add the Note to the list
    # file names
    if not notes:
        print 'Error: Cannot train on 0 files. Terminating train.'
        return 1
    # Create a Machine Learning model
    model = Model(is_crf=is_crf)
    # Train the model using the Notes' data
    model.train(notes, grid)
    # Pickle dump
    print 'pickle dump'
    with open(model_path, "wb") as m_file:
        pickle.dump(model, m_file)
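# A minimal counterpart sketch for consuming the model that train() pickles above.
# The predict(note) call is a hypothetical stand-in; the snippet above only shows
# that a trained Model object is dumped to model_path.
def load_and_predict(txt, con, model_path, format):
    with open(model_path, "rb") as m_file:
        model = pickle.load(m_file)   # restore the trained Model
    note = Note(format)
    note.read(txt, con)
    return model.predict(note)        # hypothetical API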
def main():
    # NOTE: Cutoff can either be a single integer or it can be a dictionary
    # where the keys are two-tuples of atomic numbers (e.g. (40,13)=3.5 for Zr,Al).
    modelfile = sys.argv[1]
    submodelfile = sys.argv[2]
    rotatedsubmodelfile = sys.argv[3]
    m = Model(modelfile)
    try:
        cut = 3.5  # float(sys.argv[2])
        cutoff = {}
        for z1 in m.atomtypes:
            for z2 in m.atomtypes:
                cutoff[(z1, z2)] = cut
                cutoff[(z2, z1)] = cut
    except:
        print("You didn't input a cutoff, so you must define it in the code.")
    voronoi_3d(m, cutoff)
    subm = Model(submodelfile)
    rotsubm = Model(rotatedsubmodelfile)
    for atom in subm.atoms:
        if atom in m.atoms:
            rotsubm.atoms[atom.id].vp = m.atoms[m.atoms.index(atom)].vp
        else:
            print("Couldn't find atom {0} in full model!".format(atom))
    icofrac(rotsubm)
    # rotsubm.write_real_xyz()
    rotsubm.write_real_xyz(rotatedsubmodelfile[:-4] + ".icofrac.real.xyz")
def __init__(self, tracks=None, devices=None, transport=None,
             view_scale=None, units=None, patch_bay=None):
    Model.__init__(self)
    # the file path to save to
    self.path = None
    # transport
    if transport is None:
        transport = Transport()
    self.transport = transport
    # time scale
    if view_scale is None:
        view_scale = ViewScale()
    self.view_scale = view_scale
    # devices
    if devices is None:
        devices = DeviceAdapterList()
    self.devices = devices
    # a list of units on the workspace
    if units is None:
        units = UnitList()
    self.units = units
    self.units.add_observer(self.on_change)
    self.units.add_observer(self.update_transport_duration)
    self.update_transport_duration()
    # a list of connections between units
    if patch_bay is None:
        patch_bay = PatchBay()
    self.patch_bay = patch_bay
    self.patch_bay.add_observer(self.on_change)
def get(self):
    ''' ajax for publish network test table '''
    id = self.get_argument("id", None)
    handler = Model('LvsPublish')
    result = handler.getLvsPublishOne(id)
    info = result['info_yaml'][result['id']]
    html = ''
    success_count = 0
    fail_count = 0
    for i in info['server']:
        address_list = i['vip_group']
        protocol = str(i['protocol'])
        descript = i['descript']
        for vip_group in address_list:
            ret = check_server(vip_group['vip'], vip_group['port'], protocol)
            if ret:
                html += '%s %s:%s ok\n' % (descript, vip_group['vip'], vip_group['port'])
                success_count += 1
            else:
                html += '%s %s:%s fail\n' % (descript, vip_group['vip'], vip_group['port'])
                fail_count += 1
    network = {"success_count": success_count, "fail_count": fail_count, "html": html}
    self.render2('publishnetworktest.tpl', network=network)
def post(self):
    id = self.get_argument("id", None)
    keyword = self.get_argument("keyword", "")
    handler = Model('LvsManagerConfig')
    vipinstanceinfo = handler.getLvsManagerConfigVipInstanceList(id)

    def build(vipinstanceinfo):
        for row in vipinstanceinfo:
            for k, v in row.items():
                if k == 'vip_group':
                    rs = []
                    for r in v:
                        rs.append(r['vip'] + ':' + r['port'])
                    row[k] = "<br>".join(rs)
                else:
                    if isinstance(v, unicode):
                        row[k] = str(v.encode('utf-8'))
                    else:
                        row[k] = str(v)
        return json.dumps(vipinstanceinfo)

    result = []
    for row in vipinstanceinfo:
        if str(row['descript']).find(keyword) != -1:
            result.append(row)
    if len(result) > 0:
        ret = build(result)
    else:
        ret = build(vipinstanceinfo)
    self.write(ret)
def user_is_manager(user):
    handler = Model('LvsAccount')
    user_info = handler.getAccountOne(user)
    if user_info['is_manager'] or user_info['is_super_manager']:
        return True
    else:
        return False
def get(self):
    ''' show publish.html page '''
    handler = Model('LvsPublish')
    result = handler.getLvsPublish()
    self.render2('publish.html', publish=result)
def test_binary_classification_should_output_row_vector_of_0_1_only(self):
    model = Model([np.random.rand(4, 6), np.random.rand(1, 5)])
    prediction = model.predict_binary_classification(np.random.rand(10, 5))
    self.assertEqual(prediction.shape, (10, 1))
    zeros = len(np.argwhere(prediction == 0))
    ones = len(np.argwhere(prediction == 1))
    self.assertEqual(zeros + ones, 10)
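# For context, a minimal sketch of what predict_binary_classification presumably
# does: threshold a probability-like forward-pass output at 0.5. Both the
# predict() helper and the 0.5 threshold are assumptions, not taken from the test.
def predict_binary_classification_sketch(model, X):
    probs = model.predict(X)             # hypothetical forward pass, shape (n, 1)
    return (probs >= 0.5).astype(int)    # assumed decision threshold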
def login(name, password):
    '''
    :param name:
    :param password:
    :return: {"status":1, "username":user, "is_manager":False, "is_super_manager":False}
    '''
    try:
        ret = requests.post(API_URL + 'login', {'user_name': name, 'password': password}).json()
        if ret['status'] == '1':
            user = ret['username']
            if user:
                handler = Model('Account')
                _find_user_result = handler.getAccountOne(user)
                if _find_user_result:
                    time_now = timestamptodate(time.time())
                    handler.UpdateAccountPrivilege(user, ret['is_manager'], ret['is_super_manager'])
                    handler.updateAccountLogintime(user, time_now)
                else:
                    time_now = timestamptodate(time.time())
                    user_data = {"username": user,
                                 "is_manager": ret['is_manager'],
                                 "is_super_manager": ret['is_super_manager'],
                                 "login_time": time_now,
                                 "register_time": time_now}
                    handler.InsertAccount(user_data)
                return 1
            else:
                return 0
    except Exception as er:
        return 0
def test_scalars(self):
    # Create 2 inputs
    X = helper.make_tensor_value_info('A', TensorProto.INT32, [])
    Y = helper.make_tensor_value_info('B', TensorProto.INT32, [])
    # Create one output
    Z = helper.make_tensor_value_info('C', TensorProto.INT32, [])
    # Create a node
    node_def = helper.make_node('Add', ['A', 'B'], ['C'])
    # Create the model
    graph_def = helper.make_graph([node_def], "scalar-model", [X, Y], [Z])
    onnx_model = helper.make_model(graph_def, producer_name='onnx-example')
    model = Model()
    model.BuildFromOnnxModel(onnx_model)
    schedule = model.OptimizeSchedule()
    schedule = schedule.replace('\n', ' ')
    expected_schedule = r'// Target: .+// MachineParams: .+// Delete this line if not using Generator Pipeline pipeline = get_pipeline\(\);.+Func C = pipeline.get_func\(2\);.+{.+}.+'
    self.assertRegex(schedule, expected_schedule)
    input1 = np.random.randint(-10, 10, size=())
    input2 = np.random.randint(-10, 10, size=())
    outputs = model.run([input1, input2])
    self.assertEqual(1, len(outputs))
    output = outputs[0]
    expected = input1 + input2
    np.testing.assert_allclose(expected, output)
def initialize(self, inputParameterValues):
    # Inputs
    self.windVelNED = mat('0.0;0.0;0.0')  # Received via message
    self.altitude = 0.0
    self.dcmBodyFromNED = mat([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
    self.velECEFinBody = mat('0.;0.;0.')
    # Internal
    self.vwindmag = 0.0
    self.speedofsound = 0.0
    # Outputs
    self.mach = 0.0
    self.alpha = 0.0
    self.beta = 0.0
    self.density = 0.0
    self.pressure = 0.0
    self.temperature = 0.0
    self.uvw = mat('0.0;0.0;0.0')
    self.qbar = 0.0
    self.qalpha = 0.0
    # Call the base class initialize function, which sets all the input params
    Model.initialize(self, inputParameterValues)
def loadXML(self, path):
    Model.loadXML(path)
    view.tree.Flush()
    view.tree.SetPyData(view.tree.root, Model.mainNode)
    self.setData(view.tree.root)
    if g.conf.expandOnOpen:
        view.tree.ExpandAll()
def test_small_model(self):
    # Create one input
    X = helper.make_tensor_value_info('IN', TensorProto.FLOAT, [2, 3])
    # Create one output
    Y = helper.make_tensor_value_info('OUT', TensorProto.FLOAT, [2, 3])
    # Create a node
    node_def = helper.make_node('Abs', ['IN'], ['OUT'])
    # Create the model
    graph_def = helper.make_graph([node_def], "test-model", [X], [Y])
    onnx_model = helper.make_model(graph_def, producer_name='onnx-example')
    model = Model()
    model.BuildFromOnnxModel(onnx_model)
    schedule = model.OptimizeSchedule()
    schedule = schedule.replace('\n', ' ')
    expected_schedule = r'// Target: .+// MachineParams: .+// Delete this line if not using Generator Pipeline pipeline = get_pipeline\(\);.+Func OUT = pipeline.get_func\(1\);.+{.+}.+'
    self.assertRegex(schedule, expected_schedule)
    input = np.random.rand(2, 3) - 0.5
    outputs = model.run([input])
    self.assertEqual(1, len(outputs))
    output = outputs[0]
    expected = np.abs(input)
    np.testing.assert_allclose(expected, output)
def __init__(self, num_decs, num_objs, ga_func, model_instance, initial_pop=None):
    Model.__init__(self)
    self.initialize_decs(num_decs)
    self.num_objs = num_objs
    self.ga_func = ga_func
    self.model_instance = model_instance
    self.initial_pop = initial_pop
def get_clusters_with_n_numneighs(self, cutoff, numneighs, cluster_types):
    m = Model(self.model.comment, self.model.lx, self.model.ly, self.model.lz,
              self.model.atoms[:])
    m.generate_neighbors(cutoff)
    vp_atoms = []
    #print(cluster_types)
    neighs = [[]] * m.natoms
    for i, atom in enumerate(m.atoms):
        if atom.vp.type in cluster_types:
            vp_atoms.append(atom.copy())
    numfound = 0
    for i, atomi in enumerate(vp_atoms):
        for j, atomj in enumerate(vp_atoms[vp_atoms.index(atomi) + 1:]):
            # Get all the neighbors they have in common
            #common_neighs = [n for n in atomi.neighs if n in atomj.neighs if n.vp.type not in cluster_types]
            common_neighs = [n for n in atomi.neighs if n in atomj.neighs]
            if len(common_neighs) >= numneighs:
                ind = m.atoms.index(atomi)
                neighs[ind] = neighs[ind] + copy.copy([x for x in common_neighs if x not in neighs[ind]])
                ind = m.atoms.index(atomj)
                neighs[ind] = neighs[ind] + copy.copy([x for x in common_neighs if x not in neighs[ind]])
                for n in common_neighs:
                    ind = m.atoms.index(n)
                    neighs[ind] = neighs[ind] + [x for x in [atomi, atomj] if x not in neighs[ind]]
                numfound += 1
    for i, tf in enumerate(neighs):
        m.atoms[i].neighs = tf
    m.check_neighbors()
    print('Total number of {0} atoms: {1}'.format(cluster_types, len(vp_atoms)))
    print('Total number of {2}-sharing {0} atoms: {1}'.format(cluster_types, numfound, numneighs))
    # Now I should be able to go through the graph/model's neighbors.
    return self.search(m, cluster_types)
def testErrorOnCategoryNotAllowed(self):
    mockParser = MockParser([Event('Unsupported', 'new-type', datetime.date(2012, 12, 10))])
    filter = SimpleEventFilter()
    model = Model(mockParser, filter, filter, [])
    self.assertRaises(Exception, model.getEventsForDate, datetime.date(2012, 12, 10))
    model.allowedTypes = ['new-type']
    model.getEventsForDate(datetime.date(2012, 12, 10))
def __init__(self, attrs={}):
    self.name = ''
    self._klasses = None
    self._fee = 0
    self._alt_fee = 0
    self._for_user = 0
    Model.__init__(self, attrs)
def main():
    # sys.argv == [categorize_parameters.txt, modelfile]
    if len(sys.argv) <= 2:
        sys.exit("\nERROR! Fix your inputs!\n\n"
                 "Arg 1: input param file detailing each voronoi 'structure'.\n"
                 "Should be of the form:\nCrystal:\n    0,2,8,*\n\n"
                 "Arg 2: a model file.\n\nOutput is printed to screen.")
    paramfile = sys.argv[1]
    modelfiles = sys.argv[2:]
    from cutoff import cutoff
    vp_dict = load_param_file(paramfile)
    m0 = Model(modelfiles[0])
    m0.generate_neighbors(cutoff)
    voronoi_3d(m0, cutoff)
    set_atom_vp_types(m0, vp_dict)
    stats0 = VPStatistics(m0)
    print(modelfiles[0])
    #stats0.print_indexes()
    stats0.print_categories()
    return
    # NOTE: everything below is unreachable because of the return above.
    if len(modelfiles) > 1:
        for modelfile in modelfiles[1:]:
            print(modelfile)
            m = Model(modelfile)
            voronoi_3d(m, cutoff)
            set_atom_vp_types(m, vp_dict)
            stats = VPStatistics(m)
            stats.print_categories()
def main():
    list = \
[0,1,2,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,39,40,41,42,43,44,45,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,92,93,94,96,97,98,99,100,101,102,103,104,105,106,108,109,110,111,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,137,138,139,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,258,259,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,278,280,281,282,283,284,285,286,287,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,344,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,377,378,379,380,381,382,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,471,472,473,474,475,477,478,479,480,481,482,484,485,486,487,488,489,490,491,492,493,494,495,496,497,498,499,500,501,502,503,504,505,506,508,509,510,511,512,513,514,515,516,517,518,519,520,521,523,524,526,527,528,529,530,532,534,535,536,537,538,540,541,542,543,544,545,546,547,548,549,551,552,553,554,555,556,557,558,559,560,561,562,563,564,565,566,567,568,569,570,571,572,573,574,575,576,577,578,579,580,581,582,583,584,585,586,587,588,589,590,592,593,594,596,597,598,599,600,601,603,604,605,606,607,608,609,610,611,612,613,614,615,616,617,618,619,620,621,622,623,624,625,626,627,628,629,630,631,632,633,634,635,636,637,638,639,640,641,642,643,644,646,647,648,649,650,651,653,654,655,656,657,658,659,660,661,662,663,664,665,666,667,668,669,671,672,673,674,675,676,677,678,679,680,681,682,683,684,685,686,687,688,689,690,691,692,693,694,695,696,697,698,699,700,702,703,704,705,706,707,708,709,710,711,712,713,714,715,716,717,718,719,720,721,722,723,724,725,726,727,728,729,730,731,732,733,734,736,737,738,739,740,741,742,743,744,745,746,747,748,749,750,751,752,753,754,755,756,757,758,759,761,762,763,764,765,766,767,768,769,770,772,773,774,775,776,778,779,781,783,784,785,786,787,788,789,790,791,792,793,794,795,796,797,798,799,800,801,802,803,804,805,806,807,808,809,810,811,812,813,814,815,816,817,818,819,820,821,822,823,824,825,826,827,828,829,830,831,832,833,834,835,836,837,838,839,840,841,842,843,844,845,846,847,848,849,850,851,852,853,854,855,856,857,858,859,860,861,863,864,865,866,867,870,871,872,873,874,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,895,896,897,898,899,900,901,902,903,904,905,906,907,909,910,911,912,913,914,915,916,917,918,919,920,921,922,923,924,925,926,927,928,929,930,931,933,934,935,936,937,938,939,940,941,942,943,944,946,947,948,949,950,951,952,953,954,955,956,958,959,960,961,963,964,965,966,967,968,969,
970,972,973,974,975,976,977,978,979,980,981,982,983,984,985,986,987,988,989,990,991,992,993,994,995,996,997,998,999,1000,1001,1002,1003,1004,1005,1007,1008,1009,1010,1011,1012,1013,1014,1015,1016,1018,1019,1021,1022,1023,1024,1025,1026,1027,1028,1030,1031,1032,1033,1034,1036,1037,1038,1039,1040,1041,1042,1043,1044,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1070,1071,1072,1073,1074,1075,1076,1077,1078,1080,1081,1082,1083,1084,1085,1086,1087,1088,1089,1090,1091,1092,1093,1094,1095,1096,1097,1098,1099,1100,1101,1102,1103,1104,1105,1106,1107,1108,1110,1111,1113,1114,1115,1116,1117,1118,1119,1120,1121,1122,1123,1124,1125,1127,1129,1130,1131,1132,1133,1134,1135,1136,1137,1138,1139,1140,1141,1142,1143,1144,1145,1146,1147,1148,1149,1150,1151,1152,1153,1154,1155,1156,1157,1158,1159,1161,1162,1163,1164,1165,1166,1167,1168,1169,1170,1171,1172,1173,1174,1175,1176,1177,1178,1180,1181,1182,1183,1184,1185,1186,1187,1188,1189,1190,1191,1192,1193,1194,1195,1196,1197,1198,1199,1200,1201,1202,1203,1204,1205,1206,1207,1208,1209,1210,1211,1212,1213,1214,1215,1216,1217,1218,1219,1220,1221,1222,1223,1224,1225,1226,1227,1228,1229,1230,1231,1232,1233,1234,1235,1236,1237,1238,1239,1240,1241,1242,1243,1244,1245,1246,1247,1248,1249,1250,1251,1252,1253,1254,1255,1256,1257,1258,1259,1260,1261,1262,1263,1264,1265,1266,1267,1269,1270,1271,1272,1273,1274,1275,1276,1278,1279,1280,1281,1282,1283,1284,1285,1286,1287,1288,1289,1290,1291,1292,1293,1294,1295,1296,1297,1298,1299,1300,1301,1302,1303,1304,1305,1307,1308,1310,1311,1312,1313,1314,1315,1316,1317,1318,1319,1320,1321,1322,1324,1325,1326,1327,1328,1329,1330,1331,1332,1333,1334,1335,1336,1337,1338,1339,1340,1341,1342,1343,1344,1345,1346,1347,1348,1349,1350,1352,1353,1354,1355,1356,1357,1359,1360,1361,1362,1363,1364,1365,1366,1367,1368,1369,1370,1371,1372,1373,1374,1375,1376,1377,1378,1379,1380,1381,1382,1383,1384,1385,1386,1387,1388,1389,1390,1391,1392,1393,1394,1395,1396,1397,1398,1399,1400,1401,1403,1404,1405,1406,1407,1408,1409,1410,1411,1412,1413,1414,1415,1417,1418,1419,1420,1421,1423,1424,1425]
    #list = [x-1 for x in list if x > 277]
    list1 = []
    for i in list:
        if i >= 277:
            list1.append(i - 1)
        else:
            list1.append(i)
    print(list1)
    list2 = range(0, 1426)
    list3 = [x for x in list2 if x not in list1]
    print(list3)
    modelfile = sys.argv[1]
    m = Model(modelfile)
    cluster = [atom for atom in m.atoms if atom.id not in list1]
    #ids = [atom.id for atom in m.atoms if atom.id not in list]
    for atom in cluster:
        print(atom)
    cm = Model("yellow ico cluster in jwh's t3 model", m.lx, m.ly, m.lz, cluster)
    cm.write_cif("jwh_ico_cluster.cif")
def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            ts = model.sample(sess, chars, vocab, args.n, args.prime, args.sample)
            print("Sampled Output\n")
            print(ts)
            print("Converting Text to Speech")
            tts = gTTS(text=ts, lang='en-uk')
            tts.save("ts.mp3")
            audio = MP3("ts.mp3")
            audio_length = audio.info.length
            print("Speaker is Getting Ready")
            mixer.init()
            mixer.music.load('ts.mp3')
            mixer.music.play()
            time.sleep(audio_length + 5)
def calc_entropy(K=20):
    global grammar
    global NORMALIZE
    global VERBOSE
    global start_cat
    cat_priors = Model('Prior')
    for cat in start_cat:
        cat_priors[cat] = 1
    normalize(cat_priors)
    for k in range(0, K):
        if VERBOSE:
            sys.stderr.write('Iterating (' + str(k) + '/' + str(K) + '): ')
            sys.stderr.write(str(cat_priors.keys()) + '\n')
        tot = 0.0
        if NORMALIZE:
            normalize(cat_priors)
        cat_likes = Model('MarginL')  # actually the new prior built from likelihoods
        for parent in cat_priors:
            for child in grammar[parent]:
                cat_likes[child] += grammar[parent][child] * cat_priors[parent] / 3.0
        cat_priors = cat_likes.copy()
    # sum over the probability of each category to obtain the overall likelihood of the grammar
    grammar_prob = 0.0
    for cat in cat_priors:
        grammar_prob += cat_priors[cat]
    # output the entropy of the grammar
    sys.stderr.write(str(-math.log(grammar_prob)) + '\n')
    sys.stdout.write(str(-math.log(grammar_prob)) + '\n')
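# A minimal sketch of the normalize() helper used above: rescale the category
# weights so they sum to one. It assumes the Model object iterates like a dict
# of category -> probability; the in-place update is an assumption, which is
# why the name carries a _sketch suffix.
def normalize_sketch(dist):
    total = sum(dist[cat] for cat in dist)
    if total > 0:
        for cat in dist:
            dist[cat] = dist[cat] / total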
def __init__(self, topology, bead_repr=None):
    """Structure-based Model (SBM)

    Parameters
    ----------
    topology : mdtraj.Topology object
        An mdtraj Topology object that describes the molecular topology.
    bead_repr : str [CA, CACB]
        A code specifying the desired all-atom to coarse-grain mapping.

    Methods
    -------
    assign_* :
        Methods assign which atoms have bonded constraints
        (angle potentials, dihedral, etc.)
    add_* :
        Methods add potentials to the Hamiltonian.
    """
    Model.__init__(self, topology, bead_repr=bead_repr)
    self.Hamiltonian = potentials.StructureBasedHamiltonian()
    self.mapping.add_atoms()
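# A minimal construction sketch for the structure-based model above, assuming
# the enclosing class is named SBM. The PDB filename is a placeholder;
# mdtraj.load(...).topology is the standard way to obtain the mdtraj.Topology
# object the constructor expects.
import mdtraj as md

top = md.load("protein.pdb").topology   # placeholder input file
sbm = SBM(top, bead_repr="CA")          # CA mapping, per the docstring's [CA, CACB]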
def predict(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = pickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = pickle.load(f)
    with open(os.path.join(args.save_dir, 'labels.pkl'), 'rb') as f:
        labels = pickle.load(f)
    model = Model(saved_args, deterministic=True)
    with open(args.data_path, 'r') as f:
        reader = csv.reader(f)
        texts = list(reader)
        texts = map(lambda i: i[0], texts)
        x = map(lambda i: transform(i.strip().decode('utf8'), saved_args.seq_length, vocab), texts)
    with tf.Session() as sess:
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        start = time.time()
        results = model.predict_label(sess, labels, x)
        end = time.time()
        print 'prediction costs time: ', end - start
    with open(args.result_path, 'w') as f:
        writer = csv.writer(f)
        writer.writerows(zip(texts, results))
def update_ststc_customer_shop(self):
    query = """REPLACE INTO ststc_customer_shop(seller_nick, buyer_nick, costs, buy_times, last_buy_time)
               SELECT seller_nick, buyer_nick,
                      SUM(costs) AS costs,
                      SUM(buy_times) AS buy_times,
                      MAX(last_buy_time) AS last_buy_time
               FROM ((SELECT r.nick AS seller_nick, c.nick AS buyer_nick,
                             SUM(r.auctionPrice) AS costs,
                             COUNT(r.uid) AS buy_times,
                             MAX(`date`) AS last_buy_time
                      FROM rate_list AS r
                      LEFT JOIN customer AS c ON r.uid = c.uid
                      WHERE c.nick = %s)
                     UNION
                     (SELECT r.nick AS seller_nick, c.nick AS buyer_nick,
                             SUM(r.price) AS costs,
                             COUNT(r.uid) AS buy_times,
                             MAX(`date`) AS last_buy_time
                      FROM rate_list_past AS r
                      LEFT JOIN customer AS c ON r.uid = c.uid
                      WHERE c.nick = %s)) AS r"""
    Model.execute_db(query, self.nick, self.nick)
def main():
    modelfile = sys.argv[1]
    m = Model(modelfile)
    # Below is the identity rotation array.
    #rot_arr = [1,0,0, 0,1,0, 0,0,1]
    # Below is the correct rotation matrix for JWH t3 icofrac to get it into the orientation in his PRL paper.
    #rot_arr = [-0.031777, 0.998843, 0.036102, 0.986602, 0.025563, 0.161133, 0.160023, 0.040739, -0.986272]
    # Below is a (the?) rotation matrix for JWH t1 icofrac.
    #rot_arr = [0.954646, -0.233932, 0.184194, 0.280650, 0.913581, -0.294287, -0.099433, 0.332633, 0.937800]
    # Below is a (the?) rotation matrix of Pei's t1 that gives some planes. Oriented for a specific plane ~.
    #rot_arr = [-0.977103, -0.123352, -0.173361, -0.130450, 0.990997, 0.030118, 0.168085, 0.052043, -0.984398]
    # Below is a (the?) rotation matrix of Pei's t2 that gives some planes. Oriented for a specific plane ~.
    rot_arr = [0.985478, -0.010230, -0.169493, 0.009247, 0.999936, -0.006586, 0.169549, 0.004923, 0.985509]
    # Below is a (the?) rotation matrix of Pei's t3 that gives some planes. Oriented for a specific plane ~.
    #rot_arr = [0.981624, -0.002765, -0.190808, -0.003436, 0.999477, -0.032163, 0.190797, 0.032228, 0.981100]
    npra = np.asarray(rot_arr)
    rot(m, npra)
    # Write cif file to screen
    #m.write_cif()
    m.write_our_xyz()
def run(num_agents, num_items, prefs, dup_values):
    # Randomly generate some data for N agents and M items
    if prefs.dist_type == DistTypes.urand_int:
        m = Model.generate_urand_int(num_agents, num_items, dup_values)
    elif prefs.dist_type == DistTypes.urand_real:
        m = Model.generate_urand_real(num_agents, num_items, dup_values)
    elif prefs.dist_type == DistTypes.zipf_real:
        m = Model.generate_zipf_real(num_agents, num_items, 2., dup_values)
    elif prefs.dist_type == DistTypes.polya_urn_real:
        m = Model.generate_polya_urn_real(num_agents, num_items, 2, 1)
    elif prefs.dist_type == DistTypes.correlated_real:
        m = Model.generate_correlated_real(num_agents, num_items)
    else:
        raise Exception("Distribution type {0} is not recognized.".format(prefs.dist_type))

    # Do our bounding at the root to check for naive infeasibility
    #is_possibly_feasible, bounding_s = bounds.max_contested_feasible(m)
    #if not is_possibly_feasible:
    #    print "Bounded infeasible!"
    #    sys.exit(-1)

    # Compute an envy-free allocation (if it exists)
    stats = allocator.allocate(m, prefs)
    return stats
def sim_channels_2_model(sim_data, sim_key, group_key):
    # instantiate an empty model
    model = Model(sim_key, objectives=[], meta={})
    #debug_p('model id ' + str(model.get_model_id()))
    #debug_p('model num objectives before adding obj' + str(len(model.get_objectives())))

    # add meta data to model
    model.set_meta({'sim_id': sim_data['sim_id'],
                    'sim_meta': sim_data['meta'],
                    'sim_key': sim_key,
                    'group_key': group_key})
    for channel_code in objectives_channel_codes:
        # add model objective
        m_points = []
        for sample_point in channels_sample_points[channel_code]:
            m_points.append(sim_data[channel_code][sample_point])
        # assuming equal weights of objectives
        #debug_p('adding obj to simulation ' + str(sim_key))
        model.add_objective(channel_code, m_points)
        #debug_p('model num objectives after adding obj' + str(len(model.get_objectives())))
    return model
def main():
    print("""
    skim Copyright (C) 2014 Tanay PrabhuDesai
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.
    \n\n""")
    mdl = Model(20, 5)
    print("Model?(y/n):")
    choice = input()
    if choice != 'n' and choice != 'N':
        mdl.load_map()
        mdl.load_model_base()
    text = open('./raw_input_1/sherlock.txt', 'r')
    strings = text.readlines()
    text.close()
    count = 1
    for string in strings:
        mdl.insert_string(string)
        print("Inserted line: " + str(count))
        count += 1
    mdl.save_map()
    print("Start typing: ")
    while True:
        inp = input()
        if inp == "":
            break
        print("Predictions:")
        print(mdl.search_string(inp, 5))
class Train(object):
    def __init__(self, use_elmo=False, finetune_glove=False):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        self.use_elmo = use_elmo
        self.finetune_glove = finetune_glove
        time.sleep(15)

        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)

        self.summary_writer = tf.compat.v1.summary.FileWriter(train_dir)

    def save_model(self, running_avg_loss, iter):
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        torch.save(state, model_save_path)

    def setup_train(self, model_file_path=None):
        self.model = Model(
            vocab=self.vocab,
            model_file_path=model_file_path,
            is_eval=False,
            use_elmo=self.use_elmo,
            finetune_glove=self.finetune_glove,
        )
        params = list(self.model.encoder.parameters()) + \
                 list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr,
                                 initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            state = torch.load(model_file_path, map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss

    def train_one_batch(self, batch):
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)

        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)

        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(
                y_t_1, s_t_1, encoder_outputs, encoder_feature, enc_padding_mask,
                c_t_1, extra_zeros, enc_batch_extend_vocab, coverage, di)
            target = target_batch[:, di]
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)

        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses / dec_lens_var
        loss = torch.mean(batch_avg_loss)
        loss.backward()

        self.norm = clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)

        self.optimizer.step()
        return loss.item()

    def trainIters(self, n_iters, model_file_path=None):
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            # print("iteration", iter)
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter)
            iter += 1
            if iter % 10000 == 0:
                self.summary_writer.flush()
            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' %
                      (iter, print_interval, time.time() - start, loss))
                logging.info('steps %d, seconds for %d batch: %.2f , loss: %f' %
                             (iter, print_interval, time.time() - start, loss))
                start = time.time()
            if iter % 10000 == 0:
                self.save_model(running_avg_loss, iter)
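# A minimal launch sketch for the Train class above. The iteration count is a
# hypothetical placeholder; the original presumably takes it from config or CLI.
trainer = Train(use_elmo=False, finetune_glove=False)
trainer.trainIters(500000)  # placeholder n_iters; pass model_file_path to resume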
assert parameters['tag_scheme'] in ['iob', 'iobes']
assert not parameters['all_emb'] or parameters['pre_emb']  # one of the two needs to be false
assert not parameters['pre_emb'] or parameters['word_dim'] > 0
assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])

# Check evaluation script / folders
if not os.path.isfile(eval_script):
    raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
if not os.path.exists(eval_temp):
    os.makedirs(eval_temp)
if not os.path.exists(models_path):
    os.makedirs(models_path)

# Initialize model
model = Model(parameters=parameters, models_path=models_path)
print "Model location: %s" % model.model_path

# Data parameters
lower = parameters['lower']
zeros = parameters['zeros']
tag_scheme = parameters['tag_scheme']

# Load sentences
train_sentences = loader.load_sentences(opts.train, lower, zeros)
dev_sentences = loader.load_sentences(opts.dev, lower, zeros)
test_sentences = loader.load_sentences(opts.test, lower, zeros)

# Use selected tagging scheme (IOB / IOBES)
update_tag_scheme(train_sentences, tag_scheme)
update_tag_scheme(dev_sentences, tag_scheme)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9
with tf.Session(config=config) as sess:
    timestamp = str(int(time.time()))
    #print("timestamp: ", time.asctime(time.localtime(time.time())))
    print("timestamp: ", timestamp)
    model_name = "lr{}_bz{}_mg{}_{}".format(FLAGS.learning_rate, FLAGS.batch_size,
                                            FLAGS.margin, timestamp)
    model = Model(max_query_word=FLAGS.query_len_threshold,
                  max_doc_word=FLAGS.doc_len_threshold,
                  num_docs=2,
                  word_vec_initializer=word_vectors,
                  batch_size=FLAGS.batch_size,
                  vocab_size=vocab_size,
                  embedding_size=FLAGS.embedding_dim,
                  learning_rate=FLAGS.learning_rate,
                  filter_size=FLAGS.filter_size,
                  keep_prob=FLAGS.keep_prob)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    sess.run(init)
    checkpoint_dir = './save_6000/'
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir=checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print("save file does not exist")
from model import Model

model = Model(0.9)
model.add(2)
model.add(10)
model.add(2)

# XOR-like training set: class "1" when the two inputs differ, "0" otherwise
trainSet = [[[0, 1], [1, 0]],
            [[1, 0], [1, 0]],
            [[0, 0], [0, 1]],
            [[1, 1], [0, 1]]]
model.train(trainSet, 7000)

test = [[0, 1], [1, 0], [0, 0], [1, 1], [0.5, 0.5], [1, 0.5], [0.5, 1], [1, 0.2]]
for i in range(len(test)):
    res = model.run(test[i])
    diff = res[0] - res[1]
    if diff > 0:
        print('Return 1 ' + str(test[i]) + ' ' + str(int(res[0] * 100)) + '%')
    else:
        print('Return 0 ' + str(test[i]) + ' ' + str(int(res[1] * 100)) + '%')
def draw(self):
    pygame.draw.aaline(gameDisplay, model.RED, self.pos, self.prevPos, 3)

def move(self):
    self.life -= 1
    newPos = (self.pos[0] + (self.move_vector[0] * model.BULLET_SPEED),
              self.pos[1] + (self.move_vector[1] * model.BULLET_SPEED))
    self.prevPos = self.pos
    self.pos = newPos


pygame.init()
myfont = pygame.font.SysFont("monospace", 15)
model = Model.Model()
player = playerShip((model.DISPLAY_W / 2, model.DISPLAY_H / 2))
gameDisplay = pygame.display.set_mode((model.DISPLAY_W, model.DISPLAY_H))
pygame.display.set_caption('ASSTEROIDS')
ticker = pygame.time.Clock()
debug = False
if len(sys.argv) > 1:
    if sys.argv[1] == 'debug':
        debug = True
game_running = True
bullets = []
learning_rate = 1e-4
batch_size = 1
epochs = 200
seq_len = 75

f = open(log_file, 'w')
f.close()
setup_gpu(gpu_id)

ft = fastText.load_model('/home1/zishan/WordEmbeddings/FastText/wiki.hi.bin')
labels2Idx = {'SADNESS': 0, 'FEAR/ANXIETY': 1, 'SYMPATHY/PENSIVENESS': 2, 'JOY': 3,
              'OPTIMISM': 4, 'NO-EMOTION': 5, 'DISGUST': 6, 'ANGER': 7, 'SURPRISE': 8}
train_generator = Generator(train_file, ft, labels2Idx, emotions_dictionary)
test_generator = Generator(test_file, ft, labels2Idx, emotions_dictionary)

model = Model(labels2Idx)
model = model.cuda()
optimiser = torch.optim.Adam([i for i in model.parameters() if i.requires_grad],
                             lr=learning_rate)
labels = [i for i, j in labels2Idx.items()]
# custom_loss = MY_LOSS()

best_f1_english = 0
best_f1_hindi = 0
best_report_english = ""
best_report_hindi = ""
best_epoch_hindi = 0
best_epoch_english = 0
def main():
    # Dataset path
    train_list = 'video_train_list_middle_cam0.txt'  # train: cam0 + cam1 (defined in dataset.py), test: cam1, val: cam0
    test_list = 'video_test_list_middle_cam1.txt'
    val_list = 'video_val_list_middle_cam0.txt'

    # Learning params
    training_iters = 10000  # 10 epochs
    batch_size = 16
    display_step = 100
    n_hidden = 64
    sequence = 20
    im_height = 112
    im_width = 112
    n_classes = 10
    keep_rate = 0.5
    learning_rate = 0.001

    x = tf.placeholder(tf.float32, [batch_size, sequence, im_height, im_width, 3])
    target_x = tf.placeholder(tf.float32, [batch_size, sequence, im_height, im_width, 3])
    y = tf.placeholder(tf.float32, [None, n_classes])
    keep_var = tf.placeholder(tf.float32)

    input_imgs = tf.reshape(x, [-1, im_height, im_width, 3])
    input_imgs = input_imgs * 255
    tf.summary.image("input frames", input_imgs, 20)

    with tf.variable_scope("model") as scope:
        pred, encoded = Model.alexnet_encoder(x, keep_var, n_classes, n_hidden * 2)
        output_tensor, generated_imgs = Model.decoder(encoded[:, :n_hidden])
        output_imgs = tf.reshape(output_tensor, [-1, im_height, im_width, 3])
        output_imgs = output_imgs * 255
        tf.summary.image("generated frames", output_imgs, 20)

    with tf.variable_scope("model_cross") as scope:
        target_x_reshape = tf.reshape(target_x, [-1, 112, 112, 3])
        pred_cross, encoded_cross = Model.alexnet_encoder(
            tf.concat(0, [target_x_reshape, generated_imgs]), keep_var, n_classes, n_hidden * 2)
    with tf.variable_scope("model_cross", reuse=True) as scope:
        pred_cross_gen, encoded_cross_gen = Model.alexnet_encoder(
            generated_imgs, keep_var, n_classes, n_hidden * 2)
        _, encoded_cross_test = Model.alexnet_encoder(target_x, keep_var, n_classes, n_hidden * 2)

    # cross_entropy_loss_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred_cross_gen, y))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(pred_cross, tf.concat(0, [y, y])))
    x_flattened = tf.reshape(target_x, [-1, 112 * 112 * 3])
    rec_loss = Model.get_reconstruction_cost(x_flattened, output_tensor)
    # rec_loss_feature = Model.get_reconstruction_cost_feature(encoded_cross_gen, encoded_cross)
    tf.summary.scalar('rec_loss_loss', rec_loss)
    # tf.summary.scalar('rec_loss_feature_loss', rec_loss_feature)
    tf.summary.scalar('cross_entropy_loss_1', cross_entropy_loss)
    # tf.summary.scalar('cross_entropy_loss_2', cross_entropy_loss_2)
    # tf.summary.scalar('perception_loss_loss', Perception_loss)
    # tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)

    loss = 10 * cross_entropy_loss + rec_loss
    optimizer = layers.optimize_loss(
        loss, tf.contrib.framework.get_or_create_global_step(),
        learning_rate=learning_rate, optimizer='Adam', update_ops=[])

    # Evaluation
    correct_pred = tf.equal(tf.argmax(pred_cross, 1), tf.argmax(tf.concat(0, [y, y]), 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    merged = tf.summary.merge_all()

    # Init
    init = tf.initialize_all_variables()

    # Load dataset
    dataset = Dataset(train_list, test_list, val_list, n_classes=n_classes,
                      shuffleType='seq', seqLength=sequence, CNN_type='Alex')
    saver_all = tf.train.Saver()

    # Launch the graph
    with tf.Session() as sess:
        print 'Loading weights'
        sess.run(init)
        with tf.variable_scope("model") as scope:
            load_with_skip('bvlc_alexnet.npy', sess, ['fc8'])  # Skip weights from fc8
        with tf.variable_scope("model_cross") as scope:
            load_with_skip('bvlc_alexnet.npy', sess, ['fc8'])
        train_writer = tf.summary.FileWriter('./logs/train', sess.graph)
        print 'Start training'
        step = 1
        epoch = 1
        while step < training_iters:
            batch_xs, batch_ys, _, target_batch_tx = dataset.next_batch_cross(batch_size, 'train')
            sess.run(optimizer, feed_dict={x: batch_xs, target_x: target_batch_tx,
                                           y: batch_ys, keep_var: keep_rate})
            # train_writer.add_summary(output_merged, step)
            # Display training status
            if step % display_step == 0:
                acc, output_merged = sess.run([accuracy, merged],
                                              feed_dict={x: batch_xs, target_x: target_batch_tx,
                                                         y: batch_ys, keep_var: 1.})
                train_writer.add_summary(output_merged, step)
                batch_loss = sess.run(loss, feed_dict={x: batch_xs, target_x: target_batch_tx,
                                                       y: batch_ys, keep_var: 1.})
                print >> sys.stderr, "Iter {}, Epoch {}: Training Loss = {:.4f}, Accuracy = {:.4f}, Learning Rate = {:.4f}".format(step, epoch, batch_loss, acc, learning_rate)
                epoch += 1

                val_feature = np.zeros((batch_size, n_hidden * 2 * sequence), int)
                val_label = np.zeros((batch_size, 1), int)
                for _ in range(int(dataset.val_size / batch_size)):
                    batch_tx, batch_ty, label_ty, batch_target = dataset.next_batch_cross(batch_size, 'val')
                    output_feature = sess.run(encoded_cross_gen,
                                              feed_dict={x: batch_tx, target_x: batch_target,
                                                         y: batch_ty, keep_var: 1.})
                    output_feature = output_feature.reshape(batch_size, sequence, n_hidden * 2)
                    output_feature = output_feature.reshape(batch_size, sequence * n_hidden * 2)
                    val_feature = np.concatenate((val_feature, output_feature))
                    val_label = np.concatenate((val_label, label_ty))
                val_feature = val_feature[8:, :]
                val_label = val_label[8:, :]

                test_feature = np.zeros((batch_size, n_hidden * 2 * sequence), int)
                test_label = np.zeros((batch_size, 1), int)
                for _ in range(int(dataset.test_size / batch_size)):
                    batch_tx, batch_ty, label_ty, batch_target = dataset.next_batch_cross(batch_size, 'test')
                    output_feature = sess.run(encoded_cross_test,
                                              feed_dict={x: batch_tx, target_x: batch_target,
                                                         y: batch_ty, keep_var: 1.})
                    output_feature = output_feature.reshape(batch_size, sequence, n_hidden * 2)
                    output_feature = output_feature.reshape(batch_size, sequence * n_hidden * 2)
                    test_feature = np.concatenate((test_feature, output_feature))
                    test_label = np.concatenate((test_label, label_ty))
                test_feature = test_feature[8:, :]
                test_label = test_label[8:, :]

                clf = svm.SVC()
                val_label = np.ravel(val_label)
                test_label = np.ravel(test_label)
                # print test_label
                clf.fit(val_feature, val_label)
                # NOTE: this bare SVC(...) call constructs a classifier and discards it;
                # it has no effect on clf above.
                SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
                    decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
                    max_iter=-1, probability=False, random_state=None, shrinking=True,
                    tol=0.001, verbose=False)
                predicted = clf.predict(test_feature)
                print accuracy_score(test_label, predicted)
            step += 1
        print "Finish!"
        save_path = saver_all.save(sess, "save_model_cross_test/finetuned_cross_loss.ckpt")
        print("Model saved in file: %s" % save_path)
# -*- coding: utf-8 -*-
'''
Invokes the model for prediction.
'''
from flyai.dataset import Dataset
from model import Model

data = Dataset()
model = Model(data)
p = model.predict(image_path='images/091.Mockingbird/Mockingbird_0087_79600.jpg')
print(p)
def inference_bucket(config):
    """Inference over buckets of lines read from stdin.
    """
    # create model and compile
    model = Model(config)
    model.compile()
    sess = model.sess
    # restore model
    saver = tf.train.Saver()
    saver.restore(sess, config.restore)
    sys.stderr.write('model restored' + '\n')
    '''
    print(tf.global_variables())
    print(tf.trainable_variables())
    '''
    num_buckets = 0
    total_duration_time = 0.0
    bucket = []
    while 1:
        try:
            line = sys.stdin.readline()
        except KeyboardInterrupt:
            break
        if not line:
            break
        line = line.strip()
        if not line and len(bucket) >= 1:
            # a blank line terminates the current bucket: tag it and flush
            start_time = time.time()
            inp, feed_dict = build_input_feed_dict(model, bucket)
            logits_indices, sentence_lengths = sess.run(
                [model.logits_indices, model.sentence_lengths], feed_dict=feed_dict)
            tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
            for i in range(len(bucket)):
                if 'bert' in config.emb_class:
                    j = inp.example['bert_wordidx2tokenidx'][0][i]
                    out = bucket[i] + ' ' + tags[j]
                else:
                    out = bucket[i] + ' ' + tags[i]
                sys.stdout.write(out + '\n')
            sys.stdout.write('\n')
            bucket = []
            duration_time = time.time() - start_time
            out = 'duration_time : ' + str(duration_time) + ' sec'
            tf.logging.info(out)
            num_buckets += 1
            total_duration_time += duration_time
        if line:
            bucket.append(line)
    if len(bucket) != 0:
        # flush the final bucket (input ended without a trailing blank line)
        start_time = time.time()
        inp, feed_dict = build_input_feed_dict(model, bucket)
        logits_indices, sentence_lengths = sess.run(
            [model.logits_indices, model.sentence_lengths], feed_dict=feed_dict)
        tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
        for i in range(len(bucket)):
            if 'bert' in config.emb_class:
                j = inp.example['bert_wordidx2tokenidx'][0][i]
                out = bucket[i] + ' ' + tags[j]
            else:
                out = bucket[i] + ' ' + tags[i]
            sys.stdout.write(out + '\n')
        sys.stdout.write('\n')
        duration_time = time.time() - start_time
        out = 'duration_time : ' + str(duration_time) + ' sec'
        tf.logging.info(out)
        num_buckets += 1
        total_duration_time += duration_time
    out = 'total_duration_time : ' + str(total_duration_time) + ' sec' + '\n'
    out += 'average processing time / bucket : ' + str(total_duration_time / num_buckets) + ' sec'
    tf.logging.info(out)
    sess.close()
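# Expected stdin format, inferred from the loop above: one token line per input
# token, with buckets (sentences) separated by a blank line; each token line is
# echoed back with the predicted tag appended.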
date_str = now.strftime("%Y-%m-%d_%H-%M-%S/")
dir_name = 'seed_{}/'.format(args.seed)
path = './data/' + args.method + '/' + args.env + '/' + dir_name
if not os.path.exists(path):
    os.makedirs(path)

action_dim = env.action_space.shape[0]
state_dim = env.observation_space.shape[0]

device = 'cpu'
print(torch.cuda.is_available())
if torch.cuda.is_available():
    device = 'cuda:0'
    print('Using GPU Accel')

model = Model(state_dim, action_dim, def_layers=[200]).to(device)
# model = MDNModel(state_dim, action_dim, def_layers=[200, 200])

replay_buffer_size = 100000
model_replay_buffer = SARSAReplayBuffer(replay_buffer_size)
model_optim = ModelOptimizer(model, model_replay_buffer, lr=args.model_lr)
# model_optim = MDNModelOptimizer(model, replay_buffer, lr=args.model_lr)

methods = {'ilqr': iLQR, 'shooting': ShootingMethod, 'mppi': MPPI}
# mpc_planner = iLQR(model, T=args.horizon)
# mpc_planner = ShootingMethod(model, T=args.horizon)
# mpc_planner = MPPI(model, T=args.horizon)
mpc_planner = methods[args.method](model, T=args.horizon)

max_frames = args.max_frames
class LogViewer(QtWidgets.QMainWindow, Ui_MainWindow):
    # -----------------------------------------------------------------------------
    def __init__(self, argv):
        logging.basicConfig(filename='trace.log', level=logging.DEBUG)
        self.changeDetection = False
        self.app = QtWidgets.QApplication(argv)
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.model = Model()
        self.setupUi(self)
        self.lineEdit_XSD.setText("./xml/xsd/sentinel.xsd")
        self.pb_SearchDir.clicked.connect(self.on_pb_SearchDir)
        self.treeViewDirectory.clicked.connect(self.on_treeViewDirectoryClicked)
        self.treeViewDirectory.doubleClicked.connect(self.on_treeViewDirectoryDoubleClicked)
        self.pb_reload.clicked.connect(self.on_pb_reloadClicked)
        self.pb_valid.clicked.connect(self.on_pb_validClicked)
        self.checkBox_syntaxHightlighting.clicked.connect(self.on_checkBox_syntaxHightlighting)
        self.lineEdit_goto.setValidator(QtGui.QIntValidator(0, 2147483647))
        self.pushButton_goto.clicked.connect(self.scrollToLine)
        self.actionExit.triggered.connect(self.on_quit)

    # -----------------------------------------------------------------------------
    def on_pb_SearchDir(self):
        ''' Called when the user presses the Browse button '''
        logging.debug("Browse button pressed")
        options = QtWidgets.QFileDialog.Options()
        options |= QtWidgets.QFileDialog.DontUseNativeDialog
        directoryName = QtWidgets.QFileDialog.getExistingDirectory(
            None, "Select Output Folder", "", options=options)
        if directoryName:
            logging.debug("setting directory name: " + directoryName)
            self.model.setDirectoryName(directoryName)
            self.refreshDir()
            if self.changeDetection == True:
                self._pathToWatch = directoryName
                self._fileSysWatcher = QFileSystemWatcher()
                self._initialContent = os.listdir(self._pathToWatch)
                for f in self._initialContent:
                    if f != "trace.log":
                        self._fileSysWatcher.addPath(f)
                self._fileSysWatcher.fileChanged.connect(self.slotDirChanged)
                self._fileSysWatcher.directoryChanged.connect(self.slotDirChanged)

    def on_pb_reloadClicked(self):
        self.refreshDir()

    def on_pb_validClicked(self):
        self.validateXML2(self.model.getFileName(), self.lineEdit_XSD.text())

    def slotDirChanged(self, path):
        logging.debug("Detected Change!!" + path)

    def on_treeViewDirectoryClicked(self, index):
        path = self.sender().model().filePath(index)
        logging.debug("on_treeViewDirectoryClicked on: " + path)
        self.model.setFileName(path)
        self.refreshAll()

    def on_treeViewDirectoryDoubleClicked(self, index):
        path = self.sender().model().filePath(index)
        logging.debug("on_treeViewDirectoryDoubleClicked on: " + path)
        if os.path.isfile(path):
            subprocess.Popen(["uex", path])

    def on_checkBox_syntaxHightlighting(self):
        if self.checkBox_syntaxHightlighting.isChecked():
            self.highlighter = Highlighter(self.textEdit.document())
        else:
            self.highlighter = None
        if self.model.getFileName() != None:
            self.refreshAll()

    def scrollToLine(self):
        if self.lineEdit_goto.text() != '':
            cursor = QtGui.QTextCursor(
                self.textEdit.document().findBlockByLineNumber(int(self.lineEdit_goto.text()) - 1))
            format = QtGui.QTextBlockFormat()
            format.setBackground(Qt.yellow)
            cursor.setBlockFormat(format)
            self.textEdit.moveCursor(QtGui.QTextCursor.End)
            self.textEdit.setTextCursor(cursor)

    def on_directoryChanged(self, index):
        path = self.sender().model().filePath(index)
        logging.debug("on_fileChanged on: " + path)

    # -----------------------------------------------------------------------------
    def refreshDir(self):
        directory = self.model.getDirectoryName()
        self.lineEdit_SearchDir.setText(directory)
        fileSystemModel = QFileSystemModel()
        fileSystemModel.setRootPath('')
        self.treeViewDirectory.setModel(fileSystemModel)
        self.treeViewDirectory.setRootIndex(fileSystemModel.index(directory))
        self.treeViewDirectory.setAnimated(False)
        self.treeViewDirectory.setIndentation(20)
        self.treeViewDirectory.setSortingEnabled(True)
        self.treeViewDirectory.sortByColumn(0, Qt.AscendingOrder)
        self.treeViewDirectory.setWindowTitle("Dir View")
        self.treeViewDirectory.selectionModel().currentChanged.connect(self.on_treeViewDirectoryClicked)

    # -----------------------------------------------------------------------------
    def refreshAll(self):
        ''' Updates the widgets whenever an interaction happens.
        Typically some interaction takes place, the UI responds, and informs
        the model of the change. Then this method is called, pulling from the
        model information that is updated in the GUI. '''
        self.lineEdit_SearchDir.setText(self.model.getDirectoryName())
        self.lineEdit.setText(self.model.getFileName())
        self.textEdit.setText(self.model.getFileContents())

    # -----------------------------------------------------------------------------
    def validateXML2(self, xml_path: str, xsd_path: str):
        if xml_path == None:
            self.textEdit_XSD.setText("No file selected")
            return
        if xsd_path == None:
            self.textEdit_XSD.setText("No xsd selected")
            return
        try:
            with open(xsd_path, 'rb') as f:
                schema_root = etree.XML(f.read())
            schema = etree.XMLSchema(schema_root)
            xmlparser = etree.XMLParser(schema=schema)
            with open(xml_path, 'rb') as f:
                etree.fromstring(f.read(), xmlparser)
        except etree.XMLSyntaxError as e:
            logging.debug(e)
            for error in e.error_log:
                self.textEdit_XSD.setText("ERROR ON LINE %s: %s" % (error.line, error.message.encode("utf-8")))
            return False
        except etree.DocumentInvalid as e:
            logging.debug(e)
            for error in e.error_log:
                self.textEdit_XSD.setText("ERROR ON DOCUMENT ERROR LINE %s: %s" % (error.line, error.message.encode("utf-8")))
            return False
        except etree.XMLSchemaParseError as e:
            logging.debug(e)
            for error in e.error_log:
                self.textEdit_XSD.setText("ERROR ON PARSING FILE AT LINE %s: %s" % (error.line, error.message.encode("utf-8")))
            return False
        except:
            logging.debug('Something strange...')
            self.textEdit_XSD.setText('Something strange...')
            return False
        else:
            self.textEdit_XSD.setText("Success")
            return True

    # -----------------------------------------------------------------------------
    def on_quit(self, q):
        self.app.quit()

    # -----------------------------------------------------------------------------
    def start_gui(self):
        self.show()
        self.app.exec_()
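# A minimal launch sketch for the LogViewer above; the __main__ guard is an
# addition, but the constructor signature and start_gui() come from the class itself.
if __name__ == '__main__':
    import sys
    LogViewer(sys.argv).start_gui()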
def main():
    """
    Create the model and start the training
    """
    # Get the CL arguments
    args = get_arguments()

    # Check if the network architecture is valid
    if args.arch not in VALID_ARCHS:
        raise ValueError("Network architecture %s is not supported!" % (args.arch))

    # Check if the method to compute importance is valid
    if args.imp_method not in MODELS:
        raise ValueError("Importance measure %s is undefined!" % (args.imp_method))

    # Check if the optimizer is valid
    if args.optim not in VALID_OPTIMS:
        raise ValueError("Optimizer %s is undefined!" % (args.optim))

    # Create log directories to store the results
    if not os.path.exists(args.log_dir):
        print('Log directory %s created!' % (args.log_dir))
        os.makedirs(args.log_dir)

    # Generate the experiment key and store the meta data in a file
    exper_meta_data = {
        'DATASET': 'SPLIT_MNIST',
        'NUM_RUNS': args.num_runs,
        'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
        'IMP_METHOD': args.imp_method,
        'SYNAP_STGTH': args.synap_stgth,
        'FISHER_EMA_DECAY': args.fisher_ema_decay,
        'FISHER_UPDATE_AFTER': args.fisher_update_after,
        'OPTIM': args.optim,
        'LR': args.learning_rate,
        'BATCH_SIZE': args.batch_size,
        'MEM_SIZE': args.mem_size
    }
    experiment_id = "SPLIT_MNIST_META_%s_%s_%r_%s-" % (
        args.imp_method, str(args.synap_stgth).replace('.', '_'),
        str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
    snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)

    # Get the subset of data depending on training or cross-validation mode
    if args.online_cross_val:
        num_tasks = K_FOR_CROSS_VAL
    else:
        num_tasks = args.num_tasks - K_FOR_CROSS_VAL

    # Load the dataset
    data_labs = [np.arange(TOTAL_CLASSES)]
    datasets = construct_split_mnist(data_labs)

    # Variables to store the accuracies and standard deviations of the experiment
    acc_mean = dict()
    acc_std = dict()

    # Reset the default graph
    tf.reset_default_graph()
    graph = tf.Graph()
    with graph.as_default():
        # Set the random seed
        tf.set_random_seed(args.random_seed)

        # Define Input and Output of the model
        x = tf.placeholder(tf.float32, shape=[None, INPUT_FEATURE_SIZE])
        #x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
        learning_rate = tf.placeholder(dtype=tf.float32, shape=())
        if args.imp_method == 'PNN':
            y_ = []
            for i in range(num_tasks):
                y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
        else:
            y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])

        # Define the optimizer
        if args.optim == 'ADAM':
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
        elif args.optim == 'SGD':
            opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        elif args.optim == 'MOMENTUM':
            #base_lr = tf.constant(args.learning_rate)
            #learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - model.train_step / training_iters), OPT_POWER))
            opt = tf.train.MomentumOptimizer(learning_rate, OPT_MOMENTUM)

        # Create the Model / construct the graph
        model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth,
                      args.fisher_update_after, args.fisher_ema_decay,
                      learning_rate, network_arch=args.arch)

    # Set up tf session and initialize variables.
    if USE_GPU:
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
    else:
        config = tf.ConfigProto(device_count={'GPU': 0})

    time_start = time.time()
    with tf.Session(config=config, graph=graph) as sess:
        runs = train_task_sequence(model, sess, datasets, args)
        # Close the session (redundant inside the context manager, but harmless)
        sess.close()
    time_end = time.time()
    time_spent = time_end - time_start

    # Store all the results in one dictionary to process later
    exper_acc = dict(mean=runs)

    # If cross-validation flag is enabled, store the stuff in a text file
    if args.cross_validate_mode:
        acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
        fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
        cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_MNIST_%s_%s' % (
            args.imp_method, args.optim) + '.txt'
        with open(cross_validate_dump_file, 'a') as f:
            if MULTI_TASK:
                f.write('GPU:{} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(
                    USE_GPU, args.arch, args.learning_rate, args.synap_stgth,
                    acc_mean[-1, :].mean()))
            else:
                f.write('NUM_TASKS: {} \t EXAMPLES_PER_TASK: {} \t MEM_SIZE: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {} (+-{})\t Fgt: {} (+-{})\t QR: {}\t Time: {}\n'.format(
                    args.num_tasks, args.examples_per_task, args.mem_size,
                    args.arch, args.learning_rate, args.synap_stgth,
                    acc_mean, acc_std, fgt_mean, fgt_std, QR, str(time_spent)))

    # Store the experiment output to a file
    snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
        random_state=42)
    print("Total/Train/Dev: {:d}/{:d}/{:d}".format(data_len, len(y_train), len(y_dev)))

    # Training
    # ==================================================
    global_graph = tf.Graph()
    with global_graph.as_default():
        sess = tf.Session(graph=global_graph)
        with sess.as_default():
            cnn = Model(sequence_length=x_train.shape[1],
                        num_classes=y_train.shape[1],
                        vocab_size=vocsize,
                        batch_size=FLAGS.batch_size,
                        embedding_size=FLAGS.embedding_dim,
                        l2_reg_lambda=FLAGS.l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

            loss_summary = tf.summary.scalar('loss', cnn.loss)
            acc_summary = tf.summary.scalar('accuracy', cnn.accuracy)
            train_summary_op = tf.summary.merge([loss_summary, acc_summary])
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
def inference_line(config):
    """Inference for raw string.
    """
    def get_entity(doc, begin, end):
        for ent in doc.ents:
            # check included
            if ent.start_char <= begin and end <= ent.end_char:
                if ent.start_char == begin:
                    return 'B-' + ent.label_
                else:
                    return 'I-' + ent.label_
        return 'O'

    def build_bucket(nlp, line):
        bucket = []
        doc = nlp(line)
        for token in doc:
            begin = token.idx
            end = begin + len(token.text) - 1
            temp = []
            '''
            print(token.i, token.text, token.lemma_, token.pos_, token.tag_,
                  token.dep_, token.shape_, token.is_alpha, token.is_stop,
                  begin, end)
            '''
            temp.append(token.text)
            temp.append(token.tag_)
            temp.append('O')  # no chunking info
            entity = get_entity(doc, begin, end)
            temp.append(entity)  # entity by spacy
            temp = ' '.join(temp)
            bucket.append(temp)
        return bucket

    import spacy
    nlp = spacy.load('en')

    # create model and compile
    model = Model(config)
    model.compile()
    sess = model.sess

    # restore model
    saver = tf.train.Saver()
    saver.restore(sess, config.restore)
    tf.logging.info('model restored' + '\n')

    while True:
        try:
            line = sys.stdin.readline()
        except KeyboardInterrupt:
            break
        if not line:
            break
        line = line.strip()
        if not line:
            continue

        # create bucket
        try:
            bucket = build_bucket(nlp, line)
        except Exception as e:
            sys.stderr.write(str(e) + '\n')
            continue

        inp, feed_dict = build_input_feed_dict(model, bucket)
        logits_indices, sentence_lengths = sess.run(
            [model.logits_indices, model.sentence_lengths],
            feed_dict=feed_dict)
        tags = config.logit_indices_to_tags(logits_indices[0], sentence_lengths[0])
        for i in range(len(bucket)):
            if 'bert' in config.emb_class:
                j = inp.example['bert_wordidx2tokenidx'][0][i]
                out = bucket[i] + ' ' + tags[j]
            else:
                out = bucket[i] + ' ' + tags[i]
            sys.stdout.write(out + '\n')
        sys.stdout.write('\n')
    sess.close()
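# A minimal sketch of the BIO-tag derivation that get_entity above performs,
# run against a plain spaCy pipeline. The model name 'en_core_web_sm' and the
# sample sentence are assumptions, not taken from the code above.
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp("Apple is looking at buying a U.K. startup")
for token in doc:
    tag = 'O'
    for ent in doc.ents:
        # token inside an entity span: B- at the entity start, I- otherwise
        if ent.start_char <= token.idx < ent.end_char:
            tag = ('B-' if ent.start_char == token.idx else 'I-') + ent.label_
    print(token.text, tag)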
def test(opt):
    """ model configuration """
    if 'CTC' in opt.Prediction:
        if opt.baiduCTC:
            converter = CTCLabelConverterForBaiduWarpctc(opt.character)
        else:
            converter = CTCLabelConverter(opt.character)
    else:
        converter = AttnLabelConverter(opt.character)
    opt.num_class = len(converter.character)

    if opt.rgb:
        opt.input_channel = 3
    model = Model(opt)
    print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial,
          opt.input_channel, opt.output_channel, opt.hidden_size, opt.num_class,
          opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
          opt.SequenceModeling, opt.Prediction)
    # model = torch.nn.DataParallel(model, output_device=device).to(device)
    model = model.to(device)

    # load model
    print('loading pretrained model from %s' % opt.saved_model)
    state_dict = torch.load(opt.saved_model, map_location=device)
    # checkpoints saved from a DataParallel model prefix every key with
    # "module."; strip that prefix before loading into the bare model
    # (the original wrapped this in a pointless `if True:`)
    state_dict = {k[7:]: v for k, v in state_dict.items()}
    model.load_state_dict(state_dict)
    opt.exp_name = '_'.join(opt.saved_model.split('/')[1:])
    # print(model)

    """ keep evaluation model and result logs """
    os.makedirs(f'./result/{opt.exp_name}', exist_ok=True)
    os.system(f'cp {opt.saved_model} ./result/{opt.exp_name}/')

    """ setup loss """
    if 'CTC' in opt.Prediction:
        criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
    else:
        criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device)  # ignore [GO] token = ignore index 0

    """ evaluation """
    model.eval()
    with torch.no_grad():
        if opt.benchmark_all_eval:  # evaluation with 10 benchmark evaluation datasets
            benchmark_all_eval(model, criterion, converter, opt, calculate_infer_time=True)
        else:
            log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')
            AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW,
                                                   keep_ratio_with_pad=opt.PAD)
            eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)
            evaluation_loader = torch.utils.data.DataLoader(
                eval_data, batch_size=opt.batch_size,
                shuffle=False,
                num_workers=int(opt.workers),
                collate_fn=AlignCollate_evaluation, pin_memory=True)
            _, accuracy_by_best_model, _, _, _, _, _, _ = validation(
                model, criterion, evaluation_loader, converter, opt)
            log.write(eval_data_log)
            print(f'{accuracy_by_best_model:0.3f}')
            log.write(f'{accuracy_by_best_model:0.3f}\n')
            log.close()
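# A hedged sketch of a more defensive version of the "module." strip above:
# k[7:] silently corrupts keys that were saved without DataParallel, so it is
# safer to strip the prefix only when it is actually present. The helper name
# and the usage line are illustrative, not part of the code above.
def strip_module_prefix(state_dict):
    return {
        (k[len('module.'):] if k.startswith('module.') else k): v
        for k, v in state_dict.items()
    }

# Usage (hypothetical checkpoint path):
# model.load_state_dict(strip_module_prefix(torch.load(path, map_location=device)))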
parser.add_argument('-pos-len', help='Spatial length of expected training data', type=int, required=True)
parser.add_argument('-batch-size', help='Expected batch size of the input data, must match tfrecords', type=int, required=True)
args = vars(parser.parse_args())

(model_variables_prefix, model_config_json) = common.load_model_paths(args)
name_scope = args["name_scope"]
pos_len = args["pos_len"]
batch_size = args["batch_size"]

def log(s):
    print(s, flush=True)

with open(model_config_json) as f:
    model_config = json.load(f)

num_bin_input_features = Model.get_num_bin_input_features(model_config)
num_global_input_features = Model.get_num_global_input_features(model_config)

NUM_POLICY_TARGETS = 2
NUM_GLOBAL_TARGETS = 56
NUM_VALUE_SPATIAL_TARGETS = 1
EXTRA_SCORE_DISTR_RADIUS = 60
BONUS_SCORE_RADIUS = 30

log("Constructing validation input pipe")

def parse_tf_records_input(serialized_example):
    example = tf.parse_single_example(serialized_example, raw_input_features)
    binchwp = tf.decode_raw(example["binchwp"], tf.uint8)
    ginc = example["ginc"]
    ptncm = example["ptncm"]
    gtnc = example["gtnc"]
    _mode, _inp_img = "conditional", True
elif args.model == "cvae_cnn3":
    from model import CvaeCnn3 as Model
    _mode, _inp_img = "conditional", True
elif args.model == "cvae_cnn2":
    from model import CvaeCnn2 as Model
    _mode, _inp_img = "conditional", True
elif args.model == "cvae_fc3":
    from model import CvaeFc3 as Model
    _mode, _inp_img = "conditional", False
elif args.model == "vae":
    from model import VariationalAutoencoder as Model
    _mode, _inp_img = "unsupervised", False
elif args.model == "cnn":
    from model import CNN as Model
    _mode, _inp_img = "supervised", True
else:
    sys.exit("unknown model !")

# save_path is used for every model, so it is set after the dispatch chain
save_path = "./log/%s/" % (args.model)
opt["save_path"] = save_path

if _mode == "conditional":
    if args.rotate:
        opt["label_size"] = 1
    else:
        opt["label_size"] = 10

model = Model(**opt)
mnist_train(model=model, epoch=args.epoch, save_path=save_path,
            mode=_mode, input_image=_inp_img, rotate=args.rotate)
import argparse

import tensorflow as tf
from model import Model
from path import MODEL_PATH, LOG_PATH
import config
from utils import load_word2vec_embedding
import numpy as np

# Hyperparameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--EPOCHS", default=30, type=int, help="train epochs")
parser.add_argument("-b", "--BATCH", default=128, type=int, help="batch size")
args = parser.parse_args()

# Dataset helper class
dataset = Dataset(epochs=args.EPOCHS, batch=args.BATCH)
# Model helper class
modelpp = Model(dataset)

'''
Implement your own algorithm with TensorFlow
'''
# Get the training and test data
unit_num = config.embeddings_size  # By default the word-embedding size equals the number of units in the RNN (per time step) and the CNN (per column); to avoid confusion, the model uses unit_num throughout.
time_step = config.max_sequence  # The maximum sentence length equals time_step; to avoid confusion, the model uses time_step throughout.
DROPOUT_RATE = config.dropout
LEARN_RATE = config.leanrate
TAGS_NUM = config.label_len

# ------------------------- Define neural network variables -------------------------
class NER_net:
def train_and_eval(width, height, device, learning_rate, epochs, train_loader,
                   test_loader, font_to_label, print_every_k_batches, mode,
                   conv_filters=[32, 32, 64, 64], max_pooling=(2, 3), kernel=3):
    model = Model(width, height, mode=mode, conv_filters=conv_filters,
                  max_pooling=max_pooling, kernel=kernel)
    model.to(device)
    optimizer = Adam(model.parameters(), lr=learning_rate)
    losses = defaultdict(list)

    # Train
    model.train()
    for epoch in range(epochs):
        print(f"Epoch {epoch}")
        running_loss, epoch_loss = 0, 0
        start_time = time.time()
        for i, data in enumerate(train_loader):
            img1, _, img2, _, label = data
            img1, img2, label = (img1.type(torch.FloatTensor).to(device),
                                 img2.type(torch.FloatTensor).to(device),
                                 label.type(torch.FloatTensor).to(device))

            optimizer.zero_grad()
            prediction = model(img1, img2)
            loss = bce_loss(prediction, label)
            loss.backward()
            running_loss += loss.item()
            epoch_loss += loss.item()
            optimizer.step()
            losses[epoch].append(loss.item())

            if i % print_every_k_batches == print_every_k_batches - 1:
                print(f"  [{i+1:4d}] loss: {running_loss/print_every_k_batches:.3f} "
                      f"({round(time.time() - start_time, 3)} s)")
                running_loss = 0.0
                start_time = time.time()

        print('> Epoch: {} - Global average loss: {:.4f}\n'.format(
            epoch, epoch_loss / len(train_loader.dataset)))

    path_to_result_folder = os.path.join('results', str(time.time()))
    os.mkdir(path_to_result_folder)
    torch.save(model, os.path.join(path_to_result_folder, 'model'))
    with open(os.path.join(path_to_result_folder, "loss_history.json"), 'w') as f:
        json.dump(losses, f, indent=4)
    print(f"Saved in folder {path_to_result_folder}")

    # Test
    model.eval()
    predictions, labels, font_pairs = [], [], []
    for data in test_loader:
        img1, label1, img2, label2, label = data
        img1, img2, label = (img1.type(torch.FloatTensor).to(device),
                             img2.type(torch.FloatTensor).to(device),
                             label.type(torch.FloatTensor).to(device))
        predictions += model(img1, img2).detach().tolist()
        labels += label.detach().tolist()
        font_pairs += [(l1, l2) for l1, l2 in zip(label1.detach().tolist(),
                                                  label2.detach().tolist())]

    with open(os.path.join(path_to_result_folder, "predictions.json"), 'w') as f:
        json.dump(
            {
                "predictions": predictions,
                "labels": labels,
                "font_pairs": font_pairs,
                "font_to_label": font_to_label
            }, f, indent=4)

    accuracy = 1 - np.mean(np.abs(np.array(labels) - np.round(predictions)))
    print(f"\n> Test accuracy: {accuracy}")
    return accuracy
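# A small sketch of what the accuracy line above computes: with binary labels
# and sigmoid outputs, rounding predictions to {0, 1} makes |label - round(pred)|
# a 0/1 error indicator, so 1 minus its mean is plain classification accuracy.
# The toy numbers below are illustrative only.
import numpy as np

labels = np.array([1.0, 0.0, 1.0, 0.0])
preds = np.array([0.9, 0.2, 0.4, 0.1])  # one mistake at index 2
accuracy = 1 - np.mean(np.abs(labels - np.round(preds)))
print(accuracy)  # 0.75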
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--continue', dest='continue_path', required=False)
    args = parser.parse_args()

    ## load dataset
    train_batch_gnr, train_set = get_dataset_batch(ds_name='train')
    test_gnr, test_set = get_dataset_batch(ds_name='test')

    ## build graph
    network = Model()
    placeholders, restored = network.build()
    gt_size = config.patch_size - config.edge
    gt = tf.placeholder(tf.float32,
                        shape=(None, ) + (gt_size, gt_size) +
                        (config.nr_channel * config.ratio * config.ratio, ),
                        name='gt')
    loss_squared = squared_error_loss(gt, restored)
    loss_reg = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
    loss = loss_reg + loss_squared

    ## train config
    global_steps = tf.Variable(0, trainable=False)
    boundaries = [
        train_set.minibatchs_per_epoch * 5,
        train_set.minibatchs_per_epoch * 40
    ]
    values = [0.0001, 0.0001, 0.0001]
    lr = tf.train.piecewise_constant(global_steps, boundaries, values)
    opt = tf.train.AdamOptimizer(lr)
    # in order to update BN in every iter
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train = opt.minimize(loss)

    ## init tensorboard
    tf.summary.scalar('loss_regularization', loss_reg)
    tf.summary.scalar('loss_error', loss - loss_reg)
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('learning_rate', lr)
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(
        os.path.join(config.log_dir, 'tf_log', 'train'), tf.get_default_graph())

    ## create a session
    tf.set_random_seed(12345)  # ensure consistent results
    global_cnt = 0
    epoch_start = 0
    g_list = tf.global_variables()
    saver = tf.train.Saver(var_list=g_list)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if args.continue_path:
            ckpt = tf.train.get_checkpoint_state(args.continue_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
            epoch_start = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[1])
            global_cnt = epoch_start * train_set.minibatchs_per_epoch

        ## training
        psnr_file = open('./psnr_ratioG2', 'w+')  # renamed from 'file' to avoid shadowing the builtin
        for epoch in range(epoch_start + 1, config.nr_epoch + 1):
            for _ in range(train_set.minibatchs_per_epoch):
                global_cnt += 1
                lr_images, sr_images = sess.run(train_batch_gnr)  # 128*7*7*3, 128*9*9*27
                feed_dict = {
                    placeholders['data']: lr_images[:, :, :, :1],
                    gt: sr_images[:, :, :, :1 * config.ratio * config.ratio],
                    global_steps: global_cnt,
                    placeholders['is_training']: True,
                }
                _, loss_v, loss_reg_v, lr_v, summary = sess.run(
                    [train, loss, loss_reg, lr, merged], feed_dict=feed_dict)
                if global_cnt % config.show_interval == 0:
                    train_writer.add_summary(summary, global_cnt)
                    print(
                        "e:{},{}/{}".format(epoch,
                                            global_cnt % train_set.minibatchs_per_epoch,
                                            train_set.minibatchs_per_epoch),
                        'loss: {:.3f}'.format(loss_v),
                        'loss_reg: {:.3f}'.format(loss_reg_v),
                        'lr: {:.4f}'.format(lr_v),
                    )

            ## save model
            if epoch % config.snapshot_interval == 0:
                saver.save(sess,
                           os.path.join(config.log_model_dir, 'epoch-{}'.format(epoch)),
                           global_step=global_cnt)

            if epoch % config.test_interval == 0:
                psnrs = []
                for _ in range(test_set.testing_minibatchs_per_epoch):
                    lr_image, hr_image = sess.run(test_gnr)
                    feed_dict = {
                        placeholders['data']: lr_image[:, :, :, :1],
                        placeholders['is_training']: False,
                    }
                    restored_v = sess.run([restored], feed_dict=feed_dict)
                    restored_img_y = from_sub_pixel_to_img(restored_v[0][0], config.ratio)
                    if epoch == 199:
                        img = np.clip(restored_img_y[:, :, 0], 0, 1) * 255
                        img = img.astype('uint8')
                        cv2.imwrite('./output/{}.png'.format(global_cnt), img)
                        global_cnt += 1
                    edge = int(config.edge / 2 * config.ratio)
                    psnr_y = compare_psnr(hr_image[0, edge:-edge, edge:-edge, :1],
                                          restored_img_y)
                    psnrs.append(psnr_y)
                psnr_file.write(str(np.mean(psnrs)) + '\n')
                print('average psnr is {:2.2f} dB'.format(np.mean(psnrs)))

        print('Training is done, exit.')
        psnr_file.close()
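# A minimal sketch of the PSNR comparison done in the test loop above, using
# scikit-image (compare_psnr was the older name; current releases expose it as
# peak_signal_noise_ratio). The random images below are placeholders only.
import numpy as np
from skimage.metrics import peak_signal_noise_ratio

rng = np.random.default_rng(0)
reference = rng.random((32, 32, 1))
distorted = np.clip(reference + rng.normal(0, 0.05, reference.shape), 0, 1)
print(peak_signal_noise_ratio(reference, distorted, data_range=1.0))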
def main():
    # Dataset path
    train_list = 'video_train_list_middle_cam0.txt'  # train: cam0 + cam1 (defined in dataset.py), test: cam1, val: cam0
    test_list = 'video_test_list_middle_cam1.txt'
    val_list = 'video_val_list_middle_cam0.txt'

    # Learning params
    training_iters = 100  # 10 epochs
    batch_size = 120
    display_step = 1
    n_hidden = 64
    sequence = 20
    im_height = 112
    im_width = 112
    n_classes = 10
    keep_rate = 0.5
    learning_rate = 0.001

    x = tf.placeholder(tf.float32, [batch_size, sequence, im_height, im_width, 3])
    x_cross = tf.placeholder(tf.float32, [batch_size, sequence, im_height, im_width, 3])
    target_x = tf.placeholder(tf.float32, [batch_size, sequence, im_height, im_width, 3])
    y = tf.placeholder(tf.float32, [None, 1])
    y_source = tf.placeholder(tf.float32, [None, 10])
    y_target = tf.placeholder(tf.float32, [None, 10])
    keep_var = tf.placeholder(tf.float32)

    input_imgs = tf.reshape(x, [-1, im_height, im_width, 3])
    input_imgs = input_imgs * 255
    tf.summary.image("input frames", input_imgs, 20)

    pred, encoded = Model.encoder(x, n_hidden * 2, n_classes, keep_var)
    output_tensor, _ = Model.decoder(encoded[:, :n_hidden])
    output_imgs = tf.reshape(output_tensor, [-1, im_height, im_width, 3])
    output_imgs = output_imgs * 255
    tf.summary.image("generated frames", output_imgs, 20)

    x_flattened = tf.reshape(x_cross, [-1, 112 * 112 * 3])
    rec_loss = Model.get_reconstruction_cost(x_flattened, output_tensor)
    tf.summary.scalar('rec_loss_loss', rec_loss)

    # Reconstruction loss (the original comment said "Cross-entropy loss", but only rec_loss is used)
    loss = rec_loss
    optimizer = layers.optimize_loss(
        loss, tf.contrib.framework.get_or_create_global_step(),
        learning_rate=learning_rate, optimizer='Adam', update_ops=[])
    merged = tf.summary.merge_all()

    # Init
    init = tf.initialize_all_variables()

    # Load dataset
    dataset_source = Dataset(
        train_list, test_list, val_list, '1', n_classes=n_classes,
        shuffleType='seq', seqLength=sequence, CNN_type='Alex'
    )  # Paths output camera 0, so here '1' indicates the target view data
    dataset_target = Dataset(train_list, test_list, val_list, '0',
                             n_classes=n_classes, shuffleType='seq',
                             seqLength=sequence, CNN_type='Alex')

    saver_all = tf.train.Saver()

    # Launch the graph
    with tf.Session() as sess:
        print 'Loading weights'
        sess.run(init)
        train_writer = tf.summary.FileWriter('./logs/train', sess.graph)
        print 'Start training'
        step = 1
        epoch = 1
        while step < training_iters:
            batch_xs, batch_ys, batch_label, batch_xs_cross = dataset_source.next_batch_cross(batch_size, 'train')
            batch_xs_target, batch_ys_target, batch_label_target = dataset_target.next_batch(batch_size, 'train')
            pair_label = np.zeros((batch_size, 1), int)
            for i in range(batch_size):
                if batch_label[i] == batch_label_target[i]:
                    pair_label[i] = 1
            output_merged, _ = sess.run(
                [merged, optimizer],
                feed_dict={
                    x: batch_xs,
                    x_cross: batch_xs_cross,
                    target_x: batch_xs_target,
                    y: pair_label,
                    keep_var: keep_rate,
                    y_source: batch_ys,
                    y_target: batch_ys_target
                })
            train_writer.add_summary(output_merged, step)

            if step % display_step == 0:
                batch_xs, batch_ys, batch_label, batch_xs_cross = dataset_source.next_batch_cross(batch_size, 'train')
                batch_xs_target, batch_ys_target, batch_label_target = dataset_target.next_batch(batch_size, 'train')
                pair_label = np.zeros((batch_size, 1), int)
                for i in range(batch_size):
                    if batch_label[i] == batch_label_target[i]:
                        pair_label[i] = 1
                rec_loss_value = sess.run(rec_loss,
                                          feed_dict={
                                              x: batch_xs,
                                              x_cross: batch_xs_cross,
                                              target_x: batch_xs_target,
                                              y_source: batch_ys,
                                              y_target: batch_ys_target,
                                              y: pair_label,
                                              keep_var: 1.
                                          })
                epoch += 1
            step += 1
        print "Finish!"
        save_path = saver_all.save(sess, "save_model_siamese/finetuned_cross_loss.ckpt")
parser.add_argument('--sample_length', type=int, default=800,
                    help='number of strokes to sample')
parser.add_argument('--scale_factor', type=int, default=10,
                    help='factor to scale down by for svg output. smaller means bigger output')
sample_args = parser.parse_args()

with open(os.path.join('save', 'config.pkl')) as f:
    saved_args = cPickle.load(f)

model = Model(saved_args, True)
sess = tf.InteractiveSession()
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state('save')
print "loading model: ", ckpt.model_checkpoint_path
saver.restore(sess, ckpt.model_checkpoint_path)

def sample_stroke():
    [strokes, params] = model.sample(sess, sample_args.sample_length)
    draw_strokes(strokes, factor=sample_args.scale_factor,
                 svg_filename=sample_args.filename + '.normal.svg')
    draw_strokes_random_color(strokes,
val_nums = val_generator.names_num
print("====================dataset scale====================")
print("=========> Train dataset: {},{}".format(train_json_path, train_nums))
print("=========> Val dataset: {},{}".format(val_json_path, val_nums))
print("=====================================================\n")

# train_steps = int(train_nums / model_config.param['BATCH_SIZE'])
train_steps = 10000
val_steps = int(val_nums / model_config.param['BATCH_SIZE'])
model_config.set_param(['TRAIN_STEPS', 'VALIDATION_STEPS'],
                       [train_steps, val_steps])
model_config.show_config()

model = Model(model_config)

now = datetime.datetime.now()
log_dir = os.path.join(
    base_dir, "{}_{}_{:%Y%m%dT%H%M}".format(model_name, data_version, now))
checkpoint_path = os.path.join(log_dir, "ep_*epoch*.h5")
checkpoint_path = checkpoint_path.replace("*epoch*", "{epoch:04d}")

if IS_SAVE:
    callbacks = [
        LRTensorBoard(log_dir=log_dir,
                      histogram_freq=0,
                      write_graph=True,
                      write_images=False),
        # tf.keras.callbacks.TensorBoard(log_dir=log_dir,
dump = args["dump"] def log(s): print(s, flush=True) # Model ---------------------------------------------------------------- print("Building model", flush=True) with open(model_config_json) as f: model_config = json.load(f) pos_len = 19 # shouldn't matter, all we're doing is exporting weights that don't depend on this if name_scope is not None: with tf.name_scope(name_scope): model = Model(model_config, pos_len, {}) else: model = Model(model_config, pos_len, {}) def volume(variable): shape = variable.get_shape() variable_parameters = 1 for dim in shape: variable_parameters *= dim.value return variable_parameters total_parameters = 0 for variable in tf.global_variables(): variable_parameters = volume(variable)
    vocabulary, word_to_idx, idx_to_word = pickle.load(f)
logger.append("VOCABULARY LOADED.\n")

# -------------------------------------------------------------------------------------------------------------------- #
# RUN
tf.reset_default_graph()
tf.set_random_seed(12345)
np.random.seed(12345)

model = Model(experiment=FLAGS.EXPERIMENT,
              mode=FLAGS.MODE,
              vocabulary_size=FLAGS.VOCABULARY_SIZE,
              embedding_dim=FLAGS.EMBEDDING_DIM,
              state_dim=FLAGS.STATE_DIM,
              down_state_dim=FLAGS.DOWN_STATE_DIM,
              sent_dim=FLAGS.SENT_DIM,
              cont_dim=FLAGS.CONT_DIM,
              initializer=tf.contrib.layers.xavier_initializer(),
              pad_idx=word_to_idx["<pad>"],
              eos_idx=word_to_idx["<eos>"],
              num_epochs=FLAGS.NUM_EPOCHS)

logger.append("TRAINABLE VARIABLES.")
tf_utils.trainable_parameters(logger)

saver = tf.train.Saver()
timer.__enter__()
logger.append("TF SESSION STARTING.\n")
with tf.Session() as session:
    # writer = tbc.get_deep_writers("./")
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_curve

cuda.empty_cache()

def compute_auc(y_true, y_prob):
    fpr, tpr, thres = roc_curve(y_true, y_prob)
    auc_score = auc(fpr, tpr)
    return auc_score

data = read_data('../data/classic_kt.dat')
train_data, test_data = train_test_split(data, test_size=.2)

model = Model(13, 64)
model = model.cuda()
optimizer = optim.Adam(model.parameters(), 5e-4)

dl_train = DataLoader(train_data)
dl_test = DataLoader(test_data)

for ep in range(10):  # 10 epochs
    i = 0
    for x, y, z in dl_train.sampling(72):
        loss = model.forward(
            tensor(x).cuda(), tensor(y).cuda(), tensor(z).long().cuda(), True)
        optimizer.zero_grad()
        clip_grad_value_(model.parameters(), 10)
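# The loop above is cut off mid-iteration; as a reference, here is a minimal
# hedged sketch of the usual ordering of these calls in a PyTorch training
# step. Gradient clipping only has an effect after backward() has populated
# .grad, so clip between backward() and step(). The tiny model and random
# data below are placeholders, not part of the code above.
import torch
from torch import nn, optim
from torch.nn.utils import clip_grad_value_

net = nn.Linear(4, 1)
opt = optim.Adam(net.parameters(), lr=5e-4)
inputs, targets = torch.randn(8, 4), torch.randn(8, 1)

loss = nn.functional.mse_loss(net(inputs), targets)  # forward
opt.zero_grad()                                      # clear stale gradients
loss.backward()                                      # compute gradients
clip_grad_value_(net.parameters(), 10)               # clip in place
opt.step()                                           # apply the update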
    for piece in multi_gpus.split(","):
        piece = piece.strip()
        multi_gpu_device_ids.append("/GPU:" + str(int(piece)))
    num_gpus_used = len(multi_gpu_device_ids)

# MODEL ----------------------------------------------------------------
printed_model_yet = False

# Avoid loading initial weights, just ignore them, if we've already started
# training and we have weights. We detect this by detecting the TF estimator
# "checkpoint" index file.
initial_weights_already_loaded = os.path.exists(os.path.join(traindir, "checkpoint"))

if swa_sub_epoch_scale is not None:
    with tf.device("/cpu:0"):
        with tf.compat.v1.variable_scope("swa_model"):
            swa_model = Model(model_config, pos_len, placeholders={})
            swa_saver = tf.compat.v1.train.Saver(
                max_to_keep=10000000,
                save_relative_paths=True,
            )
    swa_assign_placeholders = {}
    swa_wvalues = {}
    swa_weight = 0.0
    assign_ops = []
    for variable in itertools.chain(tf.compat.v1.model_variables(),
                                    tf.compat.v1.trainable_variables()):
        if variable.name.startswith("swa_model/"):
            placeholder = tf.compat.v1.placeholder(variable.dtype, variable.shape)
            assign_ops.append(tf.compat.v1.assign(variable, placeholder))
            swa_assign_placeholders[variable.name] = placeholder
def __init__(self, model_name, device):
    Model.__init__(self, model_name, device, 'facial_landmarks_detection_model')
def load_model(num_class):
    dim_feature = 1024
    model = Model(num_class, args.sa, args.omega)
    return model, dim_feature
def _train(dataset_name: str, backbone_name: str, path_to_data_dir: str,
           path_to_checkpoints_dir: str, path_to_resuming_checkpoint: Optional[str]):
    dataset = DatasetBase.from_name(dataset_name)(path_to_data_dir, DatasetBase.Mode.TRAIN,
                                                  Config.IMAGE_MIN_SIDE, Config.IMAGE_MAX_SIDE)
    dataloader = DataLoader(dataset, batch_size=Config.BATCH_SIZE,
                            sampler=DatasetBase.NearestRatioRandomSampler(dataset.image_ratios,
                                                                          num_neighbors=Config.BATCH_SIZE),
                            num_workers=8, collate_fn=DatasetBase.padding_collate_fn, pin_memory=True)

    Log.i('Found {:d} samples'.format(len(dataset)))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    backbone = BackboneBase.from_name(backbone_name)(pretrained=True)
    model = nn.DataParallel(
        Model(
            backbone, dataset.num_classes(), pooler_mode=Config.POOLER_MODE,
            anchor_ratios=Config.ANCHOR_RATIOS, anchor_sizes=Config.ANCHOR_SIZES,
            rpn_pre_nms_top_n=Config.RPN_PRE_NMS_TOP_N,
            rpn_post_nms_top_n=Config.RPN_POST_NMS_TOP_N,
            anchor_smooth_l1_loss_beta=Config.ANCHOR_SMOOTH_L1_LOSS_BETA,
            proposal_smooth_l1_loss_beta=Config.PROPOSAL_SMOOTH_L1_LOSS_BETA
        ).to(device)
    )
    optimizer = optim.SGD(model.parameters(), lr=Config.LEARNING_RATE,
                          momentum=Config.MOMENTUM, weight_decay=Config.WEIGHT_DECAY)
    scheduler = WarmUpMultiStepLR(optimizer, milestones=Config.STEP_LR_SIZES,
                                  gamma=Config.STEP_LR_GAMMA,
                                  factor=Config.WARM_UP_FACTOR,
                                  num_iters=Config.WARM_UP_NUM_ITERS)

    step = 0
    time_checkpoint = time.time()
    losses = deque(maxlen=1000)
    all_losses = {'anchor_objectness_loss': deque(maxlen=1000),
                  'anchor_transformer_loss': deque(maxlen=1000),
                  'proposal_class_loss': deque(maxlen=1000),
                  'proposal_transformer_loss': deque(maxlen=1000),
                  'vertex_loss': deque(maxlen=1000)}
    #summary_writer = SummaryWriter(os.path.join(path_to_checkpoints_dir, 'summaries'))
    should_stop = False

    num_steps_to_display = Config.NUM_STEPS_TO_DISPLAY
    num_steps_to_snapshot = Config.NUM_STEPS_TO_SNAPSHOT
    num_steps_to_finish = Config.NUM_STEPS_TO_FINISH

    if path_to_resuming_checkpoint is not None:
        step = model.module.load(path_to_resuming_checkpoint, optimizer, scheduler)
        Log.i(f'Model has been restored from file: {path_to_resuming_checkpoint}')

    # if torch.cuda.is_available():
    #     device_count = torch.cuda.device_count()
    #     assert Config.BATCH_SIZE % device_count == 0, 'The batch size is not divisible by the device count'
    #     Log.i('Start training with {:d} GPUs ({:d} batches per GPU)'.format(torch.cuda.device_count(),
    #                                                                         Config.BATCH_SIZE // torch.cuda.device_count()))

    while not should_stop:
        for _, (_, image_batch, _, bboxes_batch, labels_batch, vertices_batch) in enumerate(dataloader):
            batch_size = image_batch.shape[0]
            image_batch = image_batch.to(device)
            bboxes_batch = bboxes_batch.to(device)
            labels_batch = labels_batch.to(device)
            vertices_batch = vertices_batch.to(device)

            anchor_objectness_losses, anchor_transformer_losses, proposal_class_losses, proposal_transformer_losses, vertex_losses = \
                model.train().forward(image_batch, bboxes_batch, labels_batch, vertices_batch)

            loss = 0
            anchor_objectness_loss = anchor_objectness_losses.mean()
            loss += anchor_objectness_loss
            all_losses['anchor_objectness_loss'].append(anchor_objectness_loss.item())

            anchor_transformer_loss = anchor_transformer_losses.mean()
            loss += anchor_transformer_loss
            all_losses['anchor_transformer_loss'].append(anchor_transformer_loss.item())

            # keep only the positive entries so empty batches don't produce NaN means
            proposal_class_losses = proposal_class_losses[proposal_class_losses > 0]
            if proposal_class_losses.nelement() > 0:
                proposal_class_loss = proposal_class_losses.mean()
                loss += proposal_class_loss
                all_losses['proposal_class_loss'].append(proposal_class_loss.item())

            proposal_transformer_losses = proposal_transformer_losses[proposal_transformer_losses > 0]
            if proposal_transformer_losses.nelement() > 0:
                proposal_transformer_loss = proposal_transformer_losses.mean()
                loss += proposal_transformer_loss
                all_losses['proposal_transformer_loss'].append(proposal_transformer_loss.item())

            vertex_losses = vertex_losses[vertex_losses > 0]
            if vertex_losses.nelement() > 0:
                vertex_loss = vertex_losses.mean()
                loss += vertex_loss
                all_losses['vertex_loss'].append(vertex_loss.item())

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()
            losses.append(loss.item())
            # summary_writer.add_scalar('train/anchor_objectness_loss', anchor_objectness_loss.item(), step)
            # summary_writer.add_scalar('train/anchor_transformer_loss', anchor_transformer_loss.item(), step)
            # summary_writer.add_scalar('train/proposal_class_loss', proposal_class_loss.item(), step)
            # summary_writer.add_scalar('train/proposal_transformer_loss', proposal_transformer_loss.item(), step)
            # summary_writer.add_scalar('train/loss', loss.item(), step)
            step += 1

            if step == num_steps_to_finish:
                should_stop = True

            if step % num_steps_to_display == 0:
                elapsed_time = time.time() - time_checkpoint
                time_checkpoint = time.time()
                steps_per_sec = num_steps_to_display / elapsed_time
                samples_per_sec = batch_size * steps_per_sec
                eta = (num_steps_to_finish - step) / steps_per_sec / 3600
                avg_loss = sum(losses) / len(losses)
                lr = scheduler.get_lr()[0]
                avg_vertex_loss = sum(all_losses['vertex_loss']) / len(all_losses['vertex_loss'])
                Log.i(f'[Step {step}] Avg. Loss = {avg_loss:.6f}, Vertex Loss = {avg_vertex_loss:.6f}, '
                      f'Learning Rate = {lr:.8f} ({samples_per_sec:.2f} samples/sec; ETA {eta:.1f} hrs)')
                print(sum(all_losses['anchor_objectness_loss']) / len(all_losses['anchor_objectness_loss']),
                      sum(all_losses['anchor_transformer_loss']) / len(all_losses['anchor_transformer_loss']),
                      sum(all_losses['proposal_class_loss']) / len(all_losses['proposal_class_loss']),
                      sum(all_losses['proposal_transformer_loss']) / len(all_losses['proposal_transformer_loss']),
                      sum(all_losses['vertex_loss']) / len(all_losses['vertex_loss']))

            if step % num_steps_to_snapshot == 0 or should_stop:
                path_to_checkpoint = model.module.save(path_to_checkpoints_dir, step, optimizer, scheduler)
                Log.i(f'Model has been saved to {path_to_checkpoint}')
                val(model, dataset_name, path_to_data_dir, device)

            if should_stop:
                break

    Log.i('Done')
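# A tiny sketch of the windowed moving average the display block above keeps
# with deque(maxlen=1000): appending to a bounded deque drops the oldest
# entry, so sum(d) / len(d) is always the mean of the most recent values.
# The window size and values below are illustrative only.
from collections import deque

window = deque(maxlen=3)
for v in [1.0, 2.0, 3.0, 4.0]:
    window.append(v)
print(sum(window) / len(window))  # mean of [2.0, 3.0, 4.0] -> 3.0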