Example #1
 def insn_store_tuple(self, insn):
     [arg_reg, tuple_reg] = insn.regs
     i, offset, n = insn.params
     r = [opcodes.arg, tuple_reg, arg_reg, i]
     # XXX this is a bit of a hack. Because of the confusing implementation of compile_rands,
     #     we have no way of passing the tuple to its continuation (when it's needed)
     if insn.target != 'dead' and insn.target != tuple_reg:
         print 'tuple move hack'
         trace()
         r.extend([opcodes.mov, insn.target, tuple_reg])
     return r
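Across these examples, trace() is an inline debugger breakpoint: execution stops at the call and drops into an interactive prompt. A minimal sketch of how such a helper is commonly wired up, assuming the standard-library pdb (the projects above may use epdb, pudb, or a project-specific wrapper instead):

# Hypothetical setup: alias pdb's breakpoint function to `trace`.
from pdb import set_trace as trace

def build_result():
    r = [1, 2, 3]
    trace()  # execution pauses here; inspect r, step, or continue
    return r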
Example #2
 def forward_pass(self, inputs):
     self.inputs = inputs
     if len(inputs.shape) > 1:
         self.means = np.mean(inputs, axis=1)
         self.stddevs = np.std(inputs, axis=1)
         self.total_means += self.means
         self.total_stddevs += self.stddevs
         self.total += 1
         self.means = self.means[None].T
         self.stddevs = self.stddevs[None].T
     else:
         self.means = self.total_means / self.total
         self.stddevs = self.total_stddevs / self.total
     ret = (inputs - self.means) / (self.stddevs + self.alpha)  # TODO: optimize
     assert ret.shape == inputs.shape
     trace()
     return ret
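The pattern above is running normalization: with a 2-D batch the layer normalizes using batch statistics and accumulates them, and with a 1-D input (inference) it falls back to the accumulated averages, alpha guarding the division. A self-contained sketch of the same idea, using per-feature statistics (axis=0) instead of the original's per-sample axis=1 so the running averages line up with a single input vector; RunningNorm and its parameters are illustrative, not from the original project:

import numpy as np

class RunningNorm:
    def __init__(self, num_features, alpha=1e-8):
        self.alpha = alpha                        # guards against division by zero
        self.total_means = np.zeros(num_features)
        self.total_stddevs = np.zeros(num_features)
        self.total = 0                            # number of batches seen

    def forward_pass(self, inputs):
        if inputs.ndim > 1:                       # training: (batch, features)
            means = inputs.mean(axis=0)
            stddevs = inputs.std(axis=0)
            self.total_means += means
            self.total_stddevs += stddevs
            self.total += 1
        else:                                     # inference: single feature vector
            means = self.total_means / self.total
            stddevs = self.total_stddevs / self.total
        return (inputs - means) / (stddevs + self.alpha)

norm = RunningNorm(num_features=3)
norm.forward_pass(np.random.randn(16, 3))             # training batch
print(norm.forward_pass(np.array([0.5, -1.0, 2.0])))  # inference on one sample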
Example #3
        word_lst.append('')



df['Word_lst'] = word_lst # Adding Adj, Verb, Adverb info to Data set
df['Org_lst'] = org_lst   # Adding Organization List
df['Lost_Cause'] = cause_str_lst # Adding Loss Cause
df['Company_Names'] = cmpy_lst # Adding Organization Names (assume NNP tags usually correspond to ORGANIZATION)

df.to_csv('write_into_file.csv', index=False)

print 'Company Names Verification'
company_lst = []
company_names = " ".join(cmpy_data.Company_Names)  # loop-invariant: build the lookup string once
for item in cmpy_lst:
    clst = []
    if not item:
        company_lst.append('')
        continue
    for it in item:
        lst = it.split()
        clst.append([i for i in lst if i in company_names])
    company_lst.append(clst)

trace()

df.to_csv('file_intel_1.csv', index=False) # Writing Data Frame to CSV File.
trace() # To halt the program at this stage.
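One caveat in the verification loop above: company_names is one long joined string, so `i in company_names` is a substring test and can match partial words. A small sketch of the difference, with hypothetical data standing in for cmpy_data.Company_Names:

names = ["Acme Corp", "Globex Corporation"]
joined = " ".join(names)

print("Corpor" in joined)        # True: substring match inside 'Corporation'

tokens = set(joined.split())
print("Corpor" in tokens)        # False: a token set only matches whole words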
Example #4
def main(args):
    criterion = nn.CrossEntropyLoss()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    training_net = TrainingMLPModel(**mlp_params).to(device)
    inference_net = InferenceMLPModel(**mlp_params).to(device)

    optimizer = optim.Adam(training_net.parameters(), lr=3e-4)

    train_loader = get_dataloader('train')
    val_loader = get_dataloader('val')

    best_val_loss = np.inf
    best_val_epoch = 0
    num_val_batches = len(val_loader)
    if num_val_batches == 0:
        print('No validation batches!')
        return

    output_path = os.path.join('exps', args.exp_name)
    os.makedirs(output_path, exist_ok=True)
    cur_ckpt_path = ckpt_path.format(args.exp_name)

    all_val_labels = json.load(
        open(imgs_and_objs_align_dict_file.format('val'), 'r'))
    obj_names = list(json.load(open(obj_new_id_to_name_file, 'r')).keys())

    for epoch in range(NUM_EPOCHS):
        for i, data in enumerate(train_loader):
            inputs = data['descs'].float().to(device)
            num_descs = data['num_descs'].long().to(device)
            labels = data['label'].squeeze().long().to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = training_net(inputs, num_descs, labels)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            if i % PRINT_EVERY == PRINT_EVERY - 1:  # print every N mini-batches
                print('[%d, %5d] Train loss: %.3f' %
                      (epoch + 1, i + 1, loss.item()))
                break  # note: ends the epoch after the first logging interval

        # perform validation
        running_val_loss = 0
        inference_net.load_state_dict(training_net.state_dict())
        all_val_gt_labels, all_val_preds = [], []

        for i, data in enumerate(val_loader):
            inputs = data['descs'].float().to(device)
            num_descs = data['num_descs'].long().to(device)
            labels = data['label'].squeeze().long().to(device)
            img_ids = data['img_id']
            train_outputs = training_net(inputs, num_descs, labels)
            running_val_loss += criterion(train_outputs, labels).item()

            inference_outputs = inference_net(inputs, num_descs)
            trace()
            relevant_gt_labels, pred_labels = eval_batch_prediction_gqa(
                all_val_labels, inference_outputs, img_ids)
            all_val_gt_labels += relevant_gt_labels
            all_val_preds += pred_labels

        accuracy = round(accuracy_score(all_val_gt_labels, all_val_preds), 5)
        plot_cm(all_val_preds, all_val_gt_labels, obj_names,
                get_cm_path(output_path, epoch, accuracy))

        cur_val_loss = running_val_loss / num_val_batches  # average over validation batches
        print('Epoch %d Val loss: %.3f' % (epoch + 1, cur_val_loss))
        if best_val_loss > cur_val_loss:
            best_val_loss = cur_val_loss
            best_val_epoch = epoch
            torch.save(training_net.state_dict(), cur_ckpt_path)
        else:
            if best_val_epoch + EARLY_STOPPING <= epoch:
                print('Early stopping after {} epochs'.format(epoch))
                print(f'Best val epoch was {best_val_epoch}')

                return
                # perform one last validation with best params
                # inference_net.load_state_dict(torch.load(ckpt_path))
                #
                # all_val_gt_labels, all_val_preds = [], []
                #
                # for i, data in enumerate(val_loader):
                #     inputs = data['descs'].float().to(device)
                #     num_descs = data['num_descs'].long().to(device)
                #     labels = data['label'].squeeze().long().to(device)
                #     img_ids = data['img_id']
                #     train_outputs = training_net(inputs, num_descs, labels)
                #     running_val_loss += criterion(train_outputs, labels).item()
                #
                #     inference_outputs = inference_net(inputs, num_descs)
                #     relevant_gt_labels, pred_labels = eval_batch_prediction(all_val_labels, inference_outputs, img_ids)
                #     all_val_gt_labels += relevant_gt_labels
                #     all_val_preds += pred_labels
                #
                # accuracy = round(accuracy_score(all_val_gt_labels, all_val_preds), 5)
                # plot_cm(all_val_preds, all_val_gt_labels, obj_names, get_cm_path(output_path, 'best', accuracy))

        val_loader = get_dataloader('val')
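The early-stopping rule in this loop halts once no epoch within the last EARLY_STOPPING epochs has improved on the best validation loss. A minimal sketch of the same bookkeeping with hypothetical loss values:

EARLY_STOPPING = 3
best_val_loss, best_val_epoch = float('inf'), 0

for epoch, val_loss in enumerate([0.9, 0.7, 0.72, 0.71, 0.73]):
    if val_loss < best_val_loss:
        best_val_loss, best_val_epoch = val_loss, epoch
    elif best_val_epoch + EARLY_STOPPING <= epoch:
        print('Early stopping after {} epochs'.format(epoch))  # fires at epoch 4
        break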
Example #5
		for each in corpandrunner:
			if len(corpandrunner[each]) < count:
				ezbuilds.append(each + " needs " + str(len(corpandrunner[each])) + " sets to build")
				corpandrunnerlist.append(corpandrunner[each])
		count += 1
	count = 0
	for sentence in ezbuilds:
		print str(count) + ". " + sentence
		count += 1
#select deck pairing, and review set +data packs required
	while True:
		review = input("which build?:")
		if review == '':
			mainmenu()
	#add number of cards in each set
		for setpresence in corpandrunnerlist[review]:
			presencevalue = 0
			for card in corpdecks[ezbuilds[review].split('+')[0]]:
				if card[1] == setpresence:
					presencevalue += card[2]
			for card in runnerdecks[ezbuilds[review].split('+')[1].split(' needs ')[0]]:
				if card[1] == setpresence:
					presencevalue += card[2]
			print setpresence + ": " + str(presencevalue) + " cards"
			sleep(.25)
		print "to build " + ezbuilds[review]
		print "+-+-++-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\n"
trace()
mainmenu()
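Note that this example is Python 2, where input() evaluates whatever the user types; that is why review can be used directly as a list index. A Python 3 port needs an explicit conversion, sketched here with a print standing in for the original mainmenu():

raw = input("which build?:")
if raw == '':
    print("back to main menu")   # stands in for mainmenu()
else:
    review = int(raw)            # Python 2's input() did this implicitly
    print("selected build", review)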
Example #6
File: initparams.py Project: camm/code
def modelB_freeE_C(modeltpl, elastic, convolved, expdata, initparfile=None):
  """Estimate initial beamline parameters for the modelB_freeE_C
  This is a hard-coded model consisting of a linear background, and elastic line, and a convolution:
    b0+b1*E  +  +e0(Q)*Elastic(E)  +  c0*Resolution(E)xSimulated(Q,E)
    We load Resolution(E)xSimulated(Q,E) as Convolved(Q,E)
    e0(Q) are a set of fitting parameters, one for each Q
  Initial values are estimated as follows:
  b0:0.0
  b1:0.0
  Evaluation of the model at E=0:
    e0*elastic(Q,0) + c0*convolved(Q,0) ~ experiment(Q,0) {Eq.1},
    with 'convolved' the convolution of the experimental resolution and the Fourier transform
    of the simulated intermediate structure factor
  For the lowest Q, we assume contributions from the elastic line and simulation are equal. Thus:
    c0*convolved(Qmin,0) ~ 1/2*experiment(Qmin,0) ---> provides estimation for c0
    e0(Qmin)*elastic(Qmin,0) ~ 1/2*experiment(Qmin,0) ---> provides estimation for e0
  For the remaining Q, we use {Eq.1} substituting the c0 found above.
  Finally, eshift:0.0
  
  Arguments:
    modeltpl: beamline template model file (xml format)
    elastic: Nexus file containing the elastic line
    convolved: Nexus file containing convolution of the resolution and simulated structure factor
    expdata: Nexus file containing the experimental data
    [initparfile]: Output the initial parameters as a string in file with name initparfile

  Returns:
    initparams: string with initial values for the parameters
  """
  from simulation.src.molmec.ffupdate.ff_update import loadFFtpl,updateTemplate
  from mantid.simpleapi import LoadNexus

  wse=LoadNexus(Filename=elastic,OutputWorkspace='elastic')
  wsc=LoadNexus(Filename=convolved,OutputWorkspace='convolved')
  wsx=LoadNexus(Filename=expdata,OutputWorkspace='experiment')

  parl,template=loadFFtpl(modeltpl)
  pard={}
  for par in parl: pard[par._name]=par

  nhist=wsx.getNumberHistograms()
  le=len(wsx.readX(0))
  for ws in wse,wsc:
    if ws.getNumberHistograms()!=nhist or len(ws.readX(0))!=le:
      error_message='%s %d histograms of length %d do not conform to those of experiment'%(ws.getName(),ws.getNumberHistograms(),len(ws.readX(0)))
      g_log.error(error_message)
      raise StandardError(error_message)

  pard['b0'].setValue(1e-10) # needs to be positive
  pard['b1'].setValue(0.)
  ezero=le/2 # assume E=0 in the middle of the histogram span
  pard['c0'].setValue(0.5*wsx.readY(0)[ezero]/wsc.readY(0)[ezero])
  pard['e0.0'].setValue(0.5*wsx.readY(0)[ezero]/wse.readY(0)[ezero])
  trace()
  for ihist in range(1,nhist):
    pard['e0.'+str(ihist)].setValue((wsx.readY(ihist)[ezero] - pard['c0']._value*wsc.readY(ihist)[ezero]) / wse.readY(ihist)[ezero])
  pard['eshift'].setValue(0.)
  template=updateTemplate(template,parl)

  if initparfile:
    with open(initparfile,'w') as f: f.write(template)
  return template
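At the E=0 channel the docstring's recipe reduces to two one-line estimates plus a substitution into Eq.1 for the remaining Q. A small numeric check with made-up intensities (not real beamline data):

# Hypothetical E=0 intensities at the lowest Q.
exp0, ela0, con0 = 4.0, 8.0, 2.0   # experiment, elastic, convolved at (Qmin, 0)

c0 = 0.5 * exp0 / con0             # c0*convolved(Qmin,0) ~ 1/2*experiment(Qmin,0)
e0 = 0.5 * exp0 / ela0             # e0*elastic(Qmin,0)   ~ 1/2*experiment(Qmin,0)
assert abs(e0 * ela0 + c0 * con0 - exp0) < 1e-12   # Eq.1 holds at Qmin

# Any other Q: substitute the c0 found above into Eq.1.
expq, elaq, conq = 5.0, 10.0, 3.0
e0q = (expq - c0 * conq) / elaq
assert abs(e0q * elaq + c0 * conq - expq) < 1e-12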