Example #1
	def evaluate(self, env):
		# Evaluate the condition, then evaluate only the selected branch.
		if evaluate(self.condition, env):
			return evaluate(self.if_true, env)
		else:
			return evaluate(self.if_false, env)
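These node-level evaluate methods all delegate to a module-level evaluate function that is not shown in any of the snippets. A minimal sketch of such a dispatcher, assuming nodes are either AST objects carrying an evaluate method or already-reduced Python values:

def evaluate(node, env):
	# AST nodes know how to evaluate themselves; anything else is
	# already a value and is returned unchanged.
	if hasattr(node, "evaluate"):
		return node.evaluate(env)
	return node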
Example #2
	def evaluate(self, env):
		ev1 = evaluate(self.v1, env)
		ev2 = evaluate(self.v2, env)
		if isinstance(ev1, str) or isinstance(ev2, str):
			# Strings need special-casing: the empty string compares equal to Null.
			if {ev1, ev2} == {"", identifier("Null")}:
				return True
		return ev1 == ev2
Example #3
	def evaluate(self, env): # shall return a structure
		if not self.fields:
			return evaluate(self.value, env)
		else:
			struct = evaluate(self.struct, env)
			if not isinstance(struct, dict):
				raise ForvelkiError("unable to update field %s of non-structure %s" % (self.fields[0], self.struct))
			# Shallow-copy the structure and walk one field down the update path.
			nd = dict(struct)
			fields = deque(self.fields)
			updated_field = fields.popleft()
			next_struct = nd[updated_field]() if updated_field in nd else {}
			nd[updated_field] = closure(field_update(next_struct, fields, self.value), env)
			return evaluate(closed_structure(nd), env) 
Example #4
	def evaluate(self, env):
		function_value = evaluate(self.function_expr, env)  # see the call method in the function class
		if not isinstance(function_value, tuple):
			raise NotCallable(function_value)

		function, function_env = function_value

		if len(self.act_args) != len(function.args):
			raise WrongNumOfArguments

		fenv = dict(function_env)
		if function.name:
			# Bind the function's own name so recursive calls can resolve it.
			fenv[function.name] = closure(function_value, {})
		fenv.update(zip(function.args, (closure(a, env) for a in self.act_args)))
		return function.call(fenv)
Example #5
	def evaluate(self, env):
		clo_str = evaluate(self.struct, env)
		if isinstance(clo_str, str):
			# Special case: strings act like lists, exposing hd/tl fields.
			s = clo_str
			if not s:
				raise NoSuchField(self.field_name)
			if self.field_name == "hd":
				return s[0]
			elif self.field_name == "tl":
				return s[1:] or identifier("Null")
			else:
				raise NoSuchField(self.field_name)
		else:
			try:
				this_closure = clo_str[self.field_name]
			except KeyError:
				raise NoSuchField(self.field_name) 
			else:
				return this_closure()
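Several of these examples wrap an expression in closure(...) and later force it by calling the result, as in this_closure() above. A minimal sketch of such a thunk, pairing an unevaluated expression with its defining environment; the caching shown here is an assumption, not confirmed by the snippets:

class closure(object):
	# Defer evaluation of expr in env until the closure is called,
	# then cache the result so repeated forcing is cheap.
	def __init__(self, expr, env):
		self.expr, self.env = expr, env
		self.cached = False
	def __call__(self):
		if not self.cached:
			self.value = evaluate(self.expr, self.env)
			self.cached = True
		return self.value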
Example #6
def val(sess, val_vars):
    sess.run(val_vars.it.initializer)

    preds = []
    preds_b = []
    preds_m = []
    preds_s = []
    preds_d = []
    gts = []
    indices = tf.argmax(val_vars.logits, 1)
    # Fetches for sess.run: streaming metrics, predicted indices, ground truths.
    fetches = [
        val_vars.loss_metric, val_vars.acc_metric, val_vars.num_word_metric,
        indices, val_vars.gts
    ]

    if FLAGS.bmsd:
        # Shift argmax indices back to one-based labels if training was zero-based.
        unshift = 1 if FLAGS.zero_based else 0
        indices_b = tf.argmax(val_vars.logits_b, 1) + unshift
        indices_m = tf.argmax(val_vars.logits_m, 1) + unshift
        indices_s = tf.argmax(val_vars.logits_s, 1) + unshift
        indices_d = tf.argmax(val_vars.logits_d, 1) + unshift
        fetches.extend([indices_b, indices_m, indices_s, indices_d])

    try:
        step = 0
        while True:
            if FLAGS.bmsd:
                ((loss, _), (acc, _), (num_word, _),
                 pred, gt, pred_b, pred_m, pred_s, pred_d) = sess.run(fetches)

                preds_b.append(pred_b)
                preds_m.append(pred_m)
                preds_s.append(pred_s)
                preds_d.append(pred_d)
            else:
                (loss, _), (acc, _), (num_word, _), pred, gt = sess.run(fetches)

            preds.append(pred)
            gts.append(gt)

    except tf.errors.OutOfRangeError:
        logger.info('[*] Validation loss: %.3f, acc: %.4f, num_word: %.2f' %
                    (loss, acc, num_word))
        print('[*] Validation loss: %.3f, acc: %.4f, num_word: %.2f' %
              (loss, acc, num_word))

        if FLAGS.bmsd:
            flat_preds = list(itertools.chain(*preds))
            flat_gts = list(itertools.chain(*gts))
            flat_preds_b = list(itertools.chain(*preds_b))
            flat_preds_m = list(itertools.chain(*preds_m))
            flat_preds_s = list(itertools.chain(*preds_s))
            flat_preds_d = list(itertools.chain(*preds_d))

            score, base_scores = evaluate(flat_gts, flat_preds)
            print('\n[!] Label Score: %.4f' % (score))

            bmsd_score, bmsd_scores = evaluate_bmsd(flat_gts, flat_preds_b,
                                                    flat_preds_m, flat_preds_s,
                                                    flat_preds_d)
            print('BMSD Score: %.4f' % (bmsd_score))

            final_score = 0.0
            for r, s1, s2 in zip([1.0, 1.2, 1.3, 1.4], base_scores,
                                 bmsd_scores):
                final_score += r * max(s1, s2)
            final_score /= 4.0

            print('[!] Final Score: %.4f' % (final_score))
        else:
            flat_preds = list(itertools.chain(*preds))
            flat_gts = list(itertools.chain(*gts))
            score, base_scores = evaluate(flat_gts, flat_preds)
            print('\n[!] Label Score: %.4f' % (score))

        return score
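The loop above uses the standard TF 1.x pattern of draining a dataset iterator until it raises tf.errors.OutOfRangeError, which marks one full pass over the validation set. A stripped-down sketch of the same pattern with a placeholder dataset:

it = tf.data.Dataset.range(10).batch(4).make_one_shot_iterator()
batch = it.get_next()
with tf.Session() as sess:
    try:
        while True:          # run until the iterator is exhausted
            print(sess.run(batch))
    except tf.errors.OutOfRangeError:
        pass                 # one full epoch is complete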
Example #7
def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore,
             visualize):
    net.eval()

    val_loss = AverageMeter()
    inputs_all, gts_all, predictions_all = [], [], []
    for data in val_loader:
        inputs, gts = data
        N = inputs.size(0)
        inputs = inputs.to(device)
        gts = gts.to(device)

        with torch.no_grad():
            outputs = net(inputs)

        predictions = outputs.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()

        val_loss.update(criterion(outputs, gts).data / N, N)

        if random.random() > train_args['val_img_sample_rate']:
            inputs_all.append(None)
        else:
            inputs_all.append(inputs.squeeze_(0).cpu())
        gts_all.append(gts.squeeze_(0).cpu().numpy())
        predictions_all.append(predictions)

    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, 21)

    if mean_iu > train_args['best_record']['mean_iu']:
        train_args['best_record']['val_loss'] = val_loss.avg
        train_args['best_record']['epoch'] = epoch
        train_args['best_record']['acc'] = acc
        train_args['best_record']['acc_cls'] = acc_cls
        train_args['best_record']['mean_iu'] = mean_iu
        train_args['best_record']['fwavacc'] = fwavacc

    val_visual = []
    for data in zip(inputs_all, gts_all, predictions_all):
        if data[0] is None:
            continue
        input_pil = restore(data[0])
        gt_pil = colorize_mask(data[1])
        predictions_pil = colorize_mask(data[2])
        val_visual.extend([visualize(input_pil.convert('RGB')),
                           visualize(gt_pil.convert('RGB')),
                           visualize(predictions_pil.convert('RGB'))])
    val_visual = torch.stack(val_visual, 0)
    val_visual = make_grid(val_visual, nrow=3, padding=5)

    print('--------------------------------------------------------------------')
    print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
        epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

    print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
        train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
        train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

    print('--------------------------------------------------------------------')

    net.train()
    return val_loss.avg, val_visual
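AverageMeter is not defined in any of these snippets; a minimal sketch of the usual implementation, assumed here, keeps a count-weighted running average:

class AverageMeter(object):
    """Tracks a running average, weighted by the count passed to update()."""
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0
    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count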
Example #8
def validate(val_loader, net, criterion, optimizer, epoch, train_args, restore, visualize):
    net.eval()

    val_loss = AverageMeter()
    inputs_all, gts_all, predictions_all = [], [], []
    val_visual = None  # only built when a new best record is reached (see below)

    for vi, data in enumerate(val_loader):
        inputs, gts = data
        N = inputs.size(0)
        inputs = Variable(inputs).cuda()
        gts = Variable(gts).cuda()
        
        with torch.no_grad():
            outputs = net(inputs)
        
        predictions = outputs.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()

        val_loss.update(criterion(outputs, gts).data / N, N)

        if random.random() > train_args['val_img_sample_rate']:
            inputs_all.append(None)
        else:
            inputs_all.append(inputs.squeeze_(0).cpu())
        gts_all.append(gts.squeeze_(0).cpu().numpy())
        predictions_all.append(predictions)

    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, 21)

    if mean_iu > train_args['best_record']['mean_iu']:
        train_args['best_record']['val_loss'] = val_loss.avg
        train_args['best_record']['epoch'] = epoch
        train_args['best_record']['acc'] = acc
        train_args['best_record']['acc_cls'] = acc_cls
        train_args['best_record']['mean_iu'] = mean_iu
        train_args['best_record']['fwavacc'] = fwavacc
        snapshot_name = 'epoch_%d_loss_%.5f_acc_%.5f_acc-cls_%.5f_mean-iu_%.5f_fwavacc_%.5f_lr_%.10f' % (
            epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc, optimizer.param_groups[1]['lr']
        )
        #torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, snapshot_name + '.pth'))
        #torch.save(optimizer.state_dict(), os.path.join(ckpt_path, exp_name, 'opt_' + snapshot_name + '.pth'))

        if train_args['val_save_to_img_file']:
            pass  # saving validation images to disk is disabled in this example
            #to_save_dir = os.path.join(ckpt_path, exp_name, str(epoch))
            #check_mkdir(to_save_dir)

        val_visual = []
        for idx, data in enumerate(zip(inputs_all, gts_all, predictions_all)):
            if data[0] is None:
                continue
            input_pil = restore(data[0])
            gt_pil = colorize_mask(data[1])
            predictions_pil = colorize_mask(data[2])
            if train_args['val_save_to_img_file']:
                pass  # image saving disabled in this example
                #input_pil.save(os.path.join(to_save_dir, '%d_input.png' % idx))
                #predictions_pil.save(os.path.join(to_save_dir, '%d_prediction.png' % idx))
                #gt_pil.save(os.path.join(to_save_dir, '%d_gt.png' % idx))
            val_visual.extend([visualize(input_pil.convert('RGB')), visualize(gt_pil.convert('RGB')),
                               visualize(predictions_pil.convert('RGB'))])
        val_visual = torch.stack(val_visual, 0)
        val_visual = vutils.make_grid(val_visual, nrow=3, padding=5)

    print('--------------------------------------------------------------------')
    print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
        epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))

    print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
        train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
        train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))

    print('--------------------------------------------------------------------')

    net.train()
    return val_loss.avg, val_visual
Example #9
def validate(val_sets, net, criterion, optimizer, epoch, train_args):
    net.eval()

    val_loss = AverageMeter()
    inputs_all, gts_all, predictions_all = [], [], []

    for val_set in val_sets:

        data = val_set['data']
        datashape = data.shape[1:]

        zeropad_shape = np.ceil(np.divide(datashape, 8)).astype(int) * 8
        p = zeropad_shape - datashape  # total padding per axis
        p_b = np.ceil(p / 2).astype(int)  # padding before image
        p_a = np.floor(p / 2).astype(int)  # padding after image

        data_pad = np.pad(data, ((0, 0), (p_b[0], p_a[0]), (p_b[1], p_a[1]),
                                 (p_b[2], p_a[2])),
                          mode='constant',
                          constant_values=((0, 0), (0, 0), (0, 0), (0, 0)))

        inputs = data_pad[:5, :, :, :]  # first five channels are the input modalities
        inputs = np.expand_dims(inputs, axis=0)

        labels = data_pad[5:6, :, :, :]
        labels[labels != 0] = 1
        labels = np.int64(labels)
        labels = np.eye(2)[labels]
        labels = np.moveaxis(labels, -1, 1)
        labels = np.float32(labels)

        inputs = torch.from_numpy(inputs)
        labels = torch.from_numpy(labels)

        N = inputs.size(0)  # batch-size
        inputs = Variable(inputs).cuda()
        labels = Variable(labels).cuda()

        with torch.no_grad():
            outputs = net(inputs)

        loss = criterion(outputs, labels) / N

        val_loss.update(loss.data, N)

        predictions = outputs.data.max(1)[1].squeeze_(1).squeeze_(
            0).cpu().numpy()
        p_up = predictions.shape - p_a
        predictions = predictions[p_b[0]:p_up[0], p_b[1]:p_up[1],
                                  p_b[2]:p_up[2]]

        gts_all.append(data[5:6, :, :, :].squeeze())
        predictions_all.append(predictions)

    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, 2)  # binary segmentation: 2 classes

    if mean_iu > train_args['best_record']['mean_iu']:
        train_args['best_record']['val_loss'] = val_loss.avg
        train_args['best_record']['epoch'] = epoch
        train_args['best_record']['acc'] = acc
        train_args['best_record']['acc_cls'] = acc_cls
        train_args['best_record']['mean_iu'] = mean_iu
        train_args['best_record']['fwavacc'] = fwavacc
        snapshot_name = 'epoch_%d_loss_%.5f_mean-iu_%.5f_lr_%.10f' % (
            epoch, val_loss.avg, mean_iu, optimizer.param_groups[0]['lr'])
        torch.save(net.state_dict(), os.path.join(savedir_nets1,
                                                  'bestnet.pth'))
        torch.save(optimizer.state_dict(),
                   os.path.join(savedir_nets1, 'bestnet_opt.pth'))

        torch.save(net.state_dict(),
                   os.path.join(savedir_nets1, snapshot_name + '.pth'))
        torch.save(optimizer.state_dict(),
                   os.path.join(savedir_nets1, snapshot_name + '_opt.pth'))

    print(
        '--------------------------------------------------------------------')
    print(
        '[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]'
        % (epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))
    print(
        'best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]'
        % (train_args['best_record']['val_loss'],
           train_args['best_record']['acc'],
           train_args['best_record']['acc_cls'],
           train_args['best_record']['mean_iu'],
           train_args['best_record']['fwavacc'],
           train_args['best_record']['epoch']))
    print(
        '--------------------------------------------------------------------')

    net.train()
    return
Example #10
	def call(self, env):
		# Bind each assignment lazily, then evaluate the body in the extended environment.
		for assign in self.assigns:
			env[assign.name] = closure(assign.value, env)
		return evaluate(self.expr, env)
Example #11
	def evaluate(self, env):
		return not evaluate(self.value, env)
Example #12
	def evaluate(self, env):
		return evaluate(self.v1, env) >= evaluate(self.v2, env)
Example #13
def validate(inputs, labels, net, criterion, optimizer, epoch, train_args):
    net = net.eval()

    val_loss = AverageMeter()
    inputs_all, gts_all, predictions_all = [], [], []

    for idx in range(0, len(inputs) - 1, 2):  # process validation samples in pairs

        input1 = inputs[idx]
        input2 = inputs[idx+1]
        label1 = labels[idx]
        label2 = labels[idx+1]

        input = np.concatenate((input1, input2), axis=0)
        label = np.concatenate((label1, label2), axis=0)

        input_t = torch.from_numpy(input)
        label_t = torch.from_numpy(label)

        N = input_t.size(0)  # batch-size
        input_t = Variable(input_t).cuda()
        label_t = Variable(label_t).cuda()

        with torch.no_grad():
            output = net(input_t)

        loss = criterion(output, label_t)

        val_loss.update(loss.data, N)

        predictions = output.data.max(1)[1].squeeze_(1).squeeze_(0).cpu().numpy()
        label = np.argmax(label, axis=1)

        gts_all.append(label.squeeze())
        predictions_all.append(predictions)

    acc, acc_cls, mean_iu, fwavacc = evaluate(predictions_all, gts_all, 4)

    if mean_iu > train_args['best_record']['mean_iu']:
        train_args['best_record']['val_loss'] = val_loss.avg
        train_args['best_record']['epoch'] = epoch
        train_args['best_record']['acc'] = acc
        train_args['best_record']['acc_cls'] = acc_cls
        train_args['best_record']['mean_iu'] = mean_iu
        train_args['best_record']['fwavacc'] = fwavacc
        snapshot_name = 'epoch_%d_loss_%.5f_mean-iu_%.5f_lr_%.10f' % (
            epoch, val_loss.avg, mean_iu, optimizer.param_groups[0]['lr'])
        torch.save(net.state_dict(), os.path.join(savedir_nets2, snapshot_name + '.pth'))
        torch.save(optimizer.state_dict(), os.path.join(savedir_nets2, snapshot_name + '_opt.pth'))
        torch.save(net.state_dict(), os.path.join(savedir_nets2, 'bestnet.pth'))
        torch.save(optimizer.state_dict(), os.path.join(savedir_nets2, 'bestnet_opt.pth'))

    print('--------------------------------------------------------------------')
    print('[epoch %d], [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f]' % (
        epoch, val_loss.avg, acc, acc_cls, mean_iu, fwavacc))
    print('best record: [val loss %.5f], [acc %.5f], [acc_cls %.5f], [mean_iu %.5f], [fwavacc %.5f], [epoch %d]' % (
        train_args['best_record']['val_loss'], train_args['best_record']['acc'], train_args['best_record']['acc_cls'],
        train_args['best_record']['mean_iu'], train_args['best_record']['fwavacc'], train_args['best_record']['epoch']))
    print('--------------------------------------------------------------------')

    net.train()
    return
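The segmentation-style evaluate(predictions_all, gts_all, num_classes) helper used in Examples #7, #8, #9, and #13 is likewise not shown. A sketch of the usual confusion-matrix implementation behind acc, acc_cls, mean_iu, and fwavacc, assuming integer label maps with values in [0, num_classes):

import numpy as np

def _fast_hist(pred, gt, n):
    # n x n confusion matrix accumulated over valid pixels only.
    mask = (gt >= 0) & (gt < n)
    return np.bincount(n * gt[mask].astype(int) + pred[mask],
                       minlength=n ** 2).reshape(n, n)

def evaluate(predictions, gts, num_classes):
    hist = np.zeros((num_classes, num_classes))
    for pred, gt in zip(predictions, gts):
        hist += _fast_hist(pred.flatten(), gt.flatten(), num_classes)
    acc = np.diag(hist).sum() / hist.sum()                  # overall pixel accuracy
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))  # mean per-class accuracy
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)                                # mean intersection-over-union
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()         # frequency-weighted IoU
    return acc, acc_cls, mean_iu, fwavacc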