Example No. 1
def main():
    optimize(systems)

    # Display all the systems
    print('Autonomous Systems:')
    print()
    print(
        tabulate([[asys, asys.local_preference] for asys in systems],
                 headers=['Autonomous System', 'Local Preference'],
                 tablefmt='pretty'))
    print()

    # Display all the neighbours
    for asys in systems:
        print(f'Neighbours of {asys}:')
        print()
        print(
            tabulate(asys.display_neighbour_info(),
                     headers=['Autonomous System', 'Weight'],
                     tablefmt='pretty'))
        print()

    # Display all the Routing Tables
    for asys in systems:
        print(f'Routing Table for {asys}:')
        print()
        print(
            tabulate(
                asys.display_routing_table(),
                headers=['Autonomous System', 'Weight', 'Hops', 'Next Jump'],
                tablefmt='pretty'))
        print()
Example No. 2
def start_intensively_optimal_construct_Q_from(P, internal=5, rounds=5):
    '''
    Construct an initial Q for further refinement.
    '''

    lengths = q_config[0]["side_lengths"]
    p0 = P[0]
    p1 = P[1]
    q1 = p0 + (p1 - p0) / np.linalg.norm(p1 - p0) * lengths[0]

    Q = np.array([P[0], q1])
    Q = kabsched_Q(Q, P[0:2])

    for i in range(2, P.shape[0]):
        print('now:', i)
        tmp_p = P[0:i + 1]
        Q = expand_Q_from(Q, tmp_p)
        for _ in range(5):
            Q = expand_Q_from(Q[0:-1], tmp_p)
        if i < 500:
            Q = optimize(Q, tmp_p, 5)
        elif i % internal == 0:
            Q = optimize(Q, tmp_p, rounds)

    print(rmsd(Q, P))
    print("lower bound:", lower_bound(Q, P))

    return Q
Example No. 3
def source(source_file: SourceFile, args: Args) -> List[Instruction]:
    code: List[Instruction] = []
    errors: List[ParseError] = []
    span = source_file.span()
    if args.assertions:
        # An assertion at the start makes the property tests happy
        code.append(StartTapeAssertion(Span(source_file, 0, 0)))
    for sub in _split_on(span, set(['\n'])):
        try:
            code += _line(sub, args)
        except ParseError as err:
            errors.append(err)
    loops = []
    for instr in code:
        if instr.loop_level_change() == 1:
            loops.append(instr)
        elif instr.loop_level_change() == -1:
            if len(loops):
                loops.pop(-1)
            else:
                errors.append(SingleParseError('Unmatched "]"', instr.span()))
        elif instr.loop_level_change() != 0:
            assert False, 'Invalid value ' + str(
                instr.loop_level_change()) + ' for loop level change'
    for instr in loops:
        errors.append(SingleParseError('Unmatched "["', instr.span()))
    if errors:
        raise MultiParseError(errors)
    if args.optimize:
        optimize.optimize(code)
    return code
Example No. 4
def initialize(dictionary, basic_vars, non_basic_vars):
    m, n = np.shape(dictionary)
    # If every constant term is non-negative, the dictionary is already
    # feasible and no auxiliary phase is needed.
    has_neg = any(dictionary[:-1, 0] < 0)
    if not has_neg:
        return dictionary, basic_vars, non_basic_vars, False
    # Phase one: append an auxiliary variable (a column of ones) and replace
    # the objective row with "maximize -x0".
    init_dict = np.vstack((np.c_[dictionary[:-1], np.ones((m - 1, 1))], np.r_[np.zeros(n), -1]))
    init_non_basic_vars = np.r_[non_basic_vars, 0]
    init_basic_vars = np.array(basic_vars)
    # Enter the auxiliary variable; leave on the row with the most negative
    # constant, which makes the dictionary feasible after a single pivot.
    entering, leaving = (len(init_non_basic_vars),
                         min(range(m - 1), key=lambda i: init_dict[i, 0]))

    pivot.pivot_for(init_dict, init_basic_vars,
                    init_non_basic_vars, entering, leaving)
    optimize.optimize(init_dict, init_basic_vars, init_non_basic_vars)
    return init_dict, init_basic_vars, init_non_basic_vars, True
Example No. 5
def test_train():
    loss_arr = []

    for preds, losses, i, epoch in optimize(content_targets,
                                            style_target,
                                            content_weight,
                                            style_weight,
                                            tv_weight,
                                            vgg_path,
                                            epochs=1,
                                            print_iterations=1,
                                            batch_size=4,
                                            save_path='ckp_temp/fns.ckpt',
                                            slow=False,
                                            learning_rate=1e-3,
                                            debug=False,
                                            type=0,
                                            save=False):
        style_loss, content_loss, tv_loss, loss = losses
        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        loss_arr.append(loss)
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)

    if len(loss_arr) > 2:
        loss_arr.remove(min(loss_arr))
        loss_arr.remove(max(loss_arr))

    growth = get_growth(loss_arr)
    print('growth: %f' % growth)
    if growth >= 0:
        print('TEST TRAINING FAILED, LOSS IS NOT DECLINING.')
        exit(1)
    else:
        print('TEST TRAINING SUCCESS.')
Example No. 6
def get_clusters(path):
  sizes = get_sizes(os.path.join(path, "sizes.json"))
  mois = get_mois(path, sizes)

  clusters = optimize(mois)

  return clusters
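
The get_sizes and get_mois helpers are not shown here. Since the sizes live in a sizes.json file, get_sizes is presumably little more than a JSON load; a minimal sketch under that assumption:

import json

def get_sizes(path):
    # Hypothetical helper: parse sizes.json and return its contents.
    with open(path) as f:
        return json.load(f)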
Example No. 7
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    content_targets = list_files(options.train_path)
    kwargs = {
            "epochs":options.epochs,
            "print_iterations":options.checkpoint_iterations,
            "batch_size":options.batch_size,
            "checkpoint_dir":os.path.join(options.checkpoint_dir,'fns.ckpt'),
            "summary_dir":options.summary_dir,
            "learning_rate":options.learning_rate
            }
    args = [
            content_targets,
            style_target,
            options.content_weight,
            options.style_weight,
            options.tv_weight,
            options.vgg_path
            ]
    start_time = time.time()
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        print('{0} ---------- Epoch: {1}, Iteration: {2}----------'.format(time.ctime(), epoch, i))
        print('Total loss: {0}, Style loss: {1}, Content loss: {2}, TV loss: {3}'
                .format(loss, style_loss, content_loss, tv_loss))
    print("Training complete! Total training time is {0} s".format(time.time() - start_time))
Example No. 8
def dominate(mu, cov, cost, prices, risk_tolerance):

    # start date for the base portfolio: always the past 6 months (i.e. the rebalance period)
    start_date = (datetime.now() -
                  relativedelta(months=6)).strftime("%Y-%m-%d")

    # get the number of days in the backtest period ... to determine target returns and variances later
    days = business_days(start_date, datetime.now().strftime("%Y-%m-%d"))

    # call backtest to get the value of the portfolio
    portfolio_value = back_test(portfolio,
                                start_date,
                                end_date=None,
                                dollars=None)[0].sum(axis=1)

    # calculate portfolio returns
    portfolio_returns = (portfolio_value / portfolio_value.shift(1) -
                         1).dropna()

    # assign the target return and variance
    target_returns = (gmean(portfolio_returns + 1, axis=0) - 1) * days
    target_variance = portfolio_returns.var() * days

    mu_p2 = mu[0] if single_period else mu[1]
    cov_p2 = cov[0] if single_period else cov[1]

    soln, agg_soln = optimize(mu=(mu[0], mu_p2),
                              sigma=(cov[0], cov_p2),
                              alpha=(0.05, 0.10),
                              return_target=(target_returns, target_returns),
                              costs=cost,
                              prices=prices,
                              gamma=risk_tolerance[2])

    return soln, agg_soln
Example No. 9
def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    w = np.zeros(X_train.shape[0]).reshape(X_train.shape[0], 1)
    b = 0

    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate)

    w = parameters["w"]
    b = parameters["b"]

    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    
    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test, 
         "Y_prediction_train" : Y_prediction_train, 
         "w" : w, 
         "b" : b,
         "learning_rate" : learning_rate,
         "num_iterations": num_iterations}
    
    return d
Example No. 10
    def generate(self, content, init_image, optimizer_iterations, optimizer_checkpoint):
        if not self.content_is_const:
            self.content.set_value(content)
        self.generated_image.set_value(init_image)
    
        # scipy optimize requires that the parameters are of type float64
        x0 = self.generated_image.get_value().astype('float64')
    
        # our record. Start with the style and the content.
        style2 = self.style[:, :, :x0.shape[2], :x0.shape[3]]
        if style2.shape[2] < x0.shape[2]:
            style2 = np.concatenate((style2, np.zeros((x0.shape[0], x0.shape[1], x0.shape[2] - self.style.shape[2], style2.shape[3]))), axis=2)
        if style2.shape[3] < x0.shape[3]:
            style2 = np.concatenate((style2, np.zeros((x0.shape[0], x0.shape[1], style2.shape[2], x0.shape[3] - self.style.shape[3]))), axis=3)
    
        xs = [content, style2, x0]
    
        overall_start_time = time.time()
        last_loss_val = 0.
        for x, i, loss_val, iter_duration in optimize.optimize(x0, self.generated_image, self.f_outputs, num_iterations=optimizer_iterations, checkpoint_iterations=optimizer_checkpoint):
            if self.print_progress:
                print('iteration %d' % (i,))
                print('Current loss value: %f' % (loss_val,))
                print('Iteration %d completed in %fs' % (i, iter_duration))
            xs.append(x)
            last_loss_val = loss_val

        if self.print_progress:
            overall_end_time = time.time()
            print('Optimization completed in %fs' % (overall_end_time - overall_start_time,))
    
        return xs, last_loss_val
Example No. 11
def main():
    """Compiles program.scratch to a scratch project."""
    parser = Lark.open("grammar.lark",
                       parser="lalr",
                       transformer=ScratchTransformer,
                       postlex=ScratchIndenter())

    with open("program.scratch") as source_file:
        source_code = source_file.read()
    parsed = parser.parse(source_code)
    # print(parsed)
    parsed = optimize(parsed)
    parsed = scratchify(parsed)

    backdrop_md5 = md5sum("resources/backdrop.svg")

    for i in parsed["targets"]:
        i["costumes"] = [{
            "assetId": backdrop_md5,
            "name": "backdrop",
            "md5ext": f"{backdrop_md5}.svg",
            "dataFormat": "svg",
            "rotationCenterX": 240,
            "rotationCenterY": 180
        }]

    try:
        with open("parsed.json", "w") as parsed_json_file:
            json.dump(parsed, parsed_json_file, indent="\t")
    except ValueError:
        print(parsed)

    create_project_files(parsed)
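
The md5sum helper is not shown; a plausible sketch, assuming it returns the hex digest of the file's bytes (the snippet itself names the asset "{md5}.svg" via md5ext):

import hashlib

def md5sum(path):
    # Hypothetical helper: hex MD5 digest of a file's contents,
    # matching the md5ext naming convention used above.
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()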
Example No. 12
def main():
    opts = get_opts()

    style_target = read_img(opts.style)
    content_targets = glob.glob('%s/*' % opts.train_path)
    style_name = os.path.splitext(os.path.basename(opts.style))[0]

    kwargs = {
        "epochs": opts.epochs,
        "print_iterations": opts.checkpoint_iterations,
        "batch_size": opts.batch_size,
        "save_path": os.path.join(opts.checkpoint_dir, '%s.ckpt' % style_name),
        "learning_rate": opts.learning_rate
    }

    args = [
        content_targets, style_target, opts.content_weight, opts.style_weight,
        opts.tv_weight, opts.net_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)

        print('style: %s, content:%s, tv: %s' % to_print)
        if opts.output:
            preds_path = '%s/%s_%s.png' % (opts.output_dir, epoch, i)

            quickpaint.eval_mul_dims(opts.output, preds_path,
                                     opts.checkpoint_dir)

    cmd_text = 'python quickpaint.py --checkpoint %s ...' % opts.checkpoint_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example No. 13
def main():
    ### Note the reference here: this is actually argparse.parse_args()!
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    ###

    ### Here we use our own get_img to pull the style image out of the options!
    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]
    ###

    ### Process the incoming options!
    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }
    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]
    ###

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
Example No. 14
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          num_iterations=2000,
          learning_rate=0.5,
          print_cost=False):
    w, b = initialize_with_zeros(X_train.shape[0])
    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations,
                                        learning_rate, print_cost)
    w = parameters["w"]
    b = parameters["b"]
    # Predict test/train set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    # Print train/test Errors
    print("train accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations
    }
    return d
Example No. 15
	def getTimepointSlicesAt(self, slice):
		"""
		Sets the slice to show
		"""
		self.slice = slice
		# if we're showing each slice of one timepoint
		# instead of one slice of each timepoint, call the
		# appropriate function
		self.slices = []
		if not self.showTimepoints:
			return self.setTimepoint(self.timepoint)
		
		count = self.dataUnit.getNumberOfTimepoints()
		for tp in range(0, count):
			if self.dataUnit.isProcessed():
				image = self.dataUnit.doPreview(self.slice, 1, tp)
				image.Update()
				self.ctf = self.dataUnit.getSourceDataUnits()[0].getColorTransferFunction()
				Logging.info("Using ", image, "for gallery", kw = "preview")
			else:
				image = self.dataUnit.getTimepoint(tp)
				x, y, z = self.dataUnit.getDimensions()
				image = optimize.optimize(image, updateExtent = (0, x - 1, 0, y - 1, self.slice, self.slice))
				self.ctf = self.dataUnit.getColorTransferFunction()

			image = lib.ImageOperations.getSlice(image, self.slice)
			image.Update()
			tp = vtk.vtkImageData()
			tp.DeepCopy(image)
			self.slices.append(tp)
			
		self.calculateBuffer()
		self.updatePreview()
Example No. 16
def simplex(dictionary, basic_vars, non_basic_vars):
    dict_, basic, non_basic = initialize_and_reconstruct(
        dictionary, basic_vars, np.array(non_basic_vars))
    res, _ = optimize(dict_, basic, non_basic)
    if res == UNBOUNDED:
        raise Infeasible()
    return dict_, basic, non_basic
Example No. 17
def main():
    print("Enter main")
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    print("Get image target")
    style_target = get_img(options.style)
    if not options.slow:
        with log_time_usage("get images targets"):
            content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "debug": options.debug,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "tensorboard_dir": options.tensorboard_dir
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    print("Start training")
    with log_time_usage("Training completed in"):
        for preds, losses, i, epoch, time_info in optimize(*args, **kwargs):
            style_loss, content_loss, tv_loss, loss = losses

            print(
                'Epoch %d, Iteration: %d, Loss: %s, AVG batch time: %.2f, total_time: %.2f, ETA (in h): %.2f'
                % (epoch, i, loss, *time_info))
            to_print = (style_loss, content_loss, tv_loss)
            print('style: %s, content:%s, tv: %s' % to_print)
            if options.test:
                assert options.test_dir != False
                preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
                if not options.slow:
                    ckpt_dir = os.path.dirname(options.checkpoint_dir)
                    evaluate.ffwd_to_img(options.test, preds_path,
                                         options.checkpoint_dir)
                else:
                    # TODO: img is not defined
                    # save_img(preds_path, img)
                    pass
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example No. 18
def main():
    print('ml5.js Style Transfer Training!')
    print('Note: This training will take a couple of hours.')
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow":options.slow,
        "epochs":options.epochs,
        "print_iterations":options.checkpoint_iterations,
        "batch_size":options.batch_size,
        "save_path":os.path.join(options.checkpoint_dir,'fns.ckpt'),
        "learning_rate":options.learning_rate,
    }
    
    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    print('Training is starting!...')
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir,epoch,i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test,preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
    print('Converting model to ml5js')
    dump_checkpoints(kwargs['save_path'], options.model_dir)
    print('Done! Checkpoint saved. Visit https://ml5js.org/docs/StyleTransfer for more information')
Example No. 19
def main():
    args = docopt.docopt(__doc__)
    data = read_data(args['<in>'])
    score, result = solve(data)
    print(score)
    score2, result2 = optimize(result, data)
    print(score2)
    format_solution(args['<out>'], result2, prefix=str(score))
Example No. 20
def main():
    check_version()
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow":options.slow,
        "epochs":options.epochs,
        "print_iterations":options.checkpoint_iterations,
        "batch_size":options.batch_size,
        "save_path":os.path.join(options.checkpoint_dir,'fns.ckpt'),
        "learning_rate":options.learning_rate,
        "device":options.device,
        "total_iterations":options.total_iterations,
        "base_model_path":options.base_model_path,
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        sys.stdout.flush()
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir,epoch,i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test,preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint-dir %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example No. 21
def do_optimization():
    global _optimization_status
    # last opt still running
    if _optimization_status == "running":
        return jsonify({"result": "error",
                        "reason": "last optimization still running"})
    try:
        # lock and do optimization
        _optimization_status = "running"

        kids = request.json
        _sqllog.info("kids: %s", kids)

        orders_by_dt = defaultdict(list)

        conn = pymssql.connect(**sqlinfo)  # connection
        for kid in kids:
            cursor = conn.cursor()
            cursor.execute("""
                SELECT
                    ov.M1 AS M1,
                    ov.M2 AS M2,
                    ov.T1 AS T1,
                    ov.T2 AS T2,
                    ov.S1 AS S1,
                    ov.S2 AS S2,
                    ov.Wire AS Wire,
                    ov.Require_Date as Require_Date,
                    ov.do_time as do_time
                FROM  optimization_view AS ov
                WHERE ov.kid = %s
            """, kid)
            *parts, reqdate, do_time = cursor.fetchall()[0]
            reqdate = reqdate.strftime("%Y-%m-%d")
            orders_by_dt[reqdate].append(Kanban(kid, tuple(parts), do_time))

        cursor = conn.cursor(as_dict=True)
        cursor.execute("""
            SELECT
                bw.wcenter_No AS machineId,
                bw.ParentWcenter_No AS pwc,
                bw.RouteCode AS rc
            FROM MES_BASE_WCENTER AS bw
        """)
        machines = cursor.fetchall()
        conn.close()

        # result = {reqdate: optimize(orders, machines)
        #           for reqdate, orders in orders_by_dt.items()}

        result = [{"date": reqdate, "result": optimize(orders, machines)}
                  for reqdate, orders in orders_by_dt.items()]

        _optimization_status = "idle"

        return jsonify(result)
    finally:
        _optimization_status = "idle"
Example No. 22
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          num_iterations=2000,
          learning_rate=0.5,
          print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously
    
    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations
    
    Returns:
    d -- dictionary containing information about the model.
    """

    ### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations,
                                        learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples (≈ 2 lines of code)
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    ### END CODE HERE ###

    # Print train/test Errors
    print("train accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations
    }

    return d
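
This model function, like the other model variants in this listing, leans on optimize and predict helpers defined elsewhere. A minimal sketch consistent with the calling convention, assuming plain batch gradient descent on the logistic-regression cost:

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    # Batch gradient descent on the cross-entropy cost.
    m = X.shape[1]
    costs = []
    for i in range(num_iterations):
        A = sigmoid(np.dot(w.T, X) + b)    # forward pass, shape (1, m)
        cost = -np.mean(Y * np.log(A) + (1 - Y) * np.log(1 - A))
        dw = np.dot(X, (A - Y).T) / m      # gradient w.r.t. w
        db = np.sum(A - Y) / m             # gradient w.r.t. b
        w = w - learning_rate * dw
        b = b - learning_rate * db
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %i: %f" % (i, cost))
    return {"w": w, "b": b}, {"dw": dw, "db": db}, costs

def predict(w, b, X):
    # Threshold the predicted probabilities at 0.5.
    A = sigmoid(np.dot(w.T, X) + b)
    return (A > 0.5).astype(float)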
Example No. 23
def main(style,
         test=False,
         test_dir='test',
         train_path=TRAIN_PATH,
         slow=False,
         epochs=NUM_EPOCHS,
         checkpoint_iterations=CHECKPOINT_ITERATIONS,
         batch_size=BATCH_SIZE,
         checkpoint_dir=CHECKPOINT_DIR,
         learning_rate=LEARNING_RATE,
         content_weight=CONTENT_WEIGHT,
         style_weight=STYLE_WEIGHT,
         tv_weight=TV_WEIGHT,
         vgg_path=VGG_PATH):
    #parser = build_parser()
    #options = parser.parse_args()
    #check_opts(options)

    style_target = get_img(style)
    if not slow:
        content_targets = _get_files(train_path)
    elif test:
        content_targets = [test]

    kwargs = {
        "slow": slow,
        "epochs": epochs,
        "print_iterations": checkpoint_iterations,
        "batch_size": batch_size,
        "save_path": checkpoint_dir,
        "learning_rate": learning_rate
    }

    if slow:
        if epochs < 10:
            kwargs['epochs'] = 1000
        if learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, content_weight, style_weight, tv_weight,
        vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if test:
            assert test_dir != False
            preds_path = '%s/%s_%s.png' % (test_dir, epoch, i)
            if not slow:
                ckpt_dir = os.path.dirname(checkpoint_dir)
                evaluate.ffwd_to_img(test, preds_path, checkpoint_dir)
            else:
                save_img(preds_path, img)
Example No. 24
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "device_and_number": options.device_and_number
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    import time
    from datetime import datetime
    start_time = time.time()
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        delta_time, start_time = time.time() - start_time, time.time()
        print(
            'Current Time = {}; Time Elapsed = {}; Epoch = {}; Iteration = {}; Loss = {}'
            .format(datetime.now().strftime("%Y %B %d, %H:%M:%S"), delta_time,
                    epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('Loss values: style = %s; content = %s; tv = %s' % to_print)
        sys.stdout.flush()
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:  # if uses GPU, uses RAM that it doesn't have, so it's slow here
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example No. 25
def Newton_hook(x0,f,df,delta,N_max,tol_f,tol_x,alpha,beta,delta_min):
    k = 0
    x = copy(x0)
    converged = 0
    diag = []
    while not(converged):
        if k > N_max or delta < delta_min:
            break
        # compute the Newton-Raphson update step
        r = f(x)
        res = np.linalg.norm(r,2)
        J = df(x)
        # linalg.solve uses LUP decomposition
        dx = -np.linalg.solve(J,r)
        size = np.linalg.norm(dx,2)

        diag.append([k,res,size,delta])
        print('Step %d, delta=%e, res=%e size of NR step=%e ...' % (k,delta,res,size))        

        if size < delta:            
            accept = try_step(x,dx,f,res) # set z <- x+dx, compute f(z), return accept = 1 iff |f(z)|< res
            if accept:
                # the Newton-Raphson step reduced the residual, accept it
                x += dx
                k += 1
                print('Accepted full NR step.')
            else:
                # the Newton-Raphson step increased the residual, reject it and decrease delta
                delta *= alpha
                print('Rejected full NR step, resetting delta=%e.' %(delta))
        else:
            # the Newton-Raphson step lies outside the trust region
            dx = optimize(x,dx,f,df,delta)
            accept = try_step(x,dx,f,res)
            if accept:
                # the update step on the edge of the trust region gives a smaller residual, accept and increase delta
                x += dx
                delta *= beta
                k += 1
                print('Accepted constrained minimization step, resetting delta=%e.' % (delta))
            else:
                # the update step on the edge of the trust region gives a larger residual, reject and reduce delta
                delta *= alpha
                print('Rejected constrained minimization step, resetting delta=%e.' % (delta))

                
        r = f(x)
        res = np.linalg.norm(r,2)
        err = np.linalg.norm(dx,2)
        if res < tol_f and err < tol_x:
            converged = 1

    if converged == 0:
        print('No convergence after %d iterations, delta = %e.' % (k,delta))

    return x,np.asarray(diag)
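
try_step is not shown, but the inline comment above pins down its contract: set z <- x + dx, evaluate f(z), and accept iff the residual norm shrinks. A sketch under that assumption:

import numpy as np

def try_step(x, dx, f, res):
    # Accept the trial step iff it reduces the 2-norm of the residual.
    z = x + dx
    return 1 if np.linalg.norm(f(z), 2) < res else 0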
Example No. 26
def main():
    parser = build_parser()
    options = parser.parse_args()
    if not os.path.exists(options.test_dir):
        os.mkdir(options.test_dir)
    if not os.path.exists(options.checkpoint_dir):
        os.mkdir(options.checkpoint_dir)

    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "gpu_fraction": options.gpu_fraction
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example No. 27
def generate_samples(save_path, generator, encoder, target, loss_func,
                     n_samples):
    mean_loss = 0
    for i in range(n_samples):
        generated_image, _, loss = optimize(generator, encoder, target,
                                            loss_func)
        generated_image = to_pil_image(generated_image)
        generated_image.save('{}_{}.png'.format(save_path, i))
        mean_loss += loss / n_samples
    with open('{}_metrics.json'.format(save_path), 'w') as f:
        f.write(json.dumps({'mean_final_loss': mean_loss}, indent=2))
Example No. 28
def model(X_train,
          Y_train,
          X_test,
          Y_test,
          num_iterations=2000,
          learning_rate=0.5,
          print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # Initialize parameters with zeros
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations,
                                        learning_rate, print_cost)

    # Retrieve parameters w and b from the "parameters" dictionary
    w = parameters["w"]
    b = parameters["b"]

    # Predict on the test and training set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    print("train accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(
        100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations
    }
    return d
Example No. 29
def opt(reform):
    return optimize(
        input_dict,
        "mean_pct_loss",
        reform,
        verbose=False,
        seed=0,
        # Reforms don't always improve upon one another with the
        # default tolerance of 0.01.
        tol=0.0001,
    )
Example No. 30
def model(train_X, train_Y, test_X, test_Y, num_iteration, learning_rate=0.5):
    m = train_X.shape[1]
    dim = train_X.shape[0]
    W_ini, b_ini = initialize_with_zeros(dim)
    W, b = optimize(W_ini, b_ini, train_X, train_Y, num_iteration,
                    learning_rate)
    train_Y_hat = predict(W, b, train_X)
    test_Y_hat = predict(W, b, test_X)
    train_accuracy = (1 - np.mean(abs(train_Y - train_Y_hat))) * 100
    test_accuracy = (1 - np.mean(abs(test_Y - test_Y_hat))) * 100
    return train_accuracy, test_accuracy
Example No. 31
def check_optimizer(f):
    def check(a, b):
        return f(a) > f(b)

    a = optimize(f)
    for i in range(len(a) - 1):
        if not check(a[i], a[i + 1]):
            raise ValueError("something is wrong")
    if f(a[0]) - f(a[-1]) <= 5:
        raise ValueError("it is a bad optimizer, Mitya luzer")
    print('everything is OK, Mitya the best')
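
check_optimizer assumes a global optimize(f) that returns the sequence of iterates it visited, with f strictly decreasing along the trace and a total improvement above 5. A toy minimizer satisfying that contract, for illustration only:

def optimize(f, x0=10.0, lr=0.1, steps=50):
    # Toy gradient descent that records every iterate.
    xs = [x0]
    for _ in range(steps):
        x = xs[-1]
        grad = (f(x + 1e-6) - f(x - 1e-6)) / 2e-6  # central-difference gradient
        xs.append(x - lr * grad)
    return xs

check_optimizer(lambda x: x ** 2)  # prints 'everything is OK, Mitya the best'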
Example No. 32
    def test_optimize_multi_param(self):
        print(colored("optimize test with three params four functions", 'blue'))
        warnings.warn("This test contains nondeterministic code, please check the results manually!!", RuntimeWarning)
        functions = get_all_f(os.path.join(cwd, "results/prism_results/multiparam_synchronous_3.txt"), "prism", True)
        functions = functions[3]
        print(functions)
        d = [0.2, 0.3, 0.4, 0.1]
        print(d)
        result = optimize.optimize(functions, ["p", "q1", "q2"], [[0, 1], [0, 1], [0, 1]], d)
        print("parameter point", result[0])
        print("function values", result[1])
        print("distance", result[2])
Example No. 33
    def test_optimize_two_param(self):
        print(colored("optimize test with two params three functions", 'blue'))
        warnings.warn("This test contains nondeterministic code, please check the results manually!!", RuntimeWarning)
        functions = get_all_f(os.path.join(cwd, "results/prism_results/asynchronous_2.txt"), "prism", True)
        functions = functions[2]
        print("functions", functions)
        d = pickle_load(os.path.join(data_dir, "data.p"))
        print("data_point", d)
        result = optimize.optimize(functions, ["p", "q"], [[0, 1], [0, 1]], d)
        print("parameter point", result[0])
        print("function values", result[1])
        print("distance", result[2])
Example No. 34
def aux_optim(algorithm, run_id=0, func_id=5, dim=2, pop_size=30, max_f_evals='auto', target_error=10e-8):
    '''
        Auxiliary function for multiprocessing.
    '''
    np.random.seed()

    print("Run ID: ", run_id)
    errorHist, fitnessHist = optimize(algorithm, func_id=func_id, dim=dim, pop_size=pop_size, max_f_evals=max_f_evals,
                                      target_error=target_error, verbose=True)

    errorHist["Run"] = np.ones(errorHist.shape[0], dtype=int)*run_id
    return errorHist, fitnessHist
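
aux_optim is documented as an auxiliary function for multiprocessing; a driver along these lines would fan independent runs out over a pool (the 'DE' algorithm name is purely illustrative):

from functools import partial
from multiprocessing import Pool

if __name__ == '__main__':
    with Pool() as pool:
        # One optimize() run per run_id, executed in parallel.
        results = pool.map(partial(aux_optim, 'DE', func_id=5, dim=2), range(10))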
Example No. 35
	def updateRendering(self):
		"""
		Update the Rendering of this module
		"""
		data = self.getInput(1)
		x,y,z = self.dataUnit.getDimensions()
		data = optimize.optimize(image = data, updateExtent = (0, x-1, 0, y-1, 0, z-1))
		if data.GetNumberOfScalarComponents() > 3:
			extract = vtk.vtkImageExtractComponents()
			extract.SetInput(data)
			extract.SetComponents(1, 1, 1)
			data = extract.GetOutput()
		if data.GetNumberOfScalarComponents() > 1:
			self.luminance.SetInput(data)
			data = self.luminance.GetOutput()
		
		z = self.parameters["Slice"]
		ext = (0, x - 1, 0, y - 1, z, z)

		voi = vtk.vtkExtractVOI()
		voi.SetVOI(ext)
		voi.SetInput(data)
		slice = voi.GetOutput()
		self.geometry.SetInput(slice)         
		
		self.warp.SetInput(self.geometry.GetOutput())
		self.warp.SetScaleFactor(self.parameters["Scale"])		
		self.merge.SetGeometry(self.warp.GetOutput())
		
		if slice.GetNumberOfScalarComponents() == 1:
			maptocol = vtk.vtkImageMapToColors()
			ctf = self.getInputDataUnit(1).getColorTransferFunction()
			maptocol.SetInput(slice)
			maptocol.SetLookupTable(ctf)
			maptocol.Update()
			scalars = maptocol.GetOutput()
		else:
			scalars = slice
			
		self.merge.SetScalars(scalars)
		data = self.merge.GetOutput()
		
		if self.parameters["Normals"]:
			self.normals.SetInput(data)
			self.normals.SetFeatureAngle(self.parameters["FeatureAngle"])
			print "Feature angle=", self.parameters["FeatureAngle"]            
			data = self.normals.GetOutput()
		
		self.mapper.SetInput(data)
		self.mapper.Update()
		VisualizationModule.updateRendering(self)
		self.parent.Render()
Example No. 36
    def save(self):
        """
        Apply a series of actions from a set of (action, arg) tuples, probably
        as parsed from a URL. Each action is a code into PROCESSORS.

        Then save the mogrified image.
        """
        from settings import PROCESSORS
        from .filesystem import makedirs

        if self.im is None:
            # If we got here something very strange is going on that I can't even
            # predict.
            return  # pragma: no cover
        makedirs(self.output_path)
        for action, arg in self.actions:
            action = PROCESSORS[action]
            if self.frames:
                new_frames = []
                for frame in self.frames:
                    new_frames.append(action.process(frame, arg))
                self.frames = new_frames
            else:
                self.im = action.process(self.im, arg)

        self.im = optimize.optimize(self.im, fmt=self.format, quality=self.quality)

        kwargs = {
            'format': self.format,
            'optimize': True,
            'quality': self.quality,
        }
        if self.format == 'jpeg':
            kwargs['progressive'] = True

        if self.filename.startswith('s3://'):
            import cStringIO
            from filesystem import s3
            output = cStringIO.StringIO()
            if self.frames:
                images2gif.write_gif(output, self.frames)
            else:
                self.im.save(output, **kwargs)
            output.reset()
            s3.put_file(output, self.filename)
        else:
            if self.frames:
                images2gif.write_gif(self.filename, self.frames)
            else:
                self.im.save(self.filename, **kwargs)
Example No. 37
	def setTimepoint(self, timepoint, update = 1):
		"""
		Sets the timepoint to display
		"""
		if self.timepoint == timepoint and self.slices:
			return
		
		self.timepoint = timepoint
		if not scripting.renderingEnabled:
			return
		# if we're showing one slice of each timepoint
		# instead of each slice of one timepoint, call the
		# appropriate function
		if self.showTimepoints:
			return self.getTimepointSlicesAt(self.slice)

		if self.visualizer.getProcessedMode():
			image = self.dataUnit.doPreview(scripting.WHOLE_DATASET_NO_ALPHA, 1, self.timepoint)
			self.ctf = self.dataUnit.getSourceDataUnits()[0].getColorTransferFunction()
#			Logging.info("Using ", image, "for gallery", kw = "preview")
		else:
			image = self.dataUnit.getTimepoint(timepoint)
			self.ctf = self.dataUnit.getColorTransferFunction()

		#self.imagedata = lib.ImageOperations.imageDataTo3Component(image,ctf)
		self.imagedata = image
		self.imagedata.SetUpdateExtent(self.imagedata.GetWholeExtent())
		self.imagedata.Update()
		
		x, y, z = self.dataUnit.getDimensions()
		
		self.slices = []

		for i in range(z):
			image = optimize.optimize(image = self.imagedata, updateExtent = (0, x - 1, 0, y - 1, i, i))
			image = lib.ImageOperations.getSlice(image, i)
			
			lib.messenger.send(None, "update_progress", i / float(z), "Loading slice %d / %d for Gallery view" % (i + 1, z + 1))
			self.slices.append(image)
		
		lib.messenger.send(None, "update_progress", 1.0, "All slices loaded.")  
		self.calculateBuffer()
		if update:
			print "Updating preview"
			self.updatePreview()
			self.Refresh()
Example No. 38
	def updateRendering(self, input = None):
		"""
		Update the Rendering of this module
		"""
		self.updateMethod()
		self.updateQuality()
		self.updateInterpolation()
		self.setShading(self.parameters["UseShading"])
		
		if not input:
			input = self.getInput(1)
		x, y, z = self.dataUnit.getDimensions()

		input = optimize.optimize(image = input, updateExtent = (0, x - 1, 0, y - 1, 0, z - 1))
		
		ncomps = input.GetNumberOfScalarComponents()
		Logging.info("Number of comps=", ncomps, kw = "rendering")
		dataType = input.GetScalarType()
		if (ncomps > 1 or dataType not in [3, 5]) and self.parameters["Method"] == TEXTURE_MAPPING:
			self.setParameter("Method", 0)
			lib.messenger.send(None, "update_module_settings")
		if ncomps > 1:
			self.volumeProperty.IndependentComponentsOff()
		else:
			self.volumeProperty.IndependentComponentsOn()
			
		Logging.info("Rendering using, ", self.mapper.__class__, kw = "rendering")
		self.mapper.SetInput(input)
		if self.mapperUpdated:
			self.volume.SetMapper(self.mapper)
			self.mapperUpdated = False
		if not self.volumeAdded:
			self.parent.getRenderer().AddVolume(self.volume)
			self.volumeAdded = True
			
		VisualizationModule.updateRendering(self, input)
		self.parent.Render()
		if self.parameters["Method"] == TEXTURE_MAPPING_3D:
			if not self.mapper.IsRenderSupported(self.volumeProperty, self.renderer):
				lib.messenger.send(None, \
								"show_error", \
								"3D texture mapping not supported", \
								"Your graphics hardware does not support 3D accelerated texture mapping. \
								Please use one of the other volume rendering methods.")
Example No. 39
	def execute(self, inputs, update = 0, last = 0):
		"""
		Execute filter in input image and return output image
		"""
		if not lib.ProcessingFilter.ProcessingFilter.execute(self,inputs):
			return None

		self.eventDesc = "Converting image data to polygonal data"
		inputImage = self.getInput(1)
		inputImage.Update()
		self.scalarRange = inputImage.GetScalarRange()
		lib.messenger.send(self, "update_IsoValue")
		
		x, y, z = self.dataUnit.getDimensions()
		input = optimize.optimize(image = inputImage, updateExtent = (0, x - 1, 0, y - 1, 0, z - 1))
		self.contour.SetInput(input)
		
		self.contour.SetValue(0, self.parameters["IsoValue"])

		polyOutput = self.contour.GetOutput()

		#TODO: should decimateLevel and preserveTopology be instance variables?
		decimateLevel = self.parameters["Simplify"] 
		preserveTopology = self.parameters["PreserveTopology"] 
		if decimateLevel != 0:            
			self.decimate.SetPreserveTopology(preserveTopology)
			if not preserveTopology:
				self.decimate.SplittingOn()
				self.decimate.BoundaryVertexDeletionOn()
			else:
				self.decimate.SplittingOff()
				self.decimate.BoundaryVertexDeletionOff()
			self.decimate.SetTargetReduction(decimateLevel / 100.0)
			
			Logging.info("Decimating %.2f%%, preserve topology: %s" \
						% (decimateLevel, preserveTopology), kw = "visualizer")
			self.decimate.SetInput(polyOutput)
			polyOutput = self.decimate.GetOutput()
		
		polyOutput.Update()
		self.setPolyDataOutput(polyOutput)
		return inputImage
Example No. 40
def eval(type, index=30):
    """Executes the experiment
    
    Parameters
    ----------
    type : a string representing the type of covariance matrix to optimize against, either 'sample' or 'shrunk'
    index : benchmark index size to use
    
    Returns
    -------
    dictionary : returns a dictionary with sample statistics for the information ratio, mean excess return,
        standard deviation of excess returns, and tracking error
    """
    # get the portfolio parameters
    port_params = params.get_portfolio_params(index=index)

    # instantiate the portfolio object
    port = portfolio.Portfolio(port_params, proxy={"http": "http://proxy.jpmchase.net:8443"})

    # setup the periodicity
    roll = 60
    rollperiod = relativedelta.relativedelta(months=roll)
    outsample = 60
    outsampleperiod = relativedelta.relativedelta(months=outsample)

    # setup 
    dates = port.get_trading_dates()
    start = dates[0] + rollperiod
    end = dates[-1]

    delta = relativedelta.relativedelta(end, start)
    periods = (delta.years * 12) + delta.months

    portvalue = port.get_portfolio_historic_position_values()
    
    # constant benchmark weights
    #returns = port.get_portfolio_historic_returns()
    active = port.get_active_returns()
    bench_returns = port.get_benchmark_returns()
    bench_weights = port.get_benchmark_weights()

    expected_excess_returns = port.get_expected_excess_stock_returns()

    e = []; te = [];

    for i in xrange(roll, periods+roll+1):
        
        # setup the dates to calculate returns for the covariance matrixes
        start = dates[i-roll]
        end = dates[i]

        active_returns = active.ix[start:end]
        
        # compute the sample covariance matrix, cov of active returns
        cov = port.get_covariance_matrix(active_returns)
        
        # actual realized returns
        y = ((portvalue.ix[end:end].as_matrix() / portvalue.ix[dates[i-1]:dates[i-1]].as_matrix()) - 1)[0]
        
        # alphas
        # apparently, cvxopt.matrix requires the input ndarray to be F_CONTIGUOUS, which I discovered reading the C source code.
        # F_CONTIGUOUS is found in ndarray.flags and is a boolean which ensures a Fortran-contiguous array;
        # np.require forces that to be the case. This took me a really long time to figure out.
        a0 = np.require(expected_excess_returns.ix[end:end].transpose().as_matrix(), dtype=np.float64, requirements=['F'])
        a = matrix(a0)
        
        if type == 'sample':
            S = matrix(cov.as_matrix())
        elif type == 'shrunk':
            # compute the shrunk covariance matrix, sigma
            sigma, shrinkage = port.get_shrunk_covariance_matrix(cov)
            S = matrix(sigma.as_matrix())
        else:
            raise ValueError('Type must be either of the two strings: sample or shrunk')
        
        # get the optimized weights
        # this is horribly naive because I'm only including the constraints provided in the example;
        # I spent a considerable amount of time looking at the documentation, forums, and source
        # code trying to become comfortable with the package, to no avail
        x = op.optimize(a, S)
        
        # optimized expected active portfolio returns
        e_ = (x.T * y).sum()
        e.append(e_)
        
        # weighted benchmark returns
        b = (bench_returns.ix[end:end] * bench_weights.ix[end:end]).sum()
        
        # tracking error
        te.append(e_ - b)

    return {
        'information_ratio': port.information_ratio(np.array([e])),
        'mean_excess_return': np.array([e]).mean(),
        'stdev_excess_return': np.array([e]).std(),
        'tracking_error': np.array([te]).std()
    }
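
The F_CONTIGUOUS remark in the comments above is easy to verify: NumPy arrays are C-contiguous by default, and np.require produces the Fortran-ordered copy that cvxopt.matrix expects.

import numpy as np

a = np.zeros((3, 2))                # C-contiguous by default
print(a.flags['F_CONTIGUOUS'])      # False
b = np.require(a, dtype=np.float64, requirements=['F'])
print(b.flags['F_CONTIGUOUS'])      # True: safe to hand to cvxopt.matrix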
Example No. 41
    def updateRendering(self):
        """
        Update the Rendering of this module
        """
        self.mapper.AddObserver("ProgressEvent", lib.messenger.send)
        lib.messenger.connect(self.mapper, "ProgressEvent", self.updateProgress)
        dataUnit = self.getInputDataUnit(1)
        inputDataUnit2 = self.getInputDataUnit(2)

        settings = inputDataUnit2.getSettings()
        filterList = settings.get("FilterList")

        if not dataUnit:
            dataUnit = self.dataUnit
        self.mapper.SetLookupTable(dataUnit.getColorTransferFunction())
        self.mapper.ScalarVisibilityOn()

        min, max = self.data.GetScalarRange()

        # if (min,max) != self.scalarRange:
        self.setScalarRange(min, max)

        dataUnit = self.getInputDataUnit(1)
        self.mapper.ColorByArrayComponent(0, 0)
        self.mapper.SetScalarRange(min, max)
        self.mapper.SetColorModeToMapScalars()
        self.mapper.SetLookupTable(dataUnit.getColorTransferFunction())
        self.mapper.ScalarVisibilityOn()

        self.updateOpacity()
        # self.actor2.GetProperty().SetOpacity(opacity)

        polyinput = self.getPolyDataInput(1)
        if polyinput:
            Logging.info("Using polydata input", kw="visualizer")
            VisualizationModule.updateRendering(self, polyinput)
        else:
            input = self.getInput(1)

            x, y, z = self.dataUnit.getDimensions()
            input = optimize.optimize(image=input, updateExtent=(0, x - 1, 0, y - 1, 0, z - 1))
            self.contour.SetInput(input)
            polyinput = self.contour.GetOutput()

        input2 = self.getInput(2)
        minval, maxval = input2.GetScalarRange()

        input2.Update()
        self.contour2.SetInput(input2)
        print "Generating", maxval - 1, "values in range", 1, maxval
        self.contour2.GenerateValues(maxval - 1, 1, maxval)
        n = self.contour2.GetNumberOfContours()
        for i in range(0, n):
            self.contour2.SetValue(i, int(self.contour2.GetValue(i)))

        self.mapper2.ColorByArrayComponent(0, 0)
        self.mapper2.SetScalarRange(min, max)

        if not self.parameters["MarkColor"]:
            self.mapper2.SetLookupTable(inputDataUnit2.getColorTransferFunction())
        else:
            self.setLookupTableBasedOnDistance(self.parameters["Distance"])

        print "\n\n\n*** INPUT DATAUNIT2=", inputDataUnit2
        self.mapper2.SetColorModeToMapScalars()
        self.mapper2.ScalarVisibilityOn()

        polyinput2 = self.contour2.GetOutput()

        decimateLevel = self.parameters["Simplify"]
        preserveTopology = self.parameters["PreserveTopology"]
        if decimateLevel != 0:
            self.decimate.SetPreserveTopology(preserveTopology)
            if not preserveTopology:
                self.decimate.SplittingOn()
                self.decimate.BoundaryVertexDeletionOn()
            else:
                self.decimate.SplittingOff()
                self.decimate.BoundaryVertexDeletionOff()
            self.decimate.SetTargetReduction(decimateLevel / 100.0)

            Logging.info(
                "Decimating %.2f%%, preserve topology: %s" % (decimateLevel, preserveTopology), kw="visualizer"
            )
            self.decimate.SetInput(polyinput)
            polyinput = self.decimate.GetOutput()

        if self.parameters["Normals"]:
            angle = self.parameters["FeatureAngle"]
            Logging.info("Generating normals at angle", angle, kw="visualizer")
            self.normals.SetFeatureAngle(angle)
            self.normals.SetInput(polyinput)
            polyinput = self.normals.GetOutput()

        self.mapper.SetInput(polyinput)
        self.mapper2.SetInput(polyinput2)
        self.init = True
        VisualizationModule.updateRendering(self, polyinput)
        self.parent.Render()
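The method above wires a contour -> decimate -> normals -> mapper chain. A minimal, self-contained sketch of that chain in the same VTK 5 style (SetInput/GetOutput); the wavelet source and the isovalue 150 are assumptions purely for illustration:

import vtk

# Synthetic image data so the sketch runs stand-alone.
source = vtk.vtkRTAnalyticSource()
source.Update()

contour = vtk.vtkContourFilter()
contour.SetInput(source.GetOutput())
contour.SetValue(0, 150)                  # assumed isovalue

decimate = vtk.vtkDecimatePro()
decimate.SetInput(contour.GetOutput())
decimate.SetTargetReduction(0.30)         # drop ~30% of the triangles
decimate.PreserveTopologyOn()

normals = vtk.vtkPolyDataNormals()
normals.SetInput(decimate.GetOutput())
normals.SetFeatureAngle(60)

mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(normals.GetOutput())
mapper.ScalarVisibilityOn()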
Exemplo n.º 42
0
#!/usr/bin/env python3

from optimize import optimize, Kanban
from pprint import pprint

optimal = optimize(
    orders=[
        Kanban('A01', (1, 2, 3, 4, 5, 6, 7), 7),
        Kanban('A02', (1, 2, 4, 4, 5, 6, 7), 7),
        Kanban('A03', (1, 2, 4, 4, 3, 6, 7), 7),
        Kanban('A04', (1, 2, 4, 4, 3, 8, 7), 7),
        Kanban('A05', (1, 2, 4, 6, 3, 8, 7), 7),
        Kanban('A06', (1, 2, 4, 6, 3, 8, 9), 7),
        Kanban('A07', (1, 2, 4, 6, 3, 5, 9), 7),
        Kanban('A08', (1, 2, 4, 8, 3, 5, 9), 7),
        Kanban('A09', (1, 2, 4, 8, 9, 5, 9), 7),
        Kanban('A10', (1, 2, 4, 8, 9, 5, 3), 7),
    ],
    machines=[
        {'machineId': 'a1'},
        {'machineId': 'a2'},
        {'machineId': 'a3'},
    ]
)

pprint(optimal)
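The optimize module's interface is not shown in this example; a purely hypothetical stand-in consistent with the call above (an order id, a tuple of per-step times, and a count) could look like:

from collections import namedtuple

# Hypothetical stand-ins for the imported names above.
Kanban = namedtuple('Kanban', ['order_id', 'step_times', 'count'])

def optimize(orders, machines):
    # Placeholder strategy: round-robin the orders over the machines,
    # standing in for whatever scheduling the real module implements.
    schedule = {m['machineId']: [] for m in machines}
    for i, order in enumerate(orders):
        schedule[machines[i % len(machines)]['machineId']].append(order.order_id)
    return schedule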
Exemplo n.º 43
0
#!/usr/bin/python
from parse_yacc import parse
from optimize import optimize
from peep import createAssemblyCode
import sys

if __name__ == '__main__':

  if len(sys.argv) < 2:
    print 'Run: python %s benchmarks/assembly.s output.s' % sys.argv[0]
    sys.exit(1)
  # Parsing and optimization
  parsed = parse(sys.argv[1])
  new = optimize(parsed, var=1)

  if len(sys.argv) > 2:
    # Save output assembly
    file_func = open(sys.argv[2], 'w+')
    file_func.write(createAssemblyCode(new))
    file_func.close()
Exemplo n.º 44
0
def simplex(dictionary, basic_vars, non_basic_vars):
    dict_, basic, non_basic = initialize_and_reconstruct(dictionary, basic_vars, np.array(non_basic_vars))
    res, _ = optimize(dict_, basic, non_basic)
    if res == UNBOUNDED:
        raise Infeasible()
    return dict_, basic, non_basic
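A hedged usage sketch for simplex, assuming the conventional dictionary layout of these examples (constants in column 0, the objective in the last row):

import numpy as np

# Maximize x1 + x2 subject to x1 + x2 <= 4 and x1 <= 2 (hypothetical data).
dictionary = np.array([
    [4.0, -1.0, -1.0],   # slack s1 = 4 - x1 - x2
    [2.0, -1.0,  0.0],   # slack s2 = 2 - x1
    [0.0,  1.0,  1.0],   # objective z = 0 + x1 + x2
])
final, basic, non_basic = simplex(dictionary, [3, 4], [1, 2])
print(final[-1, 0])      # optimal objective value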
		# There's a bug where no icon (right menu bar) is selected and we still
		# try to process the variable named colorImage, which in that case is None.
		if colorImage is None:
			Logging.info("Nothing to preview (no channel, icon, data) selected.")
			return

		usedUpdateExt = 0
		uext = None
		if self.z != -1:
			x, y = self.dataDimX, self.dataDimY
			usedUpdateExt = 1
			uext = (0, x - 1, 0, y - 1, self.z, self.z)

		t = time.time()
		colorImage.ReleaseDataFlagOn()
		colorImage = optimize.optimize(image = colorImage, updateExtent = uext)

		t2 = time.time()
		Logging.info("Executing pipeline took %f seconds" % (t2 - t), kw = "pipeline")
		self.currentImage = colorImage

		if colorImage:
			x, y, z = colorImage.GetDimensions()

			if x != self.oldx or y != self.oldy:
				self.oldx = x
				self.oldy = y
			self.setImage(colorImage)
			self.setZSlice(self.z)
			z = self.z
		
Exemplo n.º 46
0
import sys

from common import read_input, print_solution
from optimize import optimize
from opt_2 import opt_2
from greedy import greedy
from solver_greedy import solve
from or_opt import or_opt
from vortex import vortex
from kruskal import kruskal_greedy
from my_random import random_solve
from combine import optimize2, read_path

if __name__ == '__main__':
    # read_path below needs sys.argv[2], so require both arguments
    assert len(sys.argv) > 2
    optimized = optimize(read_input(sys.argv[1]))
    optimized2 = optimize2(read_input(sys.argv[1]), read_path(sys.argv[2]))
    greedy_solution = greedy(read_input(sys.argv[1]))
    opt_2_solution = opt_2(read_input(sys.argv[1]))
    or_opt_solution = or_opt(read_input(sys.argv[1]))
    vortex_solution = vortex(read_input(sys.argv[1]))
    kruskal_solution = kruskal_greedy(read_input(sys.argv[1]))

    print_solution(or_opt_solution)
    print_solution(opt_2_solution)
    print_solution(vortex_solution)
    print_solution(kruskal_solution)
    print_solution(greedy_solution)
    print_solution(optimized)
    print_solution(optimized2)
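The solver interfaces are not shown; a common convention (assumed here) is that read_input yields (x, y) city coordinates and each solver returns a visiting order, in which case the closed-tour length is:

import math

def tour_length(cities, order):
    # cities: list of (x, y) points; order: permutation of indices (assumed).
    total = 0.0
    for a, b in zip(order, order[1:] + order[:1]):
        total += math.hypot(cities[a][0] - cities[b][0],
                            cities[a][1] - cities[b][1])
    return total

print(tour_length([(0, 0), (0, 3), (4, 3)], [0, 1, 2]))  # 3 + 4 + 5 = 12.0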
	def doOperation(self, preview=0):
		"""
		Manipulates the dataset in the specified ways
		"""
		filterlist = self.settings.get("FilterList")
		if filterlist:
			modified = filterlist.getModified()
		else:
			modified = 0
		
		if preview and not modified and self.cached and self.timepoint == self.cachedTimepoint:
			Logging.info("--> Returning cached data, timepoint=%d, cached timepoint=%d" % 
				(self.timepoint, self.cachedTimepoint), kw = "pipeline")
			return self.cached
		else:
			del self.cached
			self.cached = None
	
		Logging.info("Creating preview, filters = %s"%str(filterlist), kw="pipeline")

		data = self.images
		if not filterlist or type(filterlist) == types.ListType or filterlist.getCount() == 0:
			Logging.info("No filters, returning original data", kw="pipeline")
			return self.images[0]
		
		try:
			enabledFilters = filterlist.getEnabledFilters() 
		except AttributeError:
			enabledFilters = []
		highestFilterIndex = len(enabledFilters)-1
		
		lastfilter = None
		wantWhole = False
		polydata = None
		x = 1.0/(1+len(enabledFilters))

		# A very specific cache is needed, to prevent the "result" channel
		# being re-processed when being merged to the "original" channel(s).
		key = (self.settings.dataunit, self.timepoint)
		if key in self.cacheDataUnits and self.cacheDataUnitsEnabled:
			Logging.info("Returning cached dataunit", kw="pipeline")
			return self.cacheDataUnits[key]
		
		for i, currfilter in enumerate(enabledFilters):
			if currfilter.requireWholeDataset:
				wantWhole = True
			self.currentExecutingFilter = currfilter
			self.shift = x*(i+1)
			self.scale = x
			self.eventDesc = "Performing %s"%currfilter.name
			currfilter.setExecutive(self)
			if polydata:
				currfilter.setPolyDataInput(polydata)
			flag = (i == highestFilterIndex)
			if i > 0:
				currfilter.setPrevFilter(enabledFilters[i-1])
			else:
				currfilter.setPrevFilter(None)
			if not flag:
				currfilter.setNextFilter(enabledFilters[i+1])
			else:
				currfilter.setNextFilter(None)
			Logging.info("Executing %s"%currfilter.name,kw="pipeline")

			data = currfilter.execute(data, update=0, last=flag)
			polydata = currfilter.getPolyDataOutput()

			if not flag:
				nextfilter = enabledFilters[i+1]
				if not currfilter.itkFlag and nextfilter.itkFlag:
					Logging.info("Executing VTK side before switching to ITK", kw="pipeline")
					data = optimize.optimize(image = data, releaseData = 1)
					data.Update()
				elif currfilter.itkFlag and not nextfilter.itkFlag:
					Logging.info("Converting from ITK side back to VTK", kw="pipeline")
					data = currfilter.convertITKtoVTK(data)
			
			lastfilter = currfilter
			
			if not preview:
				currfilter.writeOutput(self.controlUnit, self.timepoint)
			if not data:
				self.currentExecutingFilter = None
				self.cached = None
				return None
			data = [data]

		self.currentExecutingFilter = None
		
		if wantWhole:
			scripting.wantWholeDataset = wantWhole
		Logging.info("Pipeline done",kw="pipeline")
		data = data[0]
		if data.__class__ != vtk.vtkImageData and type(data) != types.TupleType:
			data = lastfilter.convertITKtoVTK(data)

		filterlist.setModified(0)
		self.setPolyDataOutput(polydata)
		
		if self.cacheDataUnitsEnabled:
			x0, x1, y0, y1, z0, z1 = data.GetUpdateExtent()
			if z1 >= 0:
				copy = vtk.vtkImageData()
				copy.DeepCopy(data)
				copy.Update()
				key = (self.settings.dataunit, self.timepoint)
				self.cacheDataUnits[key] = copy
				Logging.info("Caching dataunit", kw="pipeline")

		return data
    def updateRendering(self):
        """
        Update the Rendering of this module
        """
        method = self.parameters["Method"]
        self.setMethod(method)

        if self.volumeModule:
            self.volumeModule.function = vtk.vtkVolumeRayCastIsosurfaceFunction()
            self.volumeModule.function.SetIsoValue(self.parameters["IsoValue"])
            self.volumeModule.showTimepoint(self.timepoint)
            return

        if not self.init:
            self.init = 1
            self.mapper.ColorByArrayComponent(0, 0)
        self.mapper.AddObserver("ProgressEvent", lib.messenger.send)
        lib.messenger.connect(self.mapper, "ProgressEvent", self.updateProgress)
        dataUnit = self.getInputDataUnit(1)
        if not dataUnit:
            dataUnit = self.dataUnit
        dataCtf = dataUnit.getColorTransferFunction()
        if self.parameters["SolidColor"]:
            minval, maxval = dataCtf.GetRange()
            ctf = vtk.vtkColorTransferFunction()
            ctf.AddRGBPoint(int(minval), 0, 0, 0)
            r, g, b = dataCtf.GetColor(maxval)
            ctf.AddRGBPoint(int(minval) + 1, r, g, b)
            ctf.AddRGBPoint(maxval, r, g, b)
        else:
            ctf = dataCtf
        self.mapper.SetLookupTable(ctf)
        self.mapper.ScalarVisibilityOn()

        minVal, maxVal = self.data.GetScalarRange()
        self.setScalarRange(minVal, maxVal)

        self.mapper.SetScalarRange(minVal, maxVal)
        self.mapper.SetColorModeToMapScalars()

        opacity = self.parameters["Transparency"]
        opacity = (100 - opacity) / 100.0
        Logging.info("Using opacity ", opacity, kw="visualizer")
        if opacity != 1:
            cullers = self.parent.getRenderer().GetCullers()
            cullers.InitTraversal()
            culler = cullers.GetNextItem()
            culler.SetSortingStyleToBackToFront()
            # print cullers, culler
            # self.parent.getRenderer().GetRenderWindow().SetAlphaBitPlanes(1)
            # self.parent.getRenderer().GetRenderWindow().SetMultiSamples(0)
            # self.parent.getRenderer().SetUseDepthPeeling(1)
            # self.parent.getRenderer().SetMaximumNumberOfPeels(100)
            # self.parent.getRenderer().SetOcclusionRatio(1.0)
            # print self.parent.getRenderer().GetLastRenderingUsedDepthPeeling()

        self.actor.GetProperty().SetOpacity(opacity)

        polyinput = self.getPolyDataInput(1)
        if polyinput:
            Logging.info("Using polydata input", kw="visualizer")
            self.mapper.SetInput(polyinput)
            VisualizationModule.updateRendering(self, polyinput)
            self.parent.Render()
            return

        x, y, z = self.dataUnit.getDimensions()
        input = self.getInput(1)
        input = optimize.optimize(image=input, updateExtent=(0, x - 1, 0, y - 1, 0, z - 1))

        if self.parameters["Gaussian"]:
            Logging.info("Doing gaussian smoothing", kw="visualizer")
            if not self.smooth:
                self.smooth = vtk.vtkImageGaussianSmooth()
            self.smooth.SetInput(input)
            input = self.smooth.GetOutput()

        self.contour.SetInput(input)
        input = self.contour.GetOutput()

        multi = self.parameters["MultipleSurfaces"]

        if not multi:
            Logging.info("Using single isovalue=%d" % int(self.parameters["IsoValue"]), kw="visualizer")
            self.contour.SetValue(0, self.parameters["IsoValue"])
        else:
            begin = self.parameters["SurfaceRangeBegin"]
            end = self.parameters["SurfaceRangeEnd"]
            n = self.parameters["SurfaceAmnt"]
            Logging.info("Generating %d values in range %d-%d" % (n, begin, end), kw="visualizer")
            self.contour.GenerateValues(n, begin, end)
            n = self.contour.GetNumberOfContours()
            for i in range(0, n):
                self.contour.SetValue(i, int(self.contour.GetValue(i)))

        # TODO: should decimateLevel and preserveTopology be instance variables?
        decimateLevel = self.parameters["Simplify"]
        preserveTopology = self.parameters["PreserveTopology"]
        if decimateLevel != 0:
            self.decimate.SetPreserveTopology(preserveTopology)
            if not preserveTopology:
                self.decimate.SplittingOn()
                self.decimate.BoundaryVertexDeletionOn()
            else:
                self.decimate.SplittingOff()
                self.decimate.BoundaryVertexDeletionOff()
            self.decimate.SetTargetReduction(decimateLevel / 100.0)

            Logging.info(
                "Decimating %.2f%%, preserve topology: %s" % (decimateLevel, preserveTopology), kw="visualizer"
            )
            self.decimate.SetInput(input)
            input = self.decimate.GetOutput()

        if self.parameters["Normals"]:
            angle = self.parameters["FeatureAngle"]
            Logging.info("Generating normals at angle", angle, kw="visualizer")
            self.normals.SetFeatureAngle(angle)
            self.normals.SetInput(input)
            input = self.normals.GetOutput()

        self.mapper.SetInput(input)
        VisualizationModule.updateRendering(self, input)
        self.parent.Render()
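A worked example of the Transparency-to-opacity mapping used above: a user-facing transparency of 30 (percent) renders the actor 70% opaque.

transparency = 30
opacity = (100 - transparency) / 100.0
assert opacity == 0.7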
Exemplo n.º 49
0
def main_program():
  #
  # convert file to pic format
  #

  if not os.path.exists(Gerber_name.get()):
    get_input_filename()
  if not os.path.exists(Gerber_name.get()):
    tkMessageBox.showerror("G2G_GUI ERROR", "The path provided for the input Gerber file is invalid.")
    return

  head, tail = os.path.split(Gerber_name.get())

  if os.name=='nt':
    temp_pdf = os.path.normpath(os.path.join(head, "_tmp_gerber.pdf"))
    temp_pic = os.path.normpath(os.path.join(head, "_tmp_gerber.pic"))
    temp_bat = os.path.normpath(os.path.join(head, "_tmp_gerber.bat"))
  else:
    temp_pdf = "_tmp_gerber.pdf"
    temp_pic = "_tmp_gerber.pic"

  if os.name=='nt':
    if not os.path.exists(gerbv_path.get()):
      tkMessageBox.showerror("G2G_GUI ERROR", "The path provided for gerbv is invalid.")
      return

    if not os.path.exists(pstoedit_path.get()):
      tkMessageBox.showerror("G2G_GUI ERROR", "The path provided for pstoedit is invalid.")
      return
  
  if os.name=='nt':
    os.system("echo \"%s\" --export=pdf --output=%s --border=0 \"%s\" > \"%s\"" % (os.path.normpath(gerbv_path.get()),temp_pdf,os.path.normpath(Gerber_name.get()),temp_bat))
    os.system("echo \"%s\" -q -f pic \"%s\" \"%s\" >> \"%s\"" % (os.path.normpath(pstoedit_path.get()),temp_pdf,temp_pic, temp_bat))
    os.system("\"%s\"" % temp_bat)
  else:
    os.system("%s --export=pdf --output=%s --border=0 \"%s\"" % (os.path.normpath(gerbv_path.get()),temp_pdf,os.path.normpath(Gerber_name.get())))
    os.system("%s -q -f pic \"%s\" \"%s\"" % (os.path.normpath(pstoedit_path.get()),temp_pdf,temp_pic))

  original_stdout = sys.stdout  # keep a reference to STDOUT

  if Output_name.get():
    sys.stdout = open(Output_name.get(), 'w')

  if not offset_str.get():
    default_offset_str()
  if not border_str.get():
    default_border_str()
  if not matrix_str.get():
    default_matrix_str()
  if not speed_str.get():
    default_speed_str()
  if not force_str.get():
    default_force_str()
  if not cut_mode_str.get():
    default_cut_mode_str()
    
  offset = floats(offset_str.get())
  border = floats(border_str.get())
  matrix = floats(matrix_str.get())
  speed = floats(speed_str.get())
  force = floats(force_str.get())
  cut_mode = int(cut_mode_str.get())

  #
  # main program
  #

  import graphtec
  import pic
  import optimize

  g = graphtec.graphtec()

  g.start()

  g.set(offset=(offset[0]+border[0]+0.5,offset[1]+border[1]+0.5), matrix=matrix)
  strokes = pic.read_pic(temp_pic)
  max_x,max_y = optimize.max_extent(strokes)

  tx,ty = 0.5,0.5

  border_path = [
    (-border[0], -border[1]),
    (max_x+border[0], -border[1]),
    (max_x+border[0], max_y+border[1]),
    (-border[0], max_y+border[1])
  ]

  if cut_mode==0:
    lines = optimize.optimize(strokes, border)
    for (s,f) in zip(speed,force):
      g.set(speed=s, force=f)
      for x in lines:
        g.line(*x)
      g.closed_path(border_path)
  else:
    for (s,f) in zip(speed,force):
      g.set(speed=s, force=f)
      for stroke in strokes:
        g.closed_path(stroke)
      g.closed_path(border_path)

  g.end()

  if Output_name.get():
    sys.stdout = original_stdout  # restore STDOUT back to its original value
    tkMessageBox.showinfo("G2G_GUI Message", "File '%s' created"  % (Output_name.get()) )
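optimize.max_extent is not shown; a minimal sketch of what it is assumed to compute (the largest x and y over all stroke points):

def max_extent(strokes):
    # strokes: iterable of polylines, each a list of (x, y) points (assumed).
    xs = [x for stroke in strokes for (x, y) in stroke]
    ys = [y for stroke in strokes for (x, y) in stroke]
    return max(xs), max(ys)

print(max_extent([[(0, 0), (2, 1)], [(1, 3)]]))  # (2, 3)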
	def doProcessing(self, bxdFile, **kws):
		"""
		Executes the module's operation using the current settings
		Parameters:
				bxdFile		The name of the created .bxd file
		Keywords:
				settings_only	If this parameter is set, then only the 
								settings will be written out and not the VTI 
								files.
				timepoints		The timepoints that should be processed
		"""
		if not self.module:
			Logging.error("No module set", "No module was set for the dataunit to do processing with")		
		callback = None
		
		settings_only = kws.get("settings_only", 0)
		callback = kws.get("callback", None)
		timepoints = kws.get("timepoints", range(self.getNumberOfTimepoints()))
		writebxd = kws.get("writebxd", True)
		# We create the vtidatasource with the name of the dataunit file
		# so it knows where to store the vtkImageData objects

		numberOfDatasets = self.module.getNumberOfOutputs()
		bxdWriters = []
		dataWriters = []

		if not settings_only:
			if numberOfDatasets > 1:
				for i in range(numberOfDatasets):
					channelBXDFile = bxdFile
					if bxdFile[-4:] == ".bxd":
						channelBXDFile = bxdFile[:-4] + "_" + self.sourceunits[i].getName() + ".bxd"
					bxdwriter = BXDDataWriter(channelBXDFile)
					bxdWriters.append(bxdwriter)
					bxcFile = bxdwriter.getBXCFileName(channelBXDFile)
					dataWriter = BXCDataWriter(bxcFile)
					dataWriters.append(dataWriter)
					bxdwriter.addChannelWriter(dataWriter)
			else:
				bxdwriter = BXDDataWriter(bxdFile)
				bxcFile = bxdwriter.getBXCFileName(bxdFile)
				bxdWriters.append(bxdwriter)
				dataWriter = BXCDataWriter(bxcFile)
				dataWriters.append(dataWriter)
				bxdwriter.addChannelWriter(dataWriter)

		else:
			bxdwriter = None
			bxcFile = bxdFile[:-1] + "p"
			bxdWriters.append(bxdwriter)
			dataWriter = BXCDataWriter(bxcFile)
			dataWriters.append(dataWriter)
			
		self.outputDirectory = os.path.dirname(bxcFile)
		#self.dataWriter = BXCDataWriter(bxcFile)
		
		#if bxdwriter:
		#	bxdwriter.addChannelWriter(self.dataWriter)

		n = 1
		self.guicallback = callback
		self.module.setControlDataUnit(self)

		if not settings_only:
			for timePoint in timepoints:
				# First we reset the module, so that we can start the operation
				# from clean slate
				scripting.processingTimepoint = timePoint
				self.module.reset()
				# We get the processed timepoint from each of the source data 
				# units
				#self.module.setSettings(self.settings)
				self.module.setTimepoint(timePoint)
				
				for dataunit in self.sourceunits:
					image = dataunit.getTimepoint(timePoint)
					self.module.addInput(dataunit, image)
				
				# Get the vtkImageData containing the results of the operation 
				# for this time point
				imageDatas = self.module.doOperation()
				polydatas = self.module.getPolyDataOutput()
				# Convert output to tuples if aren't already
				if type(imageDatas) is not types.TupleType:
					imageDatas = (imageDatas,)
				if type(polydatas) is not types.TupleType and polydatas is not None:
					polydatas = (polydatas,)
				
				Logging.info("Executing with optimizations",kw="processing")
				for i, imageData in enumerate(imageDatas):
					imageData = optimize.optimize(image = imageData)
					Logging.info("Processing done",kw="processing")
					lib.messenger.send(None, "update_processing_progress", timePoint, n, len(timepoints) * len(imageDatas))
					n += 1
					# Write the image data to disk
					Logging.info("Writing timepoint %d"%timePoint,kw="processing")
					dataWriters[i].addImageData(imageData)
					if polydatas is not None and i < len(polydatas):
						dataWriters[i].addPolyData(polydatas[i])
					dataWriters[i].sync()
					dims = dataWriters[i].getOutputDimensions()
					self.settings.set("Dimensions", str(dims))
					
		scripting.processingTimepoint = -1
		if settings_only:
			self.settings.set("SettingsOnly", "True")
		else:
			self.settings.set("SettingsOnly", "False")

		# Check if we have multiple outputs
		# If we do, make sure ctf is correct
		updateCTF = 0
		origCTF = self.settings.get("ColorTransferFunction")
		if len(dataWriters) > 1:
			updateCTF = 1

		for i, dataWriter in enumerate(dataWriters):
			if updateCTF:
				self.settings.set("ColorTransferFunction", self.sourceunits[i].getColorTransferFunction())
			self.createDataUnitFile(dataWriter)
		
		self.settings.set("ColorTransferFunction", origCTF)

		if not settings_only:
			for bxdwriter in bxdWriters:
				bxdwriter.write()

			# Write references to channels in BXD file
			if writebxd:
				try:
					fp = open(bxdFile, "w")
					print "Writing output to", bxdFile
					for bxdwriter in bxdWriters:
						channelBXCFile = bxdwriter.getBXCFileName(bxdwriter.getFilename())
						pathParts = channelBXCFile.split(os.sep)
						channelBXCFile = pathParts[-2] + os.sep + pathParts[-1]
						fp.write("%s\n" % channelBXCFile)
					fp.close()
				except IOError, ex:
					Logging.error("Failed to write settings", "CombinedDataUnit failed to open .bxd file %s for writing settings (%s)" % (bxdFile, str(ex)))
				return bxdFile
			else:
				return bxdWriters[0].getFilename()
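A worked example of the per-channel naming rule used in doProcessing (file and channel names are hypothetical):

bxdFile = "experiment.bxd"
channelName = "ch0"
channelBXDFile = bxdFile[:-4] + "_" + channelName + ".bxd"
print(channelBXDFile)  # experiment_ch0.bxd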