Example #1
import common


def install(cfg, ttyid):

	ssh = common.SSH(cfg, ttyid)

	# build a flat "ip,hostname,ip,hostname,..." list so every node can
	# populate its /etc/hosts with the whole cluster
	maps = []
	for node in cfg:
		maps.append(node.get('in_ipaddr', ''))
		maps.append(node.get('hostname', ''))
	pairs = ",".join(maps)
	ssh.upload(common.join(__file__, 'ipconfig-install.sh'), '/tmp/ipconfig-install.sh')
	ssh.cmd('chmod u+x /tmp/ipconfig-install.sh', True)
	for node in cfg:
		hostname = node.get('hostname', '')
		ssh.cmd('/tmp/ipconfig-install.sh -h %s -s %s' % (hostname, pairs), True, False, hostname)

	jdk_local_path = common.join(__file__, "jdk-8u45-linux-x64.tar.gz")
	jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"

	# hosts tagged as Mesos masters/slaves in the config
	master = ssh.haskey('mesos_master', 'hostname')
	slave = ssh.haskey('mesos_slave', 'hostname')

	ssh.upload(jdk_local_path, jdk_tmp_path)
	ssh.cmd('tar zxvf %s -C /usr/local' % jdk_tmp_path, True)
	ssh.upload(common.join(__file__, 'mesos-install.sh'), '/tmp/mesos-install.sh')
	ssh.cmd('chmod u+x /tmp/mesos-install.sh', True)
	ssh.cmd('/tmp/mesos-install.sh -m %s -t 0' % master[0], True, False, *master)  # -t 0: master role
	ssh.cmd('/tmp/mesos-install.sh -m %s -t 1' % master[0], True, False, *slave)   # -t 1: slave role

	ssh.close()
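These install() examples all drive the same common.SSH helper. The stub below is a minimal sketch of its interface as inferred from usage across the examples; the parameter names and the meaning of the boolean flags are assumptions, not the real implementation:

class SSH:
    """Sketch of the assumed interface (inferred from usage, not the real code)."""

    def __init__(self, cfg, ttyid=None):
        ...  # open connections to every host described in cfg

    def upload(self, local_path, remote_path, flag=False, *hostnames):
        ...  # copy a file to all hosts, or only to `hostnames` when given

    def cmd(self, command, flag1=False, flag2=False, *hostnames):
        ...  # run a shell command on all hosts, or only on `hostnames` when given

    def haskey(self, key, field):
        ...  # return the `field` value of every cfg entry that defines `key`

    def array(self, field):
        ...  # return the `field` value of every cfg entry

    def close(self):
        ...  # tear down all connections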
Example #2
import common


def install(cfg, ttyid):

	ssh = common.SSH(cfg, ttyid)

	# build the "ip,hostname,..." pairs for /etc/hosts, as in Example #1
	maps = []
	for node in cfg:
		maps.append(node.get('in_ipaddr', ''))
		maps.append(node.get('hostname', ''))
	pairs = ",".join(maps)
	ssh.upload(common.join(__file__, 'ipconfig-install.sh'), '/tmp/ipconfig-install.sh')
	ssh.cmd('chmod u+x /tmp/ipconfig-install.sh', True)
	for node in cfg:
		hostname = node.get('hostname', '')
		ssh.cmd('/tmp/ipconfig-install.sh -h %s -s %s' % (hostname, pairs), True, False, hostname)
	
	jdk_local_path = common.join(__file__, "jdk-8u45-linux-x64.tar.gz")
	jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"

	ssh.upload(jdk_local_path, jdk_tmp_path)
	ssh.cmd('tar zxvf %s -C /usr/local' % jdk_tmp_path, True)
	ssh.upload(common.join(__file__, 'zookeeper-install.sh'), '/tmp/zookeeper-install.sh')
	ssh.cmd('chmod u+x /tmp/zookeeper-install.sh', True)

	hostnames = ssh.array('hostname')
	s = ','.join(hostnames)

	# each ZooKeeper node gets the full server list plus its own 1-based id
	for i, host in enumerate(hostnames):
		ssh.cmd('/tmp/zookeeper-install.sh -s %s -i %d' % (s, i + 1), True, False, host)

	ssh.close()
Example #3
from typing import Dict, Set


def bi_corr_clustering(a_list: Dict[int, Set[int]],
                       b_list: Dict[int, Set[int]]) -> None:
    """
    Two 'parallel' clusterings: the rows and the columns are clustered in
    alternating passes until neither side changes.

    Parameters
    ----------
    a_list : the rows (index -> set of related indices).
    b_list : the columns (index -> set of related indices).

    Returns
    -------
    None; the resulting clusterings are left in the globals ga and gb.
    """
    global ga
    global gb
    global fa
    global fb
    ga = {x: x for x in range(len(a_list))}
    gb = {x: x for x in range(len(b_list))}
    while True:
        any_change = False
        fa = calc(a_list, gb)
        while True:
            changeA = False
            for i in ga:
                if move(i, ga, attract_rows):
                    changeA = True
                    any_change = True
            if join(ga, attract_rows):
                changeA = True
                any_change = True
            if not changeA:
                break
        #print("GA:",[list({i+1 for i in ga if ga[i]==v}) for v in set(ga.values())])
        fb = calc(b_list, ga)
        while True:
            changeB = False
            for i in gb:
                if move(i, gb, attract_columns):
                    changeB = True
                    any_change = True
            if join(gb, attract_columns):
                changeB = True
                any_change = True
            if not changeB:
                break
        #print("GB:",[list({i+1 for i in gb if gb[i]==v}) for v in set(gb.values())])
        if not any_change:
            break
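A hedged sketch of how the inputs might be prepared from a 0/1 relation matrix; the helper name alist mirrors the commented-out call in Example #6 and is an assumption, as is the exact encoding:

from typing import Dict, List, Set

def alist(matrix: List[List[int]]) -> Dict[int, Set[int]]:
    # index -> set of indices where the relation holds
    return {i: {j for j, v in enumerate(row) if v} for i, row in enumerate(matrix)}

m = [[1, 1, 0],
     [1, 1, 0],
     [0, 0, 1]]
a_list = alist(m)                           # rows
b_list = alist([list(c) for c in zip(*m)])  # columns (the transpose)
# bi_corr_clustering(a_list, b_list) would then leave the clusterings in ga and gb.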
Example #4
import common


def install(cfg, ttyid):

    ssh = common.SSH(cfg, ttyid)

    jdk_local_path = common.join(__file__, "jdk-8u45-linux-x64.tar.gz")
    jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"

    ssh.upload(jdk_local_path, jdk_tmp_path)
    ssh.cmd("tar zxvf %s -C /usr/local" % jdk_tmp_path, True)
    ssh.upload(common.join(__file__, "tomcat-install.sh"), "/tmp/tomcat-install.sh")
    ssh.cmd("chmod u+x /tmp/tomcat-install.sh", True)
    ssh.cmd("/tmp/tomcat-install.sh", True)

    ssh.close()
Example #5
import common


def install(cfg, ttyid):

	ssh = common.SSH(cfg, ttyid)
	ssh.upload(common.join(__file__, 'mosquitto-install.sh'), '/tmp/mosquitto-install.sh')
	ssh.cmd('chmod u+x /tmp/mosquitto-install.sh', True)
	ssh.cmd('/tmp/mosquitto-install.sh', True)
	ssh.close()
Example #6
from typing import Dict, List, Set


def calc(matrix: List[List[int]]) -> List[List[int]]:
    """
    Biclustering.

    Parameters
    ----------
    matrix : homogeneous matrix of a relation.

    Returns
    -------
    solution : a clustering, as a list of clusters of (1-based) object indices.
    """
    # make_rows, move, join and attract are module-level helpers not shown here
    global rows
    rows = make_rows(matrix)
    #transposed: List[List[int]] = list(map(list, zip(*matrix)))
    #trans: Dict[int, Set[int]] = alist(transposed)

    global g
    g = {x: x for x in range(len(rows))}

    while True:
        change = False
        for i in g:
            if move(i, g, attract):
                change = True
        # print("after move:", [list({i+1 for i in g if g[i]==v}) for v in set(g.values())])
        if join(g, attract):
            change = True
            # print("after join:", [list({i+1 for i in g if g[i]==v}) for v in set(g.values())])
        if not change:
            break
    solution = [list({i + 1 for i in g if g[i] == v}) for v in set(g.values())]
    return solution
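The final line of calc() converts the leader map g into explicit clusters; a self-contained illustration of that idiom (the toy g is made up):

g = {0: 0, 1: 0, 2: 2}  # object index -> representative ("leader") of its cluster
solution = [list({i + 1 for i in g if g[i] == v}) for v in set(g.values())]
print(solution)  # [[1, 2], [3]] (cluster order may vary)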
Example #7
import copy
import itertools
import sys

import common


def simple_reduction(puzzle):
  """
  simple_reduction returns a solution to <puzzle>.

  It works by reducing the number of greenhouses one by one until it has the
  lowest cost and meets the max constraint.
  """
  max, field = puzzle

  # figure out the current number of greenhouses
  greenhouses = common.ids(field)

  # we need to keep a copy of the previous field and its cost in order
  # to return it once we've realized we've done one reduction too many
  prev_field, prev_cost = None, sys.maxsize
  if len(greenhouses) <= max:
    prev_field, prev_cost = copy.deepcopy(field), common.cost(field)

  # join greenhouses until we run out of them, or until the max constraint
  # is met *and* cost increases from one reduction to the next
  while len(greenhouses) > 1:
    j1, j2, js = 0, 0, sys.maxsize
    # try each combination of greenhouses
    for g1, g2 in itertools.combinations(greenhouses, 2):
      # find outer bounds (left, right, top and bottom) for a greenhouse made
      # up of g1 and g2
      size3, p31, p32 = common.outer_bounds([g1, g2], field)

      if size3 is not None:
        size1, p11, p12 = common.outer_bounds(g1, field)
        size2, p21, p22 = common.outer_bounds(g2, field)

        diff = size3 - size2 - size1
        if diff < js:
          j1, j2, js = g1, g2, diff

    # if we run out of combinations to try
    # we must either surrender (return None)
    # or if len(greenhouses) <= max return
    # the best solution we have.
    if j1 == 0:
      if len(greenhouses) <= max:
        return max, prev_field
      else:
        return max, None

    # join j1 and j2, remove j2 from greenhouses
    field = common.join(j1, j2, field)
    greenhouses.remove(j2)

    # decide if we should exit this loop or keep on reducing
    curr_cost = common.cost(field)
    if len(greenhouses) < max:
      if prev_cost < curr_cost:
        return max, prev_field

    prev_field, prev_cost = copy.deepcopy(field), curr_cost

  # if we end up here, we've come down to 1 greenhouse
  return max, field
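The inner loop keeps the pair of greenhouses whose merged bounding box adds the least area; a self-contained toy illustration of that selection (the sizes below are made up):

import itertools
import sys

sizes = {'A': 4, 'B': 6, 'C': 9}                           # individual bounding-box sizes
merged = {('A', 'B'): 12, ('A', 'C'): 20, ('B', 'C'): 16}  # hypothetical merged sizes

best, best_diff = None, sys.maxsize
for g1, g2 in itertools.combinations(sorted(sizes), 2):
    diff = merged[(g1, g2)] - sizes[g1] - sizes[g2]
    if diff < best_diff:
        best, best_diff = (g1, g2), diff
print(best, best_diff)  # ('B', 'C') 1 -- joining B and C wastes the least area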
Example #8
import common


def install(cfg, ttyid):

	ssh = common.SSH(cfg, ttyid)
	ssh.cmd('apt-get update', True)
	ssh.upload(common.join(__file__, 'mongodb-install.sh'), '/tmp/mongodb-install.sh')
	ssh.cmd('chmod u+x /tmp/mongodb-install.sh', True)
	ssh.cmd('/tmp/mongodb-install.sh', True)
	ssh.close()
Example #9
import common


def clean(cfg):

	ssh = common.SSH(cfg)
	spark_client = ssh.haskey('spark_client', 'hostname')

	ssh.cmd('rm -rf /usr/local/jdk*', True)
	ssh.cmd('rm -rf /usr/local/hadoop*', True)
	ssh.cmd('rm -rf /usr/local/spark*', True, False, *spark_client)
	ssh.cmd('rm -rf /data', True)

	ssh.upload(common.join(__file__, 'hadoop-clean.sh'), '/tmp/hadoop-clean.sh')
	ssh.cmd('chmod u+x /tmp/hadoop-clean.sh', True)
	ssh.cmd('/tmp/hadoop-clean.sh', True)

	ssh.upload(common.join(__file__, 'ipconfig-clean.sh'), '/tmp/ipconfig-clean.sh')
	ssh.cmd('chmod u+x /tmp/ipconfig-clean.sh', True)
	ssh.cmd('/tmp/ipconfig-clean.sh', True)

	ssh.close()
Example #10
import common


def install(cfg, ttyid):

	if len(cfg) == 0:
		return
	cfg1 = cfg[0]
	in_ipaddr = cfg1.get('in_ipaddr', '')

	ssh = common.SSH(cfg, ttyid)
	ssh.cmd('apt-get update', True)
	ssh.upload(common.join(__file__, 'riak-install.sh'), '/tmp/riak-install.sh')
	ssh.cmd('chmod u+x /tmp/riak-install.sh', True)
	ssh.cmd('/tmp/riak-install.sh -i %s' % in_ipaddr, True)
	ssh.close()
Example #11
import common


def install(cfg, ttyid):

	if len(cfg) == 0:
		return
	cfg1 = cfg[0]

	mysqlpwd = cfg1.get('mysqlpwd', '')
	if mysqlpwd is None or len(mysqlpwd) == 0:
		mysqlpwd = '123456'	

	ssh = common.SSH(cfg, ttyid)
	ssh.upload(common.join(__file__, 'lamp-install.sh'), '/tmp/lamp-install.sh')
	ssh.cmd('chmod u+x /tmp/lamp-install.sh', True)
	ssh.cmd('/tmp/lamp-install.sh -p %s' % mysqlpwd, True)
	ssh.close()
Example #12
import time

from common import SSH, join  # assumed import; other examples use common.SSH and common.join

# `conn` (the cloud API connection), `bandwidth_limit`, `TOKEN`, `me_ip`
# and `mwp` are module-level names defined elsewhere


def Worker(manifest):
	uname = 'ubuntu'
	passwd = '1qazxcvbNM'
	port = 22
	ret = conn.run_instances(
		image_id='trustysrvx64e',
		cpu=1,
		memory=1024,
		login_mode='passwd',
		login_passwd=passwd,
		vxnets=['vxnet-0']
	)
	instance = ret.get('instances')[0]
	time.sleep(10)  # crude fixed settle delay (see the polling sketch after this example)
	ret = conn.allocate_eips(
		bandwidth=bandwidth_limit,
		billing_mode='bandwidth'
	)
	eip_id = ret.get('eips')[0]
	time.sleep(10)
	ret = conn.associate_eip(
		eip=eip_id,
		instance=instance,
	)
	time.sleep(10)
	ret = conn.describe_eips(
		eips = [eip_id]
	)
	eip = ret.get('eip_set')[0].get('eip_addr')
	time.sleep(10)
	d = {}
	d['hostname'] = instance
	d['in_ipaddr'] = eip
	d['ex_ipaddr'] = eip
	d['port'] = port
	d['username'] = uname
	d['password'] = passwd
	ds = []
	ds.append(d)
	ssh = SSH(ds)
	ssh.upload(join(__file__, 'Spider.py'), '/tmp/Spider.py')
	ret = manifest.pop(1)
	for line in ret:
		ssh.cmd('echo "%s" >> /tmp/manifest.txt' % line, True)
	# both branches of the original if/else ran the identical command,
	# so the `std` flag (defined elsewhere) made no difference; collapsed here
	ssh.cmd('python /tmp/Spider.py -h %s -i %s -t %s -p %s -e %s -w %s' % (instance, eip_id, TOKEN, eip, me_ip, mwp), True)
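The fixed ten-second sleeps assume every API step settles in time. A generic polling helper is a more robust sketch; the helper and its defaults are assumptions, and a caller would pass whichever describe call the SDK provides, e.g. wait_until(lambda: conn.describe_eips(eips=[eip_id]).get('eip_set')):

import time


def wait_until(check, timeout=120, interval=5):
    # poll `check` until it returns something truthy or the timeout elapses
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = check()
        if result:
            return result
        time.sleep(interval)
    return None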
Example #13
import common


def install(cfg, ttyid):

	ssh = common.SSH(cfg, ttyid)
	# build the "ip,hostname,..." pairs for /etc/hosts, as in Example #1
	maps = []
	for node in cfg:
		maps.append(node.get('in_ipaddr', ''))
		maps.append(node.get('hostname', ''))
	pairs = ",".join(maps)
	ssh.upload(common.join(__file__, 'ipconfig-install.sh'), '/tmp/ipconfig-install.sh')
	ssh.cmd('chmod u+x /tmp/ipconfig-install.sh', True)
	for node in cfg:
		hostname = node.get('hostname', '')
		ssh.cmd('/tmp/ipconfig-install.sh -h %s -s %s' % (hostname, pairs), True, False, hostname)

	jdk_local_path = common.join(__file__, "jdk-8u45-linux-x64.tar.gz")
	jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"
	hadoop_local_path = common.join(__file__, "hadoop-2.6.0.tar.gz")
	hadoop_tmp_path = "/tmp/hadoop-2.6.0.tar.gz"
	spark_local_path = common.join(__file__, "spark-1.3.0-bin-hadoop2.4.tgz")
	spark_tmp_path = "/tmp/spark-1.3.0-bin-hadoop2.4.tgz"

	master = ssh.haskey('hadoop_master', 'hostname')
	slave = ssh.haskey('hadoop_slave', 'hostname')
	slave_ = ','.join(slave)

	ssh.upload(jdk_local_path, jdk_tmp_path)
	ssh.cmd('tar zxvf %s -C /usr/local' % jdk_tmp_path, True)
	ssh.upload(hadoop_local_path, hadoop_tmp_path)
	ssh.cmd('tar zxvf %s -C /usr/local' % hadoop_tmp_path, True)
	ssh.upload(common.join(__file__, 'hadoop-install.sh'), '/tmp/hadoop-install.sh')
	ssh.cmd('chmod u+x /tmp/hadoop-install.sh', True)
	ssh.cmd('/tmp/hadoop-install.sh -m %s -s %s -t 0' % (master[0], slave_), True, False, *master)
	ssh.cmd('/tmp/hadoop-install.sh -m %s -s %s -t 1' % (master[0], slave_), True, False, *slave)

	ssh.cmd('/usr/local/hadoop/bin/hdfs namenode -format hadoop', True, False, *master)
	ssh.cmd('/usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs start namenode', True, False, *master)
	ssh.cmd('/usr/local/hadoop/sbin/hadoop-daemon.sh --config /usr/local/hadoop/etc/hadoop --script hdfs start datanode', True, False, *slave)
	ssh.cmd('/usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop start resourcemanager', True, False, *master)
	ssh.cmd('/usr/local/hadoop/sbin/yarn-daemon.sh --config /usr/local/hadoop/etc/hadoop start nodemanager', True, False, *slave)

	spark_client = ssh.haskey('spark_client', 'hostname')

	ssh.upload(spark_local_path, spark_tmp_path, False, *spark_client)
	ssh.cmd('tar zxvf %s -C /usr/local' % spark_tmp_path, True, False, *spark_client)
	ssh.upload(common.join(__file__, 'spark-install.sh'), '/tmp/spark-install.sh', False, *spark_client)
	ssh.cmd('chmod u+x /tmp/spark-install.sh', True, False, *spark_client)
	ssh.cmd('/tmp/spark-install.sh', True, False, *spark_client)

	ssh.close()
Example #14
import common


def install(cfg, ttyid):

	zks = cfg.get('zks', '')
	hdfs = cfg.get('hdfs', '')

	cfg = cfg.get('hosts', None)
	ssh = common.SSH(cfg, ttyid)

	#maps = []
	#for _ in cfg:
	#	maps.append(_.get('in_ipaddr', ''))
	#	maps.append(_.get('hostname', ''))
	#pairs = ",".join(maps)
	#ssh.upload(common.join(__file__, 'ipconfig-install.sh'), '/tmp/ipconfig-install.sh')
	#ssh.cmd('chmod u+x /tmp/ipconfig-install.sh', True)
	#for _ in cfg:
	#	hostname = _.get('hostname', '')
	#	ssh.cmd('/tmp/ipconfig-install.sh -h %s -s %s' % (hostname, pairs), True, False, hostname)

	#jdk_local_path = common.join(__file__, "jdk-8u45-linux-x64.tar.gz")
	#jdk_tmp_path = "/tmp/jdk-8u45-linux-x64.tar.gz"

	master = ssh.haskey('hbase_master', 'hostname')
	backup = ssh.haskey('hbase_backup', 'hostname')
	if len(backup) == 1:
		backup_ = backup[0]
	else:
		backup_ = ''
	slaves = ssh.haskey('hbase_slave', 'hostname')
	slave_ = ','.join(slaves)

	#ssh.upload(jdk_local_path, jdk_tmp_path)
	#ssh.cmd('tar zxvf %s -C /usr/local' % jdk_tmp_path, True)
	ssh.upload(common.join(__file__, 'hbase-install.sh'), '/tmp/hbase-install.sh')
	ssh.cmd('chmod u+x /tmp/hbase-install.sh', True)
	ssh.cmd('/tmp/hbase-install.sh -b %s -s %s -z %s -h %s' % (backup_, slave_, zks, hdfs), True)

	ssh.cmd('/usr/local/hbase/bin/start-hbase.sh', True, False, *master)

	ssh.close()
Example #15
import common


def install(cfg, ttyid):

	if len(cfg) == 0:
		return
	cfg1 = cfg[0]

	domain = cfg1.get('domain', '')
	hostname = cfg1.get('hostname', '')
	dbname = cfg1.get('dbname', '')
	if dbname is None or len(dbname) == 0:
		dbname = 'mailserver'
	dbuser = cfg1.get('dbuser', '')
	if dbuser is None or len(dbuser) == 0:
		dbuser = '******'
	dbpwd = cfg1.get('dbpwd', '')
	if dbpwd is None or len(dbpwd) == 0:
		dbpwd = '123456'
	upwd = cfg1.get('upwd', '')
	if upwd is None or len(upwd) == 0:
		upwd = '123456'
	master = cfg1.get('master', '')
	if master is None or len(master) == 0:
		master = 'postmaster'
	maildir = cfg1.get('maildir', '')
	if maildir is None or len(maildir) == 0:
		maildir = '/var/mail/vhosts/'
	mysqlpwd = cfg1.get('mysqlpwd', '')
	if mysqlpwd is None or len(mysqlpwd) == 0:
		mysqlpwd = '123456'	

	ssh = common.SSH(cfg, ttyid)
	ssh.cmd('apt-get update', True)
	ssh.upload(common.join(__file__, 'mail-install.sh'), '/tmp/mail-install.sh')
	ssh.cmd('chmod u+x /tmp/mail-install.sh', True)
	ssh.cmd(('/tmp/mail-install.sh -d %s '
		'-h %s -l %s -m %s -n %s '
		'-p %s -a %s -y %s -i %s') % (
		domain, hostname, dbname, dbuser, dbpwd,
		upwd, master, mysqlpwd, maildir), True)
	ssh.close()
Example #16
import os

import common as cm  # assumed alias: this script refers to the shared helpers as `cm`


def compiler_invoke(arch, vstring, reloutdir):
    command = ("xcodebuild "
               "-project ../builds/macosx/signalizer.xcodeproj "
               "clean")

    if os.system(command) != 0:
        return -1

    command = (
        "xcodebuild "
        "-project ../builds/macosx/signalizer.xcodeproj "
        "-scheme Signalizer "
        "-configuration Release "
        "CONFIGURATION_BUILD_DIR=" + cm.join(os.getcwd(), reloutdir) + "/ "
        # the following optional line removes nearly all symbol info: smaller
        # packages, but much worse for debugging
        #"DEPLOYMENT_POSTPROCESSING=YES "
        "STRIP_INSTALLED_PRODUCT=YES "
        "SEPARATE_STRIP=YES "
        "COPY_PHASE_STRIP=YES "
        "ARCHS=" + arch + " "
        "ONLY_ACTIVE_ARCH=NO "
        "DYLIB_CURRENT_VERSION=" + vstring + " ")
    print("---------> Compiler invocation: \n" + command)
    return os.system(command)
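A hedged usage sketch: arch and vstring feed straight into the xcodebuild flags above, and the return value is the raw os.system() exit status (0 on success); the arguments below are assumptions for illustration.

if compiler_invoke("x86_64", "1.0.0", "Signalizer_OSX/x64") != 0:
    print("------> Error: xcodebuild invocation failed")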
Example #17
import os

import common as cm  # assumed alias, as in the other build scripts


def set_plist_option(rel_plist_path, command):
    full_path = cm.join(os.getcwd(), rel_plist_path)
    os.system('/usr/libexec/PlistBuddy -c "' + command + '" "' + full_path +
              '"')
Example #18
# configuration values; `config` (presumably a ConfigParser), `cm` (shared
# helpers), setup_resource() and compiler_invoke() are defined earlier in
# this build script
major = config.get("version", "major")
minor = config.get("version", "minor")
build = config.get("version", "build")
desc = config.get("info", "description")
name = config.get("info", "productname")

version_string = major + "." + minor + "." + build
vcxpath = "../builds/visualstudio2010"
zipoutput = "../Releases/Signalizer Windows VST " + version_string
#diagnostic
print("------> Building Signalizer v. " + version_string + " release targets")

#overwrite resource to embed version numbers
setup_resource(cm.join(vcxpath, "resources.rc"), major, minor, build, name,
               desc)

# [0] = arch label, [1] = Visual Studio build configuration
targets = [["x86", '"Release|win32"'], ["x64", '"Release|x64"']]

cm.rewrite_version_header("../Source/version.h", major, minor, build)

#run all targets

for option in targets:
    if compiler_invoke(option[0], option[1]) != 0:
        print("\n------> Error compiling for target " + option[0])
        exit(1)

# output dirs
release_dir = cm.join("Signalizer Windows", "Release " + version_string)
Example #19
import copy
import itertools

import common
import s1  # assumed: the module providing simple_reduction (Example #7)


def search(puzzle, breakpoint=2):
  """
  search produces a solution to <puzzle>.

  >>> solve("p3.text") # doctest: +ELLIPSIS
  71
  ...
  """
  max, field = puzzle

  solution = (common.cost(field), field)

  paths = [solution, ]
  while len(paths) > 0:
    curr_cost, field = paths.pop(0)

    # Figure out the current number of greenhouses
    greenhouses = common.ids(field)

    if len(greenhouses) > 1:
      diffs = {}
      # Try each combination of greenhouses
      for g1, g2 in itertools.combinations(greenhouses, 2):
        # Find outer bounds (left, right, top and bottom) for a greenhouse made
        # up of g1 and g2
        size3, p31, p32 = common.outer_bounds([g1, g2], field)

        if size3 is not None:
          size1, p11, p12 = common.outer_bounds(g1, field)
          size2, p21, p22 = common.outer_bounds(g2, field)

          diff = size3 - size2 - size1
          if diff not in diffs.keys():
            diffs[diff] = [(g1, g2),]
          else:
            diffs[diff].append((g1, g2))

      # Find the list of joins which has the lowest diff and select the joins
      # of the most frequent greenhouse.
      if len(diffs.keys()) > 0:
        freqs = {}
        for (g1, g2) in diffs[sorted(diffs.keys())[0]]:
          if g1 not in freqs.keys():
            freqs[g1] = [(g1, g2),]
          else:
            freqs[g1].append((g1, g2))

          if g2 not in freqs.keys():
            freqs[g2] = [(g1, g2),]
          else:
            freqs[g2].append((g1, g2))

        # Perform each join in a fresh copy of field and add it to paths if
        # cost is lower than current cost, otherwise compare cost to solution
        # and either discard this path or add it as best-so-far.
        joins = freqs[sorted(freqs.keys(), key=lambda k: len(freqs[k]), reverse=True)[0]]
        if len(joins) <= breakpoint:
          (g1, g2) = joins[0]
          _, _field = s1.simple_reduction((max, common.join(g1, g2, copy.deepcopy(field))))

          cf = common.cost(_field)
          if cf < solution[0] and \
             len(common.ids(_field)) <= max:

            solution = (cf, _field)

        else:
          for (g1, g2) in joins:
            _field = common.join(g1, g2, copy.deepcopy(field))
            cf = common.cost(_field)
            if cf < curr_cost:
              paths.append((cf, _field))

              if cf < solution[0] and \
                 len(common.ids(_field)) <= max:

                solution = (cf, _field)

  return max, solution[1]
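The join-selection step sorts candidate greenhouses by how many candidate joins they appear in; a self-contained illustration of that idiom with made-up data:

freqs = {1: [(1, 2), (1, 3)], 2: [(1, 2)], 3: [(1, 3)]}
most_frequent = sorted(freqs.keys(), key=lambda k: len(freqs[k]), reverse=True)[0]
print(most_frequent, freqs[most_frequent])  # 1 [(1, 2), (1, 3)]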
Example #20
print("------> Building Signalizer v. " + version_string + " release targets")

cm.rewrite_version_header("../Source/version.h", major, minor, build)

#run targets

compiler_invoke("--target=Release")

# output dirs
release_dir = "Signalizer Linux Release " + version_string

cm.create_build_file("Build.log", version_string)

# build skeleton
sh.copytree("Skeleton", release_dir)
sh.copyfile("Build.log", cm.join(release_dir, "Build.log"))

print("\n------> All builds finished, generating skeletons...")

# copy in builds
sh.copy("../Builds/CodeBlocks/bin/Release/libSignalizer.so",
        cm.join(release_dir, "Signalizer.so"))

print("------> Zipping output directories...")

zx = sh.make_archive(zipoutput, "zip", release_dir)

print("------> Builded Signalizer successfully into:")
print("------> " + zx)

# clean up dirs
Example #21
from datetime import date  # used below for the copyright year

# `config`, `cm` and the major/minor/build version strings are defined
# earlier in this script
desc = config.get("info", "description")
name = config.get("info", "productname")
company = config.get("info", "company")
author = config.get("info", "author")
manu4 = config.get("info", "manu4")
sub4 = config.get("info", "sub4")
version_string = major + "." + minor + "." + build
version_int = (int(major) << 48) | (int(minor) << 32) | int(build)
build_folder = "Signalizer_OSX"
zipoutput = "../Releases/Signalizer OS X " + version_string
#diagnostic
print("------> Building Signalizer v. " + version_string +
      " release targets (" + str(version_int))

# [0] = arg to clang, [1] = output folder
targets = [["i386", cm.join(build_folder, "x32")],
           ["x86_64", cm.join(build_folder, "x64")]]

# rewrite program internal version

cm.rewrite_version_header("../Source/version.h", major, minor, build)

# rewrite build plist
plist = cm.join("../Builds/MacOSX/Info.plist")
set_plist_option(plist, "Set :CFBundleIdentifier com." + company + "." + name)
set_plist_option(plist, "Set :CFBundleShortVersionString " + version_string)
set_plist_option(plist, "Set :CFBundleVersion " + version_string)
set_plist_option(
    plist, "Set :NSHumanReadableCopyright Copyright (c) " +
    str(date.today().year) + " " + author)
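As a quick worked check of the packing above: major occupies bits 48 and up, minor bits 32-47, and build the low 32 bits, so version 1.2.3 packs as follows.

print((1 << 48) | (2 << 32) | 3)  # 281483566645251 == version 1.2.3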