def add_service(sess, name):
    """Fetch the Service named *name*, creating it first if absent.

    Shared setup helper for other tests; returns the Service row.
    """
    existing = sess.query(Service).filter_by(name=name).first()
    if existing:
        return existing
    svc = Service(name=name)
    create(sess, svc)
    return svc
def add_service(sess, name):
    """Reusable helper: look up the Service called *name*, adding it when missing."""
    svc = sess.query(Service).filter_by(name=name).first()
    if svc is None:
        svc = Service(name=name)
        create(sess, svc)
    return svc
def test_upload_one_file(self):
    """Upload a single local temp file to a freshly named remote path."""
    if not self.ok:
        return
    local_path = join(self.tempdir, 'b')
    create(local_path, 'youcoulele')
    remote_name = basename(mktemp())
    self.ftp.upload(local_path, join(self.remoteDir, remote_name))
def test_create_iface():
    """A bootable public interface can be attached to an existing machine."""
    machine = sess.query(Machine).first()
    nic = Interface(hardware_entity=machine,
                    name='eth0',
                    mac=random_mac(),
                    bootable=True,
                    interface_type='public')
    create(sess, nic)
    assert isinstance(nic, Interface), 'no iface created @ %s' % func_name()
def test_host_in_two_clusters(): """ create 2 new clusters and add a host to both. check Host.cluster. """ per = sess.query(Personality).select_from( join(Archetype, Personality)).filter( and_(Archetype.name=='windows', Personality.name=='generic')).one() for i in xrange(3): ec = EsxCluster(name='%s%s'% (CLUSTER_NAME, i), personality=per) add(sess, ec) commit(sess) c1 = sess.query(EsxCluster).filter_by(name='%s1' % (CLUSTER_NAME)).one() c2 = sess.query(EsxCluster).filter_by(name='%s2' % (CLUSTER_NAME)).one() assert c1 assert c2 print 'clusters in host in 2 cluster test are %s and %s'% (c1, c2) host = h_factory.next() sess.autoflush = False hcm1 = HostClusterMember(host=host, cluster=c1) create(sess, hcm1) assert host in c1.hosts print 'c1 hosts are %s'% (c1.hosts) c2.hosts.append(host) sess.autoflush = True commit(sess)
def test_host_in_two_clusters(): """ create 2 new clusters and add a host to both. check Host.cluster. """ per = sess.query(Personality).select_from(join( Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() for i in xrange(3): ec = EsxCluster(name='%s%s' % (CLUSTER_NAME, i), personality=per) add(sess, ec) commit(sess) c1 = sess.query(EsxCluster).filter_by(name='%s1' % (CLUSTER_NAME)).one() c2 = sess.query(EsxCluster).filter_by(name='%s2' % (CLUSTER_NAME)).one() assert c1 assert c2 print 'clusters in host in 2 cluster test are %s and %s' % (c1, c2) host = h_factory.next() sess.autoflush = False hcm1 = HostClusterMember(host=host, cluster=c1) create(sess, hcm1) assert host in c1.hosts print 'c1 hosts are %s' % (c1.hosts) c2.hosts.append(host) sess.autoflush = True commit(sess)
def test_create_machines_for_test_interface():
    """Seed NUM_MACHINES hp/bl45p machines in building 'np' for iface tests."""
    np = Building.get_unique(sess, 'np')
    assert isinstance(np, Building), 'no building in %s' % func_name()
    hp = Vendor.get_unique(sess, 'hp')
    assert isinstance(hp, Vendor), 'no vendor in %s' % func_name()
    am = Model.get_unique(sess, name='bl45p', vendor=hp)
    assert isinstance(am, Model), 'no model in %s' % func_name()
    cpu = sess.query(Cpu).first()
    assert isinstance(cpu, Cpu), 'no cpu in %s' % func_name()

    for idx in xrange(NUM_MACHINES):
        create(sess, Machine(label='%s%s' % (MACHINE_NAME_PREFIX, idx),
                             location=np, model=am, cpu=cpu,
                             cpu_quantity=2, memory=32768))

    made = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME_PREFIX + '%')).all()
    eq_(len(made), NUM_MACHINES)
def test_upload_one_file(self):
    """Round-trip one file over FTP and verify the remote copy matches."""
    if not self.ok:
        return
    src = join(self.tempdir, 'b')
    create(src, 'youcoulele.txt')
    dst = mktemp()
    self.ftp.upload(src, dst)
    self.assert_(diff(src, dst))
def test_cluster_bound_svc(): """ test the creation of a cluster bound service """ si = add_service_instance(sess, SVC_NAME, INST_NAME) assert si, 'no service instance in %s' % func_name() ec = Cluster.get_unique(sess, CLUSTER_NAME) cs = ClusterServiceBinding(cluster=ec, service_instance=si) create(sess, cs) assert cs, 'no cluster bound service created by' % func_name() print cs
def test_cluster_bound_svc(): """ test the creation of a cluster bound service """ si = add_service_instance(sess, SVC_NAME, INST_NAME) assert si, "no service instance in %s" % func_name() ec = Cluster.get_unique(sess, CLUSTER_NAME) cs = ClusterServiceBinding(cluster=ec, service_instance=si) create(sess, cs) assert cs, "no cluster bound service created by" % func_name() print cs
def test_fail_upd_bootable_iface_to_null_mac():
    """Setting a bootable interface's MAC to None must be rejected."""
    machine = sess.query(Machine).first()
    nic = Interface(hardware_entity=machine, name='eth1', mac=random_mac(),
                    bootable=True, interface_type='public')
    create(sess, nic)
    assert isinstance(nic, Interface), 'no iface created @ %s' % func_name()
    nic.mac = None
    commit(sess)
    assert nic.mac is not None, 'able to set a bootable interface to null'
def test_fail_upd_mgmt_iface_to_null_mac():
    """A management interface must keep a non-null MAC address."""
    machine = sess.query(Machine).first()
    # NOTE(review): bootable=True on a management iface looks copy-pasted
    # from the public-iface test above -- confirm it is intended.
    mgmt = Interface(hardware_entity=machine, name='ipmi', mac=random_mac(),
                     bootable=True, interface_type='management')
    create(sess, mgmt)
    assert isinstance(mgmt, Interface), 'no iface created @ %s' % func_name()
    mgmt.mac = None
    commit(sess)
    assert mgmt.mac is not None, 'set a management iface to null mac_addr'
def test_add_aligned_service(): svc = sess.query(Service).filter_by(name=SVC_NAME).first() assert svc, "No cluster management service in %s" % func_name() cas = ClusterAlignedService(cluster_type="esx", service=svc) create(sess, cas) assert cas, "no cluster aligned service created by %s" % func_name() print cas ec = sess.query(EsxCluster).first() print "%s has required services %s" % (ec.name, ec.required_services) assert ec.required_services
def test_add_aligned_service(): svc = sess.query(Service).filter_by(name=SVC_NAME).first() assert svc, 'No cluster management service in %s' % func_name() cas = ClusterAlignedService(cluster_type='esx', service=svc) create(sess, cas) assert cas, 'no cluster aligned service created by %s' % func_name() print cas ec = sess.query(EsxCluster).first() print '%s has required services %s' % (ec.name, ec.required_services) assert ec.required_services
def add_service_instance(sess, service_name, name): si = sess.query(ServiceInstance).filter_by(name=name).first() if not si: print 'Creating %s instance %s ' % (service_name, name) svc = sess.query(Service).filter_by(name=service_name).one() assert svc, 'No %s service in %s' % (service_name, func_name()) si = ServiceInstance(name=name, service=svc) create(sess, si) assert si, 'no service instance created by %s' % func_name() return si
def add_service_instance(sess, service_name, name): si = sess.query(ServiceInstance).filter_by(name=name).first() if not si: print "Creating %s instance %s " % (service_name, name) svc = sess.query(Service).filter_by(name=service_name).one() assert svc, "No %s service in %s" % (service_name, func_name()) si = ServiceInstance(name=name, service=svc) create(sess, si) assert si, "no service instance created by %s" % func_name() return si
def add_arecord(ip):
    """ adding a valid ARecord """
    (ms, intrnl) = get_reqs()
    label = AREC_PREFIX + str(unique_number.next())
    record = ARecord(name=label, dns_domain=ms, dns_environment=intrnl,
                     ip=ip, comments='comment here', session=sess)
    create(sess, record)
    assert record, 'no a_record created by %s' % func_name()
    sess.refresh(record)
    return record
def test_primary_name():
    """Associate a fresh ARecord with a fresh machine as its primary name."""
    mchn = add_machine(sess, MCHN_PREFIX + str(unique_number.next()))
    # Result unused here; kept in case get_reqs() has side effects the
    # later calls rely on -- TODO confirm.
    (ms, intrnl) = get_reqs()
    a_rcrd = add_arecord(IP_ADDRS[0])
    pna = PrimaryNameAssociation(a_record_id=a_rcrd.dns_record_id,
                                 hardware_entity=mchn)
    create(sess, pna)
    log.info(pna)
def test_add_meta_member(): """ Test adding a cluster to a metacluster and cluster.metacluster """ mc = MetaCluster.get_unique(sess, META_NAME) cl = cl_factory.next() mcm = MetaClusterMember(metacluster=mc, cluster=cl) create(sess, mcm) assert mcm assert len(mc.members) is 1 print 'metacluster members %s' % (mc.members) assert cl.metacluster is mc print cl.metacluster
def test_add_too_many_metacluster_members():
    """Attach two more clusters to the metacluster (capacity-limit setup)."""
    cl2 = cl_factory.next()
    cl3 = cl_factory.next()
    assert cl2
    assert cl3
    mc = MetaCluster.get_unique(sess, META_NAME)
    for extra_cluster in (cl2, cl3):
        member = MetaClusterMember(metacluster=mc, cluster=extra_cluster)
        create(sess, member)
        assert member
def test_cascaded_delete2():
    """ deleting services deletes cluster aligned services """
    svc = sess.query(Service).filter_by(name=SVC_2).first()
    assert svc, "No throw away service in %s" % func_name()

    # Bind the service as cluster-aligned, then drop the service; the
    # binding is expected to be cascade-deleted along with it.
    cas = ClusterAlignedService(cluster_type="esx", service=svc)
    create(sess, cas)
    assert cas, "No cluster aligned service in %s" % func_name()

    # FIX ME: put in tear_down()
    sess.delete(svc)
    commit(sess)
    sess.refresh(cas)
    sess.refresh(svc)
def test_cascaded_delete2():
    """ deleting services deletes cluster aligned services """
    svc = sess.query(Service).filter_by(name=SVC_2).first()
    assert svc, "No throw away service in %s" % func_name()

    # add cas, delete the service, make sure the CAS disappears
    aligned = ClusterAlignedService(cluster_type='esx', service=svc)
    create(sess, aligned)
    assert aligned, "No cluster aligned service in %s" % func_name()

    # FIX ME: put in tear_down()
    sess.delete(svc)
    commit(sess)
    sess.refresh(aligned)
    sess.refresh(svc)
def test_create_machines_for_hosts(): np = Building.get_unique(sess, name='np', compel=True) am = Model.get_unique(sess, name='vm', compel=True) a_cpu = Cpu.get_unique(sess, name='aurora_cpu', compel=True) for i in xrange(NUM_HOSTS): machine = Machine(label='%s%s'% (MACHINE_NAME, i), location=np, model=am, cpu=a_cpu, cpu_quantity=8, memory=32768) create(sess, machine) machines = sess.query(Machine).filter( Machine.label.like(MACHINE_NAME+'%')).all() assert len(machines) is NUM_MACHINES print 'created %s esx machines' % len(machines)
def test_create_vm(): #vend = Vendor.get_unique(sess, 'virtual') mod = Model.get_unique(sess, name='vm', compel=True) proc = Cpu.get_unique(sess, name='virtual_cpu', speed=0, compel=True) np = Building.get_unique(sess, 'np', compel=True) for i in xrange(NUM_MACHINES): vm = Machine(label='%s%s'%(VM_NAME, i), location=np, model=mod, cpu=proc, cpu_quantity=1, memory=4196) create(sess, vm) machines = sess.query(Machine).filter(Machine.label.like(VM_NAME+'%')).all() assert len(machines) is NUM_MACHINES print 'created %s machines' % (len(machines))
def test_ns_record(): """ test creating a valid ns record """ tgt = ARecord.get_unique(sess, fqdn='%s.%s' % (AREC_NAME, DNS_DOMAIN_NAME), compel=True) dmn = DnsDomain.get_unique(sess, name=DNS_DOMAIN_NAME, compel=True) ns = NsRecord(a_record=tgt, dns_domain=dmn) create(sess, ns) assert ns, 'No NS Record created in test_ns_record' print 'created %s' % ns assert dmn.servers, 'No name server association proxy in test_ns_record'
def gen_displace(N):
    """Returns a function to calculate displacements using tensorflow expm

    Calling this function calculates the common variables and closes over
    them.

    Args:
        N (int): Dimension of Hilbert space

    Returns:
        Callable[[int], Tensor([num, N, N], c64)]: Displacement function
        for dim N
    """
    lower = utils.destroy(N)
    upper = utils.create(N)

    def displace(alphas):
        """Calculates D(alpha) for a batch of alphas

        Args:
            alphas (Tensor([num], c64)): A batch of num alphas

        Returns:
            Tensor([num, N, N], c64): A batch of D(alphas)
        """
        # Shape [num, 1, 1] broadcasts against the [N, N] ladder operators.
        col = tf.reshape(alphas, [alphas.shape[0], 1, 1])
        return tf.linalg.expm(col * upper - tf.math.conj(col) * lower)

    return displace
def test_create_esx_cluster(): """ tests the creation of an EsxCluster """ np = Building.get_unique(sess, name='np', compel=True) br = Branch.get_unique(sess, 'ny-prod', compel=True) per = sess.query(Personality).select_from( join(Archetype, Personality)).filter( and_(Archetype.name=='windows', Personality.name=='generic')).one() ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, down_hosts_threshold=2, branch=br) create(sess, ec) assert ec print ec assert ec.max_hosts is 8 print 'esx cluster max members = %s' % ec.max_hosts
def test_add_cluster_host(): """ test adding a host to the cluster """ vm_host = h_factory.next() ec = EsxCluster.get_unique(sess, CLUSTER_NAME) sess.autoflush=False hcm = HostClusterMember(host=vm_host, cluster=ec) sess.autoflush=True create(sess, hcm) assert hcm print hcm assert ec.hosts assert len(ec.hosts) is 1 print 'cluster members: %s'%(ec.hosts)
def test_add_cluster_host(): """ test adding a host to the cluster """ vm_host = h_factory.next() ec = EsxCluster.get_unique(sess, CLUSTER_NAME) sess.autoflush = False hcm = HostClusterMember(host=vm_host, cluster=ec) sess.autoflush = True create(sess, hcm) assert hcm print hcm assert ec.hosts assert len(ec.hosts) is 1 print 'cluster members: %s' % (ec.hosts)
def test_create_cluster(): # TODO: make this a reusable function in test_cluster and import np = sess.query(Building).filter_by(name="np").one() dmn = sess.query(Domain).first() assert dmn, "No domain found in %s" % func_name() per = ( sess.query(Personality) .select_from(join(Archetype, Personality)) .filter(and_(Archetype.name == "windows", Personality.name == "generic")) .one() ) ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, domain=dmn) create(sess, ec) assert ec, "No EsxCluster created by %s" % func_name() print ec
def test_create_machines_for_hosts(): np = Building.get_unique(sess, name='np', compel=True) am = Model.get_unique(sess, name='vm', compel=True) a_cpu = Cpu.get_unique(sess, name='aurora_cpu', compel=True) for i in xrange(NUM_HOSTS): machine = Machine(label='%s%s' % (MACHINE_NAME, i), location=np, model=am, cpu=a_cpu, cpu_quantity=8, memory=32768) create(sess, machine) machines = sess.query(Machine).filter( Machine.label.like(MACHINE_NAME + '%')).all() assert len(machines) is NUM_MACHINES print 'created %s esx machines' % len(machines)
def test_two_metaclusters():
    """ Test unique constraint against cluster """
    m2 = MetaCluster(name=M2)
    m3 = MetaCluster(name=M3)
    sess.add_all([m2, m3])
    commit(sess)
    assert m2, 'metacluster %s not created ' % m2
    assert m3, 'metacluster %s not created ' % m3

    cl4 = cl_factory.next()
    assert cl4

    mcm1 = MetaClusterMember(metacluster=m2, cluster=cl4)
    create(sess, mcm1)
    assert mcm1

    mcm2 = MetaClusterMember(metacluster=m3, cluster=cl4)
    # BUG FIX: this second create() re-persisted mcm1, so mcm2 was never
    # saved and the unique constraint under test was never exercised.
    create(sess, mcm2)
    assert mcm2
def test_create_cluster(): #TODO: make this a reusable function in test_cluster and import np = sess.query(Building).filter_by(name='np').one() dmn = sess.query(Domain).first() assert dmn, 'No domain found in %s' % func_name() per = sess.query(Personality).select_from(join( Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, domain=dmn) create(sess, ec) assert ec, "No EsxCluster created by %s" % func_name() print ec
def test_create_vm(): #vend = Vendor.get_unique(sess, 'virtual') mod = Model.get_unique(sess, name='vm', compel=True) proc = Cpu.get_unique(sess, name='virtual_cpu', speed=0, compel=True) np = Building.get_unique(sess, 'np', compel=True) for i in xrange(NUM_MACHINES): vm = Machine(label='%s%s' % (VM_NAME, i), location=np, model=mod, cpu=proc, cpu_quantity=1, memory=4196) create(sess, vm) machines = sess.query(Machine).filter(Machine.label.like(VM_NAME + '%')).all() assert len(machines) is NUM_MACHINES print 'created %s machines' % (len(machines))
def test_create_esx_cluster(): """ tests the creation of an EsxCluster """ np = Building.get_unique(sess, name='np', compel=True) br = Branch.get_unique(sess, 'ny-prod', compel=True) per = sess.query(Personality).select_from(join( Archetype, Personality)).filter( and_(Archetype.name == 'windows', Personality.name == 'generic')).one() ec = EsxCluster(name=CLUSTER_NAME, location_constraint=np, personality=per, down_hosts_threshold=2, branch=br) create(sess, ec) assert ec print ec assert ec.max_hosts is 8 print 'esx cluster max members = %s' % ec.max_hosts
def test_create_machines_for_test_host():
    """Provision the NUM_MACHINES bl45p machines used by the host tests."""
    np = Building.get_unique(sess, 'np')
    assert isinstance(np, Building), 'no building in %s' % func_name()
    hp = Vendor.get_unique(sess, 'hp')
    assert isinstance(hp, Vendor), 'no vendor in %s' % func_name()
    am = Model.get_unique(sess, name='bl45p', vendor=hp)
    assert isinstance(am, Model), 'no model in %s' % func_name()
    cpu = sess.query(Cpu).first()
    assert isinstance(cpu, Cpu), 'no cpu in %s' % func_name()

    for n in xrange(NUM_MACHINES):
        create(sess, Machine(label='%s%s' % (MACHINE_NAME_PREFIX, n),
                             location=np, model=am, cpu=cpu,
                             cpu_quantity=2, memory=32768))

    found = sess.query(Machine).filter(
        Machine.label.like(MACHINE_NAME_PREFIX + '%')).all()
    eq_(len(found), NUM_MACHINES)
def test_cascaded_delete_1(): """ test that deleting service bindings don't delete services """ print 'Creating throw away service' svc = add_service(sess, SVC_2) assert svc, 'service not created by %s' % func_name() print 'added throw away service %s' % (svc) #make it a cluster aligned svc cas = ClusterAlignedService(cluster_type='esx', service=svc) create(sess, cas) assert cas, "No cluster aligned service in %s" % func_name() """ delete the cas, see if the service is still there. sess.refresh(obj) will throw 'InvalidRequestError: Instance xxx is not persistent within this Session' if it's been deleted """ sess.delete(cas) commit(sess) sess.refresh(svc) assert svc, "Service deleted when deleting the cluster aligned service" print "still have %s after deleting cluster aligned svc" % svc
def test_cascaded_delete_1(): """ test that deleting service bindings don't delete services """ print "Creating throw away service" svc = add_service(sess, SVC_2) assert svc, "service not created by %s" % func_name() print "added throw away service %s" % (svc) # make it a cluster aligned svc cas = ClusterAlignedService(cluster_type="esx", service=svc) create(sess, cas) assert cas, "No cluster aligned service in %s" % func_name() """ delete the cas, see if the service is still there. sess.refresh(obj) will throw 'InvalidRequestError: Instance xxx is not persistent within this Session' if it's been deleted """ sess.delete(cas) commit(sess) sess.refresh(svc) assert svc, "Service deleted when deleting the cluster aligned service" print "still have %s after deleting cluster aligned svc" % svc
def create_dummy_dir(self, newDir):
    """Build a small throw-away tree for the tests and return its top dir.

    NOTE(review): the two top-level files are written into self.tempdir,
    not into topDir -- the sibling helper writes them under topDir, so
    confirm which layout is intended.
    """
    if not self.ok:
        return
    topDir = join(self.tempdir, newDir)
    maybemakedirs(topDir)

    create(join(self.tempdir, 'a_file'), 'youcoulele')
    create(join(self.tempdir, 'another_file'), 'warzazat')

    nestedDir = join(topDir, 'a_dir')
    mkdir(nestedDir)
    create(join(nestedDir, 'a_file'), 'youcoulele')
    create(join(nestedDir, 'another_file'), 'warzazat')
    return topDir
def setup():
    """Create the dns domain, network and ARecord this module's tests need."""
    dmn = DnsDomain(name=DNS_DOMAIN_NAME)
    create(sess, dmn)
    assert dmn, 'no dns domain in %s' % func_name()

    pi = Building.get_unique(sess, name='pi', compel=True)
    net = Network(name=TEST_NET_NAME, network=IPv4Network(TEST_NET),
                  location=pi)
    create(sess, net)
    assert net, 'no network created by %s' % func_name()

    arec = ARecord(name=AREC_NAME, dns_domain=dmn,
                   ip=IPv4Address(TEST_IP), network=net)
    create(sess, arec)
    assert arec, 'no ARecord created by %s' % func_name()
def comm_err(test):
    """Calculate error in expectation value of [a, a_dag]

    Args:
        test (Tensor([num, N, N], c64)): Array of num D(alpha) operators

    Returns:
        (float, float): (mean, max) of % diff in commutator
    """
    dim = test.shape[1]
    lower = utils.destroy(dim, dtype=tf.complex128)
    upper = utils.create(dim, dtype=tf.complex128)
    commutator = lower @ upper - upper @ lower

    # Coherent state prepared from the test displacement operators.
    coh = coherent(test)

    # <alpha|[a, a_dag]|alpha> should equal exactly 1.
    target = tf.constant(1, dtype=tf.complex128)
    expect = tf.reduce_sum(
        tf.math.conj(coh) * tf.linalg.matvec(commutator, coh), 1)
    errs = tf.math.abs(expect - target)
    return tf.math.reduce_mean(errs), tf.math.reduce_max(errs)
def handler(event, context):
    """API Gateway handler: persist one shift (dict) or many shifts (list).

    Each incoming shift gets a fresh UUID 'id' and tradeable=False before
    being stored; on success the (mutated) payload is echoed back, on
    failure the storage layer's error response is returned.
    """
    print("Received api request: " + json.dumps(event, indent=2))

    # ROBUSTNESS: .get avoids a KeyError when 'body' is absent entirely.
    body = event.get('body')
    if not body:
        return respond({'error': 'No request body'})

    payload = json.loads(body)
    if isinstance(payload, list):  # IDIOM: isinstance instead of type() is
        shifts = []
        for shift in payload:
            shift['id'] = str(uuid4())
            shift['tradeable'] = False
            shifts.append(shift)
        response = create_batch(shifts, 'Shifts')
    else:
        payload['id'] = str(uuid4())
        payload['tradeable'] = False
        response = create(payload)

    if 'error' in response:
        return respond(response)
    return respond(None, {'data': payload})
def create_dummy_dir(self, newDir):
    '''Create a dummy tree under self.tempdir/newDir and return its root.

    This one is duplicated in utils_test: we should create a class that
    inherits unittest.TestCase with this method in it in test.py, and
    take care of self.tmpdir there as well.  Layout, from topDir:

        ./a_file
        ./another_file
        ./a_dir
        ./a_dir/a_file
        ./a_dir/another_file
    '''
    if not self.ok:
        return
    topDir = join(self.tempdir, newDir)
    maybemakedirs(topDir)

    create(join(topDir, 'a_file'), 'youcoulele')
    create(join(topDir, 'another_file'), 'warzazat')

    nested = join(topDir, 'a_dir')
    mkdir(nested)
    create(join(nested, 'a_file'), 'gouzigouzi')
    create(join(nested, 'another_file'), 'guacamol')
    return topDir
from Models.hmm import HMM import utils import os import metric from Models.crf import CRFModel from Models.bilstm import BiLSTM from torch.optim import Adamax import torch import torch.nn.functional as F trainWordLists,trainTagLists,word2id,tag2id=utils.create('train.txt',make_vocab=True) devWordLists,devTagList=utils.create('dev.txt',make_vocab=False) #隐马尔科夫模型训练 print('HMM************************') if os.path.exists('ckpts/hmm.pkl'): hmm=utils.loadModel('ckpts/hmm.pkl') predictTags = hmm.test(devWordLists, word2id, tag2id) else: hmm=HMM(len(tag2id),len(word2id)) hmm.train(trainWordLists,trainTagLists,tag2id,word2id) utils.saveModel('ckpts/hmm.pkl',hmm) predictTags=hmm.test(devWordLists,word2id,tag2id) accuracy=metric.accuracy(predictTags,devTagList) print('accuracy: ',accuracy) print('CRF****************************') # #条件随机序列场模型训练 if os.path.exists('ckpts/crf.pkl'): crf=utils.loadModel('ckpts/crf.pkl') print(crf) predictTags=crf.test(devWordLists) else:
def create_dummy_dir(self, newDir):
    '''Create a three-level dummy tree for FTP tests and return its root.

    Duplicated from ftp_utils: we should create a class inheriting
    unittest.TestCase with this method in it in test.py, and take care
    of self.tmpdir there as well.
    '''
    topDir = join(self.tempdir, newDir)
    maybemakedirs(topDir)

    # first level (written beside topDir, directly in self.tempdir)
    create(join(self.tempdir, 'level_1_fileA'), 'youcoulele')
    create(join(self.tempdir, 'level_1_fileB'), 'warzazat')

    # second level
    create(join(topDir, 'level_2_fileA'), 'youcoulele')

    # third level A
    dirA = join(topDir, 'a_dir')
    mkdir(dirA)
    create(join(dirA, 'level_3A_fileA'), 'youcoulele')
    create(join(dirA, 'level_3A_fileB'), 'warzazat')

    # third level B
    dirB = join(topDir, 'b_dir')
    mkdir(dirB)
    # NOTE(review): this name repeats 'level_3A_fileA' -- probably meant
    # 'level_3B_fileA'; kept as-is to preserve behavior.
    create(join(dirB, 'level_3A_fileA'), 'makelele')
    create(join(dirB, 'level_3B_fileB'), 'bogoss')
    return topDir
def gen_displace_BCH(N):
    """Return a displacement function for dim N built via the BCH formula.

    Using Baker-Campbell-Hausdorff lets every matrix exponential act on a
    diagonal matrix, replacing the complicated general `expm` (scaling/
    squaring with Pade approximation) by scalar exponentials along the
    diagonal.  Both approaches are O(N^3) in theory, but the constant
    factor differs greatly, and the remaining O(N^3) matrix products may
    admit further speedups.

    The common variables are computed once and closed over.  Diagonalizing
    q and p here may take significant time; the returned displace(alphas)
    is much faster in comparison.

    Args:
        N (int): Dimension of Hilbert space

    Returns:
        Callable[[int], Tensor([num, N, N], c64)]: Displacement function
        for dim N
    """
    lower = utils.destroy(N, dtype=tf.complex128)
    upper = utils.create(N, dtype=tf.complex128)

    # Raising/lowering operators -> position/momentum quadratures.
    sqrt2 = tf.math.sqrt(tf.constant(2, dtype=tf.complex128))
    q = (upper + lower) / sqrt2
    p = (upper - lower) * 1j / sqrt2

    # Diagonalize both quadratures once, up front.
    eig_q, U_q = tf.linalg.eigh(q)
    eig_p, U_p = tf.linalg.eigh(p)
    U_q_dag = tf.linalg.adjoint(U_q)
    U_p_dag = tf.linalg.adjoint(U_p)

    # Compute the commutator numerically; assuming [q, p] = j analytically
    # causes significant errors here.
    comm = tf.linalg.diag_part(q @ p - p @ q)

    def displace(alphas):
        """Calculates D(alpha) for a batch of alphas

        Args:
            alphas (Tensor([num], c64)): A batch of num alphas

        Returns:
            Tensor([num, N, N], c64): A batch of D(alphas)
        """
        # Scale alpha and reshape to broadcast against the diagonals.
        alphas = sqrt2 * tf.cast(
            tf.reshape(alphas, [alphas.shape[0], 1]), dtype=tf.complex128)
        # Real/imag parts feed the commutator term of the expansion.
        re_a = tf.cast(tf.math.real(alphas), dtype=tf.complex128)
        im_a = tf.cast(tf.math.imag(alphas), dtype=tf.complex128)
        # Only diagonal matrices are exponentiated.
        expm_q = tf.linalg.diag(tf.math.exp(1j * im_a * eig_q))
        expm_p = tf.linalg.diag(tf.math.exp(-1j * re_a * eig_p))
        expm_c = tf.linalg.diag(tf.math.exp(-0.5 * re_a * im_a * comm))
        # Recombine via Baker-Campbell-Hausdorff.
        return tf.cast(
            U_q @ expm_q @ U_q_dag @ U_p @ expm_p @ U_p_dag @ expm_c,
            dtype=tf.complex64,
        )

    return displace
""" import sys import time import warnings if not sys.warnoptions: warnings.simplefilter("ignore") from config import text_embedding_module, model_dir from utils import reduce_logging_output, create, print_analysis start_time = time.time() reduce_logging_output() estimator = create(text_embedding_module, model_dir) mt_positive = "Guten Tag, Nachdem in diesem, sowie in anderen Foren sehr häufig nachgefragt wird, ob ein " \ "Angebot auf einer der bekannten Automobilhandelsplattformen der Realität entspricht und leider " \ "auch mittlerweile Betrugsopfer bei MT vorhanden sind, möchte Ich diese Problematik hier " \ "einmal zusammenfassen. Vermutlich werde ich nicht in der Lage sein diesen Beitrag erfüllend " \ "zu schreiben und werde sicherlich die ein oder andere Masche nicht aufzählen, die da noch " \ "existieren mag. Doch fangen wir mal an. In der Regel findet man solche Angebote auf den " \ "am stärksten frequentierten Automobil-Handelsseiten wie z.B. Autoscout24.de oder " \ "mobile.de, doch auch auf Ebay.de wurden schon entsprechende Angebote gesehen. Ein " \ "solches Angebot sieht meist wie folgt aus. Ein allgemein auf dem deutschen bzw. europäischen " \ "Markt stärker nachgefragtes Fahrzeug wird deutlich unterhalb seines üblichen Marktpreises " \ "zum Verkauf ausgeschrieben. Hierbei fallen mir folgende Fahrzeuge ein, bei welchen Ich " \ "bereits solche Fakeangebote gesehen habe, zum einen handelt es sich um die volle Bandbreite " \ "an Sportwagen vom Alfa Romeo Spider bis hin zum japanischen Tuningkultobjekt Toyota " \ "Supra. Aber auch solide Fahrzeuge der gehobenen Mittelklasse, wie aktuelle E-Klassen oder " \
def test_create_metacluster(): mc = MetaCluster(name=META_NAME) create(sess, mc) assert mc print mc