Example 1
import os

def compress(fn):
    """Compress the merged/packaged file; requires network support."""
    print("compressing...")
    path, filename = os.path.split(fn)
    compressed_fn = os.path.join(path, filename.replace("-pkg.js", "-pkg-min.js"))
    compressor(compressed_fn, fn)
    print("compressed!")
Example 2
 def test_set_suffix2(self):
     ''' Test case where the admin does not want a date suffix added to the compressed file.
     '''
     now = datetime.strptime("2019-01-01", "%Y-%m-%d")
     compressor1 = compressor.compressor("job1.conf", now)
     self.clear_environment(compressor1.conf)
     compressor1.conf["add_date_suffix"] = False
     compressor1.set_suffix()
     self.assertEqual(compressor1.suffix, '', 'bad suffix settings')
Example 3
 def test_set_suffix1(self):
     ''' Tests that, when enabled by the admin, the date suffix is appended to the compressed file name.
     '''
     now = datetime.strptime("2019-01-01", "%Y-%m-%d")
     compressor1 = compressor.compressor("job1.conf", now)
     self.clear_environment(compressor1.conf)
     compressor1.conf["add_date_suffix"] = True
     compressor1.set_suffix()
     self.assertEqual(compressor1.suffix, '2019-01-01',
                      'bad suffix settings')
Example 4
 def test_not_recursive_searching(self):
     ''' Test case: recursive searching of the directory tree is disabled.
     '''
     now = datetime.strptime("2019-12-01", "%Y-%m-%d")
     compressor1 = compressor.compressor("job1.conf", now)
     self.clear_environment(compressor1.conf)
     compressor1.conf["recursive"] = False
     self.create_subdir_tree(compressor1.conf["direcrory"])
     compressor1.find_files(compressor1.conf["direcrory"])
     for subd in self.subdirs:
         self.assertNotIn(subd, compressor1.searched_directories,
                          'subdirectory was searched although recursion is disabled')
Example 5
 def test_maturation_is_lower(self):
     ''' Compression must not run in the days before the planned date.
         Tests one day before the plan.
     '''
     tested_data = ("2019-01-01", "2019-01-02", "2019-10-25", "2019-11-30",
                    "2019-12-01", "2019-12-31")
     for x in tested_data:
         now = datetime.strptime(x, "%Y-%m-%d")
         compressor1 = compressor.compressor("job1.conf", now)
         self.clear_environment(compressor1.conf)
         diff = timedelta(days=compressor1.conf["job_days_step"] - 1)
         last_date = now - diff
         compressor1.set_timestamp(last_date)
         result = compressor1.is_job_maturate()
         self.assertEqual(result, False, 'job ran before maturation')
Example 6
 def test_maturation_is_higher(self):
     ''' Tests the case where the server was down when the job was planned, e.g. on the 30th day.
         After the server is repaired, cron starts the program later than the 30th day, but compression should still run.
     '''
     tested_data = ("2019-01-01", "2019-01-02", "2019-10-25", "2019-11-30",
                    "2019-12-01", "2019-12-31")
     for x in tested_data:
         now = datetime.strptime(x, "%Y-%m-%d")
         compressor1 = compressor.compressor("job1.conf", now)
         self.clear_environment(compressor1.conf)
         diff = timedelta(days=compressor1.conf["job_days_step"] + 1)
         last_date = now - diff
         compressor1.set_timestamp(last_date)
         result = compressor1.is_job_maturate()
         self.assertEqual(result, True,
                          'job did not run although enough days had passed')
Example 7
 def test_maturation_is_precisely(self):
     ''' Tests that the compression task starts exactly N days after the last job.
         The default in the conf file is 30.
     '''
     tested_data = ("2019-01-01", "2019-01-02", "2019-10-25", "2019-11-30",
                    "2019-12-01", "2019-12-31")
     for x in tested_data:
         now = datetime.strptime(x, "%Y-%m-%d")
         compressor1 = compressor.compressor("job1.conf", now)
         self.clear_environment(compressor1.conf)
         diff = timedelta(days=compressor1.conf["job_days_step"])
         last_date = now - diff
         compressor1.set_timestamp(last_date)
         result = compressor1.is_job_maturate()
         self.assertEqual(result, True,
                          'job did not run although enough days had passed')
Example 8
 def test_first_time_run_job(self):
     ''' On the first run, the timestamp file does not exist yet. In that case compression must start.
     '''
     tested_data = ("2019-01-01", "2019-01-02", "2019-10-25", "2019-11-30",
                    "2019-12-01", "2019-12-31")
     for x in tested_data:
         now = datetime.strptime(x, "%Y-%m-%d")
         compressor1 = compressor.compressor("job1.conf", now)
         # before the first run the timestamp file does not exist; clear_environment prepares this too
         self.clear_environment(compressor1.conf)
         # but in case clear_environment is ever modified, check it explicitly here
         timestamp_file_name = compressor1.conf[
             "job_name"] + "_timestamp.log"
         if (os.path.exists(timestamp_file_name)):
             os.remove(timestamp_file_name)
         result = compressor1.is_job_maturate()
         self.assertEqual(result, True, 'first-time run was not started')
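Taken together, Examples 5 through 8 pin down the maturation rule. Below is a minimal sketch of what is_job_maturate has to implement, inferred only from these tests; attribute names such as self.now and self.last_date are assumptions for illustration, not taken from the real compressor module.

import os
from datetime import timedelta

def is_job_maturate(self):
    # First run: the timestamp file does not exist yet -> start (Example 8).
    timestamp_file_name = self.conf["job_name"] + "_timestamp.log"
    if not os.path.exists(timestamp_file_name):
        return True
    # Otherwise run only when at least job_days_step days have passed since the
    # stored timestamp: strictly fewer days -> False (Example 5), exactly
    # job_days_step or more -> True (Examples 7 and 6).
    return self.now - self.last_date >= timedelta(days=self.conf["job_days_step"])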
Example 9
 def test_searching_files(self):
     ''' Tests that all files due for compression are found.
     '''
     now = datetime.strptime("2019-12-01", "%Y-%m-%d")
     compressor1 = compressor.compressor("job1.conf", now)
     self.clear_environment(compressor1.conf)
     compressor1.conf["recursive"] = True
     self.create_subdir_tree(compressor1.conf["direcrory"])
     self.create_all_random_files(compressor1.conf["exclude_ext"],
                                  compressor1.conf["direcrory"])
     compressor1.find_files(compressor1.conf["direcrory"])
     # files with excluded extensions must really be excluded
     for afile in self.files_exclude:
         self.assertNotIn(afile, compressor1.files_to_compress,
                          'file should be excluded but was not')
     # all files due for compression must really be found
     for afile in self.files_compress:
         self.assertIn(afile, compressor1.files_to_compress,
                       'file should be compressed but is not')
Example 10
 def test_compression(self):
     ''' Tests that files specified for compression are really compressed and
         that the original files are deleted afterwards.
     '''
     now = datetime.strptime("2019-12-01", "%Y-%m-%d")
     compressor1 = compressor.compressor("job1.conf", now)
     self.clear_environment(compressor1.conf)
     compressor1.conf["recursive"] = True
     self.create_subdir_tree(compressor1.conf["direcrory"])
     self.create_all_random_files(compressor1.conf["exclude_ext"],
                                  compressor1.conf["direcrory"])
     compressor1.find_files(compressor1.conf["direcrory"])
     compressor1.set_suffix()
     for file_to_compress in compressor1.files_to_compress:
         new_compressed_file = compressor1.get_compressed_file_name(
             file_to_compress)
         compressor1.compress(file_to_compress)
         self.assertTrue(os.path.exists(new_compressed_file),
                         "new compressed file was not created")
         self.assertFalse(os.path.exists(file_to_compress),
                          "original file was not deleted after compression")
Example 11
    def __init__(self,
                 params,
                 lr=0.01,
                 momentum=0.9,
                 weight_decay=0,
                 compression_buffer=False,
                 all_reduce=False,
                 local_rank=0,
                 gpus_per_machine=1,
                 single_worker=False,
                 **kwargs):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)

        super(SGD_distribute, self).__init__(params, defaults)

        #custom code
        self.compression_buffer = compression_buffer
        self.all_reduce = all_reduce
        self.single_worker = single_worker
        self.gpus_per_machine = gpus_per_machine
        #gpus_per_machine = torch.cuda.device_count()
        print('The number of GPUs is', gpus_per_machine)
        print('Single worker', single_worker)

        self.MB = 1024 * 1024
        self.bucket_size = 200 * self.MB

        if self.compression_buffer and not self.single_worker:

            self.compressor = compressor.compressor(using_cuda=True,
                                                    local_rank=local_rank)
            self.local_rank = local_rank
            self.global_rank = dist.get_rank()
            self.local_dst_in_global = self.global_rank - self.local_rank

            self.inter_node_group = []
            self.nodes = dist.get_world_size() // gpus_per_machine

            self.intra_node_group_list = []
            for index in range(self.nodes):
                # set inter_node_group
                self.inter_node_group.append(0 + index * gpus_per_machine)
                # set all intra_node_group
                intra_node_group_temp = []
                for intra_index in range(gpus_per_machine):
                    intra_node_group_temp.append(intra_index +
                                                 index * gpus_per_machine)
                intra_node_group_temp = dist.new_group(intra_node_group_temp)
                self.intra_node_group_list.append(intra_node_group_temp)

                if self.local_dst_in_global == 0 + index * gpus_per_machine:
                    self.nodes_rank = index

            #self.intra_node_list = self.intra_node_group
            self.inter_node_list = self.inter_node_group
            self.inter_node_group_list = []
            for index in range(len(self.inter_node_list)):
                if index != 0:
                    temp = dist.new_group(
                        [self.inter_node_list[0], self.inter_node_list[index]])
                    self.inter_node_group_list.append(temp)
            self.all_gpu = dist.new_group()

            self.all_inter_node_group = dist.new_group(self.inter_node_list)

            if dist.get_rank() == 0 or dist.get_rank() == 8:
                print('nodes', self.nodes)
                print('intra_node_group_list', self.intra_node_group_list)
                print('inter_node_group', self.inter_node_group_list)
                print('all_inter_node_group', self.inter_node_list)

        #add time record
        self.all_time = Time_recorder()
        self.all_reduce_time = Time_recorder()
        self.compression_time = Time_recorder()
        self.uncompression_time = Time_recorder()
        self.broadcast_time = Time_recorder()
        self.majority_vote_time = Time_recorder()
        self.compress_all_time = Time_recorder()
        self.gather_all_time = Time_recorder()
        self.calculate_all_time = Time_recorder()
        self.broadcast_all_time = Time_recorder()
        self.update_para_time = Time_recorder()
        self.bucketing_time = Time_recorder()
        self.debucketing_time = Time_recorder()
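The group bookkeeping above is easier to see with concrete numbers. Here is a standalone sketch assuming world_size=16 and gpus_per_machine=8 (no torch.distributed needed, since it only reproduces the rank arithmetic):

world_size, gpus_per_machine = 16, 8
nodes = world_size // gpus_per_machine
# rank 0 of every machine joins the inter-node group
inter_node_group = [index * gpus_per_machine for index in range(nodes)]
# all ranks of one machine form one intra-node group
intra_node_groups = [[index * gpus_per_machine + intra
                      for intra in range(gpus_per_machine)]
                     for index in range(nodes)]
print(inter_node_group)   # [0, 8]
print(intra_node_groups)  # [[0, 1, ..., 7], [8, 9, ..., 15]]

With these numbers, the dist.get_rank() == 0 or dist.get_rank() == 8 guard above simply means "print once per machine lead rank".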
Example 12
    x_test = x_test.astype("float32") / 255
    x_test = np.expand_dims(x_test, -1)

    num_classes = 10
    y_test = keras.utils.to_categorical(y_test, num_classes)

    print("Original Model:")
    model.evaluate(x_test, y_test)

    k = 16
    bits = 2
    uniform = False  # uniform or non-uniform quantization of model weights
    block_size = 0

    c = compressor(model, k, bits, uniform, block_size)
    compressedModel = c.compressedModel
    parameters = c.parameters

    print("Compressed:")
    #h = huffmanCompressor(compressedModel, bits, k)
    #print("huffman Compression Ratio:", h.compressionRatio)

    d = decompressor(compressedModel, parameters, block_size)
    decompressedModel = d.decompressedModel

    decompressedModel.evaluate(x_test, y_test)

    model = keras.models.load_model(modelPath)
    q = quantizer(model, bits, uniform)
    quantizedModel = q.quantizedModel
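A natural follow-up, mirroring the two evaluate calls above and assuming x_test and y_test are still in scope, is to score the quantized model as well:

    print("Quantized:")
    quantizedModel.evaluate(x_test, y_test)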
Example 13
            daughterSigma=float(args.clustered[1]))
    else:
        #data['blockage'], data['gamma'] = blockage(density, data['scenario'], data['baseStation'], data['userEquipment'])
        data['blockers'], data['blockage'] = blockage(density,
                                                      data['scenario'],
                                                      data['baseStation'],
                                                      data['userEquipment'],
                                                      tolerance=2 * bs_radius)

else:
    data['blockage'] = []
    for j in range(n_BS):
        data['blockage'].append([])
        data['blockage'][j].append([])
        for i in range(data['scenario']['simTime']):
            data['blockage'][j][0].append(1)


compressor(data)

#outname = str(n_BS)+'-'+str(n_UE)+'-'+str(density)+'-'+str(args.vx)+'-'+str(args.vy)+'-'+str(args.seed) #'mobility.json'
#with open(outname, 'w') as outfile:
#    json.dump(data, outfile, indent=4)
y = json.dumps(data, indent=4)
print(y)
Example 14
from intro import intro
from input_parser import input_parser
from compressor import compressor
from printer import printer
from robo_browser import robo_browser
from misc_data import ListAlbum

# This is what runs the program. It calls multiple methods from different files to do so.
user_object = intro()
album_list = ListAlbum()
input_parser(user_object, album_list)
album_object = compressor(album_list)
printer(album_object)
robo_browser(album_object, user_object)
Example 15
import compressor
import torch
import sys
import time

torch.cuda.set_device(0)

a = torch.randn(22, 100, 224, 224)
a = a.cuda()


compressor = compressor.compressor(using_cuda=True)

# warmups to amortize allocation costs
c, size = compressor.compress(a)
d = compressor.uncompress(c, size)
del c, size, d
c, size = compressor.compress(a)
d = compressor.uncompress(c, size)
del c, size, d

# benchmark
torch.cuda.synchronize()
start = time.time()
c, size = compressor.compress(a)
torch.cuda.synchronize()
end = time.time()
print('Compression time cost')
print(str(end - start))
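A symmetric measurement for decompression, reusing c and size from the compression benchmark and the uncompress call already shown in the warm-up:

torch.cuda.synchronize()
start = time.time()
d = compressor.uncompress(c, size)
torch.cuda.synchronize()
end = time.time()
print('Decompression time cost')
print(str(end - start))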
Example 16
    def __init__(self,
                 params,
                 lr=0.01,
                 momentum=0.9,
                 weight_decay=0,
                 compression_buffer=False,
                 all_reduce=False,
                 local_rank=0,
                 gpus_per_machine=1,
                 args=None,
                 **kwargs):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)

        super(SGD_distribute, self).__init__(params, defaults)

        #custom code
        self.compression_buffer = compression_buffer
        self.all_reduce = all_reduce
        self.use_majority_vote = not args.disable_majority_vote
        self.enable_krum = args.enable_krum
        self.krum_f = args.krum_f
        self.enable_adversary = args.enable_adversary
        self.adversary_num = args.adversary_num
        self.enable_minus_adversary = args.enable_minus_adversary

        if self.enable_krum:
            self.compression_buffer = True
            self.all_reduce = False
            self.use_majority_vote = True
            self.enable_minus_adversary = False

        self.MB = 1024 * 1024
        self.bucket_size = 100 * self.MB

        if self.compression_buffer:

            self.compressor = compressor.compressor(using_cuda=True,
                                                    local_rank=local_rank)
            self.local_rank = local_rank
            self.global_rank = dist.get_rank()
            self.local_dst_in_global = self.global_rank - self.local_rank

            self.inter_node_group = []
            self.nodes = dist.get_world_size() // gpus_per_machine

            self.intra_node_group_list = []
            for index in range(self.nodes):
                # set inter_node_group
                self.inter_node_group.append(0 + index * gpus_per_machine)
                # set all intra_node_group
                intra_node_group_temp = []
                for intra_index in range(gpus_per_machine):
                    intra_node_group_temp.append(intra_index +
                                                 index * gpus_per_machine)
                intra_node_group_temp = dist.new_group(intra_node_group_temp)
                self.intra_node_group_list.append(intra_node_group_temp)

                if self.local_dst_in_global == 0 + index * gpus_per_machine:
                    self.nodes_rank = index

            #self.intra_node_list = self.intra_node_group
            self.inter_node_list = self.inter_node_group
            self.inter_node_group_list = []
            for index in range(len(self.inter_node_list)):
                if index != 0:
                    temp = dist.new_group(
                        [self.inter_node_list[0], self.inter_node_list[index]])
                    self.inter_node_group_list.append(temp)
            self.all_gpu = dist.new_group()

            self.all_inter_node_group = dist.new_group(self.inter_node_list)

            if dist.get_rank() == 0 or dist.get_rank() == 8:
                print('nodes', self.nodes)
                print('intra_node_group_list', self.intra_node_group_list)
                print('inter_node_group', self.inter_node_group_list)
                print('all_inter_node_group', self.inter_node_list)
Example 17
import distortion
import compressor

overdrive = distortion.overdrive_sigmoid(2)
distor1 = distortion.distortion_threshold(2)
distor2 = distortion.distortion_amplification(2)
compressor1 = compressor.compressor()
compressor2 = compressor.compressor_attack()

# overdrive.simulate()
# distor1.simulate()
# distor2.simulate()
compressor1.simulate()
compressor2.simulate()

try:
    input("Press Enter to exit...")
except (EOFError, KeyboardInterrupt):
    pass
Example 18
    def __init__(self, params, args, log_writer, **kwargs):

        lr = args.lr
        momentum = args.momentum
        weight_decay = args.weight_decay
        compression_buffer = args.compress
        all_reduce = args.all_reduce
        local_rank = args.local_rank
        gpus_per_machine = args.gpus_per_machine

        self.compression_buffer = compression_buffer
        self.all_reduce = all_reduce
        self.signum = args.signum
        self.log_writer = log_writer

        self.args = args

        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)

        super(SGD_distribute, self).__init__(params, defaults)

        self.MB = 1024 * 1024
        self.bucket_size = 100 * self.MB

        if self.compression_buffer:
            import compressor

            self.compressor = compressor.compressor(
                using_cuda=True,
                local_rank=local_rank,
                cpp_extend_load=args.cpp_extend_load)
            self.local_rank = local_rank
            self.global_rank = dist.get_rank()
            self.local_dst_in_global = self.global_rank - self.local_rank

            self.inter_node_group = []
            self.nodes = dist.get_world_size() // gpus_per_machine

            self.intra_node_group_list = []
            for index in range(self.nodes):
                # set inter_node_group
                self.inter_node_group.append(0 + index * gpus_per_machine)
                # set all intra_node_group
                intra_node_group_temp = []
                for intra_index in range(gpus_per_machine):
                    intra_node_group_temp.append(intra_index +
                                                 index * gpus_per_machine)
                intra_node_group_temp = dist.new_group(intra_node_group_temp)
                self.intra_node_group_list.append(intra_node_group_temp)

                if self.local_dst_in_global == 0 + index * gpus_per_machine:
                    self.nodes_rank = index

            #self.intra_node_list = self.intra_node_group
            self.inter_node_list = self.inter_node_group
            self.inter_node_group_list = []
            for index in range(len(self.inter_node_list)):
                if index != 0:
                    temp = dist.new_group(
                        [self.inter_node_list[0], self.inter_node_list[index]])
                    self.inter_node_group_list.append(temp)
            self.all_gpu = dist.new_group()

            self.all_inter_node_group = dist.new_group(self.inter_node_list)

            if dist.get_rank() == 0 or dist.get_rank() == 8:
                print('nodes', self.nodes)
                print('intra_node_group_list', self.intra_node_group_list)
                print('inter_node_group', self.inter_node_group_list)
                print('all_inter_node_group', self.inter_node_list)
Example 19
        lead = np.append(lead, zeros_vec)
    zz += 4
lead = np.array(lead)

# trim the lead vector to the same length as the signal, since we computed
# slightly more samples than needed
lead = lead[0:signal_length]

# divide signal into three filter-bands
lower_border_freq = 250
upper_border_freq = 4000
filt = filterbank.filterbank(fs, lower_border_freq, upper_border_freq)
low_sig, mid_sig, high_sig = filt.filter(signal)

# compress signal in filter-bands depending on where speech was detected
comp = compressor.compressor(fs=fs, ratio=2, mu_auto=True)
gain = 1/4 # -12 dB
compressed_low = comp.compress_mono(low_sig, lead)
compressed_mid = comp.compress_mono(mid_sig, lead)
compressed_high = comp.compress_mono(high_sig, lead)
signal_out = gain * compressed_low + compressed_mid + gain * compressed_high
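The 1/4 gain in the mix above corresponds to about -12 dB, as the comment says; the usual dB-to-linear conversion confirms it:

import math
gain_db = 20 * math.log10(1 / 4)  # ~ -12.04 dB, matching the "-12 dB" comment
gain = 10 ** (-12 / 20)           # ~ 0.2512, close to the 1/4 used above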

# write signal into file so that you can (hopefully) hear a difference
sf.write('test_signal_compressed.wav', data=signal_out, samplerate=fs)

# plotting classifier output (lead) and the original audio signal
time_vec = np.linspace(0, signal_length/fs, signal_length)

fig, ax1 = plt.subplots()

color = 'tab:blue'