struct.unpack

Here are examples of the Python API struct.unpack, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

200 Examples (page 7)

Example 1

Project: clusterd
Source File: cifstrap.py
View license
    def run(self):
        """Serve one inbound SMB connection and harvest NTLM credentials.

        Flow: read the client's Negotiate Protocol Request, answer it
        selecting the "NT LM 0.12" dialect with a fixed 8-byte challenge,
        then parse up to two Session Setup requests for an NTLMv1 or
        NTLMv2 response and store a crackable hash string in ``self.data``.

        Python 2 code: indexing/slicing the bytes returned by ``recv``
        yields 1-character ``str`` objects, which this code relies on.
        """
        try:
            # SMB Negotiate Protocol Request from the client.
            negotiate_protocol_request = self.conn.recv(1024)
            if not negotiate_protocol_request:
                self.conn.close()
                return

            # Scan the NUL-terminated dialect names (the first starts at
            # byte 40) until "NT LM 0.12" is found, counting its index so
            # the response below can select it. 0x02 is the buffer-format
            # byte that prefixes each dialect string, so it is skipped.
            dialect_location = 40
            dialect_index = 0
            dialect_name = ""
            while dialect_location < negotiate_protocol_request.__len__():
                dialect_name = ""
                while ord(negotiate_protocol_request[dialect_location]) != 0x00:
                    if ord(negotiate_protocol_request[dialect_location]) != 0x02:
                        dialect_name += negotiate_protocol_request[dialect_location]
                    dialect_location += 1
                if dialect_name == "NT LM 0.12":
                    break
                dialect_index += 1
                dialect_location += 1

            # NetBIOS Session Service header (length 0x51 follows).
            negotiate_protocol_response = "\x00\x00\x00\x51"

            # SMB Header
            # Server Component: "\xffSMB" magic
            negotiate_protocol_response += "\xff\x53\x4d\x42"
            # SMB Command: Negotiate Protocol (0x72)
            negotiate_protocol_response += "\x72"
            # NT Status: STATUS_SUCCESS
            negotiate_protocol_response += "\x00\x00\x00\x00"
            # Flags
            negotiate_protocol_response += "\x88"
            # Flags2
            negotiate_protocol_response += "\x01\xc0"
            # Process ID High
            negotiate_protocol_response += "\x00\x00"
            # Signature
            negotiate_protocol_response += "\x00\x00\x00\x00\x00\x00\x00\x00"
            # Reserved
            negotiate_protocol_response += "\x00\x00"
            # Tree ID (echoed back from the request so the client matches
            # the reply to its own state; same for PID/UID/MID below)
            negotiate_protocol_response += negotiate_protocol_request[28] + negotiate_protocol_request[29]
            # Process ID
            negotiate_protocol_response += negotiate_protocol_request[30] + negotiate_protocol_request[31]
            # User ID
            negotiate_protocol_response += negotiate_protocol_request[32] + negotiate_protocol_request[33]
            # Multiplex ID
            negotiate_protocol_response += negotiate_protocol_request[34] + negotiate_protocol_request[35]

            # Negotiate Protocol Response body
            # Word Count
            negotiate_protocol_response += "\x11"
            # Dialect Index: position of "NT LM 0.12" found above
            negotiate_protocol_response += chr(dialect_index) + "\x00"
            # Security Mode: user level, challenge/response
            negotiate_protocol_response += "\x03"
            # Max Mpx Count
            negotiate_protocol_response += "\x02\x00"
            # Max VCs
            negotiate_protocol_response += "\x01\x00"
            # Max Buffer Size
            negotiate_protocol_response += "\x04\x11\x00\x00"
            # Max Raw Buffer
            negotiate_protocol_response += "\x00\x00\x01\x00"
            # Session Key
            negotiate_protocol_response += "\x00\x00\x00\x00"
            # Capabilities
            negotiate_protocol_response += "\xfd\xe3\x00\x00"
            # System Time
            negotiate_protocol_response += "\x00" * 8
            # UTC Offset in minutes
            negotiate_protocol_response += "\x00\x00"
            # Key Length: 8-byte challenge follows
            negotiate_protocol_response += "\x08"
            # Byte Count
            negotiate_protocol_response += "\x0c\x00"
            # Encryption Key: the fixed challenge the client will hash
            negotiate_protocol_response += "\x11\x22\x33\x44\x55\x66\x77\x88"
            # Primary Domain
            negotiate_protocol_response += "\x00\x00"
            # Server
            negotiate_protocol_response += "\x00\x00"

            self.conn.sendall(negotiate_protocol_response)
            # Expect up to two Session Setup AndX requests (NTLMSSP
            # NEGOTIATE then AUTH); only the AUTH message carries hashes.
            for x in range(0, 2):
                ntlmssp_request = self.conn.recv(1024)
                # Too short to hold even a minimal NTLMv2 blob; wait for next.
                if ntlmssp_request.__len__() < 89 + 32 + 8 + 16:
                    continue

                # NT response length: little-endian u16 at offset 53.
                nt_len = struct.unpack('<H', ntlmssp_request[53:55])[0]
                if nt_len == 24 and ntlmssp_request[8:10] == '\x73\x00':
                    # NTLMv1 (24-byte NT response), command 0x73 = Session Setup
                    lm_len = struct.unpack('<H', ntlmssp_request[51:53])[0]
                    cc = struct.unpack('<H', ntlmssp_request[63:65])[0]
                    # NOTE(review): `pack` is computed but never used.
                    pack = tuple(ntlmssp_request[89+24:].split("\x00\x00\x00"))[:2]
                    # Account and domain are UTF-16LE; stripping NULs
                    # recovers the ASCII text.
                    var = [x.replace('\x00','') for x in ntlmssp_request[89+24:cc+60].split('\x00\x00\x00')[:2]]
                    (account, domain) = tuple(var)
                    # NOTE(review): the challenge literal here is 9 bytes
                    # ("...99") while the key actually sent above was the
                    # 8-byte 1122334455667788 — verify before cracking.
                    self.data = '{0}::{1}:112233445566778899:{2}:{3}'.format(account, domain,
                                ntlmssp_request[65:65+lm_len].encode('hex').upper(),
                                ntlmssp_request[65+lm_len:65+lm_len+nt_len].encode('hex').upper())
                elif nt_len > 24:
                    # NTLMv2: pick apart the NTv2 response at offset 89 —
                    # 16-byte HMAC, 4-byte header, then (after 4 reserved
                    # bytes) the 8-byte client challenge and blob tail.
                    hmac = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89, 89 + 16))
                    header = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 16, 89 + 20))
                    challenge = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 24, 89 + 32 + 8))
                    tail = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 32 + 8, 89 + 32 + 8 + 16))

                    # Account then domain follow as NUL-separated UTF-16LE;
                    # read every other byte to recover the ASCII text.
                    tindex = 89 + 32 + 8 + 16 + 1
                    account = ""
                    while ord(ntlmssp_request[tindex]) != 0x00:
                        account += chr(ord(ntlmssp_request[tindex]))
                        tindex += 2

                    tindex += 2
                    domain = ""
                    while ord(ntlmssp_request[tindex]) != 0x00:
                        domain += chr(ord(ntlmssp_request[tindex]))
                        tindex += 2

                    self.data = "{0}::{1}:1122334455667788:{2}:{3}00000000{4}{5}".format(
                                    account, domain, hmac, header, challenge, tail)

                # NetBIOS Session Service header for the rejection below.
                ntlmssp_failed = "\x00\x00\x00\x23"

                # SMB Header
                # Server Component: "\xffSMB" magic
                ntlmssp_failed += "\xff\x53\x4d\x42"
                # SMB Command: Session Setup AndX (0x73)
                ntlmssp_failed += "\x73"
                # NT Status: 0xC000006D = STATUS_LOGON_FAILURE, so the
                # client gives up after handing over its hashes
                ntlmssp_failed += "\x6d\x00\x00\xc0"
                # Flags
                ntlmssp_failed += "\x88"
                # Flags2
                ntlmssp_failed += "\x01\xc8"
                # Process ID High
                ntlmssp_failed += "\x00\x00"
                # Signature
                ntlmssp_failed += "\x00\x00\x00\x00\x00\x00\x00\x00"
                # Reserved
                ntlmssp_failed += "\x00\x00"
                # Tree ID (echoed back from the request, as above)
                ntlmssp_failed += ntlmssp_request[28] + ntlmssp_request[29]
                # Process ID
                ntlmssp_failed += ntlmssp_request[30] + ntlmssp_request[31]
                # User ID
                ntlmssp_failed += ntlmssp_request[32] + ntlmssp_request[33]
                # Multiplex ID
                ntlmssp_failed += ntlmssp_request[34] + ntlmssp_request[35]

                # Session Setup response body
                # Word Count (0) + Byte Count (0)
                ntlmssp_failed += "\x00\x00\x00"
                self.conn.sendall(ntlmssp_failed)

            self.conn.close()

        # Python 2 except syntax; any failure is surfaced via self.data.
        except Exception, e:
            self.data = e

Example 2

Project: penelope
Source File: format_stardict.py
View license
def read(dictionary, args, input_file_paths):
    """Populate ``dictionary`` from one or more zipped StarDict files.

    Each input path must be a zip archive containing at least an .ifo,
    an .idx (or .idx.gz) and a .dict (or .dict.dz/.dz) member; a .syn
    member with synonyms is optional.

    :param dictionary: target object exposing ``add_entry``,
        ``add_synonym`` and ``__len__``
    :param args: parsed CLI options (``debug``, ``keep``, ``ignore_case``,
        ``ignore_synonyms``, ``input_file_encoding``,
        ``sd_ignore_sametypesequence``)
    :param input_file_paths: list of paths to zipped StarDict files
    :return: ``dictionary`` on success, ``None`` on the first failed file
    """
    def find_files(entries):
        # Map zip member names to canonical keys ("d.ifo", "d.idx", ...)
        # and validate that the mandatory members are present; returns an
        # empty dict on failure.
        found = {}
        for entry in entries:
            if entry.endswith(".ifo"):
                found["d.ifo"] = entry
                break
        if "d.ifo" not in found:
            print_error("Cannot find .ifo file in the given StarDict file (see StarDict spec)")
            return {}
        # remove .ifo extension
        base = found["d.ifo"][:-4]
        # attempt to find these ones
        tentative_idx = base + ".idx"
        tentative_idx_gz = base + ".idx.gz"
        tentative_dict = base + ".dict"
        tentative_dict_dz = base + ".dict.dz"
        tentative_dz = base + ".dz"
        if tentative_idx in entries:
            found["d.idx"] = tentative_idx
        if tentative_idx_gz in entries:
            found["d.idx.gz"] = tentative_idx_gz
        if not (("d.idx" in found) or ("d.idx.gz" in found)):
            print_error("Cannot find .idx or .idx.gz file in the given StarDict file (see StarDict spec)")
            return {}
        if tentative_dict in entries:
            found["d.dict"] = tentative_dict
        if tentative_dict_dz in entries:
            found["d.dict.dz"] = tentative_dict_dz
        if tentative_dz in entries:
            found["d.dz"] = tentative_dz
        if not (("d.dict" in found) or ("d.dict.dz" in found) or ("d.dz" in found)):
            print_error("Cannot find .dict, .dict.dz, or .dz file in the given StarDict file (see StarDict spec)")
            return {}
        # syn is optional
        tentative_syn = base + ".syn"
        if tentative_syn in entries:
            found["d.syn"] = tentative_syn
        return found

    def uncompress_file(compressed_path, tmp_path, key):
        # Gunzip ``compressed_path`` into ``tmp_path/key`` and return the
        # uncompressed path (.dict.dz files are gzip-compatible).
        uncompressed_path = os.path.join(tmp_path, key)
        u_obj = io.open(uncompressed_path, "wb")
        c_obj = gzip.open(compressed_path, "rb")
        u_obj.write(c_obj.read())
        c_obj.close()
        u_obj.close()
        print_debug("Uncompressed %s" % (uncompressed_path), args.debug)
        return uncompressed_path

    def read_ifo(ifo_path, has_syn, args):
        # Parse the key=value lines of the .ifo file into a dict and
        # validate version and required keys; returns None on failure.
        ifo_dict = {}
        ifo_obj = io.open(ifo_path, "rb")
        ifo_bytes = ifo_obj.read()                  # bytes
        ifo_unicode = ifo_bytes.decode("utf-8")     # unicode, always utf-8 by spec
        ifo_obj.close()
        for line in ifo_unicode.splitlines():
            # values may themselves contain "=", so rejoin the tail
            array = line.split("=")
            if len(array) >= 2:
                key = array[0]
                val = "=".join(array[1:])
                ifo_dict[key] = val

        if "version" not in ifo_dict:
            print_error("No 'version' found in the .ifo file (see StarDict spec)")
            return None
        if ifo_dict["version"] not in ["2.4.2", "3.0.0"]:
            print_error("The .ifo file must have a 'version' value equal to '2.4.2' or '3.0.0' (see StarDict spec)")
            return None

        required_keys = ["bookname", "wordcount", "idxfilesize"]
        if has_syn:
            required_keys.append("synwordcount")
        # TODO not used => disabling this
        # if ifo_dict["version"] == "3.0.0":
        #     required_keys.append("idxoffsetbits")
        for key in required_keys:
            if key not in ifo_dict:
                print_error("No '%s' found in the .ifo file (see StarDict spec)" % key)
                return None

        ifo_dict["wordcount"] = int(ifo_dict["wordcount"])
        ifo_dict["idxfilesize"] = int(ifo_dict["idxfilesize"])
        if has_syn:
            ifo_dict["synwordcount"] = int(ifo_dict["synwordcount"])
        # TODO not used => disabling this
        # if ifo_dict["version"] == "3.0.0":
        #     ifo_dict["idxoffsetbits"] = int(ifo_dict["idxoffsetbits"])

        if args.sd_ignore_sametypesequence:
            print_debug("Ignoring sametypesequence value", args.debug)
        else:
            # TODO limitation: we require sametypesequence to be present
            if "sametypesequence" not in ifo_dict:
                print_error("The .ifo file must have a 'sametypesequence' value (see README).")
                return None
            # TODO limitation: we require sametypesequence to have a value in SAMETYPESEQUENCE_SUPPORTED_VALUES
            if not ifo_dict["sametypesequence"] in SAMETYPESEQUENCE_SUPPORTED_VALUES:
                print_error("The .ifo file must have a 'sametypesequence' value of %s (see README)." % "|".join(SAMETYPESEQUENCE_SUPPORTED_VALUES))
                return None

        return ifo_dict

    def read_single_file(dictionary, args, input_file_path):
        # Extract one zipped StarDict file into a temp dir, parse its
        # .idx/.dict (and optional .syn) into ``dictionary``; returns a
        # success flag.
        result = False

        # create a tmp directory
        tmp_path = create_temp_directory()
        print_debug("Working in temp dir '%s'" % (tmp_path), args.debug)

        # find .ifo, .idx, .dict[.dz] and .syn files inside the zip
        # and extract them to tmp_path
        input_file_obj = zipfile.ZipFile(input_file_path)
        found_files = find_files(input_file_obj.namelist())
        extracted_files = {}
        if len(found_files) > 0:
            for key in found_files:
                entry = found_files[key]
                ext_file_path = os.path.join(tmp_path, key)
                ext_file_obj = io.open(ext_file_path, "wb")
                zip_entry = input_file_obj.open(entry)
                ext_file_obj.write(zip_entry.read())
                zip_entry.close()
                ext_file_obj.close()
                print_debug("Extracted %s" % (ext_file_path), args.debug)
                extracted_files[key] = ext_file_path
                # extract from compressed file, but only if ".idx" is not present as well
                if (key == "d.idx.gz") and ("d.idx" not in found_files):
                    extracted_files["d.idx"] = uncompress_file(ext_file_path, tmp_path, "d.idx")
                # extract from compressed file, but only if ".dict" is not present as well
                if ((key == "d.dict.dz") or (key == "d.dz")) and ("d.dict" not in found_files):
                    extracted_files["d.dict"] = uncompress_file(ext_file_path, tmp_path, "d.dict")
        input_file_obj.close()

        # here we have d.ifo, d.idx and d.dict (all uncompressed) and possibly d.syn

        has_syn = "d.syn" in extracted_files
        if (has_syn) and (args.ignore_synonyms):
            has_syn = False
            print_debug("Dictionary has synonyms, but ignoring them (--ignore-synonym)", args.debug)
        ifo_dict = read_ifo(extracted_files["d.ifo"], has_syn, args)
        print_debug("Read .ifo file with values:\n%s" % (str(ifo_dict)), args.debug)

        # read dict file
        dict_file_obj = io.open(extracted_files["d.dict"], "rb")
        dict_file_bytes = dict_file_obj.read()
        dict_file_obj.close()

        # read idx file: a sequence of records, each a NUL-terminated
        # UTF-8 headword followed by a 4-byte offset and 4-byte size
        # into the .dict data
        idx_file_obj = io.open(extracted_files["d.idx"], "rb")
        byte_read = idx_file_obj.read(1)
        headword = b""
        while byte_read:
            if byte_read == b"\0":
                # end of current word: read offset and size
                # (network-order *unsigned* 32-bit per the StarDict
                # spec, hence '>I' rather than signed '>i')
                offset_bytes = idx_file_obj.read(4)
                offset_int = int((struct.unpack('>I', offset_bytes))[0])
                size_bytes = idx_file_obj.read(4)
                size_int = int((struct.unpack('>I', size_bytes))[0])
                definition = dict_file_bytes[offset_int:(offset_int + size_int)].decode(args.input_file_encoding)
                headword = headword.decode("utf-8")
                if args.ignore_case:
                    headword = headword.lower()
                dictionary.add_entry(headword=headword, definition=definition)
                headword = b""
            else:
                # read next byte
                headword += byte_read
            byte_read = idx_file_obj.read(1)
        idx_file_obj.close()
        result = True

        # read syn file, if present: NUL-terminated synonym strings, each
        # followed by a 4-byte index into the original headword list
        if has_syn:
            print_debug("The input StarDict file contains a .syn file, parsing it...", args.debug)
            result = False
            syn_file_obj = io.open(extracted_files["d.syn"], "rb")
            byte_read = syn_file_obj.read(1)
            synonym = b""
            while byte_read:
                if byte_read == b"\0":
                    # end of current synonym: read index of original word
                    # (network-order unsigned 32-bit, see StarDict spec)
                    index_bytes = syn_file_obj.read(4)
                    index_int = int((struct.unpack('>I', index_bytes))[0])
                    synonym = synonym.decode("utf-8")
                    if index_int < len(dictionary):
                        dictionary.add_synonym(synonym=synonym, headword_index=index_int)
                    else:
                        # emit a warning?
                        # BUG FIX: the format arguments were swapped
                        # ((index_int, synonym)), which made '%d' receive
                        # a string and raise TypeError whenever this
                        # branch was taken.
                        print_debug("Synonym '%s' points to index %d >= len(dictionary), skipping it" % (synonym, index_int), args.debug)
                    synonym = b""
                else:
                    # read next byte
                    synonym += byte_read
                byte_read = syn_file_obj.read(1)
            syn_file_obj.close()
            result = True
            print_debug("The input StarDict file contains a .syn file, parsing it... done", args.debug)
        else:
            print_debug("The input StarDict file does not contain a .syn file", args.debug)

        # delete tmp directory
        if args.keep:
            print_info("Not deleting temp dir '%s'" % (tmp_path))
        else:
            delete_directory(tmp_path)
            print_debug("Deleted temp dir '%s'" % (tmp_path), args.debug)

        return result

    for input_file_path in input_file_paths:
        print_debug("Reading from file '%s'..." % (input_file_path), args.debug)
        result = read_single_file(dictionary, args, input_file_path)
        if result:
            print_debug("Reading from file '%s'... success" % (input_file_path), args.debug)
        else:
            print_error("Reading from file '%s'... failed" % (input_file_path))
            return None
    return dictionary

Example 3

View license
def main():
	
	import struct, json, time, sys, os, shutil, datetime, base64

	parserversion = "0.9.12.0"
	
	global rawdata, tupledata, data, structures, numoffrags
	global filename_source, filename_target
	global option_server, option_format, option_tanks
	
	filename_source = ""
	option_raw = 0
	option_format = 0
	option_server = 0
	option_frags = 1
	option_tanks = 0
	
	for argument in sys.argv[1:]:
		if argument == "-s":
			option_server = 1
			#print '-- SERVER mode enabled'
		elif argument == "-r":
			option_raw = 1
			#print '-- RAW mode enabled'
		elif argument == "-f":
			option_format = 1
			#print '-- FORMAT mode enabled'
		elif argument == "-k":
			option_frags = 0
			#print '-- FRAGS will be excluded'
		elif argument == "-t":
			option_tanks = 1
			#print '-- TANK info will be included'
		else:
			# dossier file, if more than one get only first
			if filename_source =='' and os.path.isfile(argument):
				filename_source = argument
	
	if filename_source == "":
		usage()
		sys.exit(2)
		
	printmessage('############################################')
	printmessage('###### WoTDC2J ' + parserversion)
	

	printmessage('Processing ' + filename_source)
	

	if not os.path.exists(filename_source) or not os.path.isfile(filename_source) or not os.access(filename_source, os.R_OK):
		catch_fatal('Dossier file does not exists')
		sys.exit(1)

	if os.path.getsize(filename_source) == 0:
		catch_fatal('Dossier file size is zero')
		sys.exit(1)
		
	filename_target = os.path.splitext(filename_source)[0]
	filename_target = filename_target + '.json'

	if os.path.exists(filename_target) and os.path.isfile(filename_target) and os.access(filename_target, os.R_OK):
		try:
			os.remove(filename_target)
		except:
			catch_fatal('Cannot remove target file ' + filename_target)

			
	cachefile = open(filename_source, 'rb')

	try:
		from SafeUnpickler import SafeUnpickler
		dossierversion, dossierCache = SafeUnpickler.load(cachefile)
	except Exception, e:
		exitwitherror('Dossier cannot be read (pickle could not be read) ' + e.message)

	if not 'dossierCache' in locals():
		exitwitherror('Dossier cannot be read (dossierCache does not exist)')

	printmessage("Dossier version " + str(dossierversion))
	
	tankitems = [(k, v) for k, v in dossierCache.items()]

	dossier = dict()
		
	dossierheader = dict()
	dossierheader['dossierversion'] = str(dossierversion)
	dossierheader['parser'] = 'http://www.vbaddict.net'
	dossierheader['parserversion'] = parserversion
	dossierheader['tankcount'] = len(tankitems)
	

	
	base32name = "?;?"
	if option_server == 0:
		filename_base = os.path.splitext(os.path.basename(filename_source))[0]
		try:
			base32name = base64.b32decode(filename_base)
		except Exception, e:
			pass
			#printmessage('cannot decode filename ' + filename_base + ': ' + e.message)


	dossierheader['server'] = base32name.split(';', 1)[0];
	dossierheader['username'] = base32name.split(';', 1)[1];
	
	
	if option_server == 0:
		dossierheader['date'] = time.mktime(time.localtime())
	
	tanksdata = load_tanksdata()
	structures = load_structures()
	
	tanks = dict()
	tanks_v2 = dict()
	
	battleCount_15 = 0
	battleCount_7 = 0
	battleCount_historical = 0
	battleCount_company = 0
	battleCount_clan = 0
	battleCount_fortBattles = 0
	battleCount_fortSorties = 0
	battleCount_rated7x7 = 0
	battleCount_globalMap = 0
	battleCount_fallout = 0
	
	for tankitem in tankitems:
		
		if len(tankitem) < 2:
			printmessage('Invalid tankdata')
			continue

		if len(tankitem[0]) < 2:
			printmessage('Invalid tankdata')
			continue
			
		rawdata = dict()
		
		try:
			data = tankitem[1][1]
		except Exception, e:
			printmessage('Invalid tankitem ' + str(e.message))
			continue
			
		tankstruct = str(len(data)) + 'B'
		tupledata = struct.unpack(tankstruct, data)
		tankversion = getdata("tankversion", 0, 1)
		
		#if tankversion != 87:
		#printmessage("Tankversion " + str(tankversion))
		#	continue
		
		if tankversion not in structures:
			write_to_log('unsupported tankversion ' + str(tankversion))
			continue				

		if not isinstance(tankitem[0][1], (int)):
			printmessage('Invalid tankdata')
			continue
	
		try:
			tankid = tankitem[0][1] >> 8 & 65535
		except Exception, e:
			printmessage('cannot get tankid ' + e.message)
			continue
						
		try:
			countryid = tankitem[0][1] >> 4 & 15
		except Exception, e:
			printmessage('cannot get countryid ' + e.message)
			continue
			
		#For debugging purposes
		#if not (countryid==4 and tankid==19):
		#	continue
		
		for m in xrange(0,len(tupledata)):
			rawdata[m] = tupledata[m]
		
		if len(tupledata) == 0:
			continue

		if option_server == 0:
			tanktitle = get_tank_data(tanksdata, countryid, tankid, "title")
		else:
			tanktitle = str(countryid) + '_' + str(tankid)

		fragslist = []
		if tankversion >= 65:
			tank_v2 = dict()
			
			if tankversion == 65:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7')
				
			if tankversion == 69:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7')

			if tankversion == 77:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical')

			if tankversion == 81:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements')

			if tankversion in [85, 87]:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements')

			if tankversion in [88,89]:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7')

			if tankversion == 92:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7', 'globalMapCommon', 'maxGlobalMapCommon')
			
			if tankversion == 94:
				blocks = ('a15x15', 'a15x15_2', 'clan', 'clan2', 'company', 'company2', 'a7x7', 'achievements', 'frags', 'total', 'max15x15', 'max7x7', 'playerInscriptions', 'playerEmblems', 'camouflages', 'compensation', 'achievements7x7', 'historical', 'maxHistorical', 'historicalAchievements', 'fortBattles', 'maxFortBattles', 'fortSorties', 'maxFortSorties', 'fortAchievements', 'singleAchievements', 'clanAchievements', 'rated7x7', 'maxRated7x7', 'globalMapCommon', 'maxGlobalMapCommon', 'fallout', 'maxFallout', 'falloutAchievements')
				
			blockcount = len(list(blocks))+1

			newbaseoffset = (blockcount * 2)
			header = struct.unpack_from('<' + 'H' * blockcount, data)
			blocksizes = list(header[1:])
			blocknumber = 0
			numoffrags_list = 0
			numoffrags_a15x15 = 0
			numoffrags_a7x7 = 0
			numoffrags_historical = 0
			numoffrags_fortBattles = 0
			numoffrags_fortSorties = 0
			numoffrags_rated7x7 = 0
			numoffrags_globalMap = 0
			numoffrags_fallout = 0

			for blockname in blocks:

				if blocksizes[blocknumber] > 0:
					if blockname == 'frags':
						if option_frags == 1:
							fmt = '<' + 'IH' * (blocksizes[blocknumber]/6)
							fragsdata = struct.unpack_from(fmt, data, newbaseoffset)
							index = 0

							for i in xrange((blocksizes[blocknumber]/6)):
								compDescr, amount = (fragsdata[index], fragsdata[index + 1])
								numoffrags_list += amount	
								frag_countryid, frag_tankid, frag_tanktitle = get_tank_details(compDescr, tanksdata)
								tankfrag = [frag_countryid, frag_tankid, amount, frag_tanktitle]
								fragslist.append(tankfrag)
								index += 2							

							for i in xrange((blocksizes[blocknumber])):
								rawdata[newbaseoffset+i] = str(tupledata[newbaseoffset+i]) + " / Frags"
								
							tank_v2['fragslist'] = fragslist
				
						newbaseoffset += blocksizes[blocknumber] 

						
					else:
						oldbaseoffset = newbaseoffset
						structureddata = getstructureddata(blockname, tankversion, newbaseoffset)
						structureddata = keepCompatibility(structureddata)
						newbaseoffset = oldbaseoffset+blocksizes[blocknumber]
						tank_v2[blockname] = structureddata 

				blocknumber +=1
			if contains_block('max15x15', tank_v2):
				if 'maxXP' in tank_v2['max15x15']:
					if tank_v2['max15x15']['maxXP']==0:
						tank_v2['max15x15']['maxXP'] = 1
						
				if 'maxFrags' in tank_v2['max15x15']:
					if tank_v2['max15x15']['maxFrags']==0:
						tank_v2['max15x15']['maxFrags'] = 1

				
			if contains_block('company', tank_v2):
				if 'battlesCount' in tank_v2['company']:
					battleCount_company += tank_v2['company']['battlesCount']
			
			if contains_block('clan', tank_v2):
				if 'battlesCount' in tank_v2['clan']:
					battleCount_company += tank_v2['clan']['battlesCount']

			if contains_block('a15x15', tank_v2):
				
				if 'battlesCount' in tank_v2['a15x15']:
					battleCount_15 += tank_v2['a15x15']['battlesCount']
					
				if 'frags' in tank_v2['a15x15']:
					numoffrags_a15x15 = int(tank_v2['a15x15']['frags'])

			if contains_block('a7x7', tank_v2):
				
				if 'battlesCount' in tank_v2['a7x7']:
					battleCount_7 += tank_v2['a7x7']['battlesCount']
				
				if 'frags' in tank_v2['a7x7']:
					numoffrags_a7x7 = int(tank_v2['a7x7']['frags'])
			
			if contains_block('historical', tank_v2):
				
				if 'battlesCount' in tank_v2['historical']:
					battleCount_historical += tank_v2['historical']['battlesCount']
				
				if 'frags' in tank_v2['historical']:
					numoffrags_historical = int(tank_v2['historical']['frags'])

			if contains_block('fortBattles', tank_v2):
				
				if 'battlesCount' in tank_v2['fortBattles']:
					battleCount_fortBattles += tank_v2['fortBattles']['battlesCount']
				
				if 'frags' in tank_v2['fortBattles']:
					numoffrags_fortBattles = int(tank_v2['fortBattles']['frags'])
					
			if contains_block('fortSorties', tank_v2):
				
				if 'battlesCount' in tank_v2['fortSorties']:
					battleCount_fortSorties += tank_v2['fortSorties']['battlesCount']
				
				if 'frags' in tank_v2['fortSorties']:
					numoffrags_fortSorties = int(tank_v2['fortSorties']['frags'])

			if contains_block('rated7x7', tank_v2):
				
				if 'battlesCount' in tank_v2['rated7x7']:
					battleCount_rated7x7 += tank_v2['rated7x7']['battlesCount']
				
				if 'frags' in tank_v2['rated7x7']:
					numoffrags_rated7x7 = int(tank_v2['rated7x7']['frags'])
					
			if contains_block('globalMapCommon', tank_v2):
				
				if 'battlesCount' in tank_v2['globalMapCommon']:
					battleCount_globalMap += tank_v2['globalMapCommon']['battlesCount']
				
				if 'frags' in tank_v2['globalMapCommon']:
					numoffrags_globalMap = int(tank_v2['globalMapCommon']['frags'])
				
			if contains_block('fallout', tank_v2):
				
				if 'battlesCount' in tank_v2['fallout']:
					battleCount_fallout += tank_v2['fallout']['battlesCount']
				
				if 'frags' in tank_v2['fallout']:
					numoffrags_fallout = int(tank_v2['fallout']['frags'])
				
			if option_frags == 1:

				try:
					if numoffrags_list <> (numoffrags_a15x15 + numoffrags_a7x7 + numoffrags_historical + numoffrags_fortBattles + numoffrags_fortSorties + numoffrags_rated7x7 + numoffrags_globalMap + numoffrags_fallout):
						pass
						#write_to_log('Wrong number of frags for ' + str(tanktitle) + ', ' + str(tankversion) + ': ' + str(numoffrags_list) + ' = ' + str(numoffrags_a15x15) + ' + ' + str(numoffrags_a7x7) + ' + ' + str(numoffrags_historical) + ' + ' + str(numoffrags_fortBattles) + ' + ' + str(numoffrags_fortSorties) + ' + ' + str(numoffrags_rated7x7))
				except Exception, e:
						write_to_log('Error processing frags: ' + e.message)
		
			
				
			tank_v2['common'] = {"countryid": countryid,
				"tankid": tankid,
				"tanktitle": tanktitle,
				"compactDescr": tankitem[0][1],
				"type": get_tank_data(tanksdata, countryid, tankid, "type"),
				"premium": get_tank_data(tanksdata, countryid, tankid, "premium"),
				"tier": get_tank_data(tanksdata, countryid, tankid, "tier"),
				"updated": tankitem[1][0],
				"updatedR": datetime.datetime.fromtimestamp(int(tankitem[1][0])).strftime('%Y-%m-%d %H:%M:%S'),
				"creationTime": tank_v2['total']['creationTime'],
				"creationTimeR": datetime.datetime.fromtimestamp(int(tank_v2['total']['creationTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"lastBattleTime": tank_v2['total']['lastBattleTime'],
				"lastBattleTimeR": datetime.datetime.fromtimestamp(int(tank_v2['total']['lastBattleTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"basedonversion": tankversion,
				"frags":  numoffrags_a15x15,
				"frags_7x7":  numoffrags_a7x7,
				"frags_historical":  numoffrags_historical,
				"frags_fortBattles":  numoffrags_fortBattles,
				"frags_fortSorties":  numoffrags_fortSorties,
				"frags_compare": numoffrags_list,
				"has_15x15": contains_block("a15x15", tank_v2),
				"has_7x7": contains_block("a7x7", tank_v2),
				"has_historical": contains_block("historical", tank_v2),
				"has_clan": contains_block("clan", tank_v2),
				"has_company": contains_block("company", tank_v2),
				"has_fort": contains_block("fortBattles", tank_v2),
				"has_sortie": contains_block("fortSorties", tank_v2)
				
			}
			
			if option_raw == 1:
				tank_v2['rawdata'] = rawdata

			tanks_v2[tanktitle] = tank_v2
			
			
		if tankversion < 65:
			if tankversion >= 20:
				company = getstructureddata("company", tankversion)
				battleCount_company += company['battlesCount']
				clan = getstructureddata("clan", tankversion)
				battleCount_clan += clan['battlesCount']
			
			numoffrags = 0
	
			structure = getstructureddata("structure", tankversion)


			
			if 'fragspos' not in structure:
				write_to_log('tankversion ' + str(tankversion) + ' not in JSON')
				continue
			
			if option_frags == 1 and tankversion >= 17:
				fragslist = getdata_fragslist(tankversion, tanksdata, structure['fragspos'])
	
			tankdata = getstructureddata("tankdata", tankversion)
			battleCount_15 += tankdata['battlesCount']
	
			if not "creationTime" in tankdata:
				tankdata['creationTime'] = 1356998400
	
			common = {"countryid": countryid,
				"tankid": tankid,
				"tanktitle": tanktitle,
				"compactDescr": tankitem[0][1],
				"type": get_tank_data(tanksdata, countryid, tankid, "type"),
				"premium": get_tank_data(tanksdata, countryid, tankid, "premium"),
				"tier": get_tank_data(tanksdata, countryid, tankid, "tier"),
				"updated": tankitem[1][0],
				"updatedR": datetime.datetime.fromtimestamp(int(tankitem[1][0])).strftime('%Y-%m-%d %H:%M:%S'),
				"creationTime": tankdata['creationTime'],
				"creationTimeR": datetime.datetime.fromtimestamp(int(tankdata['creationTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"lastBattleTime": tankdata['lastBattleTime'],
				"lastBattleTimeR": datetime.datetime.fromtimestamp(int(tankdata['lastBattleTime'])).strftime('%Y-%m-%d %H:%M:%S'),
				"basedonversion": tankversion,
				"frags": tankdata['frags'],
				"frags_compare": numoffrags
			}
	
			if option_frags == 1 and tankversion >= 17:
				try:
					if tankdata['frags'] <> numoffrags:
						printmessage('Wrong number of frags!')
				except Exception, e:
						write_to_log('Error processing frags: ' + e.message)
	
			series = getstructureddata("series", tankversion)
	
			special = getstructureddata("special", tankversion)
	
			battle = getstructureddata("battle", tankversion)
	
			major = getstructureddata("major", tankversion)
	
			epic = getstructureddata("epic", tankversion)
	
	
	
			tank = dict()
			
			tank['tankdata'] = tankdata
			tank['common'] = common
	
			if tankversion >= 20:
				tank['series'] = series
				tank['battle'] = battle
				tank['special'] = special
				tank['epic'] = epic
				tank['major'] = major
				tank['clan'] = clan
				tank['company'] = company
				
			if option_frags == 1:
				tank['kills'] = fragslist
			
			if option_raw == 1:
				tank['rawdata'] = rawdata
			
			tanks[tanktitle] = tank
			#tanks = sorted(tanks.values())

	
	dossierheader['battleCount_15'] = battleCount_15	
	dossierheader['battleCount_7'] = battleCount_7
	dossierheader['battleCount_historical'] = battleCount_historical
	dossierheader['battleCount_company'] = battleCount_company
	dossierheader['battleCount_clan'] = battleCount_clan

	dossierheader['result'] = "ok"
	dossierheader['message'] = "ok"
	
	dossier['header'] = dossierheader
	dossier['tanks'] = tanks
	dossier['tanks_v2'] = tanks_v2

	dumpjson(dossier)

	printmessage('###### Done!')
	printmessage('')
	sys.exit(0)

Example 4

Project: Medusa
Source File: tz.py
View license
    def _read_tzfile(self, fileobj):
        """Parse a binary tzfile(5) stream and return a populated ``_tzfile``.

        Reads the "TZif" header, the transition times, the local-time-type
        (ttinfo) records, the abbreviation string pool and the std/wall and
        UTC/local indicator arrays, builds ``_ttinfo`` objects (offsets
        rounded to whole minutes), and finally shifts the transition times
        from UTC to wall time.

        :param fileobj: binary file-like object positioned at the start of
            the tzfile data.
        :raises ValueError: if the magic "TZif" marker is missing.
        """
        out = _tzfile()

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
            # The number of UTC/local indicators stored in the file.
            ttisgmtcnt,

            # The number of standard/wall indicators stored in the file.
            ttisstdcnt,

            # The number of leap seconds for which data is
            # stored in the file.
            leapcnt,

            # The number of "transition times" for which data
            # is stored in the file.
            timecnt,

            # The number of "local time types" for which data
            # is stored in the file (must not be zero).
            typecnt,

            # The  number  of  characters  of "time zone
            # abbreviation strings" stored in the file.
            charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            out.trans_list = list(struct.unpack(">%dl" % timecnt,
                                                  fileobj.read(timecnt*4)))
        else:
            out.trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            out.trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            out.trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now (but read anyway for correct file position)
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # Build ttinfo list
        out.ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = 60 * ((gmtoff + 30) // 60)
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.dstoffset = datetime.timedelta(0)
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            out.ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        out.ttinfo_std = None
        out.ttinfo_dst = None
        out.ttinfo_before = None
        if out.ttinfo_list:
            if not out.trans_list:
                # NOTE(review): this assigns ttinfo_first, which nothing in
                # this method reads again — possibly ttinfo_before was
                # intended; confirm against the class's other methods.
                out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = out.trans_idx[i]
                    if not out.ttinfo_std and not tti.isdst:
                        out.ttinfo_std = tti
                    elif not out.ttinfo_dst and tti.isdst:
                        out.ttinfo_dst = tti

                    if out.ttinfo_std and out.ttinfo_dst:
                        break
                else:
                    if out.ttinfo_dst and not out.ttinfo_std:
                        out.ttinfo_std = out.ttinfo_dst

                for tti in out.ttinfo_list:
                    if not tti.isdst:
                        out.ttinfo_before = tti
                        break
                else:
                    out.ttinfo_before = out.ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = None
        for i, tti in enumerate(out.trans_idx):
            if not tti.isdst:
                offset = tti.offset
                laststdoffset = offset
            else:
                if laststdoffset is not None:
                    # Store the DST offset as well and update it in the list
                    tti.dstoffset = tti.offset - laststdoffset
                    out.trans_idx[i] = tti

                offset = laststdoffset or 0

            out.trans_list[i] += offset

        # In case we missed any DST offsets on the way in for some reason, make
        # a second pass over the list, looking for the /next/ DST offset.
        laststdoffset = None
        for i in reversed(range(len(out.trans_idx))):
            tti = out.trans_idx[i]
            if tti.isdst:
                if not (tti.dstoffset or laststdoffset is None):
                    tti.dstoffset = tti.offset - laststdoffset
            else:
                laststdoffset = tti.offset

            # Normalize dstoffset to a timedelta (first pass stored raw seconds).
            if not isinstance(tti.dstoffset, datetime.timedelta):
                tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)
            
            out.trans_idx[i] = tti

        # Freeze the results; callers treat these as immutable sequences.
        out.trans_idx = tuple(out.trans_idx)
        out.trans_list = tuple(out.trans_list)

        return out

Example 5

Project: wotdecoder
Source File: wotdecoder.py
View license
def replay(filename, to_decode):
  """Decode the metadata chunks of a .wotreplay file.

  filename  -- path of the .wotreplay file.
  to_decode -- bitmask of chunks you want decoded:
               bit 0 = first Json block, starting player list
               bit 1 = second Json block, simplified frag count
               bit 2 = pickle, proper battle result with damage numbers
               7 (binary 111) decodes all three; 5 (binary 101) decodes the
               first Json and the pickle.

  Returns ((first, second, third) decoded chunks, chunks bitmask,
  decoder status, estimated client version).
  """
# Fix: keep the file handle as None until the file is actually opened, so the
# cleanup below does not raise UnboundLocalError when filesize < 12 breaks out
# of the loop before open().
  f = None
  while True:
    wot_replay_magic_number = "12323411"
    blocks = 0
    first_chunk_decoded = {}
    second_chunk_decoded = {}
    third_chunk_decoded = {}
    chunks_bitmask = 0
    filesize = os.path.getsize(filename)
    if filesize<12: processing =10; break
    f = open(filename, "rb")
    if f.read(4)!=bytes.fromhex(wot_replay_magic_number): processing =11; break
    blocks = struct.unpack("i",f.read(4))[0]

# 8.1 Adds new unencrypted Python pickle block containing your match stats
# Before 8.1 (< 20121101)
#  Json + binary = 1 = incomplete.
#  Json + Json + binary = 2 = complete.
# After  8.1 (>=20121101)
#  Json + binary = 1 = incomplete.
#  Json + pickle + binary = 2 = incomplete, but you looked at 'Battle Result' screen and replay got updated.
#  Json + Json + pickle + binary = 3 = complete.
# Some oddities:
#  Json + Json + ~8 bytes = 2 = incomplete, game crashed somewhere, second Json has game result, but we are missing Pickle
#
# Proper way to detect replay version is to decrypt and decompress binary part, but that is too slow.
# Instead I am using Date to estimate version in a very crude way. It is only accurate down to a day and doesnt take into
# consideration player timezone so I need to double check replays saved at 20121101. Still faster than decrypting and
# unzipping 1MB files.

    first_size = struct.unpack("i",f.read(4))[0]

    if filesize < (12+first_size+4): processing =10; break

    if (blocks == 1) and (not (to_decode&1)): processing =1; break

    first_chunk = f.read(first_size)
    if first_chunk[0:1] != b'{': processing =13; break
    first_chunk_decoded = json.loads(first_chunk.decode('utf-8'))
    chunks_bitmask = 1

    if blocks == 1: processing =1; break
    if ((blocks!=2) and (blocks!=3)): processing =16; break

    replaydate = datetime.strptime(first_chunk_decoded['dateTime'][0:10], "%d.%m.%Y")

    second_size = struct.unpack("i",f.read(4))[0]
    if filesize < (12+first_size+4+second_size): processing =10; break
    second_chunk = f.read(second_size)

# <20121101 and blocks==2 means Complete (pre 8.1). Second block should be Json.
    if (replaydate < datetime(2012, 11, 1)) and blocks==2:
      if second_chunk[0:2] == b'[{':
# Complete (pre 8.1).
        if to_decode&2:
          second_chunk_decoded = json.loads(second_chunk.decode('utf-8'))
          chunks_bitmask = chunks_bitmask|2
        processing =3; break
      else: processing =14; break

# =20121101 and blocks==2 can go both ways, need to autodetect second block.
# >20121101 and blocks==2 can contain broken replay
    elif (replaydate >= datetime(2012, 11, 1)) and blocks==2:
      if second_chunk[0:2] == b'(d':
# Incomplete (past 8.1), with 'Battle Result' pickle.
        if to_decode&4:
          third_chunk_decoded = _Unpickler(io.BytesIO(second_chunk)).load()
          chunks_bitmask = chunks_bitmask|4
          for b in third_chunk_decoded['vehicles']:
            third_chunk_decoded['vehicles'][b]['details']= _Decoder.decode_details(third_chunk_decoded['vehicles'][b]['details'].encode('raw_unicode_escape'))
            third_chunk_decoded['players'][ third_chunk_decoded['vehicles'][b]['accountDBID'] ]["vehicleid"]=b
        processing =2; break
      elif second_chunk[0:2] == b'[{':
        if to_decode&2:
          second_chunk_decoded = json.loads(second_chunk.decode('utf-8'))
          chunks_bitmask = chunks_bitmask|2
        if replaydate == datetime(2012, 11, 1):
# Complete (pre 8.1).
          processing =3; break
        else:
# Bugged (past 8.1). Game crashed somewhere, second Json has game result.
          processing =6; break

# >=20121101 and blocks==3 means Complete (past 8.1).
    elif (replaydate >= datetime(2012, 11, 1)) and blocks==3:
      if second_chunk[0:2] == b'[{':
        if to_decode&2:
          second_chunk_decoded = json.loads(second_chunk.decode('utf-8'))
          chunks_bitmask = chunks_bitmask|2
        if filesize<(12+first_size+4+second_size+4): processing =10; break
        third_size = struct.unpack("i",f.read(4))[0]
        if filesize<(12+first_size+4+second_size+4+third_size): processing =10; break
        third_chunk = f.read(third_size)
        if third_chunk[0:2] == b'(d':
          if to_decode&4:
            third_chunk_decoded = _Unpickler(io.BytesIO(third_chunk)).load()
            chunks_bitmask = chunks_bitmask|4
            for b in third_chunk_decoded['vehicles']:
              third_chunk_decoded['vehicles'][b]['details']= _Decoder.decode_details(third_chunk_decoded['vehicles'][b]['details'].encode('raw_unicode_escape'))
              third_chunk_decoded['players'][ third_chunk_decoded['vehicles'][b]['accountDBID'] ]["vehicleid"]=b
          processing =4; break
        else: processing =15; break
      else: processing =14; break


# All states that we can handle broke out of the While loop at this point.
# Unhandled cases trigger this.
    processing =20; break

  if f is not None:
    f.close()

  if chunks_bitmask&5 ==5:
# lets check if pickle belongs to this replay
# this is weak check, we only compare map and game mode, It can still pass some corrupted ones
    if maps[ third_chunk_decoded['common']['arenaTypeID'] & 65535 ][0] !=first_chunk_decoded['mapName'] or \
       gameplayid[ third_chunk_decoded['common']['arenaTypeID'] >>16] != first_chunk_decoded['gameplayID']:
      processing =8

# guesstimating version, reliable only since 8.6 because WG added a version
# string; earlier replays cannot be told apart cheaply, so default to 8.3
  if chunks_bitmask&1 ==1:
    if "clientVersionFromExe" in first_chunk_decoded:
      version = int(first_chunk_decoded["clientVersionFromExe"].replace(', ',''))
    else:
      version = 830 # no clue, lets default to safe 8.3
  else:
    version = 0 #no first chunk = no version

# returns decoded_chunk[0:3], bitmap of available chunks, decoder status, ~version
  return (first_chunk_decoded, second_chunk_decoded, third_chunk_decoded), chunks_bitmask, processing, version

Example 6

Project: wotdecoder
Source File: wotrepparser.py
View license
def main():
  """Scan .wotreplay files and sort them into category folders.

  Categories: incomplete, result, complete, clanwar, error. Command-line
  flags select source/output directories, move vs copy vs dry run, file
  renaming, and optional dumping of the battle_results block (raw pickle
  or decoded json). Run with -h for the full flag list.
  """

  verbose = False
  recursive = False
  rename = True
  mode = 0          # 0 = move, 1 = copy, 2 = dry run
  b_r = 0           # 0 = no battle_results dump, 1 = raw pickle, 2 = json, 3 = pretty json
  overwrite = False
  source = os.getcwd()
  output = os.getcwd()
  skip = -1         # index of the "-o" value in sys.argv[1:], so it is not re-parsed as a flag

# Parse arguments
  for argind, arg in enumerate(sys.argv[1:]):
    if argind == skip: pass
    elif arg == "-v" : verbose = True
    elif arg == "-r" : recursive = True
    elif arg == "-n" : rename = False
    elif arg == "-b" : b_r = 1
    elif arg == "-b1" : b_r = 2
    elif arg == "-b2" : b_r = 3
    elif arg == "-f" : overwrite = True
    elif arg == "-c" : mode = 1
    elif arg == "-c0" : mode = 2
    elif arg == "-o" :
      if len(sys.argv) <= argind+2:
        sys.exit("\nUnspecified Output directory.")
      output = sys.argv[argind+2]
      skip = argind+1

      if not os.path.isdir(output):
        print("\nOutput directory: "+output+" doesnt exist. Creating.")
        try:
          os.makedirs(output)
        except:
          sys.exit("Cant create "+output)

    elif arg in ("-h", "-?") or arg.startswith("-") :
                    sys.exit("wotrepparser scans replay files and sorts them into categories (incomplete, result, complete, clanwar, error)."
                             "\nUsage:" \
                             "\n\nwotrepparser file_or_directory -o output_directory -v -r -n" \
                             "\n\n-o  Specify output directory. Default is current." \
                             "\n-v  Verbose, display every file processed." \
                             "\n-r  Recursive scan of all subdirectories." \
                             "\n-n  Dont rename files." \
                             "\n-b  Dump raw battle_results pickle to output_directory\\b_r\\number.pickle" \
                             "\n-b1 Decode battle_results pickle, save output_directory\\b_r\\number.json" \
                             "\n-b2 Same as above, but human readable json." \
                             "\n-f  Force overwrite. Default is ask." \
                             "\n-c  Copy instead of moving." \
                             "\n-c0 Dry run, dont copy, dont move.")

    elif source == os.getcwd():
      if not os.path.exists(arg):
        sys.exit("\n"+arg+" doesnt exist.")
      source = arg


  print ("\nSource:", source)
  print ("Output:", output)
  print ("Mode  :", ("move","copy","dry run")[mode]+",",("dont rename","rename")[rename]+("",", verbose")[verbose]+("",", recursive dir scan")[recursive]+ \
         ("",", raw battle_results pickle",", decoded battle_results json",", decoded human readable battle_results json")[b_r]+".\n")


# Fix: time.clock() was deprecated since 3.3 and removed in Python 3.8.
  t1 = time.perf_counter()

  if os.path.isfile(source):
    listdir = [source]
  else:
    listdir = custom_listfiles(source, "wotreplay", recursive, "temp.wotreplay")

# Create every category directory up front.
  for sub in ("clanwar", "incomplete", "result", "complete", "error"):
    if not os.path.exists(output + os.path.sep + sub):
      os.makedirs(output + os.path.sep + sub)
  if b_r>0 and (not os.path.exists(output + os.path.sep + "b_r")):
    os.makedirs(output + os.path.sep + "b_r")

  dest = ["incomplete", "result", "complete", "complete", "clanwar", "error"]
  stats = [0, 0, 0, 0, 0, 0]

  for files in listdir:
    while True:
      fileo = os.path.basename(files)

      chunks, chunks_bitmask, processing, version = wotdecoder.replay(files,7) #7 means try to decode all three blocks (binary 111)

      if processing == 3 and (len(chunks[0]['vehicles'])!=len(chunks[1][1])) or \
         processing == 4 and chunks[2]['common']['bonusType'] == 5: #fogofwar = cw, bonusType = 5 = cw
        dest_index = 4
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          clan_tag = ["", ""]
          for playind, player in enumerate(chunks[1][1]):
            if playind == 0:
              first_tag = chunks[1][1][player]['clanAbbrev']
              clan_tag[chunks[1][1][player]['team'] - 1] = first_tag
            elif first_tag != chunks[1][1][player]['clanAbbrev']:
              clan_tag[chunks[1][1][player]['team'] - 1] = chunks[1][1][player]['clanAbbrev']
              break

          winlose=("Loss","Win_")[chunks[1][0]['isWinner']==1]

          clan_tag[0] = clan_tag[0] +"_"*(5-len(clan_tag[0]))
          clan_tag[1] = clan_tag[1] +"_"*(5-len(clan_tag[1]))

# You can change cw filename format here.
          fileo = "cw"+date+"_"+clan_tag[0]+"_"+clan_tag[1]+"_"+winlose+"_"+"-".join(chunks[0]['playerVehicle'].split("-")[1:])+"_"+chunks[0]['mapName']+".wotreplay"

      elif processing <6 and chunks_bitmask&2: #is second Json available? use it to determine win/loss
        dest_index = processing-1
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[1][0]['isWinner']==1]
          fileo = date+"_"+winlose+"_"+"-".join(chunks[0]['playerVehicle'].split("-")[1:])+"_"+chunks[0]['mapName']+".wotreplay"
      elif processing <6 and chunks_bitmask&4: #is pickle available? use it to determine win/loss
        dest_index = processing-1
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[2]['common']['winnerTeam'] == chunks[2]['personal']['team']]
          fileo = date+"_"+winlose+"_"+wotdecoder.tank[chunks[2]['personal']['typeCompDescr']][0]+"_"+wotdecoder.maps[chunks[2]['common']['arenaTypeID'] & 65535][0]+".wotreplay"
      elif processing ==6: #bugged, but has valid score and can be renamed
        dest_index = 5
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[1][0]['isWinner']==1]
          fileo = date+"_"+winlose+"_"+"-".join(chunks[0]['playerVehicle'].split("-")[1:])+"_"+chunks[0]['mapName']+".wotreplay"
      elif processing ==8: #bugged, but has valid pickle, can be renamed and moved to result
        dest_index = 1
        stats[dest_index] += 1
        if rename:
          date = datetime.strptime(chunks[0]['dateTime'], '%d.%m.%Y %H:%M:%S').strftime('%Y%m%d_%H%M')
          winlose=("Loss","Win_")[chunks[2]['common']['winnerTeam'] == chunks[2]['personal']['team']]
          fileo = date+"_"+winlose+"_"+wotdecoder.tank[chunks[2]['personal']['typeCompDescr']][0]+"_"+wotdecoder.maps[chunks[2]['common']['arenaTypeID'] & 65535][0]+".wotreplay"
      elif processing ==1: #incomplete
        dest_index = processing-1
        stats[dest_index] += 1
      else:
# Fix: catch-all instead of `elif processing >6`, so decoder statuses 2-5
# without a usable second Json/pickle land in "error" instead of leaving
# dest_index unbound (UnboundLocalError below).
        dest_index = 5
        stats[dest_index] += 1

      fileo = output + os.path.sep + dest[dest_index] + os.path.sep + fileo
      exists = os.path.isfile(fileo)
      ask = 0
      if not overwrite and exists:
        ask = getkeyboard(fileo, files)
        if ask == 2: overwrite = True
      else: ask = 1

      if mode == 0 and ask>0:
          shutil.move(files, fileo)

      elif mode == 1 and ask>0:
          shutil.copy(files, fileo)

      fileb_r = ""
      if b_r >0 and chunks_bitmask&4:
        fileb_r = output + os.path.sep + "b_r" + os.path.sep + str(chunks[2]['arenaUniqueID']) +("",".pickle",".json",".json")[b_r]
        exists = os.path.isfile(fileb_r)
        ask = 0
        if not overwrite and exists:
          ask = getkeyboard(fileb_r)
          if ask == 2: overwrite = True
        else: ask = 1

        if b_r == 1 and ask>0:
# Re-read the raw pickle block from the replay and dump it untouched.
# Fix: read first, create the output file only on success (the old code
# opened the output first and left an empty file behind on read errors),
# and use context managers so handles always close.
          with open(files, "rb") as f:
            f.seek(8)
            seek_size = struct.unpack("i",f.read(4))[0]
            f.seek(seek_size,1)
            if chunks_bitmask&2: #replay with Pickle can have 2 or 3 blocks, we are only interested in the last one and need to skip others
              seek_size = struct.unpack("i",f.read(4))[0]
              f.seek(seek_size,1)
            third_size = struct.unpack("i",f.read(4))[0]
            third_chunk = f.read(third_size)
          with open(fileb_r,"wb") as fo:
            fo.write(third_chunk)

        elif b_r == 2 and ask>0:
          with open(fileb_r,"w") as fo:
            json.dump(chunks[2],fo)

        elif b_r == 3 and ask>0:
          with open(fileb_r,"w") as fo:
            json.dump(chunks[2], fo, sort_keys=True, indent=4)

      if verbose:
        print ("\n"+files)
        print ("", dest[dest_index], " | ", wotdecoder.status[processing])
        print (fileo)
        print (fileb_r)
      break


  t2 = time.perf_counter()


  print ("\n{0:10} {1:>5}".format("Processed", str(len(listdir))))

# Merge the two "complete" buckets before printing per-category counts.
  del dest[2]
  stats[2] += stats[3]
  del stats[3]
  for x in range(0, len(dest)):
    print ("{0:10} {1:>5}".format(dest[x], stats[x]))

  print  ("Took %0.3fms"  % ((t2-t1)*1000))

Example 7

Project: phobos
Source File: bobj_import.py
View license
def load(#operator, context,
         filepath,
         global_clamp_size=0.0,
         use_ngons=True,
         use_smooth_groups=True,
         use_edges=True,
         use_split_objects=True,
         use_split_groups=True,
         use_image_search=True,
         use_groups_as_vgroups=False,
         relpath=None,
         global_matrix=None,
         ):
    """Import a binary OBJ (.bobj) file and create Blender objects from it.

    The file is read as a stream of records: a 4-byte int marker followed by
    a marker-specific fixed-size payload (see the parse loop below).  Parsed
    vertices and faces are handed to split_mesh()/create_mesh() (defined
    elsewhere in this module) to build the actual Blender meshes.

    Parameters:
        filepath: path of the .bobj file to import.
        global_clamp_size: if non-zero, imported objects are scaled down by
            powers of 10 until their largest bounding-box axis fits this size.
        use_ngons, use_smooth_groups, use_edges: forwarded to create_mesh().
        use_split_objects, use_split_groups: when either is set, split_mesh()
            splits the data into multiple objects/groups.
        use_image_search: forwarded to create_materials().
        use_groups_as_vgroups: ignored — forced to False below.
        relpath: base path for relative texture lookups in create_materials().
        global_matrix: ignored — forced to the identity matrix below.
    """
    print('\nimporting bobj %r' % filepath)

    filepath = os.fsencode(filepath)

    #if global_matrix is None:
    #    global_matrix = mathutils.Matrix()
    # global_matrix parameter is deliberately overridden with the identity.
    global_matrix = mathutils.Matrix()

    #if use_split_objects or use_split_groups:
    #    use_groups_as_vgroups = False
    # use_groups_as_vgroups parameter is deliberately overridden.
    use_groups_as_vgroups = False

    # Accumulators filled by the parse loop below.
    verts_loc = []      # vertex positions, (x, y, z) tuples
    verts_tex = []      # UV coordinates, (u, v) tuples
    faces = []          # (loc_indices, tex_indices, material, smooth_group, object)
    material_libs = []
    vertex_groups = {}

    float_func = float      # not always right

    context_material = None
    context_smooth_group = None
    context_object = None
    context_vgroup = None

    context_nurbs = {}
    nurbs = []
    context_parm = b''

    has_ngons = False

    unique_materials = {}
    unique_material_images = {}
    unique_smooth_groups = {}

    context_multi_line = b''

    stream = open(filepath, 'rb')

    # Whole file is read into memory and walked with a manual offset;
    # struct format 'i' uses native byte order/size — presumably the files
    # are produced on the same platform (TODO confirm endianness contract).
    read_bytes = stream.read()
    offset = 0
    while offset < len(read_bytes):
        data = None
        marker = struct.unpack('i', read_bytes[offset:offset+4])[0]
        #print('marker:', marker)
        offset += 4
        if marker == 1:
            # marker 1: vertex position, three floats (12 bytes).
            data = struct.unpack('fff', read_bytes[offset:offset+12])
            verts_loc.append(data)
            offset += 12
        elif marker == 2:
            # marker 2: texture (UV) coordinate, two floats (8 bytes).
            data = struct.unpack('ff', read_bytes[offset:offset+8])
            verts_tex.append((float_func(data[0]), float_func(data[1])))
            offset += 8
        elif marker == 3:
            # marker 3: 12-byte record skipped without decoding
            # (presumably a vertex normal — TODO confirm against exporter).
            #data = struct.unpack('fff', read_bytes[offset:offset+12])
            offset += 12
            # Nothing to do here

        elif marker == 4:
            # marker 4: a triangle — three 'iii' index triplets follow.
            # The index lists are appended to `faces` first and then filled
            # in place (they are mutable), so the tuple in `faces` sees the
            # final contents.
            face_vert_loc_indices = []
            face_vert_tex_indices = []

            faces.append((face_vert_loc_indices,
                          face_vert_tex_indices,
                          context_material,
                          context_smooth_group,
                          context_object,
                          ))

            for i in range(3):      #really?
                data = struct.unpack('iii', read_bytes[offset:offset+12])
                vert = data

                # Indices are 1-based (OBJ convention); negative values are
                # treated as relative to the end of the list so far.
                vert_loc_index = int(vert[0]) - 1
                #print('vert_loc_index:', vert_loc_index)
                if vert_loc_index < 0:
                    vert_loc_index = len(verts_loc) + vert_loc_index + 1
                face_vert_loc_indices.append(vert_loc_index)
                if len(vert) > 1 and vert[1]:
                    vert_tex_index = int(vert[1]) - 1
                    if vert_tex_index < 0:
                        vert_tex_index = len(verts_tex) + vert_tex_index + 1
                    face_vert_tex_indices.append(vert_tex_index)
                else:
                    # dummy
                    face_vert_tex_indices.append(0)

                # NOTE: the third int of each triplet (vert[2]) is never read.
                offset += 12

            # NOTE(review): exactly 3 indices are appended per face above,
            # so this condition can never be true — has_ngons stays False.
            if len(face_vert_loc_indices) > 4:
                has_ngons = True


            #if face_vert_loc_indices == [4, 3, 7]:
            #    face_vert_loc_indices = [0, 1, 2]

        else:
            # Unknown marker: abort the parse, keep what was read so far.
            print('ERROR: unable to read from here')
            break

        # more?

    stream.close()


    #relpath = None
    #use_image_search = True
    #use_split_objects = True
    #use_split_groups = True
    #use_ngons = True
    #use_edges = True



    # material_libs is always empty here; create_materials is still called
    # to set up defaults (helper defined elsewhere in this module).
    create_materials(filepath, relpath, material_libs, unique_materials, unique_material_images, use_image_search, float_func)

    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    scene = bpy.context.scene

    new_objects = []

    if use_split_objects or use_split_groups:
        SPLIT_OB_OR_GROUP = True
    else:
        SPLIT_OB_OR_GROUP = False

    for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP):
        create_mesh(new_objects,
                    has_ngons,
                    use_ngons,
                    use_edges,
                    verts_loc_split,
                    verts_tex,
                    faces_split,
                    unique_materials_split,
                    unique_material_images,
                    unique_smooth_groups,
                    vertex_groups,
                    dataname,
                    )

    # nurbs is never populated by the bobj parser above, so this loop is
    # effectively dead; kept for parity with the OBJ importer it derives from.
    for context_nurbs in nurbs:
        create_nurbs(context_nurbs, verts_loc, new_objects)

    # Create new obj
    for obj in new_objects:
        base = scene.objects.link(obj)
        base.select = True

        # we could apply this anywhere before scaling.
        obj.matrix_world = global_matrix

    scene.update()

    axis_min = [1000000000] * 3
    axis_max = [-1000000000] * 3

    if global_clamp_size:
        # Get all object bounds
        for ob in new_objects:
            for v in ob.bound_box:
                for axis, value in enumerate(v):
                    if axis_min[axis] > value:
                        axis_min[axis] = value
                    if axis_max[axis] < value:
                        axis_max[axis] = value

        # Scale objects
        max_axis = max(axis_max[0] - axis_min[0], axis_max[1] - axis_min[1], axis_max[2] - axis_min[2])
        scale = 1.0

        while global_clamp_size < max_axis * scale:
            scale = scale / 10.0

        for obj in new_objects:
            obj.scale = scale, scale, scale

Example 8

Project: shellsploit-framework
Source File: machobin.py
View license
    def find_Needed_Items(self, theCmds):
        '''
        Return a dict with the Mach-O load commands needed for patching.

        theCmds is a list of dicts, one per load command, each carrying:
          'Command'   - the load-command type (the cmd field),
          'DATA'      - the raw command payload (after cmd/cmdsize),
          'LOCInFIle' - file offset of that payload,
          'last_cmd'  - offset of the end of this command in the file.

        The result maps 'text_segment', 'text_section', 'LC_MAIN',
        'LC_UNIXTREAD', 'LC_CODE_SIGNATURE', 'LC_DYLIB_CODE_SIGN_DRS'
        and 'last_cmd' to the parsed (still raw-sliced) fields; an entry
        stays {} when the corresponding command is absent.
        '''
        _tempDict = {}
        text_segment = {}
        text_section = {}
        LC_MAIN = {}
        LC_UNIXTREAD = {}
        LC_CODE_SIGNATURE = {}
        LC_DYLIB_CODE_SIGN_DRS = {}

        locationInFIle = 0
        last_cmd = 0
        for item in theCmds:
            locationInFIle = item['LOCInFIle']
            if item['DATA'][0:6] == "__TEXT" and item['Command'] == 0x01:
                # 32-bit __TEXT segment (LC_SEGMENT).
                text_segment = {
                    'segname': item['DATA'][0:0x10],
                    'VMAddress': item['DATA'][0x10:0x14],
                    'VMSize': item['DATA'][0x14:0x18],
                    'File Offset': item['DATA'][0x18:0x1C],
                    'File Size': item['DATA'][0x1C:0x20],
                    'MaxVMProt': item['DATA'][0x20:0x24],
                    'InitalVMProt': item['DATA'][0x24:0x28],
                    'NumberOfSections': item['DATA'][0x28:0x2C],
                    'Flags': item['DATA'][0x2C:0x30]
                }

                # Walk the section headers that follow, looking for __text.
                count = struct.unpack("<I", text_segment['NumberOfSections'])[0]
                i = 0
                while count > 0:
                    if '__text' in item['DATA'][0x30 + i:0x40 + i]:
                        text_section = {
                            'sectionName': item['DATA'][0x30 + i:0x40 + i],
                            'segmentName': item['DATA'][0x40 + i:0x50 + i],
                            'Address': item['DATA'][0x50 + i:0x54 + i],
                            'LOCAddress': locationInFIle + 0x50 + i,
                            'Size': item['DATA'][0x54 + i:0x58 + i],
                            'LOCTextSize': locationInFIle + 0x54 + i,
                            'Offset': item['DATA'][0x58 + i:0x5c + i],
                            'LocTextOffset': locationInFIle + 0x58 + i,
                            'Alignment': item['DATA'][0x5c + i:0x60 + i],
                            'Relocations': item['DATA'][0x60 + i:0x64 + i],
                            'NumberOfRelocs': item['DATA'][0x64 + i:0x68 + i],
                            'Flags': item['DATA'][0x68 + i:0x6c + i],
                            'Reserved1': item['DATA'][0x6c + i:0x70 + i],
                            'Reserved2': item['DATA'][0x70 + i:0x74 + i],
                        }
                        break
                    else:
                        count -= 1
                        i += 0x40

            elif item['DATA'][0:6] == "__TEXT" and item['Command'] == 0x19:
                # 64-bit __TEXT segment (LC_SEGMENT_64); address/size fields
                # are 8 bytes wide here.
                text_segment = {
                    'segname': item['DATA'][0:0x10],
                    'VMAddress': item['DATA'][0x10:0x18],
                    'VMSize': item['DATA'][0x18:0x20],
                    'File Offset': item['DATA'][0x20:0x28],
                    'File Size': item['DATA'][0x28:0x30],
                    'MaxVMProt': item['DATA'][0x30:0x34],
                    'InitalVMProt': item['DATA'][0x34:0x38],
                    'NumberOfSections': item['DATA'][0x38:0x3C],
                    'Flags': item['DATA'][0x3c:0x40]
                }
                count = struct.unpack("<I", text_segment['NumberOfSections'])[0]
                i = 0
                while count > 0:

                    if '__text' in item['DATA'][0x40 + i:0x50 + i]:
                        text_section = {
                            'sectionName': item['DATA'][0x40 + i:0x50 + i],
                            'segmentName': item['DATA'][0x50 + i:0x60 + i],
                            'Address': item['DATA'][0x60 + i:0x68 + i],
                            'LOCAddress': locationInFIle + 0x60 + i,
                            'Size': item['DATA'][0x68 + i:0x70 + i],
                            'LOCTextSize': locationInFIle + 0x68 + i,
                            'Offset': item['DATA'][0x70 + i:0x74 + i],
                            'LocTextOffset': locationInFIle + 0x70 + i,
                            'Alignment': item['DATA'][0x74 + i:0x78 + i],
                            'Relocations': item['DATA'][0x78 + i:0x7c + i],
                            'NumberOfRelocs': item['DATA'][0x7c + i:0x80 + i],
                            'Flags': item['DATA'][0x80 + i:0x84 + i],
                            'Reserved1': item['DATA'][0x84 + i:0x88 + i],
                            'Reserved2': item['DATA'][0x88 + i:0x8c + i],
                            'Reserved3': item['DATA'][0x8c + i:0x90 + i],
                        }

                        break
                    else:
                        count -= 1
                        i += 0x4c

            if item['Command'] == 0x80000028:
                # LC_MAIN (entry_point_command): entryoff and stacksize are
                # both uint64.  FIX: stacksize is the 8 bytes at 0x8:0x10 —
                # the previous 0x8:0x16 slice (0x16 == 22) was a hex typo
                # and grabbed 14 bytes.
                LC_MAIN = {
                    'LOCEntryOffset': locationInFIle,
                    'EntryOffset': item['DATA'][0x0:0x8],
                    'StackSize': item['DATA'][0x8:0x10]
                }
            elif item['Command'] == 0x00000005 and struct.unpack("<I", item['DATA'][0x0:0x4])[0] == 0x01:
                # LC_UNIXTHREAD with x86 (32-bit) thread-state flavor.
                LC_UNIXTREAD = {
                    'LOCEntryOffset': locationInFIle,
                    'Flavor': item['DATA'][0x00:0x04],
                    'Count': item['DATA'][0x04:0x08],
                    'eax': item['DATA'][0x08:0x0C],
                    'ebx': item['DATA'][0x0C:0x10],
                    'ecx': item['DATA'][0x10:0x14],
                    'edx': item['DATA'][0x14:0x18],
                    'edi': item['DATA'][0x18:0x1C],
                    'esi': item['DATA'][0x1C:0x20],
                    'ebp': item['DATA'][0x20:0x24],
                    'esp': item['DATA'][0x24:0x28],
                    'ss': item['DATA'][0x28:0x2C],
                    'eflags': item['DATA'][0x2C:0x30],
                    'LOCeip': locationInFIle + 0x30,
                    'eip': item['DATA'][0x30:0x34],
                    'cs': item['DATA'][0x34:0x38],
                    'ds': item['DATA'][0x38:0x3C],
                    'es': item['DATA'][0x3C:0x40],
                    'fs': item['DATA'][0x40:0x44],
                    'gs': item['DATA'][0x44:0x48],
                }
            elif item['Command'] == 0x00000005 and struct.unpack("<I", item['DATA'][0x0:0x4])[0] == 0x04:
                # LC_UNIXTHREAD with x86_64 thread-state flavor.
                LC_UNIXTREAD = {
                    'LOCEntryOffset': locationInFIle,
                    'Flavor': item['DATA'][0x00:0x04],
                    'Count': item['DATA'][0x04:0x08],
                    'rax': item['DATA'][0x08:0x10],
                    'rbx': item['DATA'][0x10:0x18],
                    'rcx': item['DATA'][0x18:0x20],
                    'rdx': item['DATA'][0x20:0x28],
                    'rdi': item['DATA'][0x28:0x30],
                    'rsi': item['DATA'][0x30:0x38],
                    'rbp': item['DATA'][0x38:0x40],
                    'rsp': item['DATA'][0x40:0x48],
                    'r8': item['DATA'][0x48:0x50],
                    'r9': item['DATA'][0x50:0x58],
                    'r10': item['DATA'][0x58:0x60],
                    'r11': item['DATA'][0x60:0x68],
                    'r12': item['DATA'][0x68:0x70],
                    'r13': item['DATA'][0x70:0x78],
                    'r14': item['DATA'][0x78:0x80],
                    'r15': item['DATA'][0x80:0x88],
                    'LOCrip': locationInFIle + 0x88,
                    'rip': item['DATA'][0x88:0x90],
                    'rflags': item['DATA'][0x90:0x98],
                    'cs': item['DATA'][0x98:0xA0],
                    'fs': item['DATA'][0xA0:0xA8],
                    'gs': item['DATA'][0xA8:0xB0],
                }

            if item['Command'] == 0x000001D:
                # LC_CODE_SIGNATURE (linkedit_data_command): dataoff at 0x0,
                # datasize at 0x4.  FIX: 'Data Size' previously sliced
                # 0x0:0x8, overlapping 'Data Offset'.
                LC_CODE_SIGNATURE = {
                    'Data Offset': item['DATA'][0x0:0x4],
                    'Data Size': item['DATA'][0x4:0x8],
                }

            if item['Command'] == 0x0000002B:
                # LC_DYLIB_CODE_SIGN_DRS: same linkedit_data_command layout.
                # FIX: 'Data Size' previously sliced 0x0:0x8.
                LC_DYLIB_CODE_SIGN_DRS = {
                    'Data Offset': item['DATA'][0x0:0x4],
                    'Data Size': item['DATA'][0x4:0x8],
                }

            # Track the farthest end offset over all commands.
            if item['last_cmd'] > last_cmd:
                last_cmd = item['last_cmd']

        _tempDict = {'text_segment': text_segment, 'text_section': text_section,
                     'LC_MAIN': LC_MAIN, 'LC_UNIXTREAD': LC_UNIXTREAD,
                     'LC_CODE_SIGNATURE': LC_CODE_SIGNATURE,
                     'LC_DYLIB_CODE_SIGN_DRS': LC_DYLIB_CODE_SIGN_DRS,
                     'last_cmd': last_cmd
                     }

        return _tempDict

Example 9

Project: NOT_UPDATED_Sick-Beard-Dutch
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) (e.g. /usr/share/zoneinfo/...).

        *fileobj* may be a path string or an already-open file object.
        The constructor reads the whole TZif structure and fills in:
          _trans_list   - transition times (shifted to wall time at the end),
          _trans_idx    - per-transition _ttinfo objects,
          _ttinfo_list  - all (offset, isdst, abbr, isstd, isgmt) records,
          _ttinfo_std / _ttinfo_dst / _ttinfo_before - convenience picks.

        Raises ValueError if the "TZif" magic is missing.

        NOTE: Python 2 code (basestring, backquote repr, raise-with-comma);
        kept byte-identical here.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviations are NUL-terminated strings inside `abbr`.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Scan backwards so the most recent std/dst records win.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 10

Project: the-backdoor-factory
Source File: machobin.py
View license
    def find_Needed_Items(self, theCmds):
        '''
        This method returns a dict with commands that we need
        for mach-o patching
        '''
        # Each result entry stays {} when its command type never appears.
        text_segment = {}
        text_section = {}
        LC_MAIN = {}
        LC_UNIXTREAD = {}
        LC_CODE_SIGNATURE = {}
        LC_DYLIB_CODE_SIGN_DRS = {}

        last_cmd = 0
        locationInFIle = 0

        for item in theCmds:
            locationInFIle = item['LOCInFIle']
            data = item['DATA']
            cmd = item['Command']

            if data[0:6] == "__TEXT" and cmd == 0x01:
                # 32-bit __TEXT segment: fixed header, then section headers.
                text_segment = {
                    'segname': data[0:0x10],
                    'VMAddress': data[0x10:0x14],
                    'VMSize': data[0x14:0x18],
                    'File Offset': data[0x18:0x1C],
                    'File Size': data[0x1C:0x20],
                    'MaxVMProt': data[0x20:0x24],
                    'InitalVMProt': data[0x24:0x28],
                    'NumberOfSections': data[0x28:0x2C],
                    'Flags': data[0x2C:0x30]
                }

                remaining = struct.unpack("<I", text_segment['NumberOfSections'])[0]
                base = 0x30  # first section header starts right after the segment fields
                while remaining > 0:
                    if '__text' in data[base:base + 0x10]:
                        text_section = {
                            'sectionName': data[base:base + 0x10],
                            'segmentName': data[base + 0x10:base + 0x20],
                            'Address': data[base + 0x20:base + 0x24],
                            'LOCAddress': locationInFIle + base + 0x20,
                            'Size': data[base + 0x24:base + 0x28],
                            'LOCTextSize': locationInFIle + base + 0x24,
                            'Offset': data[base + 0x28:base + 0x2C],
                            'LocTextOffset': locationInFIle + base + 0x28,
                            'Alignment': data[base + 0x2C:base + 0x30],
                            'Relocations': data[base + 0x30:base + 0x34],
                            'NumberOfRelocs': data[base + 0x34:base + 0x38],
                            'Flags': data[base + 0x38:base + 0x3C],
                            'Reserved1': data[base + 0x3C:base + 0x40],
                            'Reserved2': data[base + 0x40:base + 0x44],
                        }
                        break
                    remaining -= 1
                    base += 0x40  # 32-bit section headers are 0x40 bytes apart

            elif data[0:6] == "__TEXT" and cmd == 0x19:
                # 64-bit __TEXT segment: address/size fields are 8 bytes wide.
                text_segment = {
                    'segname': data[0:0x10],
                    'VMAddress': data[0x10:0x18],
                    'VMSize': data[0x18:0x20],
                    'File Offset': data[0x20:0x28],
                    'File Size': data[0x28:0x30],
                    'MaxVMProt': data[0x30:0x34],
                    'InitalVMProt': data[0x34:0x38],
                    'NumberOfSections': data[0x38:0x3C],
                    'Flags': data[0x3c:0x40]
                }

                remaining = struct.unpack("<I", text_segment['NumberOfSections'])[0]
                base = 0x40
                while remaining > 0:
                    if '__text' in data[base:base + 0x10]:
                        text_section = {
                            'sectionName': data[base:base + 0x10],
                            'segmentName': data[base + 0x10:base + 0x20],
                            'Address': data[base + 0x20:base + 0x28],
                            'LOCAddress': locationInFIle + base + 0x20,
                            'Size': data[base + 0x28:base + 0x30],
                            'LOCTextSize': locationInFIle + base + 0x28,
                            'Offset': data[base + 0x30:base + 0x34],
                            'LocTextOffset': locationInFIle + base + 0x30,
                            'Alignment': data[base + 0x34:base + 0x38],
                            'Relocations': data[base + 0x38:base + 0x3C],
                            'NumberOfRelocs': data[base + 0x3C:base + 0x40],
                            'Flags': data[base + 0x40:base + 0x44],
                            'Reserved1': data[base + 0x44:base + 0x48],
                            'Reserved2': data[base + 0x48:base + 0x4C],
                            'Reserved3': data[base + 0x4C:base + 0x50],
                        }
                        break
                    remaining -= 1
                    base += 0x4c  # step kept identical to the original (0x4c)

            if cmd == 0x80000028:
                # LC_MAIN (slices kept exactly as the original's).
                LC_MAIN = {
                    'LOCEntryOffset': locationInFIle,
                    'EntryOffset': data[0x0:0x8],
                    'StackSize': data[0x8:0x16]
                }
            elif cmd == 0x00000005:
                # LC_UNIXTHREAD: dispatch on the leading flavor word, then
                # slice the register file with a name table instead of one
                # literal per register.
                flavor = struct.unpack("<I", data[0x0:0x4])[0]
                if flavor == 0x01:
                    LC_UNIXTREAD = {
                        'LOCEntryOffset': locationInFIle,
                        'Flavor': data[0x00:0x04],
                        'Count': data[0x04:0x08],
                        'LOCeip': locationInFIle + 0x30,
                    }
                    off = 0x08
                    for reg in ('eax', 'ebx', 'ecx', 'edx', 'edi', 'esi',
                                'ebp', 'esp', 'ss', 'eflags', 'eip', 'cs',
                                'ds', 'es', 'fs', 'gs'):
                        LC_UNIXTREAD[reg] = data[off:off + 4]
                        off += 4
                elif flavor == 0x04:
                    LC_UNIXTREAD = {
                        'LOCEntryOffset': locationInFIle,
                        'Flavor': data[0x00:0x04],
                        'Count': data[0x04:0x08],
                        'LOCrip': locationInFIle + 0x88,
                    }
                    off = 0x08
                    for reg in ('rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi',
                                'rbp', 'rsp', 'r8', 'r9', 'r10', 'r11',
                                'r12', 'r13', 'r14', 'r15', 'rip', 'rflags',
                                'cs', 'fs', 'gs'):
                        LC_UNIXTREAD[reg] = data[off:off + 8]
                        off += 8

            if cmd == 0x000001D:
                # LC_CODE_SIGNATURE (slices kept exactly as the original's).
                LC_CODE_SIGNATURE = {
                    'Data Offset': data[0x0:0x4],
                    'Data Size': data[0x0:0x8],
                }

            if cmd == 0x0000002B:
                # LC_DYLIB_CODE_SIGN_DRS (slices kept as the original's).
                LC_DYLIB_CODE_SIGN_DRS = {
                    'Data Offset': data[0x0:0x4],
                    'Data Size': data[0x0:0x8],
                }

            # Keep the farthest end-of-command offset seen so far.
            if item['last_cmd'] > last_cmd:
                last_cmd = item['last_cmd']

        return {'text_segment': text_segment, 'text_section': text_section,
                'LC_MAIN': LC_MAIN, 'LC_UNIXTREAD': LC_UNIXTREAD,
                'LC_CODE_SIGNATURE': LC_CODE_SIGNATURE,
                'LC_DYLIB_CODE_SIGN_DRS': LC_DYLIB_CODE_SIGN_DRS,
                'last_cmd': last_cmd
                }

Example 11

Project: stormhttp
Source File: parser.py
View license
    def feed_data(self, data: bytes):
        """Append *data* to the internal buffer and parse as much of one
        HTTP/2 frame as is currently available.

        The parser is incremental: each call advances ``self._state``
        through EMPTY -> LENGTH -> FLAGS -> STREAM -> PAYLOAD as bytes
        arrive, so a frame split across several ``feed_data`` calls is
        assembled correctly.  Consumed bytes are trimmed from the buffer
        at the end of the call.

        Raises:
            Http2ConnectionError / Http2StreamError: when a frame's
                length or stream id violates the framing rules checked
                below (fixed-size frames, SETTINGS multiples of 6,
                stream-id-required / stream-id-forbidden frame types).
        """
        self._buffer += data
        buffer_length = len(self._buffer)
        buffer_offset = 0

        # State EMPTY: need at least 3 bytes for the big-endian payload
        # length field.
        if self._state == _PARSER_STATE_EMPTY and buffer_length > 2:
            self._length = int.from_bytes(self._buffer[:3], "big")
            self._state = _PARSER_STATE_LENGTH
            buffer_length -= 3
            buffer_offset += 3

        # State LENGTH: one type byte selects the concrete frame class;
        # frame types with a fixed payload size are validated up front.
        if self._state == _PARSER_STATE_LENGTH and buffer_length > 0:
            frame_type = self._buffer[buffer_offset]
            if frame_type == FRAME_TYPE_DATA:
                self.message = Http2DataFrame()

            elif frame_type == FRAME_TYPE_HEADERS:
                self.message = Http2HeadersFrame()

            elif frame_type == FRAME_TYPE_PRIORITY:
                self.message = Http2PriorityFrame()

            elif frame_type == FRAME_TYPE_RST_STREAM:
                # RST_STREAM carries exactly one 4-byte error code.
                if self._length != 4:
                    raise Http2ConnectionError(ERROR_CODE_FRAME_SIZE_ERROR)
                self.message = Http2RstStreamFrame()

            elif frame_type == FRAME_TYPE_SETTINGS:
                # SETTINGS payload is a sequence of 6-byte (id, value) pairs.
                if self._length % 6:
                    raise Http2ConnectionError(ERROR_CODE_FRAME_SIZE_ERROR)
                self.message = Http2SettingsFrame()

            elif frame_type == FRAME_TYPE_PUSH_PROMISE:
                self.message = Http2PushPromiseFrame()

            elif frame_type == FRAME_TYPE_PING:
                # PING payload is exactly 8 opaque bytes.
                if self._length != 8:
                    raise Http2ConnectionError(ERROR_CODE_FRAME_SIZE_ERROR)
                self.message = Http2PingFrame()

            elif frame_type == FRAME_TYPE_GO_AWAY:
                self.message = Http2GoAwayFrame()

            elif frame_type == FRAME_TYPE_WINDOW_UPDATE:
                self.message = Http2WindowUpdateFrame()

            elif frame_type == FRAME_TYPE_CONTINUATION:
                self.message = Http2ContinuationFrame()

            else:
                # Unknown frame type: keep it as a generic frame rather
                # than erroring out.
                self.message = Http2Frame(frame_type)

            buffer_offset += 1
            buffer_length -= 1
            self._state = _PARSER_STATE_FLAGS

        # State FLAGS: one flag byte.
        if self._state == _PARSER_STATE_FLAGS and buffer_length > 0:
            self.message.frame_flags = self._buffer[buffer_offset]
            buffer_offset += 1
            buffer_length -= 1
            self._state = _PARSER_STATE_STREAM

            # A SETTINGS frame with the ACK flag (0x1) must be empty.
            if self.message.frame_type == FRAME_TYPE_SETTINGS and self.message.frame_flags & 0x1 and self._length:
                raise Http2ConnectionError(ERROR_CODE_FRAME_SIZE_ERROR)

        # State STREAM: 4-byte stream id; the top (reserved) bit is masked off.
        if self._state == _PARSER_STATE_STREAM and buffer_length > 3:
            self.message.stream_id = int.from_bytes(self._buffer[buffer_offset:buffer_offset + 4], "big") & 0x7FFFFFFF
            buffer_offset += 4
            buffer_length -= 4
            self._state = _PARSER_STATE_PAYLOAD

            # These frame types require a non-zero stream_id.
            if self.message.stream_id == 0:
                if self.message.frame_type in _FRAME_TYPES_REQUIRE_STREAM_ID:
                    raise Http2ConnectionError(ERROR_CODE_PROTOCOL_ERROR)

            # These frame types require a stream_id of 0.
            elif self.message.frame_type in _FRAME_TYPES_REQUIRE_NO_STREAM_ID:
                raise Http2ConnectionError(ERROR_CODE_PROTOCOL_ERROR)

            # PRIORITY payload is exactly 5 bytes; a bad size is a stream
            # error (not a connection error) because the stream id is known.
            if self.message.frame_type == FRAME_TYPE_PRIORITY:
                if self._length != 5:
                    raise Http2StreamError(self.message.stream_id, ERROR_CODE_FRAME_SIZE_ERROR)

        # State PAYLOAD: wait until the whole declared payload is buffered,
        # then dispatch on frame type.
        if self._state == _PARSER_STATE_PAYLOAD and buffer_length >= self._length:
            if isinstance(self.message, Http2PaddedFrame) and self.message.padded:
                # First payload byte is the pad length.
                padding_length = self._buffer[buffer_offset]
                # NOTE(review): a declared pad length of 0 is coerced to 1
                # here — verify against the HTTP/2 padding rules (a zero
                # pad length is legal and means "pad-length octet only").
                if padding_length == 0:
                    padding_length = 1
                buffer_offset += 1
            else:
                padding_length = 0

            if self.message.frame_type == FRAME_TYPE_DATA:
                # Payload = declared length minus padding and the
                # pad-length octet (if present).
                payload_length = self._length - padding_length - (1 if padding_length else 0)
                self.message.payload = self._buffer[buffer_offset:buffer_offset + payload_length]
                buffer_offset += payload_length

            elif self.message.frame_type == FRAME_TYPE_HEADERS:
                if self.message.priority:
                    # Optional 5-byte priority block: exclusive bit +
                    # 31-bit dependent stream id, then weight (stored +1).
                    dependent_stream_id = struct.unpack(">I", self._buffer[buffer_offset:buffer_offset + 4])[0]
                    self.message.exclusive = True if dependent_stream_id & 0x80000000 else False
                    self.message.dependent_stream_id = dependent_stream_id & 0x7FFFFFFF
                    self.message.weight = self._buffer[buffer_offset + 4] + 1
                    buffer_offset += 5

                payload_length = self._length - (5 if self.message.priority else 0) - padding_length - (1 if padding_length else 0)
                self.message.payload = self._buffer[buffer_offset:buffer_offset + payload_length]
                buffer_offset += payload_length

            elif self.message.frame_type == FRAME_TYPE_PRIORITY:
                # Same 5-byte layout as the HEADERS priority block.
                dependent_stream_id = struct.unpack(">I", self._buffer[buffer_offset:buffer_offset + 4])[0]
                self.message.exclusive = True if dependent_stream_id & 0x80000000 else False
                self.message.dependent_stream_id = dependent_stream_id & 0x7FFFFFFF
                self.message.weight = self._buffer[buffer_offset + 4] + 1
                buffer_offset += 5

            elif self.message.frame_type == FRAME_TYPE_RST_STREAM:
                self.message.error_code = int.from_bytes(self._buffer[buffer_offset:buffer_offset + 4], "big")
                buffer_offset += 4

            elif self.message.frame_type == FRAME_TYPE_SETTINGS:
                # Unpack all (16-bit key, 32-bit value) pairs in one call.
                settings = struct.unpack(">" + ("HI" * (self._length // 6)), self._buffer[buffer_offset:buffer_offset + self._length])
                for settings_key, settings_value in _grouper(settings, 2):
                    self.message.settings[settings_key] = settings_value
                buffer_offset += self._length

            elif self.message.frame_type == FRAME_TYPE_PUSH_PROMISE:
                self.message.promised_stream_id = int.from_bytes(self._buffer[buffer_offset:buffer_offset + 4], "big") & 0x7FFFFFFF
                payload_length = self._length - padding_length - (1 if padding_length else 0)
                self.message.payload = self._buffer[buffer_offset + 4:buffer_offset + payload_length]
                buffer_offset += payload_length

            elif self.message.frame_type == FRAME_TYPE_PING:
                self.message.payload = self._buffer[buffer_offset:buffer_offset + 8]
                buffer_offset += 8

            elif self.message.frame_type == FRAME_TYPE_GO_AWAY:
                # Last stream id + error code + optional opaque debug data.
                self.message.last_stream_id = int.from_bytes(self._buffer[buffer_offset:buffer_offset + 4], "big") & 0x7FFFFFFF
                self.message.error_code = int.from_bytes(self._buffer[buffer_offset + 4:buffer_offset + 8], "big")
                self.message.payload = self._buffer[buffer_offset + 8:self._length]
                buffer_offset += self._length

            elif self.message.frame_type == FRAME_TYPE_WINDOW_UPDATE:
                self.message.window_size = int.from_bytes(self._buffer[buffer_offset:buffer_offset + 4], "big") & 0x7FFFFFFF
                buffer_offset += 4

            elif self.message.frame_type == FRAME_TYPE_CONTINUATION:
                self.message.payload = self._buffer[buffer_offset:buffer_offset + self._length]
                buffer_offset += self._length

            if padding_length > 0:
                self.message.padding = self._buffer[buffer_offset:buffer_offset + padding_length]
                buffer_offset += padding_length

            self.message.on_complete()

        # Drop everything that was consumed this call.
        self._buffer = self._buffer[buffer_offset:]

Example 12

Project: SickGear
Source File: tz.py
View license
    def __init__(self, fileobj, filename=None):
        """Read and parse a binary tzfile(5) time zone file.

        *fileobj* may be a path string (opened and closed here) or an
        already-open binary file object; *filename* overrides the name
        recorded in ``self._filename``.  On success the instance holds
        ``_trans_list`` (wall-clock transition times), ``_trans_idx``
        (the _ttinfo for each transition) and the ``_ttinfo_*``
        std/dst/before attributes used for offset lookups.

        Raises:
            ValueError: if the file does not start with the "TZif" magic.
        """
        file_opened_here = False
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        try:
            if fileobj.read(4).decode() != "TZif":
                raise ValueError("magic not found")

            fileobj.read(16)

            (
                # The number of UTC/local indicators stored in the file.
                ttisgmtcnt,

                # The number of standard/wall indicators stored in the file.
                ttisstdcnt,

                # The number of leap seconds for which data is
                # stored in the file.
                leapcnt,

                # The number of "transition times" for which data
                # is stored in the file.
                timecnt,

                # The number of "local time types" for which data
                # is stored in the file (must not be zero).
                typecnt,

                # The  number  of  characters  of "time zone
                # abbreviation strings" stored in the file.
                charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # The above header is followed by tzh_timecnt four-byte
            # values  of  type long,  sorted  in ascending order.
            # These values are written in ``standard'' byte order.
            # Each is used as a transition time (as  returned  by
            # time(2)) at which the rules for computing local time
            # change.

            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # Next come tzh_timecnt one-byte values of type unsigned
            # char; each one tells which of the different types of
            # ``local time'' types described in the file is associated
            # with the same-indexed transition time. These values
            # serve as indices into an array of ttinfo structures that
            # appears next in the file.

            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # Each ttinfo structure is written as a four-byte value
            # for tt_gmtoff  of  type long,  in  a  standard  byte
            # order, followed  by a one-byte value for tt_isdst
            # and a one-byte  value  for  tt_abbrind.   In  each
            # structure, tt_gmtoff  gives  the  number  of
            # seconds to be added to UTC, tt_isdst tells whether
            # tm_isdst should be set by  localtime(3),  and
            # tt_abbrind serves  as an index into the array of
            # time zone abbreviation characters that follow the
            # ttinfo structure(s) in the file.

            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt).decode()

            # Then there are tzh_leapcnt pairs of four-byte
            # values, written in  standard byte  order;  the
            # first  value  of  each pair gives the time (as
            # returned by time(2)) at which a leap second
            # occurs;  the  second  gives the  total  number of
            # leap seconds to be applied after the given time.
            # The pairs of values are sorted in ascending order
            # by time.

            # Not used, for now
            # if leapcnt:
            #    leap = struct.unpack(">%dl" % (leapcnt*2),
            #                         fileobj.read(leapcnt*8))

            # Then there are tzh_ttisstdcnt standard/wall
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as standard
            # time or wall clock time, and are used when
            # a time zone file is used in handling POSIX-style
            # time zone environment variables.

            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # Finally, there are tzh_ttisgmtcnt UTC/local
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as UTC or
            # local time, and are used when a time zone file
            # is used in handling POSIX-style time zone envi-
            # ronment variables.

            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            # Only close the file if this constructor opened it.
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # Short-circuit on the count guards against isstd/isgmt being
            # unbound when the corresponding header count was zero.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                # NOTE(review): _ttinfo_first is only assigned on this
                # branch — confirm nothing reads it unconditionally.
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 13

Project: SickRage
Source File: tz.py
View license
    def __init__(self, fileobj, filename=None):
        """Read and parse a binary tzfile(5) time zone file.

        *fileobj* may be a path string (opened and closed here) or an
        already-open binary file object; *filename* overrides the name
        recorded in ``self._filename``.  On success the instance holds
        ``_trans_list`` (wall-clock transition times), ``_trans_idx``
        (the _ttinfo for each transition) and the ``_ttinfo_*``
        std/dst/before attributes used for offset lookups.

        Raises:
            ValueError: if the file does not start with the "TZif" magic.
        """
        file_opened_here = False
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        try:
            if fileobj.read(4).decode() != "TZif":
                raise ValueError("magic not found")

            fileobj.read(16)

            (
                # The number of UTC/local indicators stored in the file.
                ttisgmtcnt,

                # The number of standard/wall indicators stored in the file.
                ttisstdcnt,

                # The number of leap seconds for which data is
                # stored in the file.
                leapcnt,

                # The number of "transition times" for which data
                # is stored in the file.
                timecnt,

                # The number of "local time types" for which data
                # is stored in the file (must not be zero).
                typecnt,

                # The  number  of  characters  of "time zone
                # abbreviation strings" stored in the file.
                charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # The above header is followed by tzh_timecnt four-byte
            # values  of  type long,  sorted  in ascending order.
            # These values are written in ``standard'' byte order.
            # Each is used as a transition time (as  returned  by
            # time(2)) at which the rules for computing local time
            # change.

            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # Next come tzh_timecnt one-byte values of type unsigned
            # char; each one tells which of the different types of
            # ``local time'' types described in the file is associated
            # with the same-indexed transition time. These values
            # serve as indices into an array of ttinfo structures that
            # appears next in the file.

            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # Each ttinfo structure is written as a four-byte value
            # for tt_gmtoff  of  type long,  in  a  standard  byte
            # order, followed  by a one-byte value for tt_isdst
            # and a one-byte  value  for  tt_abbrind.   In  each
            # structure, tt_gmtoff  gives  the  number  of
            # seconds to be added to UTC, tt_isdst tells whether
            # tm_isdst should be set by  localtime(3),  and
            # tt_abbrind serves  as an index into the array of
            # time zone abbreviation characters that follow the
            # ttinfo structure(s) in the file.

            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt).decode()

            # Then there are tzh_leapcnt pairs of four-byte
            # values, written in  standard byte  order;  the
            # first  value  of  each pair gives the time (as
            # returned by time(2)) at which a leap second
            # occurs;  the  second  gives the  total  number of
            # leap seconds to be applied after the given time.
            # The pairs of values are sorted in ascending order
            # by time.

            # Not used, for now
            # if leapcnt:
            #    leap = struct.unpack(">%dl" % (leapcnt*2),
            #                         fileobj.read(leapcnt*8))

            # Then there are tzh_ttisstdcnt standard/wall
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as standard
            # time or wall clock time, and are used when
            # a time zone file is used in handling POSIX-style
            # time zone environment variables.

            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # Finally, there are tzh_ttisgmtcnt UTC/local
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as UTC or
            # local time, and are used when a time zone file
            # is used in handling POSIX-style time zone envi-
            # ronment variables.

            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            # Only close the file if this constructor opened it.
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # Short-circuit on the count guards against isstd/isgmt being
            # unbound when the corresponding header count was zero.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                # NOTE(review): _ttinfo_first is only assigned on this
                # branch — confirm nothing reads it unconditionally.
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 14

Project: smarthome
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Read and parse a binary tzfile(5) time zone file.

        *fileobj* may be a path string (opened here; note this variant
        never closes it) or an already-open binary file object.  On
        success the instance holds ``_trans_list`` (wall-clock transition
        times), ``_trans_idx`` (the _ttinfo for each transition) and the
        ``_ttinfo_*`` std/dst/before attributes used for offset lookups.

        Raises:
            ValueError: if the file does not start with the "TZif" magic.
        """
        if isinstance(fileobj, str):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # Short-circuit on the count guards against isstd/isgmt being
            # unbound when the corresponding header count was zero.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                # NOTE(review): _ttinfo_first is only assigned on this
                # branch — confirm nothing reads it unconditionally.
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 15

Project: LKD
Source File: test_all.py
View license
def get_kl

class RequireSymbol(object):
    """Test-method decorator that skips the test when any of the given
    debugger symbols cannot be resolved via ``testcase.kdbg``.

    Usage: ``@RequireSymbol("nt!NtCreateFile")`` on a TestCase method.
    """

    def __init__(self, *args):
        # Symbol names (e.g. "nt!NtCreateFile") that must resolve for the
        # decorated test to run.
        self.required_symbols = args

    def __call__(self, f):
        self.f = f
        # Plain (non-class-associated) wrapper so that at call time we
        # receive the wrapped 'self' (the TestCase instance) while still
        # reaching this RequireSymbol instance through the closure.
        def mywrapper(testcase):
            return self.do_call(testcase)
        return mywrapper

    def do_call(self, testcase):
        """Skip the test if any required symbol is missing, else run it."""
        for sym in self.required_symbols:
            if testcase.kdbg.get_symbol_offset(sym) is None:
                # skipTest() raises unittest.SkipTest itself; re-raising
                # its (None) return value, as the original code did, was
                # dead code that would be a TypeError if ever reached.
                testcase.skipTest("Cannot get symbol <{0}>".format(sym))
        return self.f(testcase)
        
    

class IDebugSymbolsTestCase(unittest.TestCase):
    """Exercises the IDebugSymbols wrappers exposed by LocalKernelDebugger."""

    def setUp(self):
        pass

    @classmethod
    def setUpClass(cls):
        # Pin the process to CPU 0 before attaching the kernel debugger.
        windows.winproxy.SetThreadAffinityMask(dwThreadAffinityMask=(1 << 0))
        cls.kdbg = LocalKernelDebugger()
        kernel_modules = windows.utils.get_kernel_modules()
        cls.modules = kernel_modules
        cls.ntkernelbase = kernel_modules[0].Base
        cls.kernelpath = kernel_modules[0].ImageName[:]
        cls.kernelpath = os.path.expandvars(cls.kernelpath.replace("\SystemRoot", "%SystemRoot%"))
        # Map the on-disk kernel image so its export RVAs can be rebased
        # onto the running kernel.
        cls.kernelmod = winproxy.LoadLibraryA(cls.kernelpath)
        pe = windows.pe_parse.PEFile(cls.kernelmod)
        cls.NtCreateFileVA = pe.exports['NtCreateFile'] - cls.kernelmod + cls.ntkernelbase

    def tearDown(self):
        #self.kdbg.detach()
        self.kdbg = None

    def test_get_symbol_offset(self):
        # IDebugSymbols::GetOffsetByName
        resolved = self.kdbg.get_symbol_offset("nt")
        self.assertEqual(resolved, self.ntkernelbase)

    @RequireSymbol("ntdll!NtCreateFile")
    def test_get_symbol_offset_user(self):
        # IDebugSymbols::GetOffsetByName
        expected = windows.utils.get_func_addr("ntdll", "NtCreateFile")
        resolved = self.kdbg.get_symbol_offset("ntdll!NtCreateFile")
        self.assertEqual(expected, resolved)

    @RequireSymbol("nt!NtCreateFile")
    def test_get_symbol(self):
        # IDebugSymbols::GetNameByOffset
        sym = self.kdbg.get_symbol(self.NtCreateFileVA)
        self.assertEqual(sym[0], 'nt!NtCreateFile')
        self.assertEqual(sym[1], 0x00)

    @RequireSymbol("ntdll!NtCreateFile")
    def test_get_symbol_user(self):
        # IDebugSymbols::GetNameByOffset
        target = windows.utils.get_func_addr("ntdll", "NtCreateFile")
        sym = self.kdbg.get_symbol(target)
        self.assertIn(sym[0], ["ntdll!NtCreateFile", "ntdll!ZwCreateFile"])

    def test_get_number_modules(self):
        # IDebugSymbols::GetNumberModules
        loaded, unloaded = self.kdbg.get_number_modules()

    def test_get_module_by_index(self):
        # IDebugSymbols::GetModuleByIndex
        loaded = self.kdbg.get_number_modules()[0]
        found = any(self.kdbg.get_module_by_index(idx) == self.ntkernelbase
                    for idx in range(loaded))
        if not found:
            raise AssertionError("ntoskrnl not found")

    def test_get_module_name_by_index(self):
        # IDebugSymbols::GetModuleNames
        loaded = self.kdbg.get_number_modules()[0]
        names = (self.kdbg.get_module_name_by_index(idx) for idx in range(loaded))
        if not any(info[1] == "nt" for info in names):
            raise AssertionError("ntoskrnl not found")

    def test_symbol_match(self):
        # IDebugSymbols::StartSymbolMatch | IDebugSymbols::GetNextSymbolMatch | IDebugSymbols::EndSymbolMatch
        matches = list(self.kdbg.symbol_match("nt!NtCreateF*"))
        self.assertEqual(matches[0][0], 'nt!NtCreateFile')
        self.assertEqual(matches[0][1], self.NtCreateFileVA)

class IDebugDataSpacesTestCase(unittest.TestCase):
    """Exercises the IDebugDataSpaces wrappers of LocalKernelDebugger:
    virtual/physical memory reads and writes, MSR reads, and processor
    system data, all against the live running kernel image.

    The on-disk kernel file is used as the expected-value reference for
    the read tests; the kernel's writable .data section is used as a
    scratch area for the write tests.
    """

    def setUp(self):
        pass
        
    @classmethod
    def setUpClass(self):
        # NOTE: the classmethod's first argument is named 'self' per this
        # file's convention; attributes set here are class-level fixtures.
        # Pin to CPU 0 so per-CPU operations (e.g. MSR reads) are stable.
        windows.winproxy.SetThreadAffinityMask(dwThreadAffinityMask=(1 << 0))
        self.kdbg = LocalKernelDebugger()
        modules = windows.utils.get_kernel_modules()
        # First entry of the kernel module list is ntoskrnl itself.
        self.ntkernelbase = modules[0].Base
        self.kernelpath = modules[0].ImageName[:]
        self.kernelpath = os.path.expandvars(self.kernelpath.replace("\SystemRoot", "%SystemRoot%"))
        # Raw bytes of the on-disk kernel image (Python 2: str of bytes),
        # compared against in-memory reads below.
        self.kernelbuf = open(self.kernelpath, "rb").read()
        self.kernelmod = winproxy.LoadLibraryA(self.kernelpath)
        pe = windows.pe_parse.PEFile(self.kernelmod)
        # Writable .data section header; its tail is the write-test scratch.
        self.kernel_section_data = [section for section in pe.sections if section.name == ".data"][0]

    def tearDown(self):
        #self.kdbg.detach()
        self.kdbg = None

    def test_read_byte(self):
        # IDebugDataSpaces::ReadVirtual
        # First byte of the mapped kernel must match the on-disk image.
        x = self.kdbg.read_byte(self.kdbg.get_symbol_offset("nt"))
        self.assertEqual(x, ord(self.kernelbuf[0]))

    def test_read_word(self):
        # IDebugDataSpaces::ReadVirtual
        x = self.kdbg.read_word(self.kdbg.get_symbol_offset("nt"))
        self.assertEqual(x, struct.unpack("<H", self.kernelbuf[:2])[0])

    def test_read_dword(self):
        # IDebugDataSpaces::ReadVirtual
        x = self.kdbg.read_dword(self.kdbg.get_symbol_offset("nt"))
        self.assertEqual(x, struct.unpack("<I", self.kernelbuf[:4])[0])

    def test_read_qword(self):
        # IDebugDataSpaces::ReadVirtual
        x = self.kdbg.read_qword(self.kdbg.get_symbol_offset("nt"))
        self.assertEqual(x, struct.unpack("<Q", self.kernelbuf[:8])[0])

    def test_read_byte_p(self):
        # IDebugDataSpaces::ReadPhysical
        # Physical read through virtual_to_physical must agree with the
        # virtual read of the same location.
        x = self.kdbg.read_byte(self.kdbg.get_symbol_offset("nt"))
        y = self.kdbg.read_byte_p(self.kdbg.virtual_to_physical(self.kdbg.get_symbol_offset("nt")))
        self.assertEqual(x, y)

    def test_read_word_p(self):
        # IDebugDataSpaces::ReadPhysical
        x = self.kdbg.read_word(self.kdbg.get_symbol_offset("nt"))
        y = self.kdbg.read_word_p(self.kdbg.virtual_to_physical(self.kdbg.get_symbol_offset("nt")))
        self.assertEqual(x, y)

    def test_read_dword_p(self):
        # IDebugDataSpaces::ReadPhysical
        x = self.kdbg.read_dword(self.kdbg.get_symbol_offset("nt"))
        y = self.kdbg.read_dword_p(self.kdbg.virtual_to_physical(self.kdbg.get_symbol_offset("nt")))
        self.assertEqual(x, y)

    def test_read_qword_p(self):
        # IDebugDataSpaces::ReadPhysical
        x = self.kdbg.read_qword(self.kdbg.get_symbol_offset("nt"))
        y = self.kdbg.read_qword_p(self.kdbg.virtual_to_physical(self.kdbg.get_symbol_offset("nt")))
        self.assertEqual(x, y)


    @test_32bit_only
    @RequireSymbol('nt!KiFastCallEntry')
    def test_read_msr32(self):
        # IDebugDataSpaces::ReadMsr
        # On x86 the SYSENTER target MSR should point at KiFastCallEntry.
        IA32_SYSENTER_EIP = 0x176
        x = self.kdbg.read_msr(IA32_SYSENTER_EIP)
        y = self.kdbg.get_symbol(x)
        self.assertEqual(y[0], 'nt!KiFastCallEntry')

    @test_64bit_only
    @RequireSymbol('nt!KiSystemCall64')
    def test_read_msr64(self):
        # IDebugDataSpaces::ReadMsr
        # On x64 the SYSCALL target MSR (LSTAR) should point at KiSystemCall64.
        LSTAR = 0xC0000082
        x = self.kdbg.read_msr(LSTAR)
        y = self.kdbg.get_symbol(x)
        self.assertEqual(y[0], 'nt!KiSystemCall64')

    @test_32bit_only
    def test_read_processor_system_data32(self):
        # IDebugDataSpaces::ReadProcessorSystemData
        # Cross-check the debugger's view of CPU 0 against local CPUID.
        DEBUG_DATA_PROCESSOR_IDENTIFICATION = 4
        x = self.kdbg.read_processor_system_data(0, DEBUG_DATA_PROCESSOR_IDENTIFICATION)
        self.assertEqual(cpuid.get_vendor_id(), x.X86.VendorString)
        self.assertEqual(cpuid.get_proc_family_model(), (x.X86.Family, x.X86.Model))

    @test_64bit_only
    def test_read_processor_system_data64(self):
        # IDebugDataSpaces::ReadProcessorSystemData
        DEBUG_DATA_PROCESSOR_IDENTIFICATION = 4
        x = self.kdbg.read_processor_system_data(0, DEBUG_DATA_PROCESSOR_IDENTIFICATION)
        self.assertEqual(cpuid.get_vendor_id(), x.Amd64.VendorString)
        self.assertEqual(cpuid.get_proc_family_model(), (x.Amd64.Family, x.Amd64.Model))

    def test_write_byte(self):
        # IDebugDataSpaces::WriteVirtual — scribble on the last byte(s) of
        # the kernel's .data section, then read back and compare.
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 1
        self.kdbg.write_byte(addr, 0x42)
        x = self.kdbg.read_byte(addr)
        self.assertEqual(0x42, x)

    def test_write_byte_p(self):
        # IDebugDataSpaces::WritePhysical — same scratch address, written
        # through its physical translation, read back virtually.
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 1
        self.kdbg.write_byte_p(self.kdbg.virtual_to_physical(addr), 0x43)
        x = self.kdbg.read_byte(addr)
        self.assertEqual(0x43, x)

    def test_write_word(self):
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 2
        self.kdbg.write_word(addr, 0x4444)
        x = self.kdbg.read_word(addr)
        self.assertEqual(0x4444, x)

    def test_write_word_p(self):
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 2
        self.kdbg.write_word_p(self.kdbg.virtual_to_physical(addr), 0x4545)
        x = self.kdbg.read_word(addr)
        self.assertEqual(0x4545, x)

    def test_write_dword(self):
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 4
        self.kdbg.write_dword(addr, 0x46464646)
        x = self.kdbg.read_dword(addr)
        self.assertEqual(0x46464646, x)

    def test_write_dword_p(self):
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 4
        self.kdbg.write_dword_p(self.kdbg.virtual_to_physical(addr), 0x47474747)
        x = self.kdbg.read_dword(addr)
        self.assertEqual(0x47474747, x)

    def test_write_qword(self):
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 8
        self.kdbg.write_qword(addr, 0x4848484848484848)
        x = self.kdbg.read_qword(addr)
        self.assertEqual(0x4848484848484848, x)

    def test_write_qword_p(self):
        kernel_base = self.kdbg.get_symbol_offset("nt")
        addr = kernel_base + self.kernel_section_data.VirtualAddress + self.kernel_section_data.VirtualSize - 8
        self.kdbg.write_qword_p(self.kdbg.virtual_to_physical(addr), 0x4949494949494949)
        x = self.kdbg.read_qword(addr)
        self.assertEqual(0x4949494949494949, x)
        
        
        
# Testing read_bus_data and read_io and write_io by exploring the PCI bus
class PciExplorer(object):
    """Reads PCI configuration space two ways — via the debugger's
    ReadBusData API and via direct 0xCF8/0xCFC port I/O — so the two
    access paths can be cross-checked against each other.
    """

    CONFIG_ADDRESS = 0xCF8   # legacy PCI configuration-address I/O port
    CONFIG_DATA = 0xCFC      # legacy PCI configuration-data I/O port
    PCIConfiguration = 4     # BusDataType for IDebugDataSpaces::ReadBusData

    def __init__(self, quiet=True):
        self.kdbg = dbginterface.LocalKernelDebugger(quiet)

    def read_pci(self, busnumber, device, function, offset, size):
        """Read `size` bytes of PCI config space through ReadBusData."""
        # SlotNumber packs device and function; looks like function in
        # bits 7:5 — TODO confirm against DEBUG_DATA bus-data docs.
        return self.kdbg.read_bus_data(self.PCIConfiguration, busnumber, device + (function << 5), offset, size)

    def read_pci_word(self, busnumber, device, function, offset):
        """Read a little-endian 16-bit config-space value via ReadBusData."""
        raw = self.read_pci(busnumber, device, function, offset, 2)
        return struct.unpack("<H", raw)[0]

    def manual_read_pci_word(self, busnumber, device, function, offset):
        """Read a 16-bit config value via the legacy 0xCF8/0xCFC mechanism.

        CONFIG_ADDRESS layout: bit 31 enable, bits 23:16 bus,
        bits 15:11 device, bits 10:8 function, bits 7:2 register number
        (the dword-aligned offset), bits 1:0 zero.
        """
        value = 1 << 31
        value |= busnumber << 16
        value |= device << 11
        value |= function << 8
        # BUGFIX: the register field occupies bits 7:2 of CONFIG_ADDRESS,
        # i.e. `offset & 0xfc` goes in unshifted. The previous
        # `(offset & 0xfc) << 2` corrupted the address for any offset >= 4;
        # it went unnoticed because the tests only use offsets 0 and 2,
        # which both mask to 0.
        value |= offset & 0xfc
        self.kdbg.write_io(self.CONFIG_ADDRESS, value, 4)
        # Select the requested 16-bit half of the 32-bit data register.
        return ((self.kdbg.read_io(self.CONFIG_DATA, 4) >> ((offset & 2) * 8)) & 0xffff)
   
class PCITestCase(unittest.TestCase):
    """Cross-checks ReadBusData against raw CONFIG_ADDRESS/CONFIG_DATA I/O."""

    def setUp(self):
        pass

    @classmethod
    def setUpClass(cls):
        cls.pciexplorer = PciExplorer()

    def test_iter_pci_device(self):
        # Walk every device slot on bus 0 / function 0 and verify that both
        # config-space access paths agree on the vendor and device IDs.
        bus = 0x00
        function = 0
        for slot in range(32):
            api_vendor = self.pciexplorer.read_pci_word(bus, slot, function, 0)
            api_device = self.pciexplorer.read_pci_word(bus, slot, function, 2)

            io_vendor = self.pciexplorer.manual_read_pci_word(bus, slot, function, 0)
            io_device = self.pciexplorer.manual_read_pci_word(bus, slot, function, 2)
            self.assertEqual(api_vendor, io_vendor)
            self.assertEqual(api_device, io_device)

    def tearDown(self):
        pass
        
class DriverUpgradeTestCase(unittest.TestCase):
    """Checks the driver-upgrade machinery of the local kernel debugger."""

    @classmethod
    def setUpClass(cls):
        cls.kdbg = LocalKernelDebugger()

    def test_alloc_memory(self):
        # Write then read back at both ends of a freshly allocated page.
        page = self.kdbg.alloc_memory(0x1000)
        self.kdbg.write_byte(page, 0x42)
        self.assertEqual(self.kdbg.read_byte(page), 0x42)

        self.kdbg.write_byte(page + 0xfff, 0x42)
        self.assertEqual(self.kdbg.read_byte(page + 0xfff), 0x42)

    def test_full_driver_upgrade(self):
        upgrader = self.kdbg.upgrader
        upgrader.registered_ioctl = []
        upgrader.full_driver_upgrade()
        # A successful upgrade must leave kernel allocation functional.
        self.test_alloc_memory()

    def test_retrieve_driver_upgrade(self):
        # Snapshot the IOCTLs currently registered and make sure a freshly
        # built upgrader recovers the exact same set from the live driver.
        known_ioctls = self.kdbg.upgrader.registered_ioctl
        self.assertTrue(known_ioctls)
        fresh_upgrader = type(self.kdbg.upgrader)(self.kdbg)
        self.assertTrue(fresh_upgrader.is_driver_already_upgraded())
        fresh_upgrader.retrieve_upgraded_info()
        self.assertItemsEqual(known_ioctls, fresh_upgrader.registered_ioctl)

    def test_map_page_to_userland(self):
        kernel_page = self.kdbg.alloc_memory(0x1000)
        user_page = self.kdbg.map_page_to_userland(kernel_page, 0x1000)

        # A kernel-side write must be visible through the userland mapping,
        # and a userland write must be visible from the kernel side.
        self.kdbg.write_dword(kernel_page, 0x11223344)
        self.assertEqual(ctypes.c_uint.from_address(user_page).value, 0x11223344)

        ctypes.c_uint.from_address(user_page + 4).value = 0x12345678
        self.assertEqual(self.kdbg.read_dword(kernel_page + 4), 0x12345678)

if __name__ == '__main__':
    # Assemble the suite explicitly so the run order stays deterministic.
    suite = unittest.TestSuite()
    for case in (IDebugSymbolsTestCase, IDebugDataSpacesTestCase,
                 PCITestCase, DriverUpgradeTestCase):
        suite.addTest(unittest.makeSuite(case))
    unittest.TextTestRunner(verbosity=2).run(suite)

Example 16

Project: maltrail
Source File: sensor.py
View license
def _process_packet(packet, sec, usec, ip_offset):
    """
    Processes single (raw) IP layer data
    """

    global _connect_sec
    global _last_syn
    global _last_logged_syn
    global _last_udp
    global _last_logged_udp
    global _last_dns_exhaustion
    global _subdomains_sec

    try:
        if len(_result_cache) > MAX_RESULT_CACHE_ENTRIES:
            _result_cache.clear()

        if config.USE_HEURISTICS:
            if _locks.connect_sec:
                _locks.connect_sec.acquire()

            connect_sec = _connect_sec
            _connect_sec = sec

            if _locks.connect_sec:
                _locks.connect_sec.release()

            if sec > connect_sec:
                for key in _connect_src_dst:
                    if len(_connect_src_dst[key]) > PORT_SCANNING_THRESHOLD:
                        _src_ip, _dst_ip = key.split('~')
                        if not check_whitelisted(_src_ip):
                            for _ in _connect_src_details[key]:
                                log_event((sec, usec, _src_ip, _[2], _dst_ip, _[3], PROTO.TCP, TRAIL.IP, _src_ip, "potential port scanning", "(heuristic)"), packet)

                _connect_src_dst.clear()
                _connect_src_details.clear()

        ip_data = packet[ip_offset:]
        ip_version = ord(ip_data[0]) >> 4
        localhost_ip = LOCALHOST_IP[ip_version]

        if ip_version == 0x04:  # IPv4
            ip_header = struct.unpack("!BBHHHBBH4s4s", ip_data[:20])
            iph_length = (ip_header[0] & 0xf) << 2
            protocol = ip_header[6]
            src_ip = socket.inet_ntoa(ip_header[8])
            dst_ip = socket.inet_ntoa(ip_header[9])
        elif ip_version == 0x06:  # IPv6
            # Reference: http://chrisgrundemann.com/index.php/2012/introducing-ipv6-understanding-ipv6-addresses/
            ip_header = struct.unpack("!BBHHBB16s16s", ip_data[:40])
            iph_length = 40
            protocol = ip_header[4]
            src_ip = inet_ntoa6(ip_header[6])
            dst_ip = inet_ntoa6(ip_header[7])
        else:
            return

        if protocol == socket.IPPROTO_TCP:  # TCP
            src_port, dst_port, _, _, doff_reserved, flags = struct.unpack("!HHLLBB", ip_data[iph_length:iph_length+14])

            if flags != 2 and config.plugin_functions:
                if dst_ip in trails:
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet, skip_write=True)
                elif src_ip in trails and dst_ip != localhost_ip:
                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet, skip_write=True)

            if flags == 2:  # SYN set (only)
                _ = _last_syn
                _last_syn = (sec, src_ip, src_port, dst_ip, dst_port)
                if _ == _last_syn:  # skip bursts
                    return

                if dst_ip in trails:
                    _ = _last_logged_syn
                    _last_logged_syn = _last_syn
                    if _ != _last_logged_syn:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)

                elif src_ip in trails and dst_ip != localhost_ip:
                    _ = _last_logged_syn
                    _last_logged_syn = _last_syn
                    if _ != _last_logged_syn:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)

                if config.USE_HEURISTICS:
                    if dst_ip != localhost_ip:
                        key = "%s~%s" % (src_ip, dst_ip)
                        if key not in _connect_src_dst:
                            _connect_src_dst[key] = set()
                            _connect_src_details[key] = set()
                        _connect_src_dst[key].add(dst_port)
                        _connect_src_details[key].add((sec, usec, src_port, dst_port))

            else:
                tcph_length = doff_reserved >> 4
                h_size = iph_length + (tcph_length << 2)
                tcp_data = ip_data[h_size:]

                if tcp_data.startswith("HTTP/"):
                    if any(_ in tcp_data[:tcp_data.find("\r\n\r\n")] for _ in ("X-Sinkhole:", "X-Malware-Sinkhole:", "Server: You got served", "Server: Apache 1.0/SinkSoft", "sinkdns.org")) or "\r\n\r\nsinkhole" in tcp_data:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, src_ip, "sinkhole response (malware)", "(heuristic)"), packet)
                    else:
                        index = tcp_data.find("<title>")
                        if index >= 0:
                            title = tcp_data[index + len("<title>"):tcp_data.find("</title>", index)]
                            if all(_ in title.lower() for _ in ("this domain", "has been seized")):
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, title, "seized domain (suspicious)", "(heuristic)"), packet)

                    content_type = None
                    first_index = tcp_data.find("\r\nContent-Type:")
                    if first_index >= 0:
                        first_index = first_index + len("\r\nContent-Type:")
                        last_index = tcp_data.find("\r\n", first_index)
                        if last_index >= 0:
                            content_type = tcp_data[first_index:last_index].strip().lower()

                    if content_type and content_type in SUSPICIOUS_CONTENT_TYPES:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, content_type, "content type (suspicious)", "(heuristic)"), packet)

                method, path = None, None
                index = tcp_data.find("\r\n")
                if index >= 0:
                    line = tcp_data[:index]
                    if line.count(' ') == 2 and " HTTP/" in line:
                        method, path, _ = line.split(' ')

                if method and path:
                    post_data = None
                    host = dst_ip
                    first_index = tcp_data.find("\r\nHost:")

                    if first_index >= 0:
                        first_index = first_index + len("\r\nHost:")
                        last_index = tcp_data.find("\r\n", first_index)
                        if last_index >= 0:
                            host = tcp_data[first_index:last_index]
                            host = host.strip().lower()
                            if host.endswith(":80"):
                                host = host[:-3]
                            if host and host[0].isalpha() and dst_ip in trails:
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.IP, "%s (%s)" % (dst_ip, host.split(':')[0]), trails[dst_ip][0], trails[dst_ip][1]), packet)
                            elif config.CHECK_HOST_DOMAINS and not host.replace('.', "").isdigit():
                                _check_domain(host, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
                    elif config.USE_HEURISTICS and config.CHECK_MISSING_HOST:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, "%s%s" % (host, path), "missing host header (suspicious)", "(heuristic)"), packet)

                    index = tcp_data.find("\r\n\r\n")
                    if index >= 0:
                        post_data = tcp_data[index + 4:]

                    if config.USE_HEURISTICS and dst_port == 80 and path.startswith("http://") and not _check_domain_whitelisted(urlparse.urlparse(path).netloc.split(':')[0]):
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, path, "potential proxy probe (suspicious)", "(heuristic)"), packet)
                        return
                    elif "://" in path:
                        url = path.split("://", 1)[1]

                        if '/' not in url:
                            url = "%s/" % url

                        host, path = url.split('/', 1)
                        if host.endswith(":80"):
                            host = host[:-3]
                        path = "/%s" % path
                        proxy_domain = host.split(':')[0]
                        _check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
                    elif method == "CONNECT":
                        if '/' in path:
                            host, path = path.split('/', 1)
                            path = "/%s" % path
                        else:
                            host, path = path, '/'
                        if host.endswith(":80"):
                            host = host[:-3]
                        url = "%s%s" % (host, path)
                        proxy_domain = host.split(':')[0]
                        _check_domain(proxy_domain, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, packet)
                    else:
                        url = "%s%s" % (host, path)

                    if config.USE_HEURISTICS:
                        user_agent, result = None, None

                        first_index = tcp_data.find("\r\nUser-Agent:")
                        if first_index >= 0:
                            first_index = first_index + len("\r\nUser-Agent:")
                            last_index = tcp_data.find("\r\n", first_index)
                            if last_index >= 0:
                                user_agent = tcp_data[first_index:last_index]
                                user_agent = urllib.unquote(user_agent).strip()

                        if user_agent:
                            result = _result_cache.get(user_agent)
                            if result is None:
                                if not any(_ in user_agent for _ in WHITELIST_UA_KEYWORDS):
                                    match = re.search(SUSPICIOUS_UA_REGEX, user_agent)
                                    if match:
                                        def _(value):
                                            return value.replace('(', "\\(").replace(')', "\\)")

                                        parts = user_agent.split(match.group(0), 1)

                                        if len(parts) > 1 and parts[0] and parts[-1]:
                                            result = _result_cache[user_agent] = "%s (%s)" % (_(match.group(0)), _(user_agent))
                                        else:
                                            result = _result_cache[user_agent] = _(match.group(0)).join(("(%s)" if part else "%s") % _(part) for part in parts)
                                if not result:
                                    _result_cache[user_agent] = False

                            if result:
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.UA, result, "user agent (suspicious)", "(heuristic)"), packet)

                    checks = [path.rstrip('/')]
                    if '?' in path:
                        checks.append(path.split('?')[0].rstrip('/'))

                    _ = os.path.splitext(checks[-1])
                    if _[1]:
                        checks.append(_[0])

                    if checks[-1].count('/') > 1:
                        checks.append(checks[-1][:checks[-1].rfind('/')])
                        checks.append(checks[0][checks[0].rfind('/'):].split('?')[0])

                    for check in filter(None, checks):
                        for _ in ("", host):
                            check = "%s%s" % (_, check)
                            if check in trails:
                                parts = url.split(check)
                                other = ("(%s)" % _ if _ else _ for _ in parts)
                                trail = check.join(other)
                                log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, trails[check][0], trails[check][1]))
                                return

                    if config.USE_HEURISTICS:
                        unquoted_path = urllib.unquote(path)
                        unquoted_post_data = urllib.unquote(post_data or "")
                        for char in SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS:
                            replacement = SUSPICIOUS_HTTP_REQUEST_FORCE_ENCODE_CHARS[char]
                            path = path.replace(char, replacement)
                            if post_data:
                                post_data = post_data.replace(char, replacement)

                        if not _check_domain_whitelisted(host):
                            if not any(_ in unquoted_path.lower() for _ in WHITELIST_HTTP_REQUEST_PATHS):
                                if any(_ in unquoted_path for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
                                    found = _result_cache.get(unquoted_path)
                                    if found is None:
                                        for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
                                            if re.search(regex, unquoted_path, re.I | re.DOTALL):
                                                found = desc
                                                break
                                        _result_cache[unquoted_path] = found or ""
                                    if found:
                                        trail = "%s(%s)" % (host, path)
                                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
                                        return

                                if any(_ in unquoted_post_data for _ in SUSPICIOUS_HTTP_REQUEST_PRE_CONDITION):
                                    found = _result_cache.get(unquoted_post_data)
                                    if found is None:
                                        for desc, regex in SUSPICIOUS_HTTP_REQUEST_REGEXES:
                                            if re.search(regex, unquoted_post_data, re.I | re.DOTALL):
                                                found = desc
                                                break
                                        _result_cache[unquoted_post_data] = found or ""
                                    if found:
                                        trail = "%s(%s \(%s %s\))" % (host, path, method, post_data.strip())
                                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.HTTP, trail, "%s (suspicious)" % found, "(heuristic)"), packet)
                                        return

                            if '.' in path:
                                _ = urlparse.urlparse("http://%s" % url)  # dummy scheme
                                path = path.lower()
                                filename = _.path.split('/')[-1]
                                name, extension = os.path.splitext(filename)
                                trail = "%s(%s)" % (host, path)
                                if extension and extension in SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS and not any(_ in path for _ in WHITELIST_DIRECT_DOWNLOAD_KEYWORDS) and '=' not in _.query and len(name) < 10:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "direct %s download (suspicious)" % extension, "(heuristic)"), packet)
                                elif filename in WEB_SHELLS:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "potential web shell (suspicious)", "(heuristic)"), packet)
                                else:
                                    for desc, regex in SUSPICIOUS_HTTP_PATH_REGEXES:
                                        if re.search(regex, filename, re.I):
                                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.TCP, TRAIL.URL, trail, "%s (suspicious)" % desc, "(heuristic)"), packet)
                                            break

        elif protocol == socket.IPPROTO_UDP:  # UDP
            _ = ip_data[iph_length:iph_length + 4]
            if len(_) < 4:
                return

            src_port, dst_port = struct.unpack("!HH", _)

            _ = _last_udp
            _last_udp = (sec, src_ip, src_port, dst_ip, dst_port)
            if _ == _last_udp:  # skip bursts
                return

            if src_port != 53 and dst_port != 53:  # not DNS
                if dst_ip in trails:
                    trail = dst_ip
                elif src_ip in trails:
                    trail = src_ip
                else:
                    trail = None

                if trail:
                    _ = _last_logged_udp
                    _last_logged_udp = _last_udp
                    if _ != _last_logged_udp:
                        log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, trail, trails[trail][0], trails[trail][1]), packet)

            else:
                dns_data = ip_data[iph_length + 8:]

                # Reference: http://www.ccs.neu.edu/home/amislove/teaching/cs4700/fall09/handouts/project1-primer.pdf
                if len(dns_data) > 6:
                    qdcount = struct.unpack("!H", dns_data[4:6])[0]
                    if qdcount > 0:
                        offset = 12
                        query = ""

                        while len(dns_data) > offset:
                            length = ord(dns_data[offset])
                            if not length:
                                query = query[:-1]
                                break
                            query += dns_data[offset + 1:offset + length + 1] + '.'
                            offset += length + 1

                        query = query.lower()

                        if not query or '.' not in query or not all(_ in VALID_DNS_CHARS for _ in query) or any(_ in query for _ in (".intranet.",)) or any(query.endswith(_) for _ in IGNORE_DNS_QUERY_SUFFIXES):
                            return

                        parts = query.split('.')

                        if ord(dns_data[2]) == 0x01:  # standard query
                            type_, class_ = struct.unpack("!HH", dns_data[offset + 1:offset + 5])

                            if len(parts) > 2:
                                domain = '.'.join(parts[-2:])

                                if not _check_domain_whitelisted(domain):  # e.g. <hash>.hashserver.cs.trendmicro.com
                                    if (sec - (_subdomains_sec or 0)) > DAILY_SECS:
                                        _subdomains.clear()
                                        _dns_exhausted_domains.clear()
                                        _subdomains_sec = sec

                                    subdomains = _subdomains.get(domain)

                                    if not subdomains:
                                        subdomains = _subdomains[domain] = set()

                                    if len(subdomains) < DNS_EXHAUSTION_THRESHOLD:
                                        subdomains.add('.'.join(parts[:-2]))
                                    else:
                                        if (sec - (_last_dns_exhaustion or 0)) > 60:
                                            trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "potential dns exhaustion (suspicious)", "(heuristic)"), packet)
                                            _dns_exhausted_domains.add(domain)
                                            _last_dns_exhaustion = sec

                                        return

                            # Reference: http://en.wikipedia.org/wiki/List_of_DNS_record_types
                            if type_ not in (12, 28) and class_ == 1:  # Type not in (PTR, AAAA), Class IN
                                if dst_ip in trails:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, "%s (%s)" % (dst_ip, query), trails[dst_ip][0], trails[dst_ip][1]), packet)
                                elif src_ip in trails:
                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)

                                _check_domain(query, sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, packet)

                        elif config.USE_HEURISTICS:
                            if ord(dns_data[2]) & 0x80:  # standard response
                                if ord(dns_data[3]) == 0x80:  # recursion available, no error
                                    _ = offset + 5
                                    try:
                                        while _ < len(dns_data):
                                            if ord(dns_data[_]) & 0xc0 != 0 and dns_data[_ + 2] == "\00" and dns_data[_ + 3] == "\x01":  # Type A
                                                break
                                            else:
                                                _ += 12 + struct.unpack("!H", dns_data[_ + 10: _ + 12])[0]

                                        _ = dns_data[_ + 12:_ + 16]
                                        if _:
                                            answer = socket.inet_ntoa(_)
                                            if answer in trails:
                                                _ = trails[answer]
                                                if "sinkhole" in _[0]:
                                                    trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "sinkholed by %s (malware)" % _[0].split(" ")[1], "(heuristic)"), packet)  # (e.g. kitro.pl, devomchart.com, jebena.ananikolic.su, vuvet.cn)
                                                elif "parking" in _[0]:
                                                    trail = "(%s).%s" % ('.'.join(parts[:-1]), '.'.join(parts[-1:]))
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, "parked site (suspicious)", "(heuristic)"), packet)
                                    except IndexError:
                                        pass

                                elif ord(dns_data[3]) == 0x83:  # recursion available, no such name
                                    if '.'.join(parts[-2:]) not in _dns_exhausted_domains and not _check_domain_whitelisted(query) and not _check_domain_member(query, trails):
                                        if parts[-1].isdigit():
                                            return

                                        if not (len(parts) > 4 and all(_.isdigit() and int(_) < 256 for _ in parts[:4])):  # generic check for DNSBL IP lookups
                                            for _ in filter(None, (query, "*.%s" % '.'.join(parts[-2:]) if query.count('.') > 1 else None)):
                                                if _ not in NO_SUCH_NAME_COUNTERS or NO_SUCH_NAME_COUNTERS[_][0] != sec / 3600:
                                                    NO_SUCH_NAME_COUNTERS[_] = [sec / 3600, 1, set()]
                                                else:
                                                    NO_SUCH_NAME_COUNTERS[_][1] += 1
                                                    NO_SUCH_NAME_COUNTERS[_][2].add(query)

                                                    if NO_SUCH_NAME_COUNTERS[_][1] > NO_SUCH_NAME_PER_HOUR_THRESHOLD:
                                                        if _.startswith("*."):
                                                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, "%s%s" % ("(%s)" % ','.join(item.replace(_[1:], "") for item in NO_SUCH_NAME_COUNTERS[_][2]), _[1:]), "excessive no such domain (suspicious)", "(heuristic)"), packet)
                                                            for item in NO_SUCH_NAME_COUNTERS[_][2]:
                                                                try:
                                                                    del NO_SUCH_NAME_COUNTERS[item]
                                                                except KeyError:
                                                                    pass
                                                        else:
                                                            log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, _, "excessive no such domain (suspicious)", "(heuristic)"), packet)

                                                        try:
                                                            del NO_SUCH_NAME_COUNTERS[_]
                                                        except KeyError:
                                                            pass

                                                        break

                                            if len(parts) > 2:
                                                part = parts[0] if parts[0] != "www" else parts[1]
                                                trail = "(%s).%s" % ('.'.join(parts[:-2]), '.'.join(parts[-2:]))
                                            elif len(parts) == 2:
                                                part = parts[0]
                                                trail = "(%s).%s" % (parts[0], parts[1])
                                            else:
                                                part = query
                                                trail = query

                                            if part and '-' not in part:
                                                result = _result_cache.get(part)

                                                if result is None:
                                                    # Reference: https://github.com/exp0se/dga_detector
                                                    probabilities = (float(part.count(c)) / len(part) for c in set(_ for _ in part))
                                                    entropy = -sum(p * math.log(p) / math.log(2.0) for p in probabilities)
                                                    if entropy > SUSPICIOUS_DOMAIN_ENTROPY_THRESHOLD:
                                                        result = "entropy threshold no such domain (suspicious)"

                                                    if not result:
                                                        if sum(_ in CONSONANTS for _ in part) > SUSPICIOUS_DOMAIN_CONSONANT_THRESHOLD:
                                                            result = "consonant threshold no such domain (suspicious)"

                                                    _result_cache[part] = result or False

                                                if result:
                                                    log_event((sec, usec, src_ip, src_port, dst_ip, dst_port, PROTO.UDP, TRAIL.DNS, trail, result, "(heuristic)"), packet)

        elif protocol in IPPROTO_LUT:  # non-TCP/UDP (e.g. ICMP)
            if protocol == socket.IPPROTO_ICMP:
                if ord(ip_data[iph_length]) != 0x08:  # Non-echo request
                    return
            elif protocol == socket.IPPROTO_ICMPV6:
                if ord(ip_data[iph_length]) != 0x80:  # Non-echo request
                    return

            if dst_ip in trails:
                log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, dst_ip, trails[dst_ip][0], trails[dst_ip][1]), packet)
            elif src_ip in trails:
                log_event((sec, usec, src_ip, '-', dst_ip, '-', IPPROTO_LUT[protocol], TRAIL.IP, src_ip, trails[src_ip][0], trails[src_ip][1]), packet)

    except struct.error:
        pass

    except Exception:
        if config.SHOW_DEBUG:
            traceback.print_exc()

Example 17

Project: pyNastran
Source File: xdb.py
View license
    def read_xdb(self, xdb_filename, etype, nsubcases, npload4s):
        """Sequentially parse a Nastran XDB binary result database.

        The file is consumed as a rigid sequence of named tables separated by
        raw byte gaps.  The magic byte counts used throughout (4100, 24616,
        12292, ...) were determined empirically from specific sample decks;
        they are NOT derived from a documented format spec.
        # NOTE(review): these sizes presumably only hold for the tri/quad
        # test models referenced in the comments below — confirm before
        # using on other decks.

        Parameters
        ----------
        xdb_filename : str
            Path of the .xdb file to read.
        etype : str
            Element type of the model; only 'tri' and 'quad' are handled
            (anything else raises NotImplementedError).
        nsubcases : int
            Number of subcases in the deck; only specific counts per etype
            are supported (see the dn selection blocks below).
        npload4s : int
            Number of PLOAD4 entries; affects how many table headers are
            skipped in the first table loop.

        Side effects: stores the open file object on ``self.f``, tracks the
        running byte offset in ``self.n``, and prints progress to stdout.
        """
        self.nbytes = os.path.getsize(xdb_filename)
        with open(xdb_filename, mode='rb') as self.f:

            print('(4100)')
            #self.show(80)
            # observed 4100-byte headers for the sample decks:
            # 6 tri
            #    1024, 16777215, 16777097, 118, 0, 2, 39, 39, 3, 1, 1, 4, 40, 20010, 72501883, 5, 0, 0, 0, 0
            # 1 tri
            #    1024, 16777215, 16777127, 88,  0, 2, 29, 29, 3, 1, 1, 4, 40, 20010, 72501883, 5, 0, 0, 0, 0
            # 2-quad
            #    1024, 16777215, 16777121, 94,  0, 2, 31, 31, 3, 1, 1, 4, 40, 20010, 72501883, 5, 0, 0, 0, 0
            self.f.read(4100)
            self.n += 4100

            # CTR3-----
            table_name = self.read_table_name()
            print('table_name = %r (4100)' % table_name)
            self.f.read(4100)
            self.n += 4100

            # skip the per-table headers for the fixed set of tables below;
            # the count (26 + npload4s + nsubcases) is empirical.
            # CTR3/CQD4-----
            # DDLFORDB-----
            # DISPR-----
            # EQEXINE-----
            # EQEXING-----
            # GRIDX-----
            # LIMITS-----
            # MAT1-----
            # PATHINT-----
            # PATHLINK-----
            # PATHQUAL-----
            # PLOAD4-----
            # PRODUCT-----
            # SPCFR-----
            # PROJECT-----
            # PSHELL-----
            # SID-----
            # SOLVE-----
            # SPC1-----
            # SPCFR-----
            # SUBCASE-----
            # SUBCASES-----
            # SUBTITL-----
            # SUBGRID-----
            for i in range(26 + npload4s + nsubcases):
                table_name = self.read_table_name()
                self.read_table_header(table_name, etype, npload4s)

            # SUPERS-----
            #self.show(1000, types='s')
            table_name = self.read_table_name()
            self.read_table(table_name, etype, nsubcases)

            # CTR3-----
            table_name = self.read_table_name()
            dn = 4092
            data = self.f.read(dn)
            self.n += dn
            #self.read_table(table_name, etype, nsubcases)

            # DDLFORDB-----
            table_name = self.read_table_name()
            print('table_name = %r (24616)' % table_name)

            data = self.f.read(24616)
            self.n += 24616

            # NASTRAN 8-----
            # 12-char version string, then 16 skipped bytes
            data = self.f.read(12)
            word = struct.unpack('12s', data)
            self.n += 12
            print('word = %r (16)' % word)

            data = self.f.read(16)
            self.n += 16

            # INTEL64 FAMILY 6 MOD                    WINDOWS 7           5-----
            # 64-char platform/OS banner, then a 3944-byte gap
            data = self.f.read(64)
            word = struct.unpack('64s', data)
            self.n += 64
            print('word = %r (3944)' % word)

            data = self.f.read(3944)
            self.n += 3944

            # PROJECT-----
            table_name = self.read_table_name()
            print('table_name = %r (4108)' % table_name)

            data = self.f.read(4108)
            self.n += 4108

            # spaces-----
            data = self.f.read(56)
            word = struct.unpack('56s', data)
            self.n += 56
            print('word = %r (8)' % word)

            data = self.f.read(8)
            self.n += 8

            # spaces-----
            data = self.f.read(56)
            word = struct.unpack('56s', data)
            self.n += 56
            print('word = %r (8052)' % word)

            data = self.f.read(8052)
            self.n += 8052

            # PATHQUAL-----
            table_name = self.read_table_name()
            print('table_name = %r (12280)' % table_name)


            data = self.f.read(12280)
            self.n += 12280

            # PATHLINK-----
            table_name = self.read_table_name()
            print('table_name = %r (4108)' % table_name)

            data = self.f.read(4108)
            self.n += 4108

            # MODEL-----
            table_name = self.read_table_name()
            print('table_name = %r (20)' % table_name)

            data = self.f.read(20)
            self.n += 20

            # SOLID-----
            table_name = self.read_table_name()
            print('table_name = %r (20)' % table_name)

            data = self.f.read(20)
            self.n += 20

            # HIGHQUAL-----
            table_name = self.read_table_name()
            print('table_name = %r (20)' % table_name)

            data = self.f.read(20)
            self.n += 20

            # AUXMID-----
            table_name = self.read_table_name()
            print('table_name = %r (8080)' % table_name)

            data = self.f.read(8080)
            self.n += 8080

            # PATHINT-----
            table_name = self.read_table_name()
            print('table_name = %r (12292)' % table_name)

            data = self.f.read(12292)
            self.n += 12292

            # SUPERS-----
            table_name = self.read_table_name()
            print('table_name = %r (4076)' % table_name)

            data = self.f.read(4076)
            self.n += 4076

            # second pass of full tables; count is again empirical
            # PSHELL-----
            # PSHELL-----
            # GRIDX-----
            # CTR3-----
            # LIMITS-----
            # EXEXING-----
            # EXEXINE-----
            # SID-----
            # PLOAD4-----
            for isubcase in range(3 + nsubcases + npload4s):
                table_name = self.read_table_name()
                self.read_table(table_name, etype, nsubcases)

            # SPC1-----
            # SUBCASES-----
            table_name = self.read_table_name()
            print('table_name = %r (12292)' % table_name)

            data = self.f.read(12292)
            self.n += 12292

            # SUBCASE-----
            table_name = self.read_table_name()
            print('table_name = %r (4076)' % table_name)

            data = self.f.read(4076)
            self.n += 4076

            # SUBGRID-----
            table_name = self.read_table_name()
            print('table_name = %r' % table_name)
            self.read_table(table_name, etype, nsubcases)

            # MODEL-----
            table_name = self.read_table_name()
            print('table_name = %r (20)' % table_name)

            data = self.f.read(20)
            self.n += 20

            # SOLID-----
            table_name = self.read_table_name()
            print('table_name = %r (20)' % table_name)

            data = self.f.read(20)
            self.n += 20

            # HIGHQUAL-----
            table_name = self.read_table_name()
            print('table_name = %r (20)' % table_name)

            data = self.f.read(20)
            self.n += 20

            # AUXMID-----
            table_name = self.read_table_name()
            # NOTE(review): printed size (3094) disagrees with the actual
            # 3984-byte read below — likely a stale message; confirm.
            print('table_name = %r (3094)' % table_name)

            data = self.f.read(3984)
            self.n += 3984

            # PATHLINK-----
            table_name = self.read_table_name()
            print('table_name = %r (944)' % table_name)

            data = self.f.read(944)
            self.n += 944

            # SUBTITL/SUBCTITL-----
            table_name = self.read_table_name()
            if table_name is None:
                # ran off the end of the file; stop parsing gracefully
                return
            print('table_name = %r (4104)' % table_name)
            data = self.f.read(4104)
            self.n += 4104

            for isubcase in range(nsubcases):
                # SUBCASE 1-----
                # 384-byte space-padded subcase title, then a 4-byte separator
                data = self.f.read(384)
                table_name, = struct.unpack('384s', data)
                self.n += 384
                print('  table_name = %r (384)' % table_name.strip())

                data = self.f.read(4)
                self.n += 4

            # gap size depends on the (etype, nsubcases) combination;
            # only the combinations seen in the sample decks are supported
            if etype == 'tri':
                if nsubcases == 6:
                    dn = 4908
                elif nsubcases == 1:
                    dn = 6848
                else:
                    raise NotImplementedError(nsubcases)
            elif etype == 'quad':
                if nsubcases == 2:
                    dn = 6460
                else:
                    raise NotImplementedError(nsubcases)
                #self.show(6468, types='s')
                #aaa
            else:
                raise NotImplementedError(etype)
            #self.show(dn, types='s')
            print('(%s)' % dn)
            data = self.f.read(dn)
            self.n += dn
            #self.show(100, types='s')
            #aa

            # SUPERS-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            print('table_name = %r (932)' % table_name)

            data = self.f.read(932)
            self.n += 932

            # DISPR-----
            table_name = self.read_table_name()
            print('table_name = %r (3736)' % table_name)

            data = self.f.read(3736)
            self.n += 3736

            # GRIDX-----
            table_name = self.read_table_name()
            print('table_name = %r (8536)' % table_name)

            data = self.f.read(8536)
            self.n += 8536

            # DISPR-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            print('table_name = %r (3148)' % table_name)

            data = self.f.read(3148)
            self.n += 3148

            # SUBCASE-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            print('table_name = %r (9124)' % table_name)

            data = self.f.read(9124)
            self.n += 9124

            # DISPR-----
            table_name = self.read_table_name()
            print('table_name = %r (3736)' % table_name)

            data = self.f.read(3736)
            self.n += 3736

            # LIMITS-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            print('table_name = %r (8536)' % table_name)

            # gap size again depends on (etype, nsubcases)
            if etype == 'tri':
                if nsubcases == 6:
                    dn = 8536
                elif nsubcases == 1:
                    dn = 8532
                else:
                    raise NotImplementedError(nsubcases)
            elif etype == 'quad':
                if nsubcases == 2:
                    dn = 8536
                else:
                    raise NotImplementedError(nsubcases)
            else:
                raise NotImplementedError(etype)
            #self.show(dn, types='s')
            data = self.f.read(dn)
            self.n += dn
            # sanity check: our byte counter must agree with the file position
            assert self.n == self.f.tell()

            #if etype == 'tri':
                #if nsubcases == 1:
                    #self.show(1500, types='s')
                    #return
            #elif etype == 'quad':
                #if nsubcases == 2:
                    #dn = (self.nbytes - self.n) // 5
                    #dn = 3744
                    #self.show(dn, types='s')
                    #print('dn = ', dn)
                    #return

            # DISPR-----
            table_name = self.read_table_name()
            #print('602')
            if table_name is None:
                return
            print('table_name = %r (3736)' % table_name)
            data = self.f.read(3736)
            self.n += 3736

            # EQEXING-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            self.read_table(table_name, etype, nsubcases)

            # EQEXINE-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            self.read_table(table_name, etype, nsubcases)

            # SID-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            self.read_table(table_name, etype, nsubcases)

            for isubcase in range(nsubcases):
                # PLOAD4-----
                table_name = self.read_table_name()
                self.read_table(table_name, etype, nsubcases)

            # SPC1-----
            table_name = self.read_table_name()
            if table_name is None:
                return
            print('table_name = %r (8532)' % table_name)
            data = self.f.read(8532)
            self.n += 8532

            # dump the next 5000 bytes for manual inspection
            self.show(5000, types='s')
        print('done!')

Example 18

Project: pyscf
Source File: icmpspt.py
View license
def icmpspt(mc, pttype="NEVPT2", energyE0=0.0, rdmM=0, frozen=0, PTM=1000, PTincore=False, fciExtraLine=[], have3RDM=False, root=0, nroots=1, verbose=None, AAAVsplit=1):
    """Run internally-contracted MPS perturbation theory (NEVPT2 or MRLCC)
    on top of a DMRG-CASSCF calculation, driving the external Block
    executable through conf files and scratch-directory files.

    Python 2 code (print statements).  Heavy side effects: shells out via
    os.system/check_call, deletes and copies files in the DMRG scratch
    directory, and writes integral files to ./int.

    mc           -- CASSCF-like object whose fcisolver is a DMRG solver
    pttype       -- "NEVPT2" or anything else for the MRLCC branch
    energyE0     -- reference energy; overwritten by the integral writers
    rdmM         -- DMRG bond dimension used for RDM evaluation (maxM is
                    max(rdmM, 501))
    frozen       -- number of frozen core orbitals
    PTM          -- bond dimension for the perturber DMRG runs
    PTincore     -- if True, ask Block to build npdm in core
    fciExtraLine -- extra lines appended to the Block conf file
                    # NOTE(review): mutable default; read-only here, so
                    # harmless, but a None default would be safer
    have3RDM     -- if True, reuse an existing 3-RDM instead of recomputing
    root         -- state index the PT is performed for
    nroots       -- total number of states in the state average
    AAAVsplit    -- number of pieces the AAAV perturber is split into
                    (NEVPT2 only)

    Returns the total perturbation energy (float).
    # NOTE(review): scratchDirectory is interpolated into shell commands
    # unquoted — paths with spaces/metacharacters will break or worse.
    """

    # remove the -1 state left over from a previous run
    import os
    os.system("rm %s/node0/Rotation*.state-1.tmp"%(mc.fcisolver.scratchDirectory))
    os.system("rm %s/node0/wave*.-1.tmp"%(mc.fcisolver.scratchDirectory))

#    if type(mc.fcisolver) is not dmrgci.DMRGCI:
#        if (mc.fcisolver.fcibase_class is not dmrgci.DMRGCI):
#            print "this works with dmrgscf and not regular mcscf"
#            exit(0)

    if (pttype != "NEVPT2" and AAAVsplit != 1):
        print "AAAVsplit only works with CASSCF natural orbitals and NEVPT2"
        exit(0)

    # reconfigure the DMRG solver for the RDM evaluation
    mc.fcisolver.startM = 100
    mc.fcisolver.maxM = max(rdmM,501)
    mc.fcisolver.clearSchedule()
    mc.fcisolver.restart = False

    if (not have3RDM):
        mc.fcisolver.has_threepdm = False

        # we will redo the calculations, so get rid of the -1 states
        import os
        os.system("rm %s/node0/Rotation-*.state-1.tmp"%(mc.fcisolver.scratchDirectory))
        os.system("rm %s/node0/wave-*.-1.tmp"%(mc.fcisolver.scratchDirectory))
        os.system("rm %s/node0/RestartReorder.dat_1"%(mc.fcisolver.scratchDirectory))
    else:
        mc.fcisolver.has_threepdm = True

    mc.fcisolver.generate_schedule()
    mc.fcisolver.extraline = []
    if (PTincore):
        mc.fcisolver.extraline.append('do_npdm_in_core')
    mc.fcisolver.extraline += fciExtraLine


    # build the CAS-space effective one-electron Hamiltonian
    # (core Fock contribution folded into h1e) and dump the integrals
    if (len(mc.fcisolver.orbsym) == 0 and mc.fcisolver.mol.symmetry):
        mcscf.casci_symm.label_symmetry_(mc, mc.mo_coeff)
    ericas = mc.get_h2cas()
    h1e = reduce(numpy.dot, (mc.mo_coeff.T, mc.get_hcore(), mc.mo_coeff))
    dmcore = numpy.dot(mc.mo_coeff[:,:mc.ncore], mc.mo_coeff[:,:mc.ncore].T)*2
    vj, vk = mc._scf.get_jk(mc.mol, dmcore)
    vhfcore = reduce(numpy.dot, (mc.mo_coeff.T, vj-vk*0.5, mc.mo_coeff))
    h1effcas = h1e+vhfcore

    dmrgci.writeIntegralFile(mc.fcisolver, h1effcas[mc.ncore:mc.ncore+mc.ncas, mc.ncore:mc.ncore+mc.ncas], ericas, mc.ncas, mc.nelecas)

    dm1eff = numpy.zeros(shape=(mc.ncas, mc.ncas)) #this is the state average density which is needed in NEVPT2
 
    # loop over all states besides the current root, accumulating
    # their 1-RDMs (traced down from the 3-RDM) into the state average
    if (pttype == "NEVPT2" and nroots>1):
        stateIter = range(nroots)
        stateIter.remove(root)
        for istate in stateIter:
            dm3 = mc.fcisolver.make_rdm3(state=istate, norb=mc.ncas, nelec=mc.nelecas, dt=float_precision)    
            nelec = mc.nelecas[0]+mc.nelecas[1]
            dm2 = numpy.einsum('ijklmk', dm3)/(nelec-2)
            dm1 = numpy.einsum('ijkj', dm2)/(nelec-1)
            dm1eff += dm1

    # now add the contribution due to the current root
    dm3 = mc.fcisolver.make_rdm3(state=root, norb=mc.ncas, nelec=mc.nelecas, dt=float_precision)    
    nelec = mc.nelecas[0]+mc.nelecas[1]
    dm2 = numpy.einsum('ijklmk', dm3)/(nelec-2)
    dm1 = numpy.einsum('ijkj', dm2)/(nelec-1)
    dm1eff += dm1
    dm1eff = dm1eff/(1.0*nroots)
    import os
    os.system("mkdir int")    
    # dump the 3-RDM (and two permuted orderings) for the PT codes
    numpy.save("int/E3",dm3)
    numpy.save("int/E3B.npy", dm3.transpose(0,3,1,4,2,5))
    numpy.save("int/E3C.npy", dm3.transpose(5,0,2,4,1,3))
    del dm3

    # back up the RestartReorder file to _1: responseaaav and responseaaac both
    # overwrite it, so when restarting after e.g. an incomplete responseaaav run,
    # later calculations would otherwise pick up the reorder file written by the
    # incomplete run instead of the one from the original DMRG run
    reorderf1 = "%s/node0/RestartReorder.dat_1"%(mc.fcisolver.scratchDirectory)
    reorderf = "%s/node0/RestartReorder.dat"%(mc.fcisolver.scratchDirectory)
    import os.path
    reorder1present = os.path.isfile(reorderf1) 
    if (reorder1present):
        from subprocess import check_call
        output = check_call("cp %s %s"%(reorderf1, reorderf), shell=True)
    else :
        from subprocess import check_call
        check_call("cp %s %s"%(reorderf, reorderf1), shell=True)
    reorder = numpy.loadtxt("%s/node0/RestartReorder.dat"%(mc.fcisolver.scratchDirectory))


    if (pttype == "NEVPT2") :
        norbs, energyE0 = writeNevpt2Integrals(mc, dm1, dm2, dm1eff, AAAVsplit, frozen)
        sys.stdout.flush()
        print "wrote the integrals to disk"

        # one conf file per AAAV slice, plus one for AAAC
        for k in range(AAAVsplit):
            writeDMRGConfFile(mc.nelecas[0], mc.nelecas[1], mc.ncore, mc.ncas,  norbs,
                              mc.fcisolver, PTM, "AAAV", mc.fcisolver.memory, mc.fcisolver.num_thrds, reorder, fciExtraLine, aaavsplit=AAAVsplit, aaavIter=k, root=root, name = "NEVPT2")
        writeDMRGConfFile(mc.nelecas[0], mc.nelecas[1], mc.ncore-frozen, mc.ncas,  norbs-frozen,
                          mc.fcisolver, PTM, "AAAC", mc.fcisolver.memory, mc.fcisolver.num_thrds, reorder, fciExtraLine,root=root, name = "NEVPT2")
        sys.stdout.flush()

        totalE = 0.0;
        totalE += executeNEVPT(nelec, mc.ncore, mc.ncas, frozen, mc.fcisolver.memory)# executeMRLCC(nelec, mc.ncore, mc.ncas)

        # run the AAAV/AAAC perturber DMRG jobs; each leaves its energy as a
        # little-endian double in node0/dmrg.e
        try:
            for k in range(AAAVsplit):
                outfile, infile = "responseNEVPT2_aaav%d.out"%(k), "responseNEVPT2_aaav%d.conf"%(k)
                output = check_call("%s  %s  %s > %s"%(mc.fcisolver.mpiprefix, mc.fcisolver.executable, infile, outfile), shell=True)
                file1 = open("%s/node0/dmrg.e"%(mc.fcisolver.scratchDirectory),"rb")
                import struct
                energy = struct.unpack('d', file1.read(8))[0]
                file1.close()
                totalE += energy
                print "perturber AAAV%i --  %18.9e"%(k, energy)
                sys.stdout.flush()

            if (mc.ncore-frozen != 0):
                outfile, infile = "responseNEVPT2_aaac.out", "responseNEVPT2_aaac.conf"
                output = check_call("%s  %s  %s > %s"%(mc.fcisolver.mpiprefix, mc.fcisolver.executable, infile, outfile), shell=True)
                file1 = open("%s/node0/dmrg.e"%(mc.fcisolver.scratchDirectory),"rb")
                energy = struct.unpack('d', file1.read(8))[0]
                file1.close()
                totalE += energy
                print "perturber AAAC --  %18.9e"%(energy)

        except ValueError:
            print(output)

        from subprocess import check_call
        return totalE
    else :
        # MRLCC branch
        # NOTE(review): the original comment here trails off mid-sentence —
        # it flags a precision concern (pyscf works in double precision) but
        # the rest of the thought is missing
        #this is a bad way to do it, the problem is
        #that pyscf works with double precision and
        #
        #energyE0 = writeMRLCCIntegrals(mc, dm1, dm2)
        #sys.stdout.flush()
        energyE0, norbs = writeNumpyforMRLCC(mc, dm1, dm2, frozen) 
        sys.stdout.flush()
        writeDMRGConfFile(mc.nelecas[0], mc.nelecas[1], mc.ncore, mc.ncas,  norbs,
                          mc.fcisolver, PTM, "AAAV", mc.fcisolver.memory, mc.fcisolver.num_thrds, reorder, fciExtraLine, root=root, name="MRLCC")
        writeDMRGConfFile(mc.nelecas[0], mc.nelecas[1], mc.ncore-frozen, mc.ncas,  norbs-frozen,
                          mc.fcisolver, PTM, "AAAC", mc.fcisolver.memory, mc.fcisolver.num_thrds, reorder, fciExtraLine, root=root, name="MRLCC")
        totalE = 0.0
        totalE +=  executeMRLCC(nelec, mc.ncore, mc.ncas, frozen, mc.fcisolver.memory)
        from subprocess import check_call
        try:
            outfile, infile = "responseMRLCC_aaav0.out", "responseMRLCC_aaav0.conf"
            output = check_call("%s  %s  %s > %s"%(mc.fcisolver.mpiprefix, mc.fcisolver.executable, infile, outfile), shell=True)
            file1 = open("%s/node0/dmrg.e"%(mc.fcisolver.scratchDirectory),"rb")
            import struct
            energy = struct.unpack('d', file1.read(8))[0]
            file1.close()
            totalE += energy
            print "perturber AAAV --  %18.9e"%(energy)
        except ValueError:
            print "perturber AAAV -- NA"
            #exit()

        try:
            if (mc.ncore-frozen != 0):
                outfile, infile = "responseMRLCC_aaac.out", "responseMRLCC_aaac.conf"
                output = check_call("%s  %s  %s > %s"%(mc.fcisolver.mpiprefix, mc.fcisolver.executable, infile, outfile), shell=True)
                file1 = open("%s/node0/dmrg.e"%(mc.fcisolver.scratchDirectory),"rb")
                energy = struct.unpack('d', file1.read(8))[0]
                file1.close()
                totalE += energy
                print "perturber AAAC --  %18.9e"%(energy)
        except ValueError:
            print "perturber AAAC -- NA"

        print "total PT  -- %18.9e"%(totalE)
        return totalE

Example 19

Project: SmartAlarmClock
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) stream and populate this instance.

        *fileobj* may be a filename (which is opened here) or an open
        file object.  The file is consumed strictly in order: the
        six-count header, transition times, transition type indices,
        ttinfo records, the abbreviation pool, leap-second pairs, and
        finally the std/wall and UTC/local indicator bytes.

        Raises ValueError if the leading "TZif" magic is missing.

        NOTE(review): Python 2 only -- relies on ``basestring``,
        backquote repr, and ``raise ValueError, msg`` syntax.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            # Backquotes are the (removed in Python 3) spelling of repr().
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        # Skip the 16 reserved bytes.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation runs from abbrind up to the next NUL byte in
            # the pool.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # The indicator arrays may be shorter than typecnt; missing
            # entries default to False.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-first, looking for the most
                # recent std and dst ttinfos.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    # for/else: loop exhausted without break, so at most
                    # one kind of ttinfo was found.
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    # All ttinfos are dst; fall back to the first one.
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 20

Project: traktforalfred
Source File: tz.py
View license
    def __init__(self, fileobj, filename=None):
        """Parse a binary tzfile(5) stream and populate this instance.

        *fileobj* may be a filename (opened here in binary mode and
        closed again once the raw data has been read) or an open
        binary file object; *filename* optionally overrides the
        recorded display name.  The file is consumed strictly in
        order: the six-count header, transition times, transition
        type indices, ttinfo records, the abbreviation pool, and the
        std/wall and UTC/local indicator bytes.

        Raises ValueError if the leading "TZif" magic is missing.
        """
        file_opened_here = False
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        try:
            if fileobj.read(4).decode() != "TZif":
                raise ValueError("magic not found")

            # Skip the 16 reserved bytes.
            fileobj.read(16)

            (
                # The number of UTC/local indicators stored in the file.
                ttisgmtcnt,

                # The number of standard/wall indicators stored in the file.
                ttisstdcnt,

                # The number of leap seconds for which data is
                # stored in the file.
                leapcnt,

                # The number of "transition times" for which data
                # is stored in the file.
                timecnt,

                # The number of "local time types" for which data
                # is stored in the file (must not be zero).
                typecnt,

                # The  number  of  characters  of "time zone
                # abbreviation strings" stored in the file.
                charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # The above header is followed by tzh_timecnt four-byte
            # values  of  type long,  sorted  in ascending order.
            # These values are written in ``standard'' byte order.
            # Each is used as a transition time (as  returned  by
            # time(2)) at which the rules for computing local time
            # change.

            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # Next come tzh_timecnt one-byte values of type unsigned
            # char; each one tells which of the different types of
            # ``local time'' types described in the file is associated
            # with the same-indexed transition time. These values
            # serve as indices into an array of ttinfo structures that
            # appears next in the file.

            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # Each ttinfo structure is written as a four-byte value
            # for tt_gmtoff  of  type long,  in  a  standard  byte
            # order, followed  by a one-byte value for tt_isdst
            # and a one-byte  value  for  tt_abbrind.   In  each
            # structure, tt_gmtoff  gives  the  number  of
            # seconds to be added to UTC, tt_isdst tells whether
            # tm_isdst should be set by  localtime(3),  and
            # tt_abbrind serves  as an index into the array of
            # time zone abbreviation characters that follow the
            # ttinfo structure(s) in the file.

            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt).decode()

            # Then there are tzh_leapcnt pairs of four-byte
            # values, written in  standard byte  order;  the
            # first  value  of  each pair gives the time (as
            # returned by time(2)) at which a leap second
            # occurs;  the  second  gives the  total  number of
            # leap seconds to be applied after the given time.
            # The pairs of values are sorted in ascending order
            # by time.

            # Not used, for now
            # if leapcnt:
            #    leap = struct.unpack(">%dl" % (leapcnt*2),
            #                         fileobj.read(leapcnt*8))

            # Then there are tzh_ttisstdcnt standard/wall
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as standard
            # time or wall clock time, and are used when
            # a time zone file is used in handling POSIX-style
            # time zone environment variables.

            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # Finally, there are tzh_ttisgmtcnt UTC/local
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as UTC or
            # local time, and are used when a time zone file
            # is used in handling POSIX-style time zone envi-
            # ronment variables.

            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            # Only close a file this constructor opened itself.
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation runs from abbrind up to the next NUL in the pool.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # The indicator arrays may be shorter than typecnt; missing
            # entries default to False.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-first, looking for the most
                # recent std and dst ttinfos.
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    # for/else: loop exhausted without break, so at most
                    # one kind of ttinfo was found.
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    # All ttinfos are dst; fall back to the first one.
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 21

Project: xbmc-nhl-gamecenter
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) stream and populate this instance.

        *fileobj* may be a filename (which is opened here) or an open
        file object.  The file is consumed strictly in order: the
        six-count header, transition times, transition type indices,
        ttinfo records, the abbreviation pool, leap-second pairs, and
        finally the std/wall and UTC/local indicator bytes.

        Raises ValueError if the leading "TZif" magic is missing.

        NOTE(review): Python 2 only -- relies on ``basestring``,
        backquote repr, and ``raise ValueError, msg`` syntax.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            # Backquotes are the (removed in Python 3) spelling of repr().
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        # Skip the 16 reserved bytes.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation runs from abbrind up to the next NUL byte in
            # the pool.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # The indicator arrays may be shorter than typecnt; missing
            # entries default to False.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-first, looking for the most
                # recent std and dst ttinfos.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    # for/else: loop exhausted without break, so at most
                    # one kind of ttinfo was found.
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    # All ttinfos are dst; fall back to the first one.
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 22

Project: pymt
Source File: linuxwacom.py
View license
        def _thread_run(self, **kwargs):
            """Reader loop for a Linux Wacom evdev input node.

            Opens the device file named by ``kwargs['input_fn']``,
            queries its name and absolute-axis ranges via EVIOCG*
            ioctls, then reads ``input_event`` records until EOF,
            translating them into normalized touch down/move/up
            events appended to ``kwargs['queue']``.

            Expected kwargs: input_fn, queue, device, default_ranges.

            NOTE(review): Python 2 only -- uses ``itervalues``,
            ``xrange``, true-division-free ``/`` on ints, and
            ``dict.keys()`` slicing.
            """
            input_fn = kwargs.get('input_fn')
            queue = kwargs.get('queue')
            device = kwargs.get('device')
            # Bound method: drs(key, fallback) looks up a configured range.
            drs = kwargs.get('default_ranges').get
            touches = {}
            touches_sent = []
            point = {}
            l_points = {}

            # prepare some vars to get limit of some component
            range_min_position_x    = 0
            range_max_position_x    = 2048
            range_min_position_y    = 0
            range_max_position_y    = 2048
            range_min_pressure      = 0
            range_max_pressure      = 255
            invert_x                = int(bool(drs('invert_x', 0)))
            invert_y                = int(bool(drs('invert_y', 0)))
            reset_touch             = False

            def process(points):
                # Diff *points* against the known touches and emit
                # down/move/up events onto the shared queue.
                actives = points.keys()
                for args in points.itervalues():
                    tid = args['id']
                    try:
                        touch = touches[tid]
                    except KeyError:
                        touch = LinuxWacomTouch(device, tid, args)
                        touches[touch.id] = touch
                    # Skip unmoved, already-reported touches.
                    if touch.sx == args['x'] and touch.sy == args['y'] and tid in touches_sent:
                        continue
                    touch.move(args)
                    if tid not in touches_sent:
                        queue.append(('down', touch))
                        touches_sent.append(tid)
                    queue.append(('move', touch))

                # Touches absent from *points* have been released.
                for tid in touches.keys()[:]:
                    if tid not in actives:
                        touch = touches[tid]
                        if tid in touches_sent:
                            queue.append(('up', touch))
                            touches_sent.remove(tid)
                        del touches[tid]

            def normalize(value, vmin, vmax):
                # Map a raw axis value into the 0..1 range.
                return (value - vmin) / float(vmax - vmin)

            # open the input
            fd = open(input_fn, 'rb')

            # get the controler name (EVIOCGNAME)
            device_name = fcntl.ioctl(fd, EVIOCGNAME + (256 << 16), " " * 256).split('\x00')[0]
            pymt_logger.info('LinuxWacomTouch: using <%s>' % device_name)

            # get abs infos
            bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
            bit, = struct.unpack('Q', bit)
            for x in xrange(EV_MAX):
                # preserve this, we may want other things than EV_ABS
                if x != EV_ABS:
                    continue
                # EV_ABS available for this device ?
                if (bit & (1 << x)) == 0:
                    continue
                # ask abs info keys to the devices
                sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16), ' ' * sz_l)
                sbit, = struct.unpack('Q', sbit)
                for y in xrange(KEY_MAX):
                    if (sbit & (1 << y)) == 0:
                        continue
                    absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
                                          (struct_input_absinfo_sz << 16),
                                          ' ' * struct_input_absinfo_sz)
                    abs_value, abs_min, abs_max, abs_fuzz, \
                        abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
                    # Configured ranges take precedence over the
                    # device-reported min/max.
                    if y == ABS_X:
                        range_min_position_x = drs('min_position_x', abs_min)
                        range_max_position_x = drs('max_position_x', abs_max)
                        pymt_logger.info('LinuxWacomTouch: ' +
                            '<%s> range position X is %d - %d' % (
                            device_name, abs_min, abs_max))
                    elif y == ABS_Y:
                        range_min_position_y = drs('min_position_y', abs_min)
                        range_max_position_y = drs('max_position_y', abs_max)
                        pymt_logger.info('LinuxWacomTouch: ' +
                            '<%s> range position Y is %d - %d' % (
                            device_name, abs_min, abs_max))
                    elif y == ABS_PRESSURE:
                        range_min_pressure = drs('min_pressure', abs_min)
                        range_max_pressure = drs('max_pressure', abs_max)
                        pymt_logger.info('LinuxWacomTouch: ' +
                            '<%s> range pressure is %d - %d' % (
                            device_name, abs_min, abs_max))

            # read until the end
            point = {}
            changed = False
            touch_id = 0
            touch_x = 0
            touch_y = 0
            touch_pressure = 0
            while fd:

                data = fd.read(struct_input_event_sz)
                if len(data) < struct_input_event_sz:
                    break

                # extract each event
                for i in xrange(len(data) / struct_input_event_sz):
                    ev = data[i * struct_input_event_sz:]

                    # extract timeval + event infos
                    tv_sec, tv_usec, ev_type, ev_code, ev_value = \
                            struct.unpack('LLHHi', ev[:struct_input_event_sz])

                    if ev_type == EV_SYN and ev_code == SYN_REPORT:
                        # SYN_REPORT ends one event packet: fold the
                        # accumulated axis state into l_points and flush.
                        if touch_id in l_points:
                            p = l_points[touch_id]
                        else:
                            p = dict()
                            l_points[touch_id] = p
                        p['id'] = touch_id
                        if reset_touch is False:
                            p['x'] = touch_x
                            p['y'] = touch_y
                            p['pressure'] = touch_pressure
                        # In pen mode, zero pressure means the pen lifted.
                        if self.mode == 'pen' and touch_pressure == 0 and not reset_touch:
                            del l_points[touch_id]
                        if changed:
                            if not 'x' in p:
                                reset_touch = False
                                continue
                            process(l_points)
                            changed = False
                        if reset_touch:
                            l_points.clear()
                            reset_touch = False
                            process(l_points)
                        point = {}
                    elif ev_type == EV_MSC and ev_code == MSC_SERIAL:
                        # Tool serial number identifies the touch/pen.
                        touch_id = ev_value
                    elif ev_type == EV_ABS and ev_code == ABS_X:
                        val = normalize(ev_value,
                            range_min_position_x, range_max_position_x)
                        if invert_x:
                            val = 1. - val
                        touch_x = val
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_Y:
                        # Y axis is flipped before the optional invert.
                        val = 1. - normalize(ev_value,
                            range_min_position_y, range_max_position_y)
                        if invert_y:
                            val = 1. - val
                        touch_y = val
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
                        touch_pressure = normalize(ev_value,
                            range_min_pressure, range_max_pressure)
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_MISC:
                        # ABS_MISC 0: tool left proximity -- drop touches
                        # at the next SYN_REPORT.
                        if ev_value == 0:
                            reset_touch = True

Example 23

Project: stashboard
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) ("TZif") time zone file.

        *fileobj* is either a path string (opened here) or an open
        file-like object with a ``read`` method.  On return the instance
        holds the parsed transition table (``_trans_list`` /
        ``_trans_idx``), the ttinfo records (``_ttinfo_list``) and the
        std/dst/before ttinfo shortcuts used by later lookups.

        Raises ValueError if the file does not start with the "TZif"
        magic.

        NOTE(review): Python 2 only -- uses ``basestring``, backtick
        repr, and old-style ``raise ValueError, msg`` syntax.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            # fall back to the repr of the object as a display name
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        # skip the 16 reserved bytes
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now -- read only to advance the file position.
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        # NOTE: isstd/isgmt stay unbound when their counts are zero;
        # the later reads are guarded by the `ttisstdcnt > i` /
        # `ttisgmtcnt > i` checks below, so that is safe.
        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # abbreviation is the NUL-terminated string starting at abbrind
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # scan transitions newest-first for the last std and dst types
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 24

Project: Veil-Evasion
Source File: machobin.py
View license
    def find_Needed_Items(self, theCmds):
        '''
        Return a dict of the Mach-O load commands needed for patching.

        theCmds is a list of dicts, one per load command, each carrying at
        least: 'Command' (the cmd id), 'DATA' (the raw command payload after
        the 8-byte cmd/cmdsize header), 'LOCInFIle' (file offset of that
        payload) and 'last_cmd'.

        The returned dict maps:
          'text_segment'/'text_section' -- the __TEXT segment and its __text
              section (32-bit LC_SEGMENT=0x01 or 64-bit LC_SEGMENT_64=0x19);
          'LC_MAIN'/'LC_UNIXTREAD'      -- entry-point information;
          'LC_CODE_SIGNATURE'/'LC_DYLIB_CODE_SIGN_DRS' -- signing blobs;
          'last_cmd' -- the highest 'last_cmd' value seen.
        '''
        _tempDict = {}
        text_segment = {}
        text_section = {}
        LC_MAIN = {}
        LC_UNIXTREAD = {}
        LC_CODE_SIGNATURE = {}
        LC_DYLIB_CODE_SIGN_DRS = {}

        locationInFIle = 0
        last_cmd = 0
        for item in theCmds:
            locationInFIle = item['LOCInFIle']
            # LC_SEGMENT (0x01): 32-bit __TEXT segment + its sections
            if item['DATA'][0:6] == "__TEXT" and item['Command'] == 0x01:
                text_segment = {
                    'segname': item['DATA'][0:0x10],
                    'VMAddress': item['DATA'][0x10:0x14],
                    'VMSize': item['DATA'][0x14:0x18],
                    'File Offset': item['DATA'][0x18:0x1C],
                    'File Size': item['DATA'][0x1C:0x20],
                    'MaxVMProt': item['DATA'][0x20:0x24],
                    'InitalVMProt': item['DATA'][0x24:0x28],
                    'NumberOfSections': item['DATA'][0x28:0x2C],
                    'Flags': item['DATA'][0x2C:0x30]
                }

                count = struct.unpack("<I", text_segment['NumberOfSections'])[0]
                i = 0
                while count > 0:
                    if '__text' in item['DATA'][0x30 + i:0x40 + i]:
                        text_section = {
                            'sectionName': item['DATA'][0x30 + i:0x40 + i],
                            'segmentName': item['DATA'][0x40 + i:0x50 + i],
                            'Address': item['DATA'][0x50 + i:0x54 + i],
                            'LOCAddress': locationInFIle + 0x50 + i,
                            'Size': item['DATA'][0x54 + i:0x58 + i],
                            'LOCTextSize': locationInFIle + 0x54 + i,
                            'Offset': item['DATA'][0x58 + i:0x5c + i],
                            'LocTextOffset': locationInFIle + 0x58 + i,
                            'Alignment': item['DATA'][0x5c + i:0x60 + i],
                            'Relocations': item['DATA'][0x60 + i:0x64 + i],
                            'NumberOfRelocs': item['DATA'][0x64 + i:0x68 + i],
                            'Flags': item['DATA'][0x68 + i:0x6c + i],
                            'Reserved1': item['DATA'][0x6c + i:0x70 + i],
                            'Reserved2': item['DATA'][0x70 + i:0x74 + i],
                        }
                        break
                    else:
                        count -= 1
                        # BUGFIX: a 32-bit `struct section` is 0x44 bytes
                        # (the fields sliced above span 0x30..0x74), so the
                        # stride to the next section is 0x44, not 0x40.
                        i += 0x44

            # LC_SEGMENT_64 (0x19): 64-bit __TEXT segment + its sections
            elif item['DATA'][0:6] == "__TEXT" and item['Command'] == 0x19:
                text_segment = {
                    'segname': item['DATA'][0:0x10],
                    'VMAddress': item['DATA'][0x10:0x18],
                    'VMSize': item['DATA'][0x18:0x20],
                    'File Offset': item['DATA'][0x20:0x28],
                    'File Size': item['DATA'][0x28:0x30],
                    'MaxVMProt': item['DATA'][0x30:0x34],
                    'InitalVMProt': item['DATA'][0x34:0x38],
                    'NumberOfSections': item['DATA'][0x38:0x3C],
                    'Flags': item['DATA'][0x3c:0x40]
                }
                count = struct.unpack("<I", text_segment['NumberOfSections'])[0]
                i = 0
                while count > 0:

                    if '__text' in item['DATA'][0x40 + i:0x50 + i]:
                        text_section = {
                            'sectionName': item['DATA'][0x40 + i:0x50 + i],
                            'segmentName': item['DATA'][0x50 + i:0x60 + i],
                            'Address': item['DATA'][0x60 + i:0x68 + i],
                            'LOCAddress': locationInFIle + 0x60 + i,
                            'Size': item['DATA'][0x68 + i:0x70 + i],
                            'LOCTextSize': locationInFIle + 0x68 + i,
                            'Offset': item['DATA'][0x70 + i:0x74 + i],
                            'LocTextOffset': locationInFIle + 0x70 + i,
                            'Alignment': item['DATA'][0x74 + i:0x78 + i],
                            'Relocations': item['DATA'][0x78 + i:0x7c + i],
                            'NumberOfRelocs': item['DATA'][0x7c + i:0x80 + i],
                            'Flags': item['DATA'][0x80 + i:0x84 + i],
                            'Reserved1': item['DATA'][0x84 + i:0x88 + i],
                            'Reserved2': item['DATA'][0x88 + i:0x8c + i],
                            'Reserved3': item['DATA'][0x8c + i:0x90 + i],
                        }

                        break
                    else:
                        count -= 1
                        # BUGFIX: a `struct section_64` is 0x50 bytes (the
                        # fields sliced above span 0x40..0x90), so the
                        # stride to the next section is 0x50, not 0x4c.
                        i += 0x50

            # LC_MAIN (0x80000028): entry offset + initial stack size
            if item['Command'] == 0x80000028:
                LC_MAIN = {
                    'LOCEntryOffset': locationInFIle,
                    'EntryOffset': item['DATA'][0x0:0x8],
                    # BUGFIX: stacksize is a uint64 at offset 8, so it ends
                    # at 0x10 -- [0x8:0x16] was a hex/decimal mixup.
                    'StackSize': item['DATA'][0x8:0x10]
                }
            # LC_UNIXTHREAD (0x05), flavor 1 = i386_THREAD_STATE
            elif item['Command'] == 0x00000005 and struct.unpack("<I", item['DATA'][0x0:0x4])[0] == 0x01:
                LC_UNIXTREAD = {
                    'LOCEntryOffset': locationInFIle,
                    'Flavor': item['DATA'][0x00:0x04],
                    'Count': item['DATA'][0x04:0x08],
                    'eax': item['DATA'][0x08:0x0C],
                    'ebx': item['DATA'][0x0C:0x10],
                    'ecx': item['DATA'][0x10:0x14],
                    'edx': item['DATA'][0x14:0x18],
                    'edi': item['DATA'][0x18:0x1C],
                    'esi': item['DATA'][0x1C:0x20],
                    'ebp': item['DATA'][0x20:0x24],
                    'esp': item['DATA'][0x24:0x28],
                    'ss': item['DATA'][0x28:0x2C],
                    'eflags': item['DATA'][0x2C:0x30],
                    'LOCeip': locationInFIle + 0x30,
                    'eip': item['DATA'][0x30:0x34],
                    'cs': item['DATA'][0x34:0x38],
                    'ds': item['DATA'][0x38:0x3C],
                    'es': item['DATA'][0x3C:0x40],
                    'fs': item['DATA'][0x40:0x44],
                    'gs': item['DATA'][0x44:0x48],
                }
            # LC_UNIXTHREAD (0x05), flavor 4 = x86_THREAD_STATE64
            elif item['Command'] == 0x00000005 and struct.unpack("<I", item['DATA'][0x0:0x4])[0] == 0x04:
                LC_UNIXTREAD = {
                    'LOCEntryOffset': locationInFIle,
                    'Flavor': item['DATA'][0x00:0x04],
                    'Count': item['DATA'][0x04:0x08],
                    'rax': item['DATA'][0x08:0x10],
                    'rbx': item['DATA'][0x10:0x18],
                    'rcx': item['DATA'][0x18:0x20],
                    'rdx': item['DATA'][0x20:0x28],
                    'rdi': item['DATA'][0x28:0x30],
                    'rsi': item['DATA'][0x30:0x38],
                    'rbp': item['DATA'][0x38:0x40],
                    'rsp': item['DATA'][0x40:0x48],
                    'r8': item['DATA'][0x48:0x50],
                    'r9': item['DATA'][0x50:0x58],
                    'r10': item['DATA'][0x58:0x60],
                    'r11': item['DATA'][0x60:0x68],
                    'r12': item['DATA'][0x68:0x70],
                    'r13': item['DATA'][0x70:0x78],
                    'r14': item['DATA'][0x78:0x80],
                    'r15': item['DATA'][0x80:0x88],
                    'LOCrip': locationInFIle + 0x88,
                    'rip': item['DATA'][0x88:0x90],
                    'rflags': item['DATA'][0x90:0x98],
                    'cs': item['DATA'][0x98:0xA0],
                    'fs': item['DATA'][0xA0:0xA8],
                    'gs': item['DATA'][0xA8:0xB0],
                }

            # LC_CODE_SIGNATURE (0x1D): a linkedit_data_command payload
            # (two uint32s: dataoff then datasize)
            if item['Command'] == 0x000001D:
                LC_CODE_SIGNATURE = {
                    'Data Offset': item['DATA'][0x0:0x4],
                    # BUGFIX: datasize is the second uint32 -- [0x4:0x8],
                    # not [0x0:0x8] which overlapped the offset field.
                    'Data Size': item['DATA'][0x4:0x8],
                }

            # LC_DYLIB_CODE_SIGN_DRS (0x2B): same linkedit_data_command layout
            if item['Command'] == 0x0000002B:
                LC_DYLIB_CODE_SIGN_DRS = {
                    'Data Offset': item['DATA'][0x0:0x4],
                    # BUGFIX: same datasize slice correction as above.
                    'Data Size': item['DATA'][0x4:0x8],
                }

            # track the furthest-advanced command marker seen
            if item['last_cmd'] > last_cmd:
                last_cmd = item['last_cmd']

        _tempDict = {'text_segment': text_segment, 'text_section': text_section,
                     'LC_MAIN': LC_MAIN, 'LC_UNIXTREAD': LC_UNIXTREAD,
                     'LC_CODE_SIGNATURE': LC_CODE_SIGNATURE,
                     'LC_DYLIB_CODE_SIGN_DRS': LC_DYLIB_CODE_SIGN_DRS,
                     'last_cmd': last_cmd
                     }

        return _tempDict

Example 25

Project: vivisect
Source File: parsers.py
View license
def p_01(va, val, buf, off, tsize):
    """Decode an H8 instruction whose first byte is 0x01 (the 0x01 prefix
    sub-table: long movs, ldm/stm, ldc/stc, mulxs/divxs, tas, or/xor/and.l).

    Parameters:
        va    -- virtual address of the instruction (passed into operands)
        buf   -- raw bytes being disassembled
        off   -- offset of this instruction within buf
        val   -- first 16-bit instruction word (already fetched from buf)
        tsize -- default operand size; overridden per-encoding below

    Returns a (op, mnem, opers, iflags, isz) tuple, where isz is the
    total instruction size in bytes.  Raises envi.InvalidInstruction for
    encodings not in the table.
    """
    mnem = None
    iflags = 0
    opers = None

    # bits 4..7 of the first word select the sub-encoding
    diff = (val>>4) & 0xf

    if diff == 8:
        # sleep -- the only 2-byte form; everything else reads a second word
        op = 0x0180
        mnem = 'sleep'
        opers = tuple()
        return op, mnem, opers, iflags, 2

    # fetch the second (big-endian) instruction word; default size is 4 bytes
    val2, = struct.unpack('>H', buf[off+2: off+4])
    isz = 4
    op = (val << 9) | (val2 >> 7)
    if diff == 0:
        mnem = 'mov'

        # all 0100#### opcodes share these:
        tsize = 4
        iflags |= IF_L

        # high byte of the second word picks the mov addressing mode
        d2 = val2>>8

        # mov   0100##... where ## is basically another mov encoding with different register sizes
        if d2 == 0x69:
            # register indirect <-> register direct; bit7 selects direction
            erd = (val2>>4) & 7
            ers = val2 & 7
            if val2 & 0x80:
                opers = (
                        H8RegDirOper(ers, tsize, va),
                        H8RegIndirOper(erd, tsize, va),
                        )
            else:
                opers = (
                        H8RegIndirOper(erd, tsize, va),
                        H8RegDirOper(ers, tsize, va),
                        )

        elif d2 == 0x6b:
            # absolute address forms; bit5 selects 32-bit vs 16-bit address
            if val2 & 0x20:
                isz = 8
                val3, = struct.unpack('>I', buf[off+4:off+8])
                if val2 & 0x80:
                    # a
                    erd = val2 & 7
                    aa  = val3 & 0xffffffff
                    opers = (
                            H8RegDirOper(erd, tsize, va),
                            H8AbsAddrOper(aa, tsize, aasize=4),
                            )
                else:
                    # 2
                    ers = val2 & 7
                    aa  = val3 & 0xffffffff
                    opers = (
                            H8AbsAddrOper(aa, tsize, aasize=4),
                            H8RegDirOper(ers, tsize, va),
                            )
            else:
                val3, = struct.unpack('>H', buf[off+4:off+6])
                isz = 6
                if val2 & 0x80:
                    # 8
                    erd = val2 & 7
                    aa  = val3 & 0xffff
                    opers = (
                            H8RegDirOper(erd, tsize, va),
                            H8AbsAddrOper(aa, tsize, aasize=2),
                            )
                else:
                    # 0
                    ers = val2 & 7
                    aa  = val3 & 0xffff
                    opers = (
                            H8AbsAddrOper(aa, tsize, aasize=2),
                            H8RegDirOper(ers, tsize, va),
                            )

        elif d2 == 0x6d:    # TODO: test me!!
            # delegate to the shared 6c/6d decoder, then re-base op/isz
            newop, mnem, opers, iflags, nisz = p_6c_6d_0100(va, val2, buf, off+2, 4)
            isz = nisz + 2
            op = newop | (0x01000000)

        elif d2 == 0x6f:
            # 16-bit displacement indirect forms
            disp, = struct.unpack('>H', buf[off+4:off+6])
            isz = 6
            er0 = val2 & 7
            er1 = (val2>>4) & 7
            if val2 & 0x80:
                # mov.l ERs, @(d:16,ERd)
                opers = (
                        H8RegDirOper(er0, tsize, va),
                        H8RegIndirOper(er1, tsize, va, disp, dispsz=2),
                        )
            else:
                # mov.l @(d:16,ERs), ERd
                opers = (
                        H8RegIndirOper(er1, tsize, va, disp, dispsz=2),
                        H8RegDirOper(er0, tsize, va),
                        )

        elif d2 == 0x78:
            # 24/32-bit displacement forms: a third word (0x6b2x) plus
            # a 32-bit displacement follow
            isz = 10
            val3, disp = struct.unpack('>HI', buf[off+4:off+10])
            if val3 & 0xff20 != 0x6b20: raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)

            er0 = val3 & 7
            er1 = (val2>>4) & 7
            if (val3 & 0x80):
                # mov.l ERs, @(d:24,ERd)
                opers = (
                        H8RegDirOper(er0, tsize, va),
                        H8RegIndirOper(er1, tsize, va, disp, dispsz=4),
                        )

            else:
                # mov.l @(d:24,ERs), ERd
                opers = (
                        H8RegIndirOper(er1, tsize, va, disp, dispsz=4),
                        H8RegDirOper(er0, tsize, va),
                        )

    elif diff in (1, 2, 3):
        # ldm/stm (ERn-ERn+diff), @-SP
        # diff encodes how many registers beyond the first are transferred
        iflags = IF_L

        tsize = 4
        optest = val2 & 0xfff8
        rn = val2 & 0x7

        rcount = diff + 1
        if optest == 0x6df0:
            mnem = 'stm'
            opers = (
                    H8RegMultiOper(rn, rcount),
                    H8RegIndirOper(REG_SP, tsize, va, 0, oflags=OF_PREDEC),
                    )

        elif optest == 0x6d70:
            mnem = 'ldm'
            opers = (
                    H8RegIndirOper(REG_SP, tsize, va, 0, oflags=OF_POSTINC),
                    H8RegMultiOper(rn-diff, rcount),
                    )

        else:
            raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)


    elif diff == 4:
        # ldc/stc - anything that touches ccr or exr
        # we'll build it for ldc, and reverse it if it's stc
        d2 = val2>>8
        isStc = (val2>>7) & 1
        oflags = 0
        tsize = 2

        # low bit of the first word selects CCR (0) vs EXR (1)
        exr = val & 0x1

        if d2 == 6:
            # NOTE(review): the helper's nisz is discarded and isz (4) is
            # returned for these immediate forms -- confirm intended.
            op, nmnem, opers, iflags, nisz =  p_i8_CCR(va, val2, buf, off, tsize, exr)
            return op, 'andc', opers, iflags, isz

        elif d2 == 5:
            op, nmnem, opers, iflags, nisz =  p_i8_CCR(va, val2, buf, off, tsize, exr)
            return op, 'xorc', opers, iflags, isz

        else:
            iflags = IF_W
            tsize = 2

            if d2 == 0x04:              ##xx:8, EXR
                op, nmnem, opers, iflags, nisz =  p_i8_CCR(va, val2, buf, off, tsize, exr)
                return op, 'orc', opers, iflags, isz

            elif d2 == 0x07:              ##xx:8, EXR
                op, nmnem, opers, niflags, nisz =  p_i8_CCR(va, val2, buf, off, tsize, exr)
                iflags = IF_B
                return op, 'ldc', opers, iflags, isz


            elif d2 in (0x69, 0x6d):    #@ERs,CCR / @ERs+,CCR
                if d2 == 0x6d:
                    oflags = OF_POSTINC
                ers = (val2>>4) & 0x7
                opers = (
                        H8RegIndirOper(ers, tsize, va, oflags=oflags),
                        H8RegDirOper(REG_CCR + exr, 4, va)
                        )

            elif d2 in (0x6f, 0x78):  #@(d:16,ERs),CCR / @(d:24,ERs)
                if d2 == 0x78:
                    val3, disp = struct.unpack('>HI', buf[off+4:off+10])
                    isStc = (val3>>7) & 1
                    isz = 10
                    dispsz = 4
                else:
                    disp, = struct.unpack('>H', buf[off+4:off+6])
                    isz = 6
                    dispsz = 2
                ers = (val2>>4) & 0x7
                opers = (
                        H8RegIndirOper(ers, tsize, va, disp, dispsz),
                        H8RegDirOper(REG_CCR + exr, 4, va)
                        )

            elif d2 == 0x6b:    #@aa:16,CCR / @aa:24,CCR
                if val2 & 0x20:
                    aa, = struct.unpack('>I', buf[off+4:off+8])
                    isz = 8
                    aasize = 4
                else:
                    aa, = struct.unpack('>H', buf[off+4:off+6])
                    isz = 6
                    aasize = 2
                isStc = (val2>>7) & 1
                opers = (
                        H8AbsAddrOper(aa, tsize, aasize),
                        H8RegDirOper(REG_CCR + exr, 4, va)
                        )

            # after all the decisions...
            # operands were built ldc-style; stc just reverses them
            mnem = ('ldc','stc')[isStc]
            if isStc:
                opers = opers[::-1]

    elif diff == 0xc:
        if val2 & 0xfd00 == 0x5000:
            # mulxs
            mnem = 'mulxs'
            op, nmnem, opers, iflags, nisz =  p_Rs_Rd_4b(va, val, buf, off, tsize=1)
        else:
            raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)

    elif diff == 0xd:
        if val2 & 0xfd00 == 0x5100:
            mnem = 'divxs'
            # divxs
            op, nmnem, opers, iflags, nisz =  p_Rs_Rd_4b(va, val, buf, off, tsize)
        else:
            raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)

    elif diff == 0xe:
        if val2 & 0xff00 == 0x7b00:
            mnem = 'tas'        # FIXME: check out what this decodes to
            tsize = 1
            erd = (val2 >> 4) & 7
            opers = (
                    H8RegIndirOper(erd, tsize, va, oflags=0),
                    )

        else:
            raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)

    elif diff == 0xf:
        if val2 & 0xfc00 == 0x6400:
            # or/xor/and
            nop, nmnem, opers, iflags, nisz = p_ERs_ERd(va, val2, buf, off, tsize=4)
            op = (val << 8) | (val2 >> 8)
            # bits 8..9 of the second word pick the boolean op
            mnembits = (val2 >> 8) & 3
            mnem = ('or', 'xor', 'and')[mnembits]
        else:
            raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)

    else:
        raise envi.InvalidInstruction(bytez=buf[off:off+16], va=va)

    return (op, mnem, opers, iflags, isz)

Example 26

Project: script.tv.show.next.aired
Source File: tz.py
View license
    def __init__(self, fileobj):
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 27

Project: fixofx
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) time zone file.

        *fileobj* may be a filename or an open file object.  The reads
        below follow the on-disk layout documented in tzfile(5): magic,
        header counts, transition times, type indexes, ttinfo records,
        abbreviation pool, leap seconds, std/wall flags and UTC/local
        flags.  Raises ValueError if the "TZif" magic is missing.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5): the file begins with the magic characters
        # "TZif", then sixteen reserved bytes, then six four-byte
        # big-endian longs.

        if fileobj.read(4) != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The number of characters of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # tzh_timecnt four-byte transition times (as returned by
        # time(2)), big-endian, sorted ascending.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # tzh_timecnt one-byte unsigned indexes into the ttinfo
        # array, one per transition time.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo record is tt_gmtoff (four-byte long, seconds to
        # add to UTC), tt_isdst (one byte), and tt_abbrind (one byte,
        # an index into the abbreviation pool that follows).

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # tzh_leapcnt pairs of four-byte values: (time at which a leap
        # second occurs, total leap seconds to apply after it), sorted
        # ascending by time.  Read to keep the stream position right;
        # otherwise not used, for now.
        #
        # BUG FIX: the format must cover leapcnt*2 longs.  Without the
        # parentheses, "%" binds tighter than "*", yielding
        # (">%dl" % leapcnt) * 2 -- a doubled format string such as
        # ">1l>1l" that makes struct.unpack raise struct.error whenever
        # leapcnt is non-zero.
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # tzh_ttisstdcnt one-byte standard/wall indicators, used with
        # POSIX-style TZ environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # tzh_ttisgmtcnt one-byte UTC/local indicators, used with
        # POSIX-style TZ environment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full minutes: Python's datetime doesn't accept
            # sub-minute timezones (http://python.org/sf/1447945).
            # This also makes this copy consistent with the other tz
            # readers in this file.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation: NUL-terminated string starting at abbrind.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # Short-circuit keeps isstd/isgmt unreferenced when their
            # counts are zero (they are undefined in that case).
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-first to find the most recent
                # std and dst types; stop once both are known.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # NOTE(review): the original author was unsure about this step;
        # in practice isstd/isgmt are usually unset and the stored
        # times behave as UTC, so std offsets are applied here.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # Standard time: shift by this type's own offset.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # DST: shift by the last seen standard offset.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 28

Project: gfw-api
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Read a binary tzfile(5) stream and populate this instance.

        *fileobj* is either a filename or an open file.  The parsing
        order below mirrors the on-disk layout from tzfile(5): magic,
        header counts, transition times, type indexes, ttinfo records,
        abbreviation pool, leap seconds, std/wall flags and UTC/local
        flags.  Raises ValueError if the "TZif" magic is missing.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # The file must begin with the "TZif" magic, then sixteen
        # reserved bytes, then six big-endian 32-bit counts.
        if fileobj.read(4) != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (cnt_gmt,    # number of UTC/local indicators
         cnt_std,    # number of standard/wall indicators
         cnt_leap,   # number of leap-second records
         cnt_time,   # number of transition times
         cnt_type,   # number of local time types (never zero)
         cnt_char,   # size of the abbreviation string pool
         ) = struct.unpack(">6l", fileobj.read(24))

        # cnt_time big-endian longs: the transition times (as returned
        # by time(2)), sorted ascending.
        if cnt_time:
            self._trans_list = struct.unpack(">%dl" % cnt_time,
                                             fileobj.read(cnt_time*4))
        else:
            self._trans_list = []

        # cnt_time unsigned bytes: per-transition indexes into the
        # ttinfo array that follows.
        if cnt_time:
            self._trans_idx = struct.unpack(">%dB" % cnt_time,
                                            fileobj.read(cnt_time))
        else:
            self._trans_idx = []

        # ttinfo records: (tt_gmtoff: long, seconds added to UTC;
        # tt_isdst: byte; tt_abbrind: byte index into the pool below).
        type_records = [struct.unpack(">lbb", fileobj.read(6))
                        for _ in range(cnt_type)]

        abbr_pool = fileobj.read(cnt_char)

        # cnt_leap pairs of longs: (leap-second time, cumulative leap
        # seconds after it), ascending.  Read to keep the stream
        # position correct; otherwise unused for now.
        if cnt_leap:
            leap = struct.unpack(">%dl" % (cnt_leap*2),
                                 fileobj.read(cnt_leap*8))

        # One byte per standard/wall indicator (POSIX TZ handling).
        if cnt_std:
            std_flags = struct.unpack(">%db" % cnt_std,
                                      fileobj.read(cnt_std))

        # One byte per UTC/local indicator (POSIX TZ handling).
        if cnt_gmt:
            gmt_flags = struct.unpack(">%db" % cnt_gmt,
                                      fileobj.read(cnt_gmt))

        # Everything has been read; build the _ttinfo objects.
        self._ttinfo_list = []
        for pos, (offset, dst_flag, abbr_idx) in enumerate(type_records):
            # Python's datetime refuses sub-minute offsets, so round
            # to whole minutes (see http://python.org/sf/1447945).
            offset = (offset+30)//60*60
            info = _ttinfo()
            info.offset = offset
            info.delta = datetime.timedelta(seconds=offset)
            info.isdst = dst_flag
            # Abbreviation: NUL-terminated string starting at abbr_idx.
            info.abbr = abbr_pool[abbr_idx:abbr_pool.find('\x00', abbr_idx)]
            # Short-circuit keeps the flag tuples unreferenced when
            # their counts are zero (they are undefined in that case).
            info.isstd = (cnt_std > pos and std_flags[pos] != 0)
            info.isgmt = (cnt_gmt > pos and gmt_flags[pos] != 0)
            self._ttinfo_list.append(info)

        # Swap the raw index values for the _ttinfo objects they name.
        self._trans_idx = tuple(self._ttinfo_list[j]
                                for j in self._trans_idx)

        # Determine the standard, dst and "before first transition"
        # ttinfos.  "before" is the first non-dst type, falling back
        # to the first type when every type is dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Scan transitions newest-first for the latest std and
                # dst types; stop once both are known.
                for pos in range(cnt_time - 1, -1, -1):
                    info = self._trans_idx[pos]
                    if not self._ttinfo_std and not info.isdst:
                        self._ttinfo_std = info
                    elif not self._ttinfo_dst and info.isdst:
                        self._ttinfo_dst = info
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                # Only dst types were ever seen: use dst as std too.
                # (No-op when the loop above found both and broke out.)
                if self._ttinfo_dst and not self._ttinfo_std:
                    self._ttinfo_std = self._ttinfo_dst

                self._ttinfo_before = next(
                    (info for info in self._ttinfo_list if not info.isdst),
                    self._ttinfo_list[0])

        # Shift each transition time to local standard ("wall") time.
        # NOTE(review): the original author noted uncertainty here;
        # isstd/isgmt are typically unset, yet the stored times behave
        # as UTC, so the standard offset is applied.
        wall_times = []
        last_std_offset = 0
        for when, info in zip(self._trans_list, self._trans_idx):
            if not info.isdst:
                # Standard time: apply this type's own offset.
                last_std_offset = info.offset
                wall_times.append(when + info.offset)
            else:
                # DST: apply the most recently seen standard offset.
                wall_times.append(when + last_std_offset)
        self._trans_list = tuple(wall_times)

Example 29

Project: Sick-Beard-TPB
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) time zone file.

        *fileobj* may be a filename or an open file object.  The reads
        below follow the on-disk layout exactly (magic, header counts,
        transition times, type indexes, ttinfo records, abbreviation
        pool, leap seconds, std/wall flags, UTC/local flags), so their
        order must not change.  Raises ValueError on a missing magic.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            # Fall back to the object's repr (Python 2 backticks).
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now -- read only to keep the stream position
        # correct for the sections that follow.
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation: NUL-terminated string starting at abbrind.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # Short-circuit keeps isstd/isgmt unreferenced when their
            # counts are zero (they are undefined in that case).
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-first for the most recent std
                # and dst types; the for/else runs only when the loop
                # did not break (i.e. not both were found).
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 30

Project: Tickeys-linux
Source File: hidinput.py
View license
        def _thread_run(self, **kwargs):
            input_fn = kwargs.get('input_fn')
            queue = kwargs.get('queue')
            device = kwargs.get('device')
            drs = kwargs.get('default_ranges').get
            touches = {}
            touches_sent = []
            point = {}
            l_points = []

            # prepare some vars to get limit of some component
            range_min_position_x = 0
            range_max_position_x = 2048
            range_min_position_y = 0
            range_max_position_y = 2048
            range_min_pressure = 0
            range_max_pressure = 255
            range_min_abs_x = 0
            range_max_abs_x = 255
            range_min_abs_y = 0
            range_max_abs_y = 255
            range_min_abs_pressure = 0
            range_max_abs_pressure = 255
            invert_x = int(bool(drs('invert_x', 0)))
            invert_y = int(bool(drs('invert_y', 1)))
            rotation = drs('rotation', 0)

            def assign_coord(point, value, invert, coords):
                cx, cy = coords
                if invert:
                    value = 1. - value
                if rotation == 0:
                    point[cx] = value
                elif rotation == 90:
                    point[cy] = value
                elif rotation == 180:
                    point[cx] = 1. - value
                elif rotation == 270:
                    point[cy] = 1. - value

            def assign_rel_coord(point, value, invert, coords):
                cx, cy = coords
                if invert:
                    value = -1 * value
                if rotation == 0:
                    point[cx] += value
                elif rotation == 90:
                    point[cy] += value
                elif rotation == 180:
                    point[cx] += -value
                elif rotation == 270:
                    point[cy] += -value

            def process_as_multitouch(tv_sec, tv_usec, ev_type,
                                      ev_code, ev_value):
                # Decode one evdev event from a multitouch (protocol A)
                # device, accumulating per-contact data in the closed-over
                # ``point`` dict and flushing complete frames via process().
                # sync event
                if ev_type == EV_SYN:
                    if ev_code == SYN_MT_REPORT:
                        # end of one contact: keep a copy only if it received
                        # a tracking id in this frame
                        if 'id' not in point:
                            return
                        l_points.append(point.copy())
                    elif ev_code == SYN_REPORT:
                        # end of the whole frame: dispatch and reset
                        process(l_points)
                        del l_points[:]

                elif ev_type == EV_MSC and ev_code in (MSC_RAW, MSC_SCAN):
                    # raw/scan codes carry no touch information
                    pass

                else:
                    # compute multitouch track
                    if ev_code == ABS_MT_TRACKING_ID:
                        point.clear()
                        point['id'] = ev_value
                    elif ev_code == ABS_MT_POSITION_X:
                        # normalize to 0..1, honoring inversion/rotation
                        val = normalize(ev_value,
                                        range_min_position_x,
                                        range_max_position_x)
                        assign_coord(point, val, invert_x, 'xy')
                    elif ev_code == ABS_MT_POSITION_Y:
                        # Y axis is flipped (evdev origin is top-left)
                        val = 1. - normalize(ev_value,
                                             range_min_position_y,
                                             range_max_position_y)
                        assign_coord(point, val, invert_y, 'yx')
                    elif ev_code == ABS_MT_ORIENTATION:
                        point['orientation'] = ev_value
                    elif ev_code == ABS_MT_BLOB_ID:
                        point['blobid'] = ev_value
                    elif ev_code == ABS_MT_PRESSURE:
                        point['pressure'] = normalize(ev_value,
                                                      range_min_pressure,
                                                      range_max_pressure)
                    elif ev_code == ABS_MT_TOUCH_MAJOR:
                        point['size_w'] = ev_value
                    elif ev_code == ABS_MT_TOUCH_MINOR:
                        point['size_h'] = ev_value

            def process_as_mouse_or_keyboard(
                tv_sec, tv_usec, ev_type, ev_code, ev_value):
                """Decode one evdev event from a mouse/pen/keyboard device.

                Updates the closed-over ``point`` dict for motion/button
                events (flushed to process() on SYN_REPORT) and dispatches
                keyboard events through ``Window``.
                """
                if ev_type == EV_SYN:
                    if ev_code == SYN_REPORT:
                        # end of frame: dispatch the current pointer state
                        process([point])
                elif ev_type == EV_REL:
                    # relative motion, scaled and clamped to [-1, 1]
                    if ev_code == 0:
                        assign_rel_coord(point,
                            min(1., max(-1., ev_value / 1000.)),
                            invert_x, 'xy')
                    elif ev_code == 1:
                        assign_rel_coord(point,
                            min(1., max(-1., ev_value / 1000.)),
                            invert_y, 'yx')
                # BUGFIX: the ev_type == EV_ABS guard is required on the
                # three branches below. Without it, any event whose code
                # collides with ABS_X/ABS_Y/ABS_PRESSURE is swallowed here
                # -- e.g. an EV_KEY press of KEY_O (code 24 == ABS_PRESSURE)
                # was treated as pressure and never reached the EV_KEY
                # branch, so the key was never dispatched.
                elif ev_type == EV_ABS and ev_code == ABS_X:
                    val = normalize(ev_value,
                                    range_min_abs_x,
                                    range_max_abs_x)
                    assign_coord(point, val, invert_x, 'xy')
                elif ev_type == EV_ABS and ev_code == ABS_Y:
                    val = 1. - normalize(ev_value,
                                         range_min_abs_y,
                                         range_max_abs_y)
                    assign_coord(point, val, invert_y, 'yx')
                elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
                    point['pressure'] = normalize(ev_value,
                                                  range_min_abs_pressure,
                                                  range_max_abs_pressure)
                elif ev_type == EV_KEY:
                    buttons = {
                        272: 'left',
                        273: 'right',
                        274: 'middle',
                        275: 'side',
                        276: 'extra',
                        277: 'forward',
                        278: 'back',
                        279: 'task',
                        330: 'touch',
                        320: 'pen'}

                    if ev_code in buttons.keys():
                        if ev_value:
                            # button press: record it and bump the touch id
                            if 'button' not in point:
                                point['button'] = buttons[ev_code]
                                point['id'] += 1
                                if '_avoid' in point:
                                    del point['_avoid']
                        elif 'button' in point:
                            # button release: drop it and flag the point so
                            # process() skips it until the next press
                            if point['button'] == buttons[ev_code]:
                                del point['button']
                                point['id'] += 1
                                point['_avoid'] = True
                    else:
                        # keyboard key: translate via keyboard_keys, tracking
                        # the shift/alt modifiers in Window._modifiers
                        if ev_value == 1:
                            l = keyboard_keys[ev_code][-1
                                if 'shift' in Window._modifiers else 0]
                            if l == 'shift' or l == 'alt':
                                Window._modifiers.append(l)
                            Window.dispatch(
                                'on_key_down',
                                Keyboard.keycodes[l.lower()],
                                ev_code, keys_str.get(l, l),
                                Window._modifiers)
                        if ev_value == 0:
                            l = keyboard_keys[ev_code][-1
                                if 'shift' in Window._modifiers else 0]
                            Window.dispatch(
                                'on_key_up',
                                Keyboard.keycodes[l.lower()],
                                ev_code,
                                keys_str.get(l, l),
                                Window._modifiers)
                            if l == 'shift':
                                Window._modifiers.remove('shift')
                        # if ev_value == 2:
                        #     Window.dispatch('on_key_down', ev_code)

            def process(points):
                # Dispatch one frame's touch list: create/move HIDMotionEvent
                # instances for active points and end the ones that vanished,
                # pushing ('begin'|'update'|'end', touch) tuples onto queue.
                if not is_multitouch:
                    # single-pointer device: mirror the position to the window
                    Window.mouse_pos = (
                        points[0]['x'] * Window.width,
                        points[0]['y'] * Window.height)

                actives = [args['id']
                           for args in points
                           if 'id' in args and not '_avoid' in args]
                for args in points:
                    tid = args['id']
                    try:
                        touch = touches[tid]
                        if touch.sx == args['x'] and touch.sy == args['y']:
                            # position unchanged: nothing to report
                            continue
                        touch.move(args)
                        if tid not in touches_sent:
                            queue.append(('begin', touch))
                            touches_sent.append(tid)
                        queue.append(('update', touch))
                    except KeyError:
                        # unknown id: a new touch, unless flagged to skip
                        if '_avoid' not in args:
                            touch = HIDMotionEvent(device, tid, args)
                            touches[touch.id] = touch
                            if tid not in touches_sent:
                                queue.append(('begin', touch))
                                touches_sent.append(tid)

                # end every touch that is no longer active this frame
                for tid in list(touches.keys())[:]:
                    if tid not in actives:
                        touch = touches[tid]
                        if tid in touches_sent:
                            touch.update_time_end()
                            queue.append(('end', touch))
                            touches_sent.remove(tid)
                        del touches[tid]

            def normalize(value, vmin, vmax):
                """Map *value* from the range [vmin, vmax] onto [0, 1].

                Returns 0.0 when the range is degenerate (vmax == vmin)
                instead of raising ZeroDivisionError, which a device
                reporting a zero-width axis range would otherwise trigger.
                """
                span = float(vmax - vmin)
                if span == 0:
                    return 0.0
                return (value - vmin) / span

            # open the input device node (e.g. /dev/input/eventN)
            fd = open(input_fn, 'rb')

            # query the controller name (EVIOCGNAME ioctl, 256-byte buffer)
            device_name = str(fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
                                      " " * 256)).split('\x00')[0]
            Logger.info('HIDMotionEvent: using <%s>' % device_name)

            # query the supported event-type bitmask (EVIOCGBIT)
            bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
            bit, = struct.unpack('Q', bit)
            is_multitouch = False
            for x in range(EV_MAX):
                # preserve this, we may want other things than EV_ABS
                if x != EV_ABS:
                    continue
                # EV_ABS available for this device ?
                if (bit & (1 << x)) == 0:
                    continue
                # ask which ABS axes the device reports
                sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
                                   ' ' * sz_l)
                sbit, = struct.unpack('Q', sbit)
                for y in range(KEY_MAX):
                    if (sbit & (1 << y)) == 0:
                        continue
                    # read this axis' struct input_absinfo (EVIOCGABS):
                    # value, min, max, fuzz, flat, resolution -- six ints
                    absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
                                          (struct_input_absinfo_sz << 16),
                                          ' ' * struct_input_absinfo_sz)
                    abs_value, abs_min, abs_max, abs_fuzz, \
                        abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
                    # record the axis range, letting default_ranges override;
                    # an ABS_MT_* position axis marks the device multitouch
                    if y == ABS_MT_POSITION_X:
                        is_multitouch = True
                        range_min_position_x = drs('min_position_x', abs_min)
                        range_max_position_x = drs('max_position_x', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range position X is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_MT_POSITION_Y:
                        is_multitouch = True
                        range_min_position_y = drs('min_position_y', abs_min)
                        range_max_position_y = drs('max_position_y', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range position Y is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_MT_PRESSURE:
                        range_min_pressure = drs('min_pressure', abs_min)
                        range_max_pressure = drs('max_pressure', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_X:
                        range_min_abs_x = drs('min_abs_x', abs_min)
                        range_max_abs_x = drs('max_abs_x', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS X position is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_Y:
                        range_min_abs_y = drs('min_abs_y', abs_min)
                        range_max_abs_y = drs('max_abs_y', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS Y position is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_PRESSURE:
                        range_min_abs_pressure = drs(
                            'min_abs_pressure', abs_min)
                        range_max_abs_pressure = drs(
                            'max_abs_pressure', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))

            # single-pointer devices get one persistent point, pre-flagged
            # '_avoid' so it is not reported until a button press
            if not is_multitouch:
                point = {'x': .5, 'y': .5, 'id': 0, '_avoid': True}

            # read events until EOF (a file object is always truthy;
            # the loop exits via the break below on a short read)
            while fd:

                data = fd.read(struct_input_event_sz)
                if len(data) < struct_input_event_sz:
                    break

                # extract each event
                for i in range(int(len(data) / struct_input_event_sz)):
                    ev = data[i * struct_input_event_sz:]

                    # struct input_event: timeval (sec, usec), type, code,
                    # value
                    infos = struct.unpack('LLHHi', ev[:struct_input_event_sz])

                    if is_multitouch:
                        process_as_multitouch(*infos)
                    else:
                        process_as_mouse_or_keyboard(*infos)

Example 31

Project: Tickeys-linux
Source File: hidinput.py
View license
        def _thread_run(self, **kwargs):
            """Reader-thread body: poll one /dev/input event device and
            translate its evdev events into motion events appended to the
            shared ``queue``.

            kwargs:
                input_fn: path of the evdev device node.
                queue: shared list the window provider drains.
                device: device identifier used to tag motion events.
                default_ranges: dict of user overrides for axis ranges,
                    inversion and rotation.
            """
            input_fn = kwargs.get('input_fn')
            queue = kwargs.get('queue')
            device = kwargs.get('device')
            drs = kwargs.get('default_ranges').get
            touches = {}        # live touches, keyed by tracking id
            touches_sent = []   # ids for which 'begin' was already queued
            point = {}          # contact currently being decoded
            l_points = []       # finished contacts of the current frame

            # default axis limits, used when the device reports no
            # EVIOCGABS info for an axis (overridable via default_ranges)
            range_min_position_x = 0
            range_max_position_x = 2048
            range_min_position_y = 0
            range_max_position_y = 2048
            range_min_pressure = 0
            range_max_pressure = 255
            range_min_abs_x = 0
            range_max_abs_x = 255
            range_min_abs_y = 0
            range_max_abs_y = 255
            range_min_abs_pressure = 0
            range_max_abs_pressure = 255
            invert_x = int(bool(drs('invert_x', 0)))
            invert_y = int(bool(drs('invert_y', 1)))
            rotation = drs('rotation', 0)

            def assign_coord(point, value, invert, coords):
                # Store an absolute 0..1 coordinate into ``point``, applying
                # axis inversion and screen rotation; ``coords`` names the
                # two candidate keys, e.g. 'xy' or 'yx'.
                cx, cy = coords
                if invert:
                    value = 1. - value
                if rotation == 0:
                    point[cx] = value
                elif rotation == 90:
                    point[cy] = value
                elif rotation == 180:
                    point[cx] = 1. - value
                elif rotation == 270:
                    point[cy] = 1. - value

            def assign_rel_coord(point, value, invert, coords):
                # Same as assign_coord, but accumulates a relative delta.
                cx, cy = coords
                if invert:
                    value = -1 * value
                if rotation == 0:
                    point[cx] += value
                elif rotation == 90:
                    point[cy] += value
                elif rotation == 180:
                    point[cx] += -value
                elif rotation == 270:
                    point[cy] += -value

            def process_as_multitouch(tv_sec, tv_usec, ev_type,
                                      ev_code, ev_value):
                # Decode one evdev event from a multitouch (protocol A)
                # device, accumulating per-contact data in the closed-over
                # ``point`` dict and flushing complete frames via process().
                # sync event
                if ev_type == EV_SYN:
                    if ev_code == SYN_MT_REPORT:
                        # end of one contact: keep a copy only if it received
                        # a tracking id in this frame
                        if 'id' not in point:
                            return
                        l_points.append(point.copy())
                    elif ev_code == SYN_REPORT:
                        # end of the whole frame: dispatch and reset
                        process(l_points)
                        del l_points[:]

                elif ev_type == EV_MSC and ev_code in (MSC_RAW, MSC_SCAN):
                    # raw/scan codes carry no touch information
                    pass

                else:
                    # compute multitouch track
                    if ev_code == ABS_MT_TRACKING_ID:
                        point.clear()
                        point['id'] = ev_value
                    elif ev_code == ABS_MT_POSITION_X:
                        # normalize to 0..1, honoring inversion/rotation
                        val = normalize(ev_value,
                                        range_min_position_x,
                                        range_max_position_x)
                        assign_coord(point, val, invert_x, 'xy')
                    elif ev_code == ABS_MT_POSITION_Y:
                        # Y axis is flipped (evdev origin is top-left)
                        val = 1. - normalize(ev_value,
                                             range_min_position_y,
                                             range_max_position_y)
                        assign_coord(point, val, invert_y, 'yx')
                    elif ev_code == ABS_MT_ORIENTATION:
                        point['orientation'] = ev_value
                    elif ev_code == ABS_MT_BLOB_ID:
                        point['blobid'] = ev_value
                    elif ev_code == ABS_MT_PRESSURE:
                        point['pressure'] = normalize(ev_value,
                                                      range_min_pressure,
                                                      range_max_pressure)
                    elif ev_code == ABS_MT_TOUCH_MAJOR:
                        point['size_w'] = ev_value
                    elif ev_code == ABS_MT_TOUCH_MINOR:
                        point['size_h'] = ev_value

            def process_as_mouse_or_keyboard(
                tv_sec, tv_usec, ev_type, ev_code, ev_value):
                """Decode one evdev event from a mouse/pen/keyboard device.

                Updates the closed-over ``point`` dict for motion/button
                events (flushed to process() on SYN_REPORT) and dispatches
                keyboard events through ``Window``.
                """
                if ev_type == EV_SYN:
                    if ev_code == SYN_REPORT:
                        # end of frame: dispatch the current pointer state
                        process([point])
                elif ev_type == EV_REL:
                    # relative motion, scaled and clamped to [-1, 1]
                    if ev_code == 0:
                        assign_rel_coord(point,
                            min(1., max(-1., ev_value / 1000.)),
                            invert_x, 'xy')
                    elif ev_code == 1:
                        assign_rel_coord(point,
                            min(1., max(-1., ev_value / 1000.)),
                            invert_y, 'yx')
                # BUGFIX: the ev_type == EV_ABS guard is required on the
                # three branches below. Without it, any event whose code
                # collides with ABS_X/ABS_Y/ABS_PRESSURE is swallowed here
                # -- e.g. an EV_KEY press of KEY_O (code 24 == ABS_PRESSURE)
                # was treated as pressure and never reached the EV_KEY
                # branch, so the key was never dispatched.
                elif ev_type == EV_ABS and ev_code == ABS_X:
                    val = normalize(ev_value,
                                    range_min_abs_x,
                                    range_max_abs_x)
                    assign_coord(point, val, invert_x, 'xy')
                elif ev_type == EV_ABS and ev_code == ABS_Y:
                    val = 1. - normalize(ev_value,
                                         range_min_abs_y,
                                         range_max_abs_y)
                    assign_coord(point, val, invert_y, 'yx')
                elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
                    point['pressure'] = normalize(ev_value,
                                                  range_min_abs_pressure,
                                                  range_max_abs_pressure)
                elif ev_type == EV_KEY:
                    buttons = {
                        272: 'left',
                        273: 'right',
                        274: 'middle',
                        275: 'side',
                        276: 'extra',
                        277: 'forward',
                        278: 'back',
                        279: 'task',
                        330: 'touch',
                        320: 'pen'}

                    if ev_code in buttons.keys():
                        if ev_value:
                            # button press: record it and bump the touch id
                            if 'button' not in point:
                                point['button'] = buttons[ev_code]
                                point['id'] += 1
                                if '_avoid' in point:
                                    del point['_avoid']
                        elif 'button' in point:
                            # button release: drop it and flag the point so
                            # process() skips it until the next press
                            if point['button'] == buttons[ev_code]:
                                del point['button']
                                point['id'] += 1
                                point['_avoid'] = True
                    else:
                        # keyboard key: translate via keyboard_keys, tracking
                        # the shift/alt modifiers in Window._modifiers
                        if ev_value == 1:
                            l = keyboard_keys[ev_code][-1
                                if 'shift' in Window._modifiers else 0]
                            if l == 'shift' or l == 'alt':
                                Window._modifiers.append(l)
                            Window.dispatch(
                                'on_key_down',
                                Keyboard.keycodes[l.lower()],
                                ev_code, keys_str.get(l, l),
                                Window._modifiers)
                        if ev_value == 0:
                            l = keyboard_keys[ev_code][-1
                                if 'shift' in Window._modifiers else 0]
                            Window.dispatch(
                                'on_key_up',
                                Keyboard.keycodes[l.lower()],
                                ev_code,
                                keys_str.get(l, l),
                                Window._modifiers)
                            if l == 'shift':
                                Window._modifiers.remove('shift')
                        # if ev_value == 2:
                        #     Window.dispatch('on_key_down', ev_code)

            def process(points):
                # Dispatch one frame's touch list: create/move HIDMotionEvent
                # instances for active points and end the ones that vanished,
                # pushing ('begin'|'update'|'end', touch) tuples onto queue.
                if not is_multitouch:
                    # single-pointer device: mirror the position to the window
                    Window.mouse_pos = (
                        points[0]['x'] * Window.width,
                        points[0]['y'] * Window.height)

                actives = [args['id']
                           for args in points
                           if 'id' in args and not '_avoid' in args]
                for args in points:
                    tid = args['id']
                    try:
                        touch = touches[tid]
                        if touch.sx == args['x'] and touch.sy == args['y']:
                            # position unchanged: nothing to report
                            continue
                        touch.move(args)
                        if tid not in touches_sent:
                            queue.append(('begin', touch))
                            touches_sent.append(tid)
                        queue.append(('update', touch))
                    except KeyError:
                        # unknown id: a new touch, unless flagged to skip
                        if '_avoid' not in args:
                            touch = HIDMotionEvent(device, tid, args)
                            touches[touch.id] = touch
                            if tid not in touches_sent:
                                queue.append(('begin', touch))
                                touches_sent.append(tid)

                # end every touch that is no longer active this frame
                for tid in list(touches.keys())[:]:
                    if tid not in actives:
                        touch = touches[tid]
                        if tid in touches_sent:
                            touch.update_time_end()
                            queue.append(('end', touch))
                            touches_sent.remove(tid)
                        del touches[tid]

            def normalize(value, vmin, vmax):
                """Map *value* from the range [vmin, vmax] onto [0, 1].

                Returns 0.0 when the range is degenerate (vmax == vmin)
                instead of raising ZeroDivisionError, which a device
                reporting a zero-width axis range would otherwise trigger.
                """
                span = float(vmax - vmin)
                if span == 0:
                    return 0.0
                return (value - vmin) / span

            # open the input device node (e.g. /dev/input/eventN)
            fd = open(input_fn, 'rb')

            # query the controller name (EVIOCGNAME ioctl, 256-byte buffer)
            device_name = str(fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
                                      " " * 256)).split('\x00')[0]
            Logger.info('HIDMotionEvent: using <%s>' % device_name)

            # query the supported event-type bitmask (EVIOCGBIT)
            bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
            bit, = struct.unpack('Q', bit)
            is_multitouch = False
            for x in range(EV_MAX):
                # preserve this, we may want other things than EV_ABS
                if x != EV_ABS:
                    continue
                # EV_ABS available for this device ?
                if (bit & (1 << x)) == 0:
                    continue
                # ask which ABS axes the device reports
                sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
                                   ' ' * sz_l)
                sbit, = struct.unpack('Q', sbit)
                for y in range(KEY_MAX):
                    if (sbit & (1 << y)) == 0:
                        continue
                    # read this axis' struct input_absinfo (EVIOCGABS):
                    # value, min, max, fuzz, flat, resolution -- six ints
                    absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
                                          (struct_input_absinfo_sz << 16),
                                          ' ' * struct_input_absinfo_sz)
                    abs_value, abs_min, abs_max, abs_fuzz, \
                        abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
                    # record the axis range, letting default_ranges override;
                    # an ABS_MT_* position axis marks the device multitouch
                    if y == ABS_MT_POSITION_X:
                        is_multitouch = True
                        range_min_position_x = drs('min_position_x', abs_min)
                        range_max_position_x = drs('max_position_x', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range position X is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_MT_POSITION_Y:
                        is_multitouch = True
                        range_min_position_y = drs('min_position_y', abs_min)
                        range_max_position_y = drs('max_position_y', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range position Y is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_MT_PRESSURE:
                        range_min_pressure = drs('min_pressure', abs_min)
                        range_max_pressure = drs('max_pressure', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_X:
                        range_min_abs_x = drs('min_abs_x', abs_min)
                        range_max_abs_x = drs('max_abs_x', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS X position is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_Y:
                        range_min_abs_y = drs('min_abs_y', abs_min)
                        range_max_abs_y = drs('max_abs_y', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS Y position is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_PRESSURE:
                        range_min_abs_pressure = drs(
                            'min_abs_pressure', abs_min)
                        range_max_abs_pressure = drs(
                            'max_abs_pressure', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))

            # single-pointer devices get one persistent point, pre-flagged
            # '_avoid' so it is not reported until a button press
            if not is_multitouch:
                point = {'x': .5, 'y': .5, 'id': 0, '_avoid': True}

            # read events until EOF (a file object is always truthy;
            # the loop exits via the break below on a short read)
            while fd:

                data = fd.read(struct_input_event_sz)
                if len(data) < struct_input_event_sz:
                    break

                # extract each event
                for i in range(int(len(data) / struct_input_event_sz)):
                    ev = data[i * struct_input_event_sz:]

                    # struct input_event: timeval (sec, usec), type, code,
                    # value
                    infos = struct.unpack('LLHHi', ev[:struct_input_event_sz])

                    if is_multitouch:
                        process_as_multitouch(*infos)
                    else:
                        process_as_mouse_or_keyboard(*infos)

Example 32

Project: Tickeys-linux
Source File: linuxwacom.py
View license
        def _thread_run(self, **kwargs):
            """Reader-thread body for a Linux Wacom tablet: poll one
            /dev/input event device and translate its evdev events into
            motion events appended to the shared ``queue``.

            kwargs:
                input_fn: path of the evdev device node.
                queue: shared list the window provider drains.
                device: device identifier used to tag motion events.
                default_ranges: dict of user overrides for axis ranges
                    and inversion.
            """
            input_fn = kwargs.get('input_fn')
            queue = kwargs.get('queue')
            device = kwargs.get('device')
            drs = kwargs.get('default_ranges').get
            touches = {}        # live touches, keyed by tool serial
            touches_sent = []   # ids for which 'begin' was already queued
            l_points = {}       # current per-id point state

            # default axis limits, used when the device reports no
            # EVIOCGABS info for an axis (overridable via default_ranges)
            range_min_position_x = 0
            range_max_position_x = 2048
            range_min_position_y = 0
            range_max_position_y = 2048
            range_min_pressure = 0
            range_max_pressure = 255
            invert_x = int(bool(drs('invert_x', 0)))
            invert_y = int(bool(drs('invert_y', 0)))
            reset_touch = False

            def process(points):
                # Dispatch the current point map: create/move
                # LinuxWacomMotionEvent instances for active ids and end the
                # ones that disappeared, queuing the transitions.
                actives = list(points.keys())
                for args in points.values():
                    tid = args['id']
                    try:
                        touch = touches[tid]
                    except KeyError:
                        touch = LinuxWacomMotionEvent(device, tid, args)
                        touches[touch.id] = touch
                    if touch.sx == args['x'] \
                            and touch.sy == args['y'] \
                            and tid in touches_sent:
                        # unchanged and already reported: skip
                        continue
                    touch.move(args)
                    if tid not in touches_sent:
                        queue.append(('begin', touch))
                        touches_sent.append(tid)
                    queue.append(('update', touch))

                # end every touch that is no longer active
                for tid in list(touches.keys())[:]:
                    if tid not in actives:
                        touch = touches[tid]
                        if tid in touches_sent:
                            touch.update_time_end()
                            queue.append(('end', touch))
                            touches_sent.remove(tid)
                        del touches[tid]

            def normalize(value, vmin, vmax):
                """Map *value* from the range [vmin, vmax] onto [0, 1].

                Returns 0.0 when the range is degenerate (vmax == vmin)
                instead of raising ZeroDivisionError, which a device
                reporting a zero-width axis range would otherwise trigger.
                """
                span = float(vmax - vmin)
                if span == 0:
                    return 0.0
                return (value - vmin) / span

            # open the input
            try:
                fd = open(input_fn, 'rb')
            except IOError:
                Logger.exception('Unable to open %s' % input_fn)
                return

            # get the controler name (EVIOCGNAME)
            device_name = fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
                                      " " * 256).split('\x00')[0]
            Logger.info('LinuxWacom: using <%s>' % device_name)

            # get abs infos
            bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
            bit, = struct.unpack('Q', bit)
            for x in range(EV_MAX):
                # preserve this, we may want other things than EV_ABS
                if x != EV_ABS:
                    continue
                # EV_ABS available for this device ?
                if (bit & (1 << x)) == 0:
                    continue
                # ask abs info keys to the devices
                sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
                                   ' ' * sz_l)
                sbit, = struct.unpack('Q', sbit)
                for y in range(KEY_MAX):
                    if (sbit & (1 << y)) == 0:
                        continue
                    absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
                                          (struct_input_absinfo_sz << 16),
                                          ' ' * struct_input_absinfo_sz)
                    abs_value, abs_min, abs_max, abs_fuzz, \
                        abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
                    if y == ABS_X:
                        range_min_position_x = drs('min_position_x', abs_min)
                        range_max_position_x = drs('max_position_x', abs_max)
                        Logger.info('LinuxWacom: ' +
                                    '<%s> range position X is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_Y:
                        range_min_position_y = drs('min_position_y', abs_min)
                        range_max_position_y = drs('max_position_y', abs_max)
                        Logger.info('LinuxWacom: ' +
                                    '<%s> range position Y is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_PRESSURE:
                        range_min_pressure = drs('min_pressure', abs_min)
                        range_max_pressure = drs('max_pressure', abs_max)
                        Logger.info('LinuxWacom: ' +
                                    '<%s> range pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))

            # read until the end
            changed = False
            touch_id = 0
            touch_x = 0
            touch_y = 0
            touch_pressure = 0
            while fd:

                data = fd.read(struct_input_event_sz)
                if len(data) < struct_input_event_sz:
                    break

                # extract each event
                for i in range(len(data) / struct_input_event_sz):
                    ev = data[i * struct_input_event_sz:]

                    # extract timeval + event infos
                    tv_sec, tv_usec, ev_type, ev_code, ev_value = \
                        struct.unpack('LLHHi', ev[:struct_input_event_sz])

                    if ev_type == EV_SYN and ev_code == SYN_REPORT:
                        if touch_id in l_points:
                            p = l_points[touch_id]
                        else:
                            p = dict()
                            l_points[touch_id] = p
                        p['id'] = touch_id
                        if reset_touch is False:
                            p['x'] = touch_x
                            p['y'] = touch_y
                            p['pressure'] = touch_pressure
                        if self.mode == 'pen' \
                                and touch_pressure == 0 \
                                and not reset_touch:
                            del l_points[touch_id]
                        if changed:
                            if not 'x' in p:
                                reset_touch = False
                                continue
                            process(l_points)
                            changed = False
                        if reset_touch:
                            l_points.clear()
                            reset_touch = False
                            process(l_points)
                    elif ev_type == EV_MSC and ev_code == MSC_SERIAL:
                        touch_id = ev_value
                    elif ev_type == EV_ABS and ev_code == ABS_X:
                        val = normalize(ev_value,
                                        range_min_position_x,
                                        range_max_position_x)
                        if invert_x:
                            val = 1. - val
                        touch_x = val
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_Y:
                        val = 1. - normalize(ev_value,
                                             range_min_position_y,
                                             range_max_position_y)
                        if invert_y:
                            val = 1. - val
                        touch_y = val
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_PRESSURE:
                        touch_pressure = normalize(ev_value,
                                                   range_min_pressure,
                                                   range_max_pressure)
                        changed = True
                    elif ev_type == EV_ABS and ev_code == ABS_MISC:
                        if ev_value == 0:
                            reset_touch = True

Example 33

Project: haoide
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Read and parse a binary tzfile(5) ("TZif") time zone file.

        *fileobj* is either a filename string or an open binary file
        object.  On return the instance holds the transition list, the
        per-transition ttinfo objects, and the std/dst/before ttinfos
        used for local time computations.

        Raises ValueError if the file does not start with the "TZif"
        magic bytes.
        """
        if isinstance(fileobj, str):
            self._filename = fileobj
            # tzfiles are binary: open in 'rb' so read() yields bytes,
            # which the decode() and struct.unpack() calls below
            # require.  Text mode would return str and break decode()
            # (and corrupt data where newline translation applies).
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 34

Project: SublimeApex
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Read and parse a binary tzfile(5) ("TZif") time zone file.

        *fileobj* is either a filename string or an open binary file
        object.  On return the instance holds the transition list, the
        per-transition ttinfo objects, and the std/dst/before ttinfos
        used for local time computations.

        Raises ValueError if the file does not start with the "TZif"
        magic bytes.
        """
        if isinstance(fileobj, str):
            self._filename = fileobj
            # tzfiles are binary: open in 'rb' so read() yields bytes,
            # which the decode() and struct.unpack() calls below
            # require.  Text mode would return str and break decode()
            # (and corrupt data where newline translation applies).
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 35

Project: youtify
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Read and parse a binary tzfile(5) ("TZif") time zone file.

        *fileobj* is either a filename string or an open binary file
        object.  On return the instance holds the transition list, the
        per-transition ttinfo objects, and the std/dst/before ttinfos
        used for local time computations.

        Raises ValueError if the file does not start with the "TZif"
        magic bytes.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            # tzfiles are binary: open in 'rb' so the content is read
            # verbatim.  Text mode would corrupt the data on platforms
            # that perform newline translation (e.g. Windows).
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            # repr() instead of the deprecated backtick syntax.
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            # Call form of raise (works on Python 2 and 3) instead of
            # the Python-2-only "raise E, msg" statement.
            raise ValueError("magic not found")

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 36

Project: amun
Source File: amun_rdp_core.py
View license
	def checkForConferenceRequest(self, data):		
		tpktVersion = struct.unpack('B', data[0])[0]
		tpktReserved = struct.unpack('B', data[1])[0]
		tpktPLengthHigh = struct.unpack('B', data[2])[0]
		tpktPLengthLow = struct.unpack('B', data[3])[0]
		pktLength = struct.unpack('>H', data[2:4])[0]

		x224length = struct.unpack('B', data[4])[0]
		x224type = struct.unpack('B', data[5])[0]
		x224eot = struct.unpack('B', data[6])[0]

		posi = 9	
		bertypeLength = data[9:11]
		if bertypeLength[-1] == '\x01':
			bertypeLength += data[11]
			posi = 12
		elif bertypeLength[-1] == '\x02':
			bertypeLength += data[11:13]
			posi = 13

		callingDomainSelector = data[posi:posi+2]
		if callingDomainSelector[-1] == '\x01':
			callingDomainSelector += data[posi+2]
			posi = posi + 3
		elif callingDomainSelector[-1] == '\x02':
			callingDomainSelector += data[posi+2:posi+4]
			posi = posi + 4
			
		calledDomainSelector = data[posi:posi+2]
		if calledDomainSelector[-1] == '\x01':
			calledDomainSelector += data[posi+2]
			posi = posi + 3
		elif calledDomainSelector[-1] == '\x02': 
			calledDomainSelector += data[posi+2:posi+4]
			posi = posi + 4

		upwardFlag = data[posi:posi+2]
		if upwardFlag[-1] == '\x01':
			upwardFlag += data[posi+2]
			posi = posi + 3
		elif upwardFlag[-1] == '\x02':
			upwardFlag += data[posi+2:posi+4]
			posi = posi + 4
		# is two bytes long
		targetParameters = data[posi:posi+2]
		posi = posi + 2

		maxChannelIds = data[posi:posi+2]
		if maxChannelIds[-1] == '\x01':
			maxChannelIds += data[posi+2]
			posi = posi + 3
		elif maxChannelIds[-1] == '\x02':
			maxChannelIds += data[posi+2:posi+4]
			posi = posi + 4

		maxUserIds = data[posi:posi+2]
		if maxUserIds[-1] == '\x01':
			maxUserIds += data[posi+2]
			posi = posi + 3
		elif maxUserIds[-1] == '\x02':
			maxUserIds += data[posi+2:posi+4]
			posi = posi + 4

		maxTokenIds = data[posi:posi+2]
		if maxTokenIds[-1] == '\x01':
			maxTokenIds += data[posi+2]
			posi = posi + 3
		elif maxTokenIds[-1] == '\x02':
			maxTokenIds += data[posi+2:posi+4]
			posi = posi + 4
	
		numPriorities = data[posi:posi+2]
		if numPriorities[-1] == '\x01':
			numPriorities += data[posi+2]
			posi = posi + 3
		elif numPriorities[-1] == '\x02':
			numPriorities += data[posi+2:posi+4]
			posi = posi + 4

		minThroughput = data[posi:posi+2]
		if minThroughput[-1] == '\x01':
			minThroughput += data[posi+2]
			posi = posi + 3
		elif minThroughput[-1] == '\x02':
			minThroughput += data[posi+2:posi+4]
			posi = posi + 4

		maxHeight = data[posi:posi+2]
		if maxHeight[-1] == '\x01':
			maxHeight += data[posi+2]
			posi = posi + 3
		elif maxHeight[-1] == '\x02':
			maxHeight += data[posi+2:posi+4]
			posi = posi + 4

		maxMCSPDUsize = data[posi:posi+2]
		if maxMCSPDUsize[-1] == '\x01':
			maxMCSPDUsize += data[posi+2]
			posi = posi + 3
		elif maxMCSPDUsize[-1] == '\x02':
			maxMCSPDUsize += data[posi+2:posi+4]
			posi = posi + 4
		
		protocolVersion = data[posi:posi+2]
		if protocolVersion[-1] == '\x01':
			protocolVersion += data[posi+2]
			posi = posi + 3
		elif protocolVersion[-1] == '\x02':
			protocolVersion += data[posi+2:posi+4]
			posi = posi + 4

		if self.debug:
			print "TPKT Header: version = %s" % (tpktVersion)
			print "TPKT Header: Reserved = %s" % (tpktReserved)
			print "TPKT Header: Packet length - high part = %s" % (tpktPLengthHigh)
			print "TPKT Header: Packet length - low part = %s" % (tpktPLengthLow)
			print "Packet Length: %s (%s)" % (pktLength, len(data))
			print "X.224: Length indicator = %s" % (x224length)
			print "X.224: Type = %s (%s)" % (x224type, hex(x224type))
			if hex(x224type)=='0xf0':
				print "\t Data TPDU"
			print "X.224: EOT = %s (%s)" % (x224eot, hex(x224eot))
			if data[8]=='\x65':
				print "MCS_TYPE_CONNECTINITIAL"
			print "BER: Type Length:", [bertypeLength]
			print "Connect-Initial::callingDomainSelector", [callingDomainSelector]
			print "Connect-Initial::calledDomainSelector", [calledDomainSelector]
			print "Connect-Initial::upwardFlag", [upwardFlag]
			print "Connect-Initial::targetParameters", [targetParameters]
			print "DomainParameters::maxChannelIds", [maxChannelIds]
			print "DomainParameters::maxUserIds", [maxUserIds]
			print "DomainParameters::maxTokenIds", [maxTokenIds]
			print "DomainParameters::numPriorities", [numPriorities]
			print "DomainParameters::minThroughput", [minThroughput]
			print "DomainParameters::maxHeight", [maxHeight]
			print "DomainParameters::maxMCSPDUsize", [maxMCSPDUsize]
			print "DomainParameters::protocolVersion", [protocolVersion]
			print [data[posi:]]

		if x224length == 2 and data[7]=='\x7f' and data[8]=='\x65':
			return True
		return False

Example 37

Project: xbmc-addon-tvtumbler
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile (TZif format, see tzfile(5)) and build the
        transition and ttinfo tables used for timezone lookups.

        :param fileobj: either a filesystem path (string) or an already-open
            file-like object positioned at the start of the TZif data.
        :raises ValueError: if the leading "TZif" magic bytes are missing.

        NOTE(review): this variant is Python 2-only code — it uses
        ``basestring``, backtick repr, old-style ``raise``, and reads the
        binary file in text mode.
        """
        # Accept either a path or a file object; keep a printable name
        # around in self._filename for later identification.
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        # Skip the sixteen reserved bytes that follow the magic.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        # Each record is (gmtoff, isdst, abbrind): >lbb = 4 + 1 + 1 bytes.
        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        # NUL-separated pool of abbreviation strings; indexed by abbrind.
        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation runs from abbrind up to the next NUL in the pool.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # isstd/isgmt arrays may be shorter than typecnt; the index
            # guard defaults missing entries to False.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-to-oldest and take the most recent
                # std and dst ttinfos.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    # Loop exhausted without finding both: if only dst
                    # entries exist, use dst as std too.
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 38

Project: tic
Source File: mpesm.py
View license
def main():
    """Entry point for mpesm: match the entry-point mnemonic sequence of a
    PE or Mach-O file (or every file in a directory) against a signature
    file using a tapered Levenshtein similarity, printing matches that
    exceed the threshold.

    NOTE(review): Python 2-only code (print statements, ConfigParser).
    """
    # Defaults; several are overridable from the command line below.
    BYTES = 500
    NUM_MNEM = 30
    SIG_FILE = "./mpesm.sig"
    THRESHOLD = .85
    VERBOSE = False
    DIR_PROCESSING = False
    signatures = {}
    file_list = []
    nos = 0
    ep = 0
    ep_ava = 0

    # NOTE(review): "lenght" typo in the -n help text below; a doc-only
    # pass cannot change runtime strings, so it is left as-is.
    parser = ArgumentParser(description="Mnemonic PE Signature Matching")
    parser.add_argument("-n", "--num-mnem",
                        dest="num_mnem", help="Use a lenght of 'n' mnemonics (default: " + str(NUM_MNEM) + ')')
    parser.add_argument("-s", "--signatures",
                        dest="sig_file", help="signature file to use (default: " + SIG_FILE + ')')
    parser.add_argument("-b", "--bytes",
                        dest="bytes", help="Grab and disassemble x bytes from EP, you should only need to change this if you give a super large number for -n (default: " + str(BYTES) + ')')
    parser.add_argument("-t", "--threshold",
                        dest="threshold", help="Display all matches greater than -t supplied similarity (default: " + str(THRESHOLD) + ')')
    parser.add_argument("-v", "--verbose",
                        dest="verbose", help="Verbose output", action='store_true')
    parser.add_argument("file", nargs=1, help='File to analyze')
    args = parser.parse_args()

    if args.sig_file:
        SIG_FILE = args.sig_file
    if args.threshold:
        THRESHOLD = float(args.threshold)
    # NOTE(review): args.bytes and args.num_mnem are strings (no type=int
    # on add_argument), yet BYTES is used in `ep:ep+BYTES` arithmetic and
    # NUM_MNEM as a slice bound below — confirm; -b/-n likely break.
    if args.bytes:
        BYTES = args.bytes
    if args.num_mnem:
        NUM_MNEM = args.num_mnem
    if args.verbose:
        VERBOSE = True

    # Load the signature definitions; each config section is one signature.
    config = ConfigParser.RawConfigParser()
    config.read(SIG_FILE)

    if len(config.sections()) == 0:
        print "Error Reading from config file: %s, it's either empty or not present" %(SIG_FILE)
        sys.exit(1)
    for s in config.sections():
        signatures[s] = {}
        signatures[s]['mnemonics'] = config.get(s, 'mnemonics').split(',')
        # Optional per-signature metadata used for extra reporting.
        if config.has_option(s, 'num_mnemonics'):
            signatures[s]['num_mnemonics'] = config.getint(s, 'num_mnemonics')
        if config.has_option(s, 'major_linker'):
            signatures[s]['major_linker'] = config.getint(s, 'major_linker')
        if config.has_option(s, 'minor_linker'):
            signatures[s]['minor_linker'] = config.getint(s, 'minor_linker')
        if config.has_option(s, 'numberofsections'):
            signatures[s]['numberofsections'] = config.getint(s, 'numberofsections')

    # A directory argument expands to every file directly inside it.
    if os.path.isdir(args.file[0]):
        file_list = glob.glob(args.file[0]+'/*')
        DIR_PROCESSING = True
    else:
        file_list.append(args.file[0])

    for f in file_list:
        # Probe the file type: try PE first, then Mach-O.
        file_type = None
        if VERBOSE:
            print '[*] Processing: ' + f
        try:
            fe = pefile.PE(f)
            file_type = 'PE'
        except Exception as e:
            if VERBOSE:
                sys.stderr.write("[*] Error with %s - %s\n" %(f, str(e)))


        if not file_type:
            try:
                fe = macholib.MachO.MachO(f)
                file_type = 'MACHO'

            except Exception as e:
                if VERBOSE:
                    sys.stderr.write("[*] Error with %s - %s\n" %(f, str(e)))

        # NOTE(review): unrecognized files are reported but not skipped
        # (no continue); the following if/elif chain simply matches
        # neither branch.
        if not file_type:
            sys.stderr.write("[*] Error with %s - not a PE or Mach-O\n" % f)



        if file_type == 'PE':
            try:
                minor_linker = 0
                major_linker = 0
                try:
                    minor_linker = fe.OPTIONAL_HEADER.MinorLinkerVersion
                    major_linker = fe.OPTIONAL_HEADER.MajorLinkerVersion
                except Exception as e:
                    pass
                if hasattr(fe, 'FILE_HEADER') and hasattr(fe.FILE_HEADER, 'NumberOfSections'):
                    nos = fe.FILE_HEADER.NumberOfSections
                if hasattr(fe, 'OPTIONAL_HEADER') and hasattr(fe.OPTIONAL_HEADER, 'AddressOfEntryPoint'):
                    ep = fe.OPTIONAL_HEADER.AddressOfEntryPoint
                if hasattr(fe, 'OPTIONAL_HEADER') and hasattr(fe.OPTIONAL_HEADER, 'ImageBase') and ep > 0:
                    ep_ava = ep+fe.OPTIONAL_HEADER.ImageBase
                    # Grab BYTES bytes of code starting at the entry point.
                    data = fe.get_memory_mapped_image()[ep:ep+BYTES]
                    #
                    # Determine if the file is 32bit or 64bit
                    #
                    mode = CS_MODE_32
                    # 0x20b is the PE32+ (64-bit) optional-header magic.
                    if fe.OPTIONAL_HEADER.Magic == 0x20b:
                        mode = CS_MODE_64

                    # Disassemble and keep only the mnemonic stream.
                    md = Cs(CS_ARCH_X86, mode)
                    match = []
                    for (address, size, mnemonic, op_str) in md.disasm_lite(data, 0x1000):
                        match.append(mnemonic.encode('utf-8').strip())

                    for s in signatures:
                        m = match
                        sig = signatures[s]['mnemonics']
                        # Cheap pre-filter: first mnemonic must agree unless
                        # the threshold is permissive (< .7).
                        if m and m[0] == sig[0] or THRESHOLD < .7:
                            additional_info = []
                            if 'minor_linker' in signatures[s]:
                                if minor_linker == signatures[s]['minor_linker']:
                                    additional_info.append('Minor Linker Version Match: True')
                                else:
                                    additional_info.append('Minor Linker Version Match: False')
                            if 'major_linker' in signatures[s]:
                                if major_linker == signatures[s]['major_linker']:
                                    additional_info.append('Major Linker Version Match: True')
                                else:
                                    additional_info.append('Major Linker Version Match: False')
                            if 'numberofsections' in signatures[s]:
                                if nos == signatures[s]['numberofsections']:
                                    additional_info.append('Number Of Sections Match: True')
                                else:
                                    additional_info.append('Number Of Sections Match: False')

                            # Compare over the signature's own length if it
                            # declares one, otherwise over NUM_MNEM.
                            if 'num_mnemonics' in signatures[s]:
                                nm = signatures[s]['num_mnemonics']
                                m = match[:nm]
                                sig = signatures[s]['mnemonics'][:nm]
                            else:
                                m = match[:NUM_MNEM]
                                sig = signatures[s]['mnemonics'][:NUM_MNEM]
                            distance = tapered_levenshtein(sig, m)
                            similarity = 1.0 - distance/float(max(len(sig), len(m)))
                            if similarity > THRESHOLD:
                                if DIR_PROCESSING:
                                    print "[%s] [%s] (Edits: %s | Similarity: %0.3f) (%s)" %(f, s, distance, similarity, ' | '.join(additional_info))
                                else:
                                    print "[%s] (Edits: %s | Similarity: %0.3f) (%s)" %(s, distance, similarity, ' | '.join(additional_info))
                                if VERBOSE:
                                    print "%s\n%s\n" %(sig, m)
            except Exception as e:
                print str(e)
        elif file_type == 'MACHO':
            # Mach-O: read the raw file once; per-architecture headers are
            # walked below.
            macho_file = open(f, 'rb')
            macho_data = macho_file.read()
            macho_file.close()
            for header in fe.headers:
                # Limit it to X86
                if header.header.cputype not in [7, 0x01000007]:
                    continue

                # Limit it to Object and Executable files
                if header.header.filetype not in [1, 2]:
                    continue

                magic = int(header.MH_MAGIC)
                offset = int(header.offset)

                # Locate the entry point: LC_MAIN ('new') gives a file
                # offset directly; LC_THREAD/LC_UNIXTHREAD ('old') gives a
                # virtual address inside the thread state.
                all_sections = []
                entrypoint_type = ''
                entrypoint_address = 0
                for cmd in header.commands:
                    load_cmd = cmd[0]
                    cmd_info = cmd[1]
                    cmd_data = cmd[2]
                    cmd_name = load_cmd.get_cmd_name()
                    if cmd_name in ('LC_SEGMENT', 'LC_SEGMENT_64'):
                        for section_data in cmd_data:
                            sd = section_data.describe()
                            all_sections.append(sd)

                    elif cmd_name in ('LC_THREAD', 'LC_UNIXTHREAD'):
                        entrypoint_type = 'old'
                        flavor = int(struct.unpack(header.endian + 'I', cmd_data[0:4])[0])
                        count = int(struct.unpack(header.endian + 'I', cmd_data[4:8])[0])
                        # Flavor 1: x86 32-bit thread state (EIP at 48:52);
                        # flavor 4: x86_64 thread state (RIP at 136:144).
                        if flavor == 1:
                            entrypoint_address = int(struct.unpack(header.endian + 'I', cmd_data[48:52])[0])
                        elif flavor == 4:
                            entrypoint_address = int(struct.unpack(header.endian + 'Q', cmd_data[136:144])[0])

                    elif cmd_name == 'LC_MAIN':
                        entrypoint_type = 'new'
                        entrypoint_address = cmd_info.describe()['entryoff']

                entrypoint_data = ''
                if entrypoint_type == 'new':
                    entrypoint_offset = offset + entrypoint_address
                    entrypoint_data = macho_data[entrypoint_offset:entrypoint_offset+500]
                elif entrypoint_type == 'old':
                    # Translate the virtual address to a file offset via the
                    # section that contains it.
                    found_section = False
                    for sec in all_sections:
                        if entrypoint_address >= sec['addr'] and entrypoint_address < (sec['addr'] + sec['size']):
                            found_section = True
                            entrypoint_address = (entrypoint_address - sec['addr']) + sec['offset']
                            break

                    if found_section:
                        entrypoint_offset = offset + entrypoint_address
                        entrypoint_data = macho_data[entrypoint_offset:entrypoint_offset+500]

                mode = CS_MODE_32
                # 0xcffaedfe is MH_MAGIC_64 as seen byte-swapped.
                if magic == 0xcffaedfe:
                    mode = CS_MODE_64

                md = Cs(CS_ARCH_X86, mode)
                match = []
                if entrypoint_data:
                    try:
                        for (address, size, mnemonic, op_str) in md.disasm_lite(entrypoint_data, 0x1000):
                            match.append(mnemonic.encode('utf-8').strip())
                    except Exception as e:
                        print str(e)

                    # Same matching loop as the PE branch, minus the
                    # PE-specific linker/section metadata.
                    for s in signatures:
                        m = match
                        sig = signatures[s]['mnemonics']
                        if m and m[0] == sig[0] or THRESHOLD < .7:
                            additional_info = []
                            if 'num_mnemonics' in signatures[s]:
                                nm = signatures[s]['num_mnemonics']
                                m = match[:nm]
                                sig = signatures[s]['mnemonics'][:nm]
                            else:
                                m = match[:NUM_MNEM]
                                sig = signatures[s]['mnemonics'][:NUM_MNEM]

                            distance = tapered_levenshtein(sig, m)
                            similarity = 1.0 - distance/float(max(len(sig), len(m)))
                            if similarity > THRESHOLD:
                                if DIR_PROCESSING:
                                    print "[%s] [%s] (Edits: %s | Similarity: %0.3f) (%s)" %(f, s, distance, similarity, ' | '.join(additional_info))
                                else:
                                    print "[%s] (Edits: %s | Similarity: %0.3f) (%s)" %(s, distance, similarity, ' | '.join(additional_info))
                                if VERBOSE:
                                    print "%s\n%s\n" %(sig, m)

Example 39

Project: nzbget-subliminal
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile (TZif format, see tzfile(5)) and build the
        transition and ttinfo tables used for timezone lookups.

        :param fileobj: either a filesystem path (string) or an already-open
            binary file-like object positioned at the start of the TZif data.
        :raises ValueError: if the leading "TZif" magic bytes are missing.

        This variant is py2/py3-compatible: it opens paths in binary mode
        and decodes the magic and abbreviation bytes explicitly.
        """
        # Accept either a path or a file object; keep a printable name
        # around in self._filename for later identification.
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        # Skip the sixteen reserved bytes that follow the magic.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        # Each record is (gmtoff, isdst, abbrind): >lbb = 4 + 1 + 1 bytes.
        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        # NUL-separated pool of abbreviation strings; indexed by abbrind.
        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation runs from abbrind up to the next NUL in the pool.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # isstd/isgmt arrays may be shorter than typecnt; the index
            # guard defaults missing entries to False.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-to-oldest and take the most recent
                # std and dst ttinfos.
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    # Loop exhausted without finding both: if only dst
                    # entries exist, use dst as std too.
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 40

Project: cgstudiomap
Source File: tz.py
View license
    def __init__(self, fileobj, filename=None):
        file_opened_here = False
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif filename is not None:
            self._filename = filename
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        try:
            if fileobj.read(4).decode() != "TZif":
                raise ValueError("magic not found")

            fileobj.read(16)

            (
                # The number of UTC/local indicators stored in the file.
                ttisgmtcnt,

                # The number of standard/wall indicators stored in the file.
                ttisstdcnt,

                # The number of leap seconds for which data is
                # stored in the file.
                leapcnt,

                # The number of "transition times" for which data
                # is stored in the file.
                timecnt,

                # The number of "local time types" for which data
                # is stored in the file (must not be zero).
                typecnt,

                # The  number  of  characters  of "time zone
                # abbreviation strings" stored in the file.
                charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # The above header is followed by tzh_timecnt four-byte
            # values  of  type long,  sorted  in ascending order.
            # These values are written in ``standard'' byte order.
            # Each is used as a transition time (as  returned  by
            # time(2)) at which the rules for computing local time
            # change.

            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # Next come tzh_timecnt one-byte values of type unsigned
            # char; each one tells which of the different types of
            # ``local time'' types described in the file is associated
            # with the same-indexed transition time. These values
            # serve as indices into an array of ttinfo structures that
            # appears next in the file.

            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # Each ttinfo structure is written as a four-byte value
            # for tt_gmtoff  of  type long,  in  a  standard  byte
            # order, followed  by a one-byte value for tt_isdst
            # and a one-byte  value  for  tt_abbrind.   In  each
            # structure, tt_gmtoff  gives  the  number  of
            # seconds to be added to UTC, tt_isdst tells whether
            # tm_isdst should be set by  localtime(3),  and
            # tt_abbrind serves  as an index into the array of
            # time zone abbreviation characters that follow the
            # ttinfo structure(s) in the file.

            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt).decode()

            # Then there are tzh_leapcnt pairs of four-byte
            # values, written in  standard byte  order;  the
            # first  value  of  each pair gives the time (as
            # returned by time(2)) at which a leap second
            # occurs;  the  second  gives the  total  number of
            # leap seconds to be applied after the given time.
            # The pairs of values are sorted in ascending order
            # by time.

            # Not used, for now
            # if leapcnt:
            #    leap = struct.unpack(">%dl" % (leapcnt*2),
            #                         fileobj.read(leapcnt*8))

            # Then there are tzh_ttisstdcnt standard/wall
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as standard
            # time or wall clock time, and are used when
            # a time zone file is used in handling POSIX-style
            # time zone environment variables.

            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # Finally, there are tzh_ttisgmtcnt UTC/local
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as UTC or
            # local time, and are used when a time zone file
            # is used in handling POSIX-style time zone envi-
            # ronment variables.

            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 41

Project: nzbToMedia
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) time zone file and precompute the
        transition tables used for local time conversions.

        fileobj -- either a path to a tzfile (e.g. under
                   /usr/share/zoneinfo) or an open file-like object
                   positioned at the start of one.  When a path is given
                   the file is opened here in binary mode and closed once
                   everything has been read; a caller-supplied file object
                   is left open for the caller to manage.

        Raises ValueError if the "TZif" magic header is missing.
        """
        file_opened_here = False
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            # tzfiles are binary; open in 'rb' so text-mode newline
            # translation (e.g. on Windows) cannot corrupt the data.
            fileobj = open(fileobj, 'rb')
            file_opened_here = True
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        try:
            if fileobj.read(4) != "TZif":
                raise ValueError("magic not found")

            fileobj.read(16)

            (
             # The number of UTC/local indicators stored in the file.
             ttisgmtcnt,

             # The number of standard/wall indicators stored in the file.
             ttisstdcnt,

             # The number of leap seconds for which data is
             # stored in the file.
             leapcnt,

             # The number of "transition times" for which data
             # is stored in the file.
             timecnt,

             # The number of "local time types" for which data
             # is stored in the file (must not be zero).
             typecnt,

             # The  number  of  characters  of "time zone
             # abbreviation strings" stored in the file.
             charcnt,

            ) = struct.unpack(">6l", fileobj.read(24))

            # The above header is followed by tzh_timecnt four-byte
            # values  of  type long,  sorted  in ascending order.
            # These values are written in ``standard'' byte order.
            # Each is used as a transition time (as  returned  by
            # time(2)) at which the rules for computing local time
            # change.

            if timecnt:
                self._trans_list = struct.unpack(">%dl" % timecnt,
                                                 fileobj.read(timecnt*4))
            else:
                self._trans_list = []

            # Next come tzh_timecnt one-byte values of type unsigned
            # char; each one tells which of the different types of
            # ``local time'' types described in the file is associated
            # with the same-indexed transition time. These values
            # serve as indices into an array of ttinfo structures that
            # appears next in the file.

            if timecnt:
                self._trans_idx = struct.unpack(">%dB" % timecnt,
                                                fileobj.read(timecnt))
            else:
                self._trans_idx = []

            # Each ttinfo structure is written as a four-byte value
            # for tt_gmtoff  of  type long,  in  a  standard  byte
            # order, followed  by a one-byte value for tt_isdst
            # and a one-byte  value  for  tt_abbrind.   In  each
            # structure, tt_gmtoff  gives  the  number  of
            # seconds to be added to UTC, tt_isdst tells whether
            # tm_isdst should be set by  localtime(3),  and
            # tt_abbrind serves  as an index into the array of
            # time zone abbreviation characters that follow the
            # ttinfo structure(s) in the file.

            ttinfo = []

            for i in range(typecnt):
                ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

            abbr = fileobj.read(charcnt)

            # Then there are tzh_leapcnt pairs of four-byte
            # values, written in  standard byte  order;  the
            # first  value  of  each pair gives the time (as
            # returned by time(2)) at which a leap second
            # occurs;  the  second  gives the  total  number of
            # leap seconds to be applied after the given time.
            # The pairs of values are sorted in ascending order
            # by time.

            # Not used, for now -- but the bytes must still be read so
            # that the file position advances past the leap-second block.
            if leapcnt:
                leap = struct.unpack(">%dl" % (leapcnt*2),
                                     fileobj.read(leapcnt*8))

            # Then there are tzh_ttisstdcnt standard/wall
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as standard
            # time or wall clock time, and are used when
            # a time zone file is used in handling POSIX-style
            # time zone environment variables.

            if ttisstdcnt:
                isstd = struct.unpack(">%db" % ttisstdcnt,
                                      fileobj.read(ttisstdcnt))

            # Finally, there are tzh_ttisgmtcnt UTC/local
            # indicators, each stored as a one-byte value;
            # they tell whether the transition times associated
            # with local time types were specified as UTC or
            # local time, and are used when a time zone file
            # is used in handling POSIX-style time zone envi-
            # ronment variables.

            if ttisgmtcnt:
                isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                      fileobj.read(ttisgmtcnt))

            # ** Everything has been read **
        finally:
            # Close the file only if this constructor opened it; a
            # caller-supplied file object remains the caller's to manage.
            if file_opened_here:
                fileobj.close()

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 42

Project: bpftools
Source File: gen_dns.py
View license
def gen(params, l3_off=0, ipversion=4, negate=False):
    """Emit (to stdout) BPF assembly that matches DNS query packets for
    the given domain patterns, and return an identifier string derived
    from the domain names.

    params    -- argv-style list of options/domains, parsed by the
                 embedded ArgumentParser (see its description below).
    l3_off    -- byte offset of the L3 (IP) header within the packet.
    ipversion -- 4 or 6; selects how the DNS payload offset is computed.
    negate    -- invert the filter: matches return 0, non-matches 65535.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        prog="%s dns --" % (sys.argv[0]),
        description=r'''

This tool creates a raw Berkeley Packet Filter (BPF) rule that will
match packets which are DNS queries against listed domains. For
example:

  %(prog)s example.com

will print a BPF rule matching all packets that look like a DNS packet
first query being equal to "example.com". Another example:

  %(prog)s *.www.fint.me

will match packets that have a any prefix (subdomain) and exactly
"www.fint.me" as suffix. It will match:

    blah.www.fint.me
    anyanyany.www.fint.me

but it will not match:

   www.fint.me
   blah.blah.www.fint.me

Also, star has a special meaning only if it's a sole part of
subdomain: "*xxx.example.com" is treated as a literal star, so is
"xxx*.example.com". On the other hand "xxx.*.example.com" will have a
wildcard meaning.

Question mark '?' matches exactly one characer. For example this rule:

  %(prog)s fin?.me

will match:

   fint.me, finT.me, finX.me, finZ,me

but will not match:

   finXX.me, fiXX.me, www.finX.me, fin.me

You can create a single rule matching than one domain:

  %(prog)s example.com *.www.fint.me

The "--ignorecase" option will produce BPF bytecode that matches
domains in case insensitive way. Beware, the genrated bytecode will be
significantly longer.

Leading and trailing dots are ignored, this commands are equivalent:

  %(prog)s example.com fint.me
  %(prog)s .example.com fint.me.

A special consideration is given if the suffix is '**' (star
star). This is interperted as "any suffix", for example this:

  %(prog)s example.**

Will match:

   example.com example.de example.co.uk example.anything.whatsoever

But not:

   www.example.com eexample.com

Wildcard matches can specify ranges, for example

  %(prog)s *{4-255}.example.com

will match any subdomains of example.com of 4 and more
characters. Only a syntax with explicit minimum and maximum is
supported.

''')

    parser.add_argument('-i', '--ignorecase', action='store_true',
                        help='match domains in case-insensitive way')
    parser.add_argument('domains', nargs='+',
                        help='DNS domain patterns to match on')

    args = parser.parse_args(params)

    # Compile each domain pattern into a "rule": a list whose items are
    # either literal byte sequences (in DNS wire format: length byte
    # followed by the label's characters) or a dict describing a
    # wildcard label with optional {min-max} length bounds.
    list_of_rules = []

    for domain in args.domains:
        # remove trailing and leading whitespace
        domain = domain.strip().lstrip(".")

        # A '**' suffix means "match any suffix" past this point.
        if domain.endswith('**'):
            free_suffix = True
            domain = domain[:-2]
        else:
            free_suffix = False

        if free_suffix and domain.endswith("."):
            exact_free_suffix = True
        else:
            exact_free_suffix = False

        # Ensure the trailing dot
        domain = domain.rstrip(".")
        if not free_suffix:
            domain += '.'

        parts = domain.split(".")
        rule = []
        for i, part in enumerate(parts):
            matchstar = re.match('^[*]({(?P<min>\d+)-(?P<max>\d+)})?$', part)
            part = urllib.unquote(part)

            is_last = len(parts) - 1 == i

            # is_char is used to determine whether a particular byte is
            # a normal char or not. For the domain part length byte we
            # set it to False, or to None to signify that the length
            # should be masked and ignored.
            if is_last and free_suffix and not exact_free_suffix:
                len_is_char = None
            else:
                len_is_char = False

            if matchstar:
                rule.append( (False, matchstar.groupdict()) )
            else:
                rule.append( (True, [(len_is_char, chr(len(part)))] \
                                  + [(True, c) for c in part]) )

        list_of_rules.append( list(merge(rule)) )

    # Emit comparisons of the packet bytes at offset x against the
    # literal rule bytes, 4/2/1 bytes at a time, jumping to `label` on
    # any mismatch.  A mask byte of 0x20 implements case folding (ASCII
    # case differs only in bit 0x20); 0xff masks a byte out entirely
    # (used for '?' wildcards and ignored length bytes).
    def match_exact(rule, label, last=False):
        mask = []
        for is_char, b in rule:
            if is_char and b == '?':
                mask.append( '\xff' )
            elif is_char and args.ignorecase:
                mask.append( '\x20' )
            elif is_char is None and last:
                # ignore the length of last part if free_suffix
                mask.append( '\xff' )
            else:
                # else, literal matching
                mask.append( '\x00' )
        mask = ''.join(mask)
        s = ''.join(map(lambda (is_char, b): b, rule))
        print "    ; Match: %s %r  mask=%s" % (s.encode('hex'), s, mask.encode('hex'))
        off = 0
        while s:
            if len(s) >= 4:
                m, s = s[:4], s[4:]
                mm, mask = mask[:4], mask[4:]
                m, = struct.unpack('!I', m)
                mm, = struct.unpack('!I', mm)
                print "    ld [x + %i]" % off
                if mm:
                    print "    or #0x%08x" % mm
                    m |= mm
                print "    jneq #0x%08x, %s" % (m, label,)
                off += 4
            elif len(s) >= 2:
                m, s = s[:2], s[2:]
                mm, mask = mask[:2], mask[2:]
                m, = struct.unpack('!H', m)
                mm, = struct.unpack('!H', mm)
                print "    ldh [x + %i]" % off
                if mm:
                    print "    or #0x%04x" % mm
                    m |= mm
                print "    jneq #0x%04x, %s" % (m, label,)
                off += 2
            else:
                m, s = s[:1], s[1:]
                m, = struct.unpack('!B', m)
                mm, mask = mask[:1], mask[1:]
                mm, = struct.unpack('!B', mm)
                print "    ldb [x + %i]" % off
                if mm:
                    print "    or #0x%02x" % mm
                    m |= mm
                print "    jneq #0x%02x, %s" % (m, label,)
                off += 1
        if not last:
            print "    txa"
            print "    add #%i" % (off,)
            print "    tax"

    # Emit instructions matching one wildcard label: check the label's
    # length byte against the optional {min-max} bounds, then advance x
    # past the label (length + 1 for the length byte itself).
    def match_star(mdict, label):
        mi, ma = mdict['min'], mdict['max']
        if not(mi and ma):
            print "    ; Match: *"
        else:
            mi, ma = int(mi), int(ma)
            print "    ; Match: *{%s-%s}" % (mi, ma)
        print "    ldb [x + 0]"
        if mi or ma:
            if mi == ma and mi > 0 and ma < 255:
                print "    jneq #%s, %s" % (mi, label,)
            else:
                if mi > 0:
                    print "    jlt #%s, %s" % (mi, label,)
                if ma < 255:
                    print "    jgt #%s, %s" % (ma, label,)
        print "    add x"
        print "    add #1"
        print "    tax"

    # Prologue: compute the offset of the first DNS query byte.
    # NOTE(review): for IPv6 this assumes a fixed 40-byte IPv6 header
    # with UDP as the first next-header (no extension headers).
    if ipversion == 4:
        print "    ldx 4*([%i]&0xf)" % (l3_off,)
        print "    ; l3_off(%i) + 8 of udp + 12 of dns" % (l3_off,)
        print "    ld #%i" % (l3_off + 8 + 12) # 8B of udp + 12B of dns header
        print "    add x"
    elif ipversion == 6:
        # assuming first "next header" is UDP
        print "    ld #%i" % (l3_off + 40 + 8 + 12) # 40B of ipv6 + 8B of udp + 12B of dns header

    print "    tax"
    print "    ; a = x = M[0] = offset of first dns query byte"
    print "    %sst M[0]" % ('' if len(list_of_rules) > 1 else '; ',)
    print

    # One labeled section per domain rule; fall through to the next
    # label on mismatch, return the accept value on full match.
    for i, rules in enumerate(list_of_rules):
        print "lb_%i:" % (i,)
        print "    %sldx M[0]" % ('' if i != 0 else '; ')
        for j, rule in enumerate(rules):
            last = (j == len(rules)-1)
            label = 'lb_%i' % (i+1,)
            if not isinstance(rule, dict):
                match_exact(rule, label, last)
            else:
                match_star(rule, label)
        print "    ret #%i" % (65535 if not negate else 0)
        print

    print "lb_%i:" % (i+1,)
    print "    ret #%i" % (0 if not negate else 65535)

    # Build a human-readable name for the generated filter from the
    # domain patterns, replacing unsafe characters with 'x'.
    name_parts = []
    for domain in args.domains:
        if domain[0] == '-':
            continue

        domain = domain.strip(".").strip()
        parts = []
        for part in domain.split("."):
            if part == '*':
                parts.append( 'any' )
            else:
                parts.append( ''.join(c if c in ACCEPTABLE_CHARS else 'x'
                                      for c in part) )
        name_parts.append( '_'.join(parts) )
    return '_'.join(name_parts)

Example 43

Project: Sick-Beard
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) time zone file and precompute the
        transition tables used for local time conversions.

        fileobj -- either a path to a tzfile or an open file-like object
                   positioned at the start of one.

        Raises ValueError if the "TZif" magic header is missing.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            # NOTE(review): opened in text mode and never closed; tzfiles
            # are binary, so 'rb' plus an explicit close would be safer.
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now -- but the read is still required so the
        # file position advances past the leap-second block.
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # The abbreviation is the NUL-terminated string starting at
            # abbrind within the abbreviation character block.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-to-oldest to find the most
                # recent standard and DST ttinfos.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 44

Project: coinkite-real-time-invoice
Source File: tz.py
View license
    def __init__(self, fileobj):
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 45

Project: CouchPotatoServer
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) (e.g. a file under /usr/share/zoneinfo)
        and build the transition tables used for local-time conversions.

        ``fileobj`` may be a filename (opened here in binary mode) or an
        already-open binary file-like object positioned at the start of
        the data.

        Raises ValueError if the "TZif" magic header is missing.
        """
        # Keep a printable name for debugging; only open the file ourselves
        # when given a path string.
        if isinstance(fileobj, string_types):
            self._filename = fileobj
            fileobj = open(fileobj, 'rb')
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = repr(fileobj)

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        # Skip the sixteen reserved bytes.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # NOTE(review): if the abbreviation block were not NUL-terminated,
            # find() would return -1 and the slice would drop the final
            # character -- presumably tzfiles always terminate; confirm.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # isstd/isgmt are only bound when their counts were non-zero;
            # the short-circuit "cnt > i and ..." guard keeps this safe.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-to-oldest so the most recent
                # std/dst definitions win.
                for i in range(timecnt-1, -1, -1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 46

Project: CouchPotatoV1
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) (e.g. a file under /usr/share/zoneinfo)
        and build the transition tables used for local-time conversions.

        ``fileobj`` may be a filename (opened here) or an already-open
        file-like object positioned at the start of the data.

        Raises ValueError if the "TZif" magic header is missing.

        NOTE(review): Python 2 code (backtick repr, ``raise X, msg``);
        the path branch opens without 'rb', which presumably relies on
        POSIX text/binary equivalence -- would misread on Windows; confirm.
        """
        # Keep a printable name for debugging; only open the file ourselves
        # when given a path string.
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        # Skip the sixteen reserved bytes.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # NOTE(review): if the abbreviation block were not NUL-terminated,
            # find() would return -1 and the slice would drop the final
            # character -- presumably tzfiles always terminate; confirm.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # isstd/isgmt are only bound when their counts were non-zero;
            # the short-circuit "cnt > i and ..." guard keeps this safe.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-to-oldest so the most recent
                # std/dst definitions win.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 47

Project: dateutil
Source File: tz.py
View license
    def _read_tzfile(self, fileobj):
        """Read an open binary tzfile(5) stream and return a populated
        ``_tzfile`` record.

        The returned object carries the transition times (``trans_list``),
        the per-transition ttinfo objects (``trans_idx``), the full ttinfo
        table (``ttinfo_list``) and the derived ``ttinfo_std`` /
        ``ttinfo_dst`` / ``ttinfo_before`` entries.

        Raises ValueError if the "TZif" magic header is missing.
        """
        out = _tzfile()

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).
        if fileobj.read(4).decode() != "TZif":
            raise ValueError("magic not found")

        # Skip the sixteen reserved bytes.
        fileobj.read(16)

        (
            # The number of UTC/local indicators stored in the file.
            ttisgmtcnt,

            # The number of standard/wall indicators stored in the file.
            ttisstdcnt,

            # The number of leap seconds for which data is
            # stored in the file.
            leapcnt,

            # The number of "transition times" for which data
            # is stored in the file.
            timecnt,

            # The number of "local time types" for which data
            # is stored in the file (must not be zero).
            typecnt,

            # The  number  of  characters  of "time zone
            # abbreviation strings" stored in the file.
            charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            out.trans_list = list(struct.unpack(">%dl" % timecnt,
                                                  fileobj.read(timecnt*4)))
        else:
            out.trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            out.trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            out.trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        abbr = fileobj.read(charcnt).decode()

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now (but read anyway for correct file position)
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # Build ttinfo list
        out.ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = 60 * ((gmtoff + 30) // 60)
            tti = _ttinfo()
            tti.offset = gmtoff
            # dstoffset starts as zero and is refined in the fixup passes
            # at the bottom of this function.
            tti.dstoffset = datetime.timedelta(0)
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # NOTE(review): if the abbreviation block were not NUL-terminated,
            # find() would return -1 and the slice would drop the final
            # character -- presumably tzfiles always terminate; confirm.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # isstd/isgmt are only bound when their counts were non-zero;
            # the short-circuit "cnt > i and ..." guard keeps this safe.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            out.ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        out.ttinfo_std = None
        out.ttinfo_dst = None
        out.ttinfo_before = None
        if out.ttinfo_list:
            if not out.trans_list:
                out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
            else:
                # Walk transitions newest-to-oldest so the most recent
                # std/dst definitions win.
                for i in range(timecnt-1, -1, -1):
                    tti = out.trans_idx[i]
                    if not out.ttinfo_std and not tti.isdst:
                        out.ttinfo_std = tti
                    elif not out.ttinfo_dst and tti.isdst:
                        out.ttinfo_dst = tti

                    if out.ttinfo_std and out.ttinfo_dst:
                        break
                else:
                    if out.ttinfo_dst and not out.ttinfo_std:
                        out.ttinfo_std = out.ttinfo_dst

                for tti in out.ttinfo_list:
                    if not tti.isdst:
                        out.ttinfo_before = tti
                        break
                else:
                    out.ttinfo_before = out.ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = None
        for i, tti in enumerate(out.trans_idx):
            if not tti.isdst:
                offset = tti.offset
                laststdoffset = offset
            else:
                if laststdoffset is not None:
                    # Store the DST offset as well and update it in the list
                    tti.dstoffset = tti.offset - laststdoffset
                    out.trans_idx[i] = tti

                offset = laststdoffset or 0

            out.trans_list[i] += offset

        # In case we missed any DST offsets on the way in for some reason, make
        # a second pass over the list, looking for the /next/ DST offset.
        laststdoffset = None
        for i in reversed(range(len(out.trans_idx))):
            tti = out.trans_idx[i]
            if tti.isdst:
                if not (tti.dstoffset or laststdoffset is None):
                    tti.dstoffset = tti.offset - laststdoffset
            else:
                laststdoffset = tti.offset

            # Normalize: dstoffset may still be a raw int (seconds) from the
            # passes above; convert it to a timedelta exactly once.
            if not isinstance(tti.dstoffset, datetime.timedelta):
                tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset)

            out.trans_idx[i] = tti

        out.trans_idx = tuple(out.trans_idx)
        out.trans_list = tuple(out.trans_list)

        return out

Example 48

Project: crunchy-xml-decoder
Source File: tz.py
View license
    def __init__(self, fileobj):
        """Parse a binary tzfile(5) ("TZif") time zone file.

        fileobj may be a path string (opened here) or an already-open
        file-like object.  On success the instance holds:
          _trans_list   -- transition timestamps, shifted to wall time
          _trans_idx    -- the _ttinfo in effect after each transition
          _ttinfo_list  -- all local-time-type records from the file
          _ttinfo_std / _ttinfo_dst / _ttinfo_before -- summary records
        Raises ValueError if the leading "TZif" magic is missing.
        """
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            # No name available: fall back to repr (Py2 backtick syntax).
            self._filename = `fileobj`

        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order  byte
        # of the value is written first).

        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"

        # Skip the 16 reserved bytes after the magic.
        fileobj.read(16)

        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,

         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,

         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,

         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,

         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,

         # The  number  of  characters  of "time zone
         # abbreviation strings" stored in the file.
         charcnt,

        ) = struct.unpack(">6l", fileobj.read(24))

        # The above header is followed by tzh_timecnt four-byte
        # values  of  type long,  sorted  in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as  returned  by
        # time(2)) at which the rules for computing local time
        # change.

        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []

        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.

        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []

        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff  of  type long,  in  a  standard  byte
        # order, followed  by a one-byte value for tt_isdst
        # and a one-byte  value  for  tt_abbrind.   In  each
        # structure, tt_gmtoff  gives  the  number  of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by  localtime(3),  and
        # tt_abbrind serves  as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.

        ttinfo = []

        # Raw (gmtoff, isdst, abbrind) tuples; converted to _ttinfo below.
        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))

        # Concatenated NUL-terminated abbreviation strings ("EST\0EDT\0...").
        abbr = fileobj.read(charcnt)

        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in  standard byte  order;  the
        # first  value  of  each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs;  the  second  gives the  total  number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.

        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))

        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.

        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))

        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.

        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))

        # ** Everything has been read **

        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind =  ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            # Abbreviation runs from abbrind up to its NUL terminator.
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            # Indicators may be shorter than typecnt; default to False.
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)

        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)

        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                # NOTE(review): _ttinfo_first is assigned here but never
                # read anywhere in this method; _ttinfo_before stays None
                # on this path — possibly meant to be _ttinfo_before.
                # Confirm against the lookup methods of this class.
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions newest-first to find the most recent
                # std and dst records.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    # Loop exhausted: every transition was dst.
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst

                for tti in self._ttinfo_list:
                    if not tti.isdst:
                        self._ttinfo_before = tti
                        break
                else:
                    self._ttinfo_before = self._ttinfo_list[0]

        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)

Example 49

Project: eventlet
Source File: dnssec.py
View license
def _validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
    """Validate an RRset against a single signature rdata

    The owner name of the rrsig is assumed to be the same as the owner name
    of the rrset.

    Raises ValidationFailure if no candidate key verifies the signature,
    if the signature is outside its validity window, or if the algorithm
    is unsupported.  Returns None on success.

    @param rrset: The RRset to validate
    @type rrset: dns.rrset.RRset or (dns.name.Name, dns.rdataset.Rdataset)
    tuple
    @param rrsig: The signature rdata
    @type rrsig: dns.rrset.Rdata
    @param keys: The key dictionary.
    @type keys: a dictionary keyed by dns.name.Name with node or rdataset
    values
    @param origin: The origin to use for relative names
    @type origin: dns.name.Name or None
    @param now: The time to use when validating the signatures.  The default
    is the current time.
    @type now: int
    """

    if isinstance(origin, string_types):
        origin = dns.name.from_text(origin, dns.name.root)

    # Try every key matching the rrsig's signer/key-tag; first one that
    # verifies wins.
    for candidate_key in _find_candidate_keys(keys, rrsig):
        if not candidate_key:
            raise ValidationFailure('unknown key')

        # For convenience, allow the rrset to be specified as a (name,
        # rdataset) tuple as well as a proper rrset
        if isinstance(rrset, tuple):
            rrname = rrset[0]
            rdataset = rrset[1]
        else:
            rrname = rrset.name
            rdataset = rrset

        # Enforce the RRSIG validity window (inception <= now <= expiration).
        if now is None:
            now = time.time()
        if rrsig.expiration < now:
            raise ValidationFailure('expired')
        if rrsig.inception > now:
            raise ValidationFailure('not yet valid')

        hash = _make_hash(rrsig.algorithm)

        if _is_rsa(rrsig.algorithm):
            # RSA/SHA key wire format: 1-byte exponent length (or 0
            # followed by a 2-byte length), then exponent, then modulus.
            keyptr = candidate_key.key
            (bytes_,) = struct.unpack('!B', keyptr[0:1])
            keyptr = keyptr[1:]
            if bytes_ == 0:
                (bytes_,) = struct.unpack('!H', keyptr[0:2])
                keyptr = keyptr[2:]
            rsa_e = keyptr[0:bytes_]
            rsa_n = keyptr[bytes_:]
            keylen = len(rsa_n) * 8
            pubkey = Crypto.PublicKey.RSA.construct(
                (Crypto.Util.number.bytes_to_long(rsa_n),
                 Crypto.Util.number.bytes_to_long(rsa_e)))
            sig = (Crypto.Util.number.bytes_to_long(rrsig.signature),)
        elif _is_dsa(rrsig.algorithm):
            # DSA key wire format: 1-byte T, then Q (20 bytes), then
            # P, G, Y each of (64 + T*8) octets.
            keyptr = candidate_key.key
            (t,) = struct.unpack('!B', keyptr[0:1])
            keyptr = keyptr[1:]
            octets = 64 + t * 8
            dsa_q = keyptr[0:20]
            keyptr = keyptr[20:]
            dsa_p = keyptr[0:octets]
            keyptr = keyptr[octets:]
            dsa_g = keyptr[0:octets]
            keyptr = keyptr[octets:]
            dsa_y = keyptr[0:octets]
            pubkey = Crypto.PublicKey.DSA.construct(
                (Crypto.Util.number.bytes_to_long(dsa_y),
                 Crypto.Util.number.bytes_to_long(dsa_g),
                 Crypto.Util.number.bytes_to_long(dsa_p),
                 Crypto.Util.number.bytes_to_long(dsa_q)))
            # Signature: 1-byte T (skipped) then r and s, 20 bytes each.
            (dsa_r, dsa_s) = struct.unpack('!20s20s', rrsig.signature[1:])
            sig = (Crypto.Util.number.bytes_to_long(dsa_r),
                   Crypto.Util.number.bytes_to_long(dsa_s))
        elif _is_ecdsa(rrsig.algorithm):
            if rrsig.algorithm == ECDSAP256SHA256:
                curve = ecdsa.curves.NIST256p
                key_len = 32
            elif rrsig.algorithm == ECDSAP384SHA384:
                curve = ecdsa.curves.NIST384p
                key_len = 48
            else:
                # shouldn't happen
                raise ValidationFailure('unknown ECDSA curve')
            # Key is the raw X||Y point coordinates, key_len bytes each.
            keyptr = candidate_key.key
            x = Crypto.Util.number.bytes_to_long(keyptr[0:key_len])
            y = Crypto.Util.number.bytes_to_long(keyptr[key_len:key_len * 2])
            assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
            point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
            verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point,
                                                                      curve)
            pubkey = ECKeyWrapper(verifying_key, key_len)
            # Signature is r || s, key_len bytes each.
            r = rrsig.signature[:key_len]
            s = rrsig.signature[key_len:]
            sig = ecdsa.ecdsa.Signature(Crypto.Util.number.bytes_to_long(r),
                                        Crypto.Util.number.bytes_to_long(s))
        else:
            raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)

        # Digest the fixed RRSIG rdata prefix (first 18 bytes: everything
        # up to, but not including, the signer name) plus the signer name.
        hash.update(_to_rdata(rrsig, origin)[:18])
        hash.update(rrsig.signer.to_digestable(origin))

        # Wildcard expansion: if the owner has more labels than the rrsig
        # covers, hash the wildcarded name instead.
        if rrsig.labels < len(rrname) - 1:
            suffix = rrname.split(rrsig.labels + 1)[1]
            rrname = dns.name.from_text('*', suffix)
        rrnamebuf = rrname.to_digestable(origin)
        rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
                              rrsig.original_ttl)
        # Each RR is hashed in canonical (sorted) order as
        # name | type | class | original TTL | rdlength | rdata.
        rrlist = sorted(rdataset)
        for rr in rrlist:
            hash.update(rrnamebuf)
            hash.update(rrfixed)
            rrdata = rr.to_digestable(origin)
            rrlen = struct.pack('!H', len(rrdata))
            hash.update(rrlen)
            hash.update(rrdata)

        digest = hash.digest()

        if _is_rsa(rrsig.algorithm):
            # PKCS1 algorithm identifier goop
            digest = _make_algorithm_id(rrsig.algorithm) + digest
            # PKCS#1 v1.5 padding: 00 01 FF..FF 00 | digest info.
            padlen = keylen // 8 - len(digest) - 3
            digest = struct.pack('!%dB' % (2 + padlen + 1),
                                 *([0, 1] + [0xFF] * padlen + [0])) + digest
        elif _is_dsa(rrsig.algorithm) or _is_ecdsa(rrsig.algorithm):
            pass
        else:
            # Raise here for code clarity; this won't actually ever happen
            # since if the algorithm is really unknown we'd already have
            # raised an exception above
            raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)

        if pubkey.verify(digest, sig):
            return
    raise ValidationFailure('verify failure')

Example 50

Project: genie
Source File: __init__.py
View license
    def parse_stream(self, player=1, image_adapter_cls=None):
        """
            Use the image adapter class to create an image.

            Decodes this frame's SLP run-length command stream row by
            row: first the per-row transparency boundaries, then the
            per-row command offsets, then the drawing opcodes.  `player`
            selects the 16-color player palette block; if
            `image_adapter_cls` is None, the SLP file's default adapter
            is used.  Returns the adapter's get_image() result.
        """
        stream = self.slp_file.stream

        width, height = self.structure.width, self.structure.height
        if image_adapter_cls is None:
            image_adapter_cls = self.slp_file.image_adapter_cls
        adapter = image_adapter_cls(self)

        # First, the boundaries.
        stream.seek(self.structure.outline_table_offset)
        left_boundaries = []
        for y in xrange(height):
            left, right = struct.unpack('=HH', stream.read(4))
            if (left == 0x8000 or right == 0x8000):
                # fully transparent row
                adapter.draw_pixels(0, y, width, None)
                # this will tell the parser to skip this line later.
                left_boundaries.append(None)
            else:
                # draw transparent pixels.
                left_boundaries.append(left)
                adapter.draw_pixels(0, y, left, None)
                adapter.draw_pixels(width - right, y, right, None)

        # The command offsets.
        command_offsets = []
        for y in xrange(height):
            command_offsets.append(struct.unpack('=I', stream.read(4))[0])

        # Now, the actual commands.
        stream.seek(command_offsets[0])
        x = left_boundaries[0]
        y = 0

        while x is None:
            # maybe the first row is transparent already?
            y += 1
            x = left_boundaries[y]

        def _get_byte():
            """ take a byte from the stream. """
            return struct.unpack('=B', stream.read(1))[0]

        def _get_4ornext(opcode):
            """
                either return the 4 most significant bits from the opcode
                or the complete next byte if the former is 0.
            """
            return (opcode >> 4) or _get_byte()

        def _get_bigbig(opcode):
            """ right-shift 4 bits to the right + next byte """
            return ((opcode & 0xf0) << 4) + _get_byte()

        def _draw_pixels(amount, palette_index):
            # Draws at the current (x, y) cursor of the enclosing loop;
            # palette_index None means transparent.
            assert x + amount <= width
            if palette_index is None:
                color = None
            else:
                color = self.slp_file.palette[palette_index]
            adapter.draw_pixels(x, y, amount, color)

        def _get_palette_index(player, relindex):
            # Each player owns a 16-entry block of the palette.
            return player * 16 + relindex

        def _draw_special_color(amount, index):
            """
                index = 2: player color
                index = 1: black. (or so?)

                This contradicts Bryce's SLP.rtf, but it looks pretty strange
                if 1 is the player color. TODO?
            """
            if index == 2:
                palette_index = _get_palette_index(player, 0)
            else:
                palette_index = 0
            _draw_pixels(amount, palette_index)

        # Main opcode loop: dispatch on the low nibble first, then on the
        # low two bits for the draw/skip commands.
        while y < height:
            opcode = _get_byte()
            twobit = opcode & 0b11
            fourbit = opcode & 0b1111

            if x > width:
                raise Exception('%d > %d' % (x, width))

            if fourbit == 0x0f:
                # end of line: advance to the next non-transparent row.
                y += 1
                if y < height:
                    x = left_boundaries[y]
                    while x is None:
                        # fully transparent line! (skip this line)
                        y += 1
                        x = left_boundaries[y]
                    if stream.tell() != command_offsets[y]:
                        # not an error, but excessive padding might suggest something is slightly wrong!
                        print "Warning: line %d has %d bytes of air after commands" % (y - 1,
                                command_offsets[y] - stream.tell())
                        # get ourselves aligned again
                        stream.seek(command_offsets[y])
            elif fourbit == 0x06:
                # player colors
                amount = _get_4ornext(opcode)
                #print 'player colors', amount
                for _ in xrange(amount):
                     relindex = _get_byte()
                     _draw_pixels(1, _get_palette_index(player, relindex))
                     x += 1
            elif fourbit == 0x0e:
                # Extended command (shadows etc.)
                # get the high 4 bits for the extended command
                # I only found information about this opcode in
                # the slp.rtf file (see README).
                # For now, this doesn't actually do anything apart
                # from reading the correct number of bytes from the
                # stream to parse the image data correctly.
                extended = opcode >> 4
                #print 'Extended command!', extended
                if extended == 0:
                    # woho! this should only be drawn if the
                    # sprite is not x-flipped. TODO.
                    pass
                elif extended == 1:
                    # this should only be drawn if the sprite
                    # is x-flipped. TODO.
                    pass
                elif extended in (2, 3):
                    # do some fiddling with transform color tables.
                    # see documentation.
                    pass
                elif extended in (4, 6):
                    # special color 1/2, but only 1 byte.
                    _draw_special_color(1, {4: 1, 6: 2}[extended])
                    x += 1
                elif extended in (5, 7):
                    # special color 1/2, read amount from stream.
                    amount = _get_byte()
                    _draw_special_color(amount, {5: 1, 7: 2}[extended])
                    x += amount
                else:
                    raise NotImplementedError('Unknown extended opcode: %r' % extended)
            elif fourbit == 0x07:
                # fill
                amount = _get_4ornext(opcode)
                #print 'fill', amount
                palette_index = _get_byte()
                _draw_pixels(amount, palette_index)
                x += amount
            elif fourbit == 0x0a:
                amount = _get_4ornext(opcode)
                #print 'player fill', amount
                # TODO: this is not really correct
                _draw_pixels(amount, _get_palette_index(player, _get_byte()))
                x += amount
            elif fourbit == 0x0b:
                amount = _get_4ornext(opcode)
                #print 'Ignoring 0x0b opcode for %d pixels' % amount
                x += amount
            elif fourbit in (0x4e, 0x5e):
                # NOTE(review): unreachable — fourbit is masked to 4 bits
                # (max 0x0f) above, so it can never equal 0x4e or 0x5e.
                # Probably meant to test the full opcode; 0x?E values are
                # already handled by the 0x0e branch above. Confirm intent.
                raise NotImplementedError('The 0x%x opcode is not yet implemented.' % fourbit)
            elif twobit == 0:
                # draw
                amount = opcode >> 2
                #print 'draw', amount
                for _ in xrange(amount):
                    _draw_pixels(1, _get_byte())
                    x += 1
            elif twobit == 1:
                # skip pixels
                # 2ornext
                amount = opcode >> 2
                #print 'skip', amount
                if amount == 0:
                    amount = _get_byte()
                _draw_pixels(amount, None)
                x += amount
            elif twobit == 2:
                amount = _get_bigbig(opcode)
                #print 'big draw', amount
                for _ in xrange(amount):
                    _draw_pixels(1, _get_byte())
                    x += 1
            elif twobit == 3:
                amount = _get_bigbig(opcode)
                #print 'big skip', amount
                _draw_pixels(amount, None)
                x += amount
            else:
                raise Exception()

        return adapter.get_image()