array.array

Here are examples of the Python API array.array taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

200 Examples
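
Before the project examples, a minimal self-contained sketch of what the module provides: compact C-typed storage selected by a one-character typecode, plus a round trip through raw bytes.

import array

# 'I' is unsigned int, 'd' is C double; the typecode fixes the element type
counts = array.array('I', [1, 2, 3])
counts.append(4)

samples = array.array('d', (0.5 * i for i in range(8)))

# arrays expose the buffer protocol and round-trip through raw bytes
raw = samples.tobytes()
restored = array.array('d')
restored.frombytes(raw)
assert restored == samples
print(counts.itemsize, len(samples), restored[3])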

Example 1

Project: WoT-Replay-To-JSON
Source File: blowfish.py
    def __init__(self, key):
        """
        Creates an instance of blowfish using 'key' as the encryption key.
 
        Key is a string of bytes, used to seed calculations.
        Once the instance of the object is created, the key is no longer necessary.
        """
        if not self.KEY_MIN_LEN <= len(key) <= self.KEY_MAX_LEN:
            raise ValueError("Attempted to initialize Blowfish cipher with key of invalid length: %(len)i" % {
             'len': len(key),
            })
 
        self._p_boxes = array.array('I', [
            0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
            0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
            0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
            0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917,
            0x9216D5D9, 0x8979FB1B
        ])
 
        self._s_boxes = (
            array.array('I', [
                0xD1310BA6, 0x98DFB5AC, 0x2FFD72DB, 0xD01ADFB7,
                0xB8E1AFED, 0x6A267E96, 0xBA7C9045, 0xF12C7F99,
                0x24A19947, 0xB3916CF7, 0x0801F2E2, 0x858EFC16,
                0x636920D8, 0x71574E69, 0xA458FEA3, 0xF4933D7E,
                0x0D95748F, 0x728EB658, 0x718BCD58, 0x82154AEE,
                0x7B54A41D, 0xC25A59B5, 0x9C30D539, 0x2AF26013,
                0xC5D1B023, 0x286085F0, 0xCA417918, 0xB8DB38EF,
                0x8E79DCB0, 0x603A180E, 0x6C9E0E8B, 0xB01E8A3E,
                0xD71577C1, 0xBD314B27, 0x78AF2FDA, 0x55605C60,
                0xE65525F3, 0xAA55AB94, 0x57489862, 0x63E81440,
                0x55CA396A, 0x2AAB10B6, 0xB4CC5C34, 0x1141E8CE,
                0xA15486AF, 0x7C72E993, 0xB3EE1411, 0x636FBC2A,
                0x2BA9C55D, 0x741831F6, 0xCE5C3E16, 0x9B87931E,
                0xAFD6BA33, 0x6C24CF5C, 0x7A325381, 0x28958677,
                0x3B8F4898, 0x6B4BB9AF, 0xC4BFE81B, 0x66282193,
                0x61D809CC, 0xFB21A991, 0x487CAC60, 0x5DEC8032,
                0xEF845D5D, 0xE98575B1, 0xDC262302, 0xEB651B88,
                0x23893E81, 0xD396ACC5, 0x0F6D6FF3, 0x83F44239,
                0x2E0B4482, 0xA4842004, 0x69C8F04A, 0x9E1F9B5E,
                0x21C66842, 0xF6E96C9A, 0x670C9C61, 0xABD388F0,
                0x6A51A0D2, 0xD8542F68, 0x960FA728, 0xAB5133A3,
                0x6EEF0B6C, 0x137A3BE4, 0xBA3BF050, 0x7EFB2A98,
                0xA1F1651D, 0x39AF0176, 0x66CA593E, 0x82430E88,
                0x8CEE8619, 0x456F9FB4, 0x7D84A5C3, 0x3B8B5EBE,
                0xE06F75D8, 0x85C12073, 0x401A449F, 0x56C16AA6,
                0x4ED3AA62, 0x363F7706, 0x1BFEDF72, 0x429B023D,
                0x37D0D724, 0xD00A1248, 0xDB0FEAD3, 0x49F1C09B,
                0x075372C9, 0x80991B7B, 0x25D479D8, 0xF6E8DEF7,
                0xE3FE501A, 0xB6794C3B, 0x976CE0BD, 0x04C006BA,
                0xC1A94FB6, 0x409F60C4, 0x5E5C9EC2, 0x196A2463,
                0x68FB6FAF, 0x3E6C53B5, 0x1339B2EB, 0x3B52EC6F,
                0x6DFC511F, 0x9B30952C, 0xCC814544, 0xAF5EBD09,
                0xBEE3D004, 0xDE334AFD, 0x660F2807, 0x192E4BB3,
                0xC0CBA857, 0x45C8740F, 0xD20B5F39, 0xB9D3FBDB,
                0x5579C0BD, 0x1A60320A, 0xD6A100C6, 0x402C7279,
                0x679F25FE, 0xFB1FA3CC, 0x8EA5E9F8, 0xDB3222F8,
                0x3C7516DF, 0xFD616B15, 0x2F501EC8, 0xAD0552AB,
                0x323DB5FA, 0xFD238760, 0x53317B48, 0x3E00DF82,
                0x9E5C57BB, 0xCA6F8CA0, 0x1A87562E, 0xDF1769DB,
                0xD542A8F6, 0x287EFFC3, 0xAC6732C6, 0x8C4F5573,
                0x695B27B0, 0xBBCA58C8, 0xE1FFA35D, 0xB8F011A0,
                0x10FA3D98, 0xFD2183B8, 0x4AFCB56C, 0x2DD1D35B,
                0x9A53E479, 0xB6F84565, 0xD28E49BC, 0x4BFB9790,
                0xE1DDF2DA, 0xA4CB7E33, 0x62FB1341, 0xCEE4C6E8,
                0xEF20CADA, 0x36774C01, 0xD07E9EFE, 0x2BF11FB4,
                0x95DBDA4D, 0xAE909198, 0xEAAD8E71, 0x6B93D5A0,
                0xD08ED1D0, 0xAFC725E0, 0x8E3C5B2F, 0x8E7594B7,
                0x8FF6E2FB, 0xF2122B64, 0x8888B812, 0x900DF01C,
                0x4FAD5EA0, 0x688FC31C, 0xD1CFF191, 0xB3A8C1AD,
                0x2F2F2218, 0xBE0E1777, 0xEA752DFE, 0x8B021FA1,
                0xE5A0CC0F, 0xB56F74E8, 0x18ACF3D6, 0xCE89E299,
                0xB4A84FE0, 0xFD13E0B7, 0x7CC43B81, 0xD2ADA8D9,
                0x165FA266, 0x80957705, 0x93CC7314, 0x211A1477,
                0xE6AD2065, 0x77B5FA86, 0xC75442F5, 0xFB9D35CF,
                0xEBCDAF0C, 0x7B3E89A0, 0xD6411BD3, 0xAE1E7E49,
                0x00250E2D, 0x2071B35E, 0x226800BB, 0x57B8E0AF,
                0x2464369B, 0xF009B91E, 0x5563911D, 0x59DFA6AA,
                0x78C14389, 0xD95A537F, 0x207D5BA2, 0x02E5B9C5,
                0x83260376, 0x6295CFA9, 0x11C81968, 0x4E734A41,
                0xB3472DCA, 0x7B14A94A, 0x1B510052, 0x9A532915,
                0xD60F573F, 0xBC9BC6E4, 0x2B60A476, 0x81E67400,
                0x08BA6FB5, 0x571BE91F, 0xF296EC6B, 0x2A0DD915,
                0xB6636521, 0xE7B9F9B6, 0xFF34052E, 0xC5855664,
                0x53B02D5D, 0xA99F8FA1, 0x08BA4799, 0x6E85076A
            ]),
            array.array('I', [
                0x4B7A70E9, 0xB5B32944, 0xDB75092E, 0xC4192623,
                0xAD6EA6B0, 0x49A7DF7D, 0x9CEE60B8, 0x8FEDB266,
                0xECAA8C71, 0x699A17FF, 0x5664526C, 0xC2B19EE1,
                0x193602A5, 0x75094C29, 0xA0591340, 0xE4183A3E,
                0x3F54989A, 0x5B429D65, 0x6B8FE4D6, 0x99F73FD6,
                0xA1D29C07, 0xEFE830F5, 0x4D2D38E6, 0xF0255DC1,
                0x4CDD2086, 0x8470EB26, 0x6382E9C6, 0x021ECC5E,
                0x09686B3F, 0x3EBAEFC9, 0x3C971814, 0x6B6A70A1,
                0x687F3584, 0x52A0E286, 0xB79C5305, 0xAA500737,
                0x3E07841C, 0x7FDEAE5C, 0x8E7D44EC, 0x5716F2B8,
                0xB03ADA37, 0xF0500C0D, 0xF01C1F04, 0x0200B3FF,
                0xAE0CF51A, 0x3CB574B2, 0x25837A58, 0xDC0921BD,
                0xD19113F9, 0x7CA92FF6, 0x94324773, 0x22F54701,
                0x3AE5E581, 0x37C2DADC, 0xC8B57634, 0x9AF3DDA7,
                0xA9446146, 0x0FD0030E, 0xECC8C73E, 0xA4751E41,
                0xE238CD99, 0x3BEA0E2F, 0x3280BBA1, 0x183EB331,
                0x4E548B38, 0x4F6DB908, 0x6F420D03, 0xF60A04BF,
                0x2CB81290, 0x24977C79, 0x5679B072, 0xBCAF89AF,
                0xDE9A771F, 0xD9930810, 0xB38BAE12, 0xDCCF3F2E,
                0x5512721F, 0x2E6B7124, 0x501ADDE6, 0x9F84CD87,
                0x7A584718, 0x7408DA17, 0xBC9F9ABC, 0xE94B7D8C,
                0xEC7AEC3A, 0xDB851DFA, 0x63094366, 0xC464C3D2,
                0xEF1C1847, 0x3215D908, 0xDD433B37, 0x24C2BA16,
                0x12A14D43, 0x2A65C451, 0x50940002, 0x133AE4DD,
                0x71DFF89E, 0x10314E55, 0x81AC77D6, 0x5F11199B,
                0x043556F1, 0xD7A3C76B, 0x3C11183B, 0x5924A509,
                0xF28FE6ED, 0x97F1FBFA, 0x9EBABF2C, 0x1E153C6E,
                0x86E34570, 0xEAE96FB1, 0x860E5E0A, 0x5A3E2AB3,
                0x771FE71C, 0x4E3D06FA, 0x2965DCB9, 0x99E71D0F,
                0x803E89D6, 0x5266C825, 0x2E4CC978, 0x9C10B36A,
                0xC6150EBA, 0x94E2EA78, 0xA5FC3C53, 0x1E0A2DF4,
                0xF2F74EA7, 0x361D2B3D, 0x1939260F, 0x19C27960,
                0x5223A708, 0xF71312B6, 0xEBADFE6E, 0xEAC31F66,
                0xE3BC4595, 0xA67BC883, 0xB17F37D1, 0x018CFF28,
                0xC332DDEF, 0xBE6C5AA5, 0x65582185, 0x68AB9802,
                0xEECEA50F, 0xDB2F953B, 0x2AEF7DAD, 0x5B6E2F84,
                0x1521B628, 0x29076170, 0xECDD4775, 0x619F1510,
                0x13CCA830, 0xEB61BD96, 0x0334FE1E, 0xAA0363CF,
                0xB5735C90, 0x4C70A239, 0xD59E9E0B, 0xCBAADE14,
                0xEECC86BC, 0x60622CA7, 0x9CAB5CAB, 0xB2F3846E,
                0x648B1EAF, 0x19BDF0CA, 0xA02369B9, 0x655ABB50,
                0x40685A32, 0x3C2AB4B3, 0x319EE9D5, 0xC021B8F7,
                0x9B540B19, 0x875FA099, 0x95F7997E, 0x623D7DA8,
                0xF837889A, 0x97E32D77, 0x11ED935F, 0x16681281,
                0x0E358829, 0xC7E61FD6, 0x96DEDFA1, 0x7858BA99,
                0x57F584A5, 0x1B227263, 0x9B83C3FF, 0x1AC24696,
                0xCDB30AEB, 0x532E3054, 0x8FD948E4, 0x6DBC3128,
                0x58EBF2EF, 0x34C6FFEA, 0xFE28ED61, 0xEE7C3C73,
                0x5D4A14D9, 0xE864B7E3, 0x42105D14, 0x203E13E0,
                0x45EEE2B6, 0xA3AAABEA, 0xDB6C4F15, 0xFACB4FD0,
                0xC742F442, 0xEF6ABBB5, 0x654F3B1D, 0x41CD2105,
                0xD81E799E, 0x86854DC7, 0xE44B476A, 0x3D816250,
                0xCF62A1F2, 0x5B8D2646, 0xFC8883A0, 0xC1C7B6A3,
                0x7F1524C3, 0x69CB7492, 0x47848A0B, 0x5692B285,
                0x095BBF00, 0xAD19489D, 0x1462B174, 0x23820E00,
                0x58428D2A, 0x0C55F5EA, 0x1DADF43E, 0x233F7061,
                0x3372F092, 0x8D937E41, 0xD65FECF1, 0x6C223BDB,
                0x7CDE3759, 0xCBEE7460, 0x4085F2A7, 0xCE77326E,
                0xA6078084, 0x19F8509E, 0xE8EFD855, 0x61D99735,
                0xA969A7AA, 0xC50C06C2, 0x5A04ABFC, 0x800BCADC,
                0x9E447A2E, 0xC3453484, 0xFDD56705, 0x0E1E9EC9,
                0xDB73DBD3, 0x105588CD, 0x675FDA79, 0xE3674340,
                0xC5C43465, 0x713E38D8, 0x3D28F89E, 0xF16DFF20,
                0x153E21E7, 0x8FB03D4A, 0xE6E39F2B, 0xDB83ADF7
            ]),
            array.array('I', [
                0xE93D5A68, 0x948140F7, 0xF64C261C, 0x94692934,
                0x411520F7, 0x7602D4F7, 0xBCF46B2E, 0xD4A20068,
                0xD4082471, 0x3320F46A, 0x43B7D4B7, 0x500061AF,
                0x1E39F62E, 0x97244546, 0x14214F74, 0xBF8B8840,
                0x4D95FC1D, 0x96B591AF, 0x70F4DDD3, 0x66A02F45,
                0xBFBC09EC, 0x03BD9785, 0x7FAC6DD0, 0x31CB8504,
                0x96EB27B3, 0x55FD3941, 0xDA2547E6, 0xABCA0A9A,
                0x28507825, 0x530429F4, 0x0A2C86DA, 0xE9B66DFB,
                0x68DC1462, 0xD7486900, 0x680EC0A4, 0x27A18DEE,
                0x4F3FFEA2, 0xE887AD8C, 0xB58CE006, 0x7AF4D6B6,
                0xAACE1E7C, 0xD3375FEC, 0xCE78A399, 0x406B2A42,
                0x20FE9E35, 0xD9F385B9, 0xEE39D7AB, 0x3B124E8B,
                0x1DC9FAF7, 0x4B6D1856, 0x26A36631, 0xEAE397B2,
                0x3A6EFA74, 0xDD5B4332, 0x6841E7F7, 0xCA7820FB,
                0xFB0AF54E, 0xD8FEB397, 0x454056AC, 0xBA489527,
                0x55533A3A, 0x20838D87, 0xFE6BA9B7, 0xD096954B,
                0x55A867BC, 0xA1159A58, 0xCCA92963, 0x99E1DB33,
                0xA62A4A56, 0x3F3125F9, 0x5EF47E1C, 0x9029317C,
                0xFDF8E802, 0x04272F70, 0x80BB155C, 0x05282CE3,
                0x95C11548, 0xE4C66D22, 0x48C1133F, 0xC70F86DC,
                0x07F9C9EE, 0x41041F0F, 0x404779A4, 0x5D886E17,
                0x325F51EB, 0xD59BC0D1, 0xF2BCC18F, 0x41113564,
                0x257B7834, 0x602A9C60, 0xDFF8E8A3, 0x1F636C1B,
                0x0E12B4C2, 0x02E1329E, 0xAF664FD1, 0xCAD18115,
                0x6B2395E0, 0x333E92E1, 0x3B240B62, 0xEEBEB922,
                0x85B2A20E, 0xE6BA0D99, 0xDE720C8C, 0x2DA2F728,
                0xD0127845, 0x95B794FD, 0x647D0862, 0xE7CCF5F0,
                0x5449A36F, 0x877D48FA, 0xC39DFD27, 0xF33E8D1E,
                0x0A476341, 0x992EFF74, 0x3A6F6EAB, 0xF4F8FD37,
                0xA812DC60, 0xA1EBDDF8, 0x991BE14C, 0xDB6E6B0D,
                0xC67B5510, 0x6D672C37, 0x2765D43B, 0xDCD0E804,
                0xF1290DC7, 0xCC00FFA3, 0xB5390F92, 0x690FED0B,
                0x667B9FFB, 0xCEDB7D9C, 0xA091CF0B, 0xD9155EA3,
                0xBB132F88, 0x515BAD24, 0x7B9479BF, 0x763BD6EB,
                0x37392EB3, 0xCC115979, 0x8026E297, 0xF42E312D,
                0x6842ADA7, 0xC66A2B3B, 0x12754CCC, 0x782EF11C,
                0x6A124237, 0xB79251E7, 0x06A1BBE6, 0x4BFB6350,
                0x1A6B1018, 0x11CAEDFA, 0x3D25BDD8, 0xE2E1C3C9,
                0x44421659, 0x0A121386, 0xD90CEC6E, 0xD5ABEA2A,
                0x64AF674E, 0xDA86A85F, 0xBEBFE988, 0x64E4C3FE,
                0x9DBC8057, 0xF0F7C086, 0x60787BF8, 0x6003604D,
                0xD1FD8346, 0xF6381FB0, 0x7745AE04, 0xD736FCCC,
                0x83426B33, 0xF01EAB71, 0xB0804187, 0x3C005E5F,
                0x77A057BE, 0xBDE8AE24, 0x55464299, 0xBF582E61,
                0x4E58F48F, 0xF2DDFDA2, 0xF474EF38, 0x8789BDC2,
                0x5366F9C3, 0xC8B38E74, 0xB475F255, 0x46FCD9B9,
                0x7AEB2661, 0x8B1DDF84, 0x846A0E79, 0x915F95E2,
                0x466E598E, 0x20B45770, 0x8CD55591, 0xC902DE4C,
                0xB90BACE1, 0xBB8205D0, 0x11A86248, 0x7574A99E,
                0xB77F19B6, 0xE0A9DC09, 0x662D09A1, 0xC4324633,
                0xE85A1F02, 0x09F0BE8C, 0x4A99A025, 0x1D6EFE10,
                0x1AB93D1D, 0x0BA5A4DF, 0xA186F20F, 0x2868F169,
                0xDCB7DA83, 0x573906FE, 0xA1E2CE9B, 0x4FCD7F52,
                0x50115E01, 0xA70683FA, 0xA002B5C4, 0x0DE6D027,
                0x9AF88C27, 0x773F8641, 0xC3604C06, 0x61A806B5,
                0xF0177A28, 0xC0F586E0, 0x006058AA, 0x30DC7D62,
                0x11E69ED7, 0x2338EA63, 0x53C2DD94, 0xC2C21634,
                0xBBCBEE56, 0x90BCB6DE, 0xEBFC7DA1, 0xCE591D76,
                0x6F05E409, 0x4B7C0188, 0x39720A3D, 0x7C927C24,
                0x86E3725F, 0x724D9DB9, 0x1AC15BB4, 0xD39EB8FC,
                0xED545578, 0x08FCA5B5, 0xD83D7CD3, 0x4DAD0FC4,
                0x1E50EF5E, 0xB161E6F8, 0xA28514D9, 0x6C51133C,
                0x6FD5C7E7, 0x56E14EC4, 0x362ABFCE, 0xDDC6C837,
                0xD79A3234, 0x92638212, 0x670EFA8E, 0x406000E0
            ]),
            array.array('I', [
                0x3A39CE37, 0xD3FAF5CF, 0xABC27737, 0x5AC52D1B,
                0x5CB0679E, 0x4FA33742, 0xD3822740, 0x99BC9BBE,
                0xD5118E9D, 0xBF0F7315, 0xD62D1C7E, 0xC700C47B,
                0xB78C1B6B, 0x21A19045, 0xB26EB1BE, 0x6A366EB4,
                0x5748AB2F, 0xBC946E79, 0xC6A376D2, 0x6549C2C8,
                0x530FF8EE, 0x468DDE7D, 0xD5730A1D, 0x4CD04DC6,
                0x2939BBDB, 0xA9BA4650, 0xAC9526E8, 0xBE5EE304,
                0xA1FAD5F0, 0x6A2D519A, 0x63EF8CE2, 0x9A86EE22,
                0xC089C2B8, 0x43242EF6, 0xA51E03AA, 0x9CF2D0A4,
                0x83C061BA, 0x9BE96A4D, 0x8FE51550, 0xBA645BD6,
                0x2826A2F9, 0xA73A3AE1, 0x4BA99586, 0xEF5562E9,
                0xC72FEFD3, 0xF752F7DA, 0x3F046F69, 0x77FA0A59,
                0x80E4A915, 0x87B08601, 0x9B09E6AD, 0x3B3EE593,
                0xE990FD5A, 0x9E34D797, 0x2CF0B7D9, 0x022B8B51,
                0x96D5AC3A, 0x017DA67D, 0xD1CF3ED6, 0x7C7D2D28,
                0x1F9F25CF, 0xADF2B89B, 0x5AD6B472, 0x5A88F54C,
                0xE029AC71, 0xE019A5E6, 0x47B0ACFD, 0xED93FA9B,
                0xE8D3C48D, 0x283B57CC, 0xF8D56629, 0x79132E28,
                0x785F0191, 0xED756055, 0xF7960E44, 0xE3D35E8C,
                0x15056DD4, 0x88F46DBA, 0x03A16125, 0x0564F0BD,
                0xC3EB9E15, 0x3C9057A2, 0x97271AEC, 0xA93A072A,
                0x1B3F6D9B, 0x1E6321F5, 0xF59C66FB, 0x26DCF319,
                0x7533D928, 0xB155FDF5, 0x03563482, 0x8ABA3CBB,
                0x28517711, 0xC20AD9F8, 0xABCC5167, 0xCCAD925F,
                0x4DE81751, 0x3830DC8E, 0x379D5862, 0x9320F991,
                0xEA7A90C2, 0xFB3E7BCE, 0x5121CE64, 0x774FBE32,
                0xA8B6E37E, 0xC3293D46, 0x48DE5369, 0x6413E680,
                0xA2AE0810, 0xDD6DB224, 0x69852DFD, 0x09072166,
                0xB39A460A, 0x6445C0DD, 0x586CDECF, 0x1C20C8AE,
                0x5BBEF7DD, 0x1B588D40, 0xCCD2017F, 0x6BB4E3BB,
                0xDDA26A7E, 0x3A59FF45, 0x3E350A44, 0xBCB4CDD5,
                0x72EACEA8, 0xFA6484BB, 0x8D6612AE, 0xBF3C6F47,
                0xD29BE463, 0x542F5D9E, 0xAEC2771B, 0xF64E6370,
                0x740E0D8D, 0xE75B1357, 0xF8721671, 0xAF537D5D,
                0x4040CB08, 0x4EB4E2CC, 0x34D2466A, 0x0115AF84,
                0xE1B00428, 0x95983A1D, 0x06B89FB4, 0xCE6EA048,
                0x6F3F3B82, 0x3520AB82, 0x011A1D4B, 0x277227F8,
                0x611560B1, 0xE7933FDC, 0xBB3A792B, 0x344525BD,
                0xA08839E1, 0x51CE794B, 0x2F32C9B7, 0xA01FBAC9,
                0xE01CC87E, 0xBCC7D1F6, 0xCF0111C3, 0xA1E8AAC7,
                0x1A908749, 0xD44FBD9A, 0xD0DADECB, 0xD50ADA38,
                0x0339C32A, 0xC6913667, 0x8DF9317C, 0xE0B12B4F,
                0xF79E59B7, 0x43F5BB3A, 0xF2D519FF, 0x27D9459C,
                0xBF97222C, 0x15E6FC2A, 0x0F91FC71, 0x9B941525,
                0xFAE59361, 0xCEB69CEB, 0xC2A86459, 0x12BAA8D1,
                0xB6C1075E, 0xE3056A0C, 0x10D25065, 0xCB03A442,
                0xE0EC6E0E, 0x1698DB3B, 0x4C98A0BE, 0x3278E964,
                0x9F1F9532, 0xE0D392DF, 0xD3A0342B, 0x8971F21E,
                0x1B0A7441, 0x4BA3348C, 0xC5BE7120, 0xC37632D8,
                0xDF359F8D, 0x9B992F2E, 0xE60B6F47, 0x0FE3F11D,
                0xE54CDA54, 0x1EDAD891, 0xCE6279CF, 0xCD3E7E6F,
                0x1618B166, 0xFD2C1D05, 0x848FD2C5, 0xF6FB2299,
                0xF523F357, 0xA6327623, 0x93A83531, 0x56CCCD02,
                0xACF08162, 0x5A75EBB5, 0x6E163697, 0x88D273CC,
                0xDE966292, 0x81B949D0, 0x4C50901B, 0x71C65614,
                0xE6C6C7BD, 0x327A140A, 0x45E1D006, 0xC3F27B9A,
                0xC9AA53FD, 0x62A80F00, 0xBB25BFE2, 0x35BDD2F6,
                0x71126905, 0xB2040222, 0xB6CBCF7C, 0xCD769C2B,
                0x53113EC0, 0x1640E3D3, 0x38ABBD60, 0x2547ADF0,
                0xBA38209C, 0xF746CE76, 0x77AFA1C5, 0x20756060,
                0x85CBFE4E, 0x8AE88DD8, 0x7AAAF9B0, 0x4CF9AA7E,
                0x1948C25C, 0x02FB8A8C, 0x01C36AE4, 0xD6EBE1F9,
                0x90D4F869, 0xA65CDEA0, 0x3F09252D, 0xC208E69F,
                0xB74E6132, 0xCE77E25B, 0x578FDFE3, 0x3AC372E6
            ])
        )
 
        # Cycle through the p-boxes and round-robin XOR the
        # key with the p-boxes
        key_len = len(key)
        index = 0
        for i in xrange(len(self._p_boxes)):
            self._p_boxes[i] = self._p_boxes[i] ^ (
             (ord(key[index % key_len]) << 24) +
             (ord(key[(index + 1) % key_len]) << 16) +
             (ord(key[(index + 2) % key_len]) << 8) +
             (ord(key[(index + 3) % key_len]))
            )
            index += 4
 
        # For the chaining process
        l = r = 0
 
        # Begin chain replacing the p-boxes
        for i in xrange(0, len(self._p_boxes), 2):
            (l, r) = self.cipher(l, r, self.ENCRYPT)
            self._p_boxes[i] = l
            self._p_boxes[i + 1] = r
 
        # Chain replace the s-boxes
        for i in xrange(len(self._s_boxes)):
            for j in xrange(0, len(self._s_boxes[i]), 2):
                (l, r) = self.cipher(l, r, self.ENCRYPT)
                self._s_boxes[i][j] = l
                self._s_boxes[i][j + 1] = r
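
The key-mixing loop above packs four key bytes per 32-bit word, cycling through the key. Below is a standalone sketch of just that step; the helper name is ours, and it is written for Python 3, where indexing a bytes key yields integers directly (the original uses ord() under Python 2).

import array

def mix_key_into_pboxes(p_boxes, key):
    # Round-robin XOR of the key into the P-array, four key bytes per
    # 32-bit word, mirroring the constructor's first loop above.
    key_len = len(key)
    index = 0
    for i in range(len(p_boxes)):
        word = 0
        for k in range(4):
            word = (word << 8) | key[(index + k) % key_len]
        p_boxes[i] ^= word
        index += 4
    return p_boxes

boxes = array.array('I', [0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344])
print(['%08X' % w for w in mix_key_into_pboxes(boxes, b'TESTKEY')])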

Example 2

Project: pyomo
Source File: model.py
def to_standard_form(self):
    """
    Produces a standard-form representation of the model. Returns
    the coefficient matrix (A), the cost vector (c), and the
    constraint vector (b), where the 'standard form' problem is

    min/max c'x
    s.t.    Ax = b
            x >= 0

    All three returned values are instances of the array.array
    class, and store Python floats (C doubles).
    """

    from pyomo.repn import generate_canonical_repn


    # We first need to create an map of all variables to their column
    # number
    colID = {}
    ID2name = {}
    id = 0
    tmp = self.variables().keys()
    tmp.sort()

    for v in tmp:
        colID[v] = id
        ID2name[id] = v
        id += 1

    # First we go through the constraints and introduce slack and excess
    # variables to eliminate inequality constraints
    #
    # N.B. Structure hierarchy:
    #
    # active_components: {class: {attr_name: object}}
    # object -> Constraint: ._data: {ndx: _ConstraintData}
    # _ConstraintData: .lower, .body, .upper
    #
    # So, altogether, we access a lower bound via
    #
    # model.component_map(active=True)[Constraint]['con_name']['index'].lower
    #
    # {le,ge,eq}Constraints are
    # {constraint_name: {index: {variable_or_none: coefficient}}} objects
    # that represent each constraint. None in the innermost dictionary
    # represents the constant term.
    #
    # i.e.
    #
    # min  x1 + 2*x2 +          x4
    # s.t. x1                         = 1
    #           x2   + 3*x3          <= -1
    #      x1 +                 x4   >= 3
    #      x1 + 2*x2 +      +   3*x4 >= 0
    #
    #
    # would be represented as (modulo the names of the variables,
    # constraints, and indices)
    #
    # eqConstraints = {'c1': {None: {'x1':1, None:-1}}}
    # leConstraints = {'c2': {None: {'x2':1, 'x3':3, None:1}}}
    # geConstraints = {'c3': {None: {'x1':1, 'x4':1, None:-3}},
    #                  'c4': {None: {'x1':1, 'x2':2, 'x4':1, None:0}}}
    #
    # Note that we have the luxury of dealing only with linear terms.
    var_id_map = {}
    leConstraints = {}
    geConstraints = {}
    eqConstraints = {}
    objectives = {}
    # For each registered component
    for c in self.component_map(active=True):

        # Get all subclasses of Constraint
        if issubclass(c, Constraint):
            cons = self.component_map(c, active=True)

            # Get the name of the constraint, and the constraint set itself
            for con_set_name in cons:
                con_set = cons[con_set_name]

                # For each indexed constraint in the constraint set
                for ndx in con_set._data:
                    con = con_set._data[ndx]

                    # Process the body
                    terms = self._process_canonical_repn(
                        generate_canonical_repn(con.body, var_id_map))

                    # Process the bounds of the constraint
                    if con.equality:
                        # Equality constraint, only check lower bound
                        lb = self._process_canonical_repn(
                            generate_canonical_repn(con.lower, var_id_map))

                        # Update terms
                        for k in lb:
                            v = lb[k]
                            if k in terms:
                                terms[k] -= v
                            else:
                                terms[k] = -v

                        # Add constraint to equality constraints
                        eqConstraints[(con_set_name, ndx)] = terms
                    else:

                        # Process upper bounds (<= constraints)
                        if con.upper is not None:
                            # Less than or equal to constraint
                            tmp = dict(terms)

                            ub = self._process_canonical_repn(
                                generate_canonical_repn(con.upper, var_id_map))

                            # Update terms
                            for k in ub:
                                if k in terms:
                                    tmp[k] -= ub[k]
                                else:
                                    tmp[k] = -ub[k]

                            # Add constraint to less than or equal to
                            # constraints
                            leConstraints[(con_set_name, ndx)] = tmp

                        # Process lower bounds (>= constraints)
                        if con.lower is not None:
                            # Greater than or equal to constraint
                            tmp = dict(terms)

                            lb = self._process_canonical_repn(
                                generate_canonical_repn(con.lower, var_id_map))

                            # Update terms
                            for k in lb:
                                if k in terms:
                                    tmp[k] -= lb[k]
                                else:
                                    tmp[k] = -lb[k]

                            # Add constraint to greater than or equal to
                            # constraints
                            geConstraints[(con_set_name, ndx)] = tmp
        elif issubclass(c, Objective):
            # Process objectives
            objs = self.component_map(c, active=True)

            # Get the name of the objective, and the objective set itself
            for obj_set_name in objs:
                obj_set = objs[obj_set_name]

                # For each indexed objective in the objective set
                for ndx in obj_set._data:
                    obj = obj_set._data[ndx]
                    # Process the objective
                    terms = self._process_canonical_repn(
                        generate_canonical_repn(obj.expr, var_id_map))

                    objectives[(obj_set_name, ndx)] = terms


    # We now have all the constraints. Add a slack variable for every
    # <= constraint and an excess variable for every >= constraint.
    nSlack = len(leConstraints)
    nExcess = len(geConstraints)

    nConstraints = len(leConstraints) + len(geConstraints) + \
                   len(eqConstraints)
    nVariables = len(colID) + nSlack + nExcess
    nRegVariables = len(colID)

    # Make the arrays
    coefficients = array.array("d", [0]*nConstraints*nVariables)
    constraints = array.array("d", [0]*nConstraints)
    costs = array.array("d", [0]*nVariables)

    # Populate the coefficient matrix
    constraintID = 0

    # Add less than or equal to constraints
    for ndx in leConstraints:
        con = leConstraints[ndx]
        for termKey in con:
            coef = con[termKey]

            if termKey is None:
                # Constant term: negate onto the RHS (constraint) vector
                constraints[constraintID] = -coef
            else:
                # Variable coefficient
                col = colID[termKey]
                coefficients[constraintID*nVariables + col] = coef

        # Add the slack variable (coefficient +1)
        coefficients[constraintID*nVariables + nRegVariables + \
                    constraintID] = 1
        constraintID += 1

    # Add greater than or equal to constraints
    for ndx in geConstraints:
        con = geConstraints[ndx]
        for termKey in con:
            coef = con[termKey]

            if termKey is None:
                # Constant term: negate onto the RHS (constraint) vector
                constraints[constraintID] = -coef
            else:
                # Variable coefficient
                col = colID[termKey]
                coefficients[constraintID*nVariables + col] = coef

        # Add the excess variable (coefficient -1)
        coefficients[constraintID*nVariables + nRegVariables + \
                    constraintID] = -1
        constraintID += 1

    # Add equality constraints
    for ndx in eqConstraints:
        con = eqConstraints[ndx]
        for termKey in con:
            coef = con[termKey]

            if termKey is None:
                # Constant term: negate onto the RHS (constraint) vector
                constraints[constraintID] = -coef
            else:
                # Variable coefficient
                col = colID[termKey]
                coefficients[constraintID*nVariables + col] = coef

        constraintID += 1

    # Determine cost coefficients
    for obj_name in objectives:
        obj = objectives[obj_name]
        for var in obj:
            costs[colID[var]] = obj[var]

    # Print the model
    #
    # The goal is to print
    #
    #         var1   var2   var3   ...
    #       +--                     --+
    #       | cost1  cost2  cost3  ...|
    #       +--                     --+
    #       +--                     --+ +-- --+
    # con1  | coef11 coef12 coef13 ...| | eq1 |
    # con2  | coef21 coef22 coef23 ...| | eq2 |
    # con2  | coef31 coef32 coef33 ...| | eq3 |
    #  .    |   .      .      .   .   | |  .  |
    #  .    |   .      .      .    .  | |  .  |
    #  .    |   .      .      .     . | |  .  |

    constraintPadding = 2
    numFmt = "% 1.4f"
    altFmt = "% 1.1g"
    maxColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0))
    maxConstraintColWidth = max(len(numFmt % 0.0), len(altFmt % 0.0))

    # Generate constraint names
    maxConNameLen = 0
    conNames = []
    for name in leConstraints:
        strName = str(name)
        if len(strName) > maxConNameLen:
            maxConNameLen = len(strName)
        conNames.append(strName)
    for name in geConstraints:
        strName = str(name)
        if len(strName) > maxConNameLen:
            maxConNameLen = len(strName)
        conNames.append(strName)
    for name in eqConstraints:
        strName = str(name)
        if len(strName) > maxConNameLen:
            maxConNameLen = len(strName)
        conNames.append(strName)

    # Generate the variable names
    varNames = [None]*len(colID)
    for name in colID:
        tmp_name = " " + name
        if len(tmp_name) > maxColWidth:
            maxColWidth = len(tmp_name)
        varNames[colID[name]] = tmp_name
    for i in xrange(0, nSlack):
        tmp_name = " _slack_%i" % i
        if len(tmp_name) > maxColWidth:
            maxColWidth = len(tmp_name)
        varNames.append(tmp_name)
    for i in xrange(0, nExcess):
        tmp_name = " _excess_%i" % i
        if len(tmp_name) > maxColWidth:
            maxColWidth = len(tmp_name)
        varNames.append(tmp_name)

    # Variable names
    line = " "*maxConNameLen + (" "*constraintPadding) + " "
    for col in xrange(0, nVariables):
        # Format entry
        token = varNames[col]

        # Pad with trailing whitespace
        token += " "*(maxColWidth - len(token))

        # Add to line
        line += " " + token + " "
    print(line+'\n')

    # Cost vector
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+" + '\n')
    line = " "*maxConNameLen + (" "*constraintPadding) + "|"
    for col in xrange(0, nVariables):
        # Format entry
        token = numFmt % costs[col]
        if len(token) > maxColWidth:
            token = altFmt % costs[col]

        # Pad with trailing whitespace
        token += " "*(maxColWidth - len(token))

        # Add to line
        line += " " + token + " "
    line += "|"
    print(line+'\n')
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+"+'\n')

    # Constraints
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+" + \
          (" "*constraintPadding) + "+--" + \
          (" "*(maxConstraintColWidth-1)) + "--+"+'\n')
    for row in xrange(0, nConstraints):
        # Print constraint name
        line = conNames[row] + (" "*constraintPadding) + (" "*(maxConNameLen - len(conNames[row]))) + "|"

        # Print each coefficient
        for col in xrange(0, nVariables):
            # Format entry
            token = numFmt % coefficients[nVariables*row + col]
            if len(token) > maxColWidth:
                token = altFmt % coefficients[nVariables*row + col]

            # Pad with trailing whitespace
            token += " "*(maxColWidth - len(token))

            # Add to line
            line += " " + token + " "

        line += "|" + (" "*constraintPadding) + "|"

        # Add constraint vector
        token = numFmt % constraints[row]
        if len(token) > maxConstraintColWidth:
            token = altFmt % constraints[row]

        # Pad with trailing whitespace
        token += " "*(maxConstraintColWidth - len(token))

        line += " " + token + "  |"
        print(line+'\n')
    print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \
          " "*((maxColWidth+2)*nVariables - 4) + "--+" + \
          (" "*constraintPadding) + "+--" + (" "*(maxConstraintColWidth-1))\
          + "--+"+'\n')

    return (coefficients, costs, constraints)
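
Note that coefficients comes back flattened: it is the matrix A in row-major order, so entry (i, j) lives at index i*nVariables + j. A small sketch of reading a row back out (the helper name and sample data are ours):

import array

def get_row(coefficients, n_variables, i):
    # slice out row i of the flattened row-major matrix
    return coefficients[i * n_variables:(i + 1) * n_variables]

A = array.array('d', [1.0, 0.0, 2.0,
                      0.0, 3.0, 0.0])       # 2 constraints x 3 variables
print(list(get_row(A, 3, 1)))               # [0.0, 3.0, 0.0]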

Example 3

Project: pyomo
Source File: matrix.py
def compile_block_linear_constraints(parent_block,
                                     constraint_name,
                                     skip_trivial_constraints=False,
                                     single_precision_storage=False,
                                     verbose=False,
                                     descend_into=True):

    if verbose:
        print("")
        print("Compiling linear constraints on block with name: %s"
              % (parent_block.name))

    if not parent_block.is_constructed():
        raise RuntimeError(
            "Attempting to compile block '%s' with unconstructed "
            "component(s)" % (parent_block.name))

    #
    # Linear MatrixConstraint in CSR format
    #
    SparseMat_pRows = []
    SparseMat_jCols = []
    SparseMat_Vals = []
    Ranges = []
    RangeTypes = []

    def _get_bound(exp):
        if exp is None:
            return None
        if is_fixed(exp):
            return value(exp)
        raise ValueError("non-fixed bound: " + str(exp))

    start_time = time.time()
    if verbose:
        print("Sorting active blocks...")

    sortOrder = SortComponents.indices | SortComponents.alphabetical
    all_blocks = [_b for _b in parent_block.block_data_objects(
        active=True,
        sort=sortOrder,
        descend_into=descend_into)]

    stop_time = time.time()
    if verbose:
        print("Time to sort active blocks: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Collecting variables on active blocks...")

    #
    # First Pass: assign each variable a deterministic id
    #             (an index in a list)
    #
    VarSymbolToVarObject = []
    for block in all_blocks:
        VarSymbolToVarObject.extend(
            block.component_data_objects(Var,
                                         sort=sortOrder,
                                         descend_into=False))
    VarIDToVarSymbol = \
        dict((id(vardata), index)
             for index, vardata in enumerate(VarSymbolToVarObject))

    stop_time = time.time()
    if verbose:
        print("Time to collect variables on active blocks: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Compiling active linear constraints...")

    #
    # Second Pass: collect and remove active linear constraints
    #
    constraint_data_to_remove = []
    empty_constraint_containers_to_remove = []
    constraint_containers_to_remove = []
    constraint_containers_to_check = set()
    referenced_variable_symbols = set()
    nnz = 0
    nrows = 0
    SparseMat_pRows = [0]
    for block in all_blocks:

        if hasattr(block, '_canonical_repn'):
            del block._canonical_repn
        if hasattr(block, '_ampl_repn'):
            del block._ampl_repn

        for constraint in block.component_objects(Constraint,
                                                  active=True,
                                                  sort=sortOrder,
                                                  descend_into=False):

            assert not isinstance(constraint, MatrixConstraint)

            if len(constraint) == 0:

                empty_constraint_containers_to_remove.append((block, constraint))

            else:

                singleton = isinstance(constraint, SimpleConstraint)

                for index, constraint_data in iteritems(constraint):

                    if constraint_data.body.polynomial_degree() <= 1:

                        # collect for removal
                        if singleton:
                            constraint_containers_to_remove.append((block, constraint))
                        else:
                            constraint_data_to_remove.append((constraint, index))
                            constraint_containers_to_check.add((block, constraint))

                        canonical_repn = generate_canonical_repn(constraint_data.body)

                        assert isinstance(canonical_repn, LinearCanonicalRepn)

                        row_variable_symbols = []
                        row_coefficients = []
                        if canonical_repn.variables is None:
                            if skip_trivial_constraints:
                                continue
                        else:
                            row_variable_symbols = \
                                [VarIDToVarSymbol[id(vardata)]
                                 for vardata in canonical_repn.variables]
                            referenced_variable_symbols.update(
                                row_variable_symbols)
                            assert canonical_repn.linear is not None
                            row_coefficients = canonical_repn.linear

                        SparseMat_pRows.append(SparseMat_pRows[-1] + \
                                               len(row_variable_symbols))
                        SparseMat_jCols.extend(row_variable_symbols)
                        SparseMat_Vals.extend(row_coefficients)

                        nnz += len(row_variable_symbols)
                        nrows += 1

                        L = _get_bound(constraint_data.lower)
                        U = _get_bound(constraint_data.upper)
                        constant = value(canonical_repn.constant)
                        if constant is None:
                            constant = 0

                        Ranges.append(L - constant if (L is not None) else 0)
                        Ranges.append(U - constant if (U is not None) else 0)
                        if (L is not None) and \
                           (U is not None) and \
                           (not constraint_data.equality):
                            RangeTypes.append(MatrixConstraint.LowerBound |
                                              MatrixConstraint.UpperBound)
                        elif constraint_data.equality:
                            RangeTypes.append(MatrixConstraint.Equality)
                        elif L is not None:
                            assert U is None
                            RangeTypes.append(MatrixConstraint.LowerBound)
                        else:
                            assert U is not None
                            RangeTypes.append(MatrixConstraint.UpperBound)

                        # Start freeing up memory
                        constraint_data.set_value(None)

    ncols = len(referenced_variable_symbols)

    stop_time = time.time()
    if verbose:
        print("Time to compile active linear constraints: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Removing compiled constraint objects...")

    #
    # Remove compiled constraints
    #
    constraints_removed = 0
    constraint_containers_removed = 0
    for block, constraint in empty_constraint_containers_to_remove:
        block.del_component(constraint)
        constraint_containers_removed += 1
    for constraint, index in constraint_data_to_remove:
        del constraint[index]
        constraints_removed += 1
    for block, constraint in constraint_containers_to_remove:
        block.del_component(constraint)
        constraints_removed += 1
        constraint_containers_removed += 1
    for block, constraint in constraint_containers_to_check:
        if len(constraint) == 0:
            block.del_component(constraint)
            constraint_containers_removed += 1

    stop_time = time.time()
    if verbose:
        print("Eliminated %s constraints and %s Constraint container objects"
              % (constraints_removed, constraint_containers_removed))
        print("Time to remove compiled constraint objects: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Assigning variable column indices...")

    #
    # Assign a column index to the set of referenced variables
    #
    ColumnIndexToVarSymbol = sorted(referenced_variable_symbols)
    VarSymbolToColumnIndex = dict((symbol, column)
                                  for column, symbol in enumerate(ColumnIndexToVarSymbol))
    SparseMat_jCols = [VarSymbolToColumnIndex[symbol] for symbol in SparseMat_jCols]
    del VarSymbolToColumnIndex
    ColumnIndexToVarObject = [VarSymbolToVarObject[var_symbol]
                              for var_symbol in ColumnIndexToVarSymbol]

    stop_time = time.time()
    if verbose:
        print("Time to assign variable column indices: %.2f seconds"
              % (stop_time-start_time))

    start_time = time.time()
    if verbose:
        print("Converting compiled constraint data to array storage...")
        print("  - Using %s precision for numeric values"
              % ('single' if single_precision_storage else 'double'))

    #
    # Convert to array storage
    #

    number_storage = 'f' if single_precision_storage else 'd'
    SparseMat_pRows = array.array('L', SparseMat_pRows)
    SparseMat_jCols = array.array('L', SparseMat_jCols)
    SparseMat_Vals = array.array(number_storage, SparseMat_Vals)
    Ranges = array.array(number_storage, Ranges)
    RangeTypes = array.array('B', RangeTypes)

    stop_time = time.time()
    if verbose:
        storage_bytes = \
            SparseMat_pRows.buffer_info()[1] * SparseMat_pRows.itemsize + \
            SparseMat_jCols.buffer_info()[1] * SparseMat_jCols.itemsize + \
            SparseMat_Vals.buffer_info()[1] * SparseMat_Vals.itemsize + \
            Ranges.buffer_info()[1] * Ranges.itemsize + \
            RangeTypes.buffer_info()[1] * RangeTypes.itemsize
        print("Sparse Matrix Dimension:")
        print("  - Rows: "+str(nrows))
        print("  - Cols: "+str(ncols))
        print("  - Nonzeros: "+str(nnz))
        print("Compiled Data Storage: "+str(_label_bytes(storage_bytes)))
        print("Time to convert compiled constraint data to "
              "array storage: %.2f seconds" % (stop_time-start_time))

    parent_block.add_component(constraint_name,
                               MatrixConstraint(nrows, ncols, nnz,
                                                SparseMat_pRows,
                                                SparseMat_jCols,
                                                SparseMat_Vals,
                                                Ranges,
                                                RangeTypes,
                                                ColumnIndexToVarObject))
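
The three SparseMat_* arrays form a standard CSR triplet: pRows[i]:pRows[i+1] delimits row i's slice of jCols and Vals. A minimal sketch of walking such a matrix, with names and sample data of our own:

import array

def iter_csr_rows(p_rows, j_cols, vals):
    # yield (row, [(col, val), ...]) from a CSR triplet
    for i in range(len(p_rows) - 1):
        start, stop = p_rows[i], p_rows[i + 1]
        yield i, list(zip(j_cols[start:stop], vals[start:stop]))

p_rows = array.array('L', [0, 2, 3])        # 2 rows, 3 nonzeros
j_cols = array.array('L', [0, 2, 1])
vals = array.array('d', [1.0, -1.0, 4.0])
for row, entries in iter_csr_rows(p_rows, j_cols, vals):
    print(row, entries)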

Example 4

Project: scikit-learn
Source File: random.py
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.

    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)

    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        if np.sum(class_prob_j) != 1.0:
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes, insert it with probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
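
The same indptr/indices/data layout is built here column by column (CSC): indptr[j]:indptr[j+1] delimits column j. Growing the triplet in array.array objects and converting once at the end avoids repeated numpy reallocations. A stripped-down sketch of that pattern, assuming scipy is available (the sample values are made up):

import array
import scipy.sparse as sp

data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])

# two columns of (row, value) pairs; values invented for the demo
for column in [[(0, 5), (3, 7)], [(1, 2)]]:
    for row, value in column:
        indices.append(row)
        data.append(value)
    indptr.append(len(indices))     # close the column

m = sp.csc_matrix((data, indices, indptr), shape=(4, 2))
print(m.toarray())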

Example 5

def time_layer( numEpochs, batchSize, inputPlanes, inputSize, outputPlanes, filterSize ):
    print('building network...')
    net = PyDeepCL.NeuralNet( inputPlanes, inputSize )
#    net.addLayer( PyDeepCL.ConvolutionalMaker().numFilters(inputPlanes)
#        .filterSize(1).padZeros().biased().linear() ) # this is just to make sure that gradient needs to be 
#                                                      # backproped through next layer
    net.addLayer( PyDeepCL.ForceBackpropMaker() ) # this forces the next layer to backprop gradients to
                          # this layer
    net.addLayer( PyDeepCL.ConvolutionalMaker().numFilters(outputPlanes)
        .filterSize(filterSize).biased().linear() )
    net.addLayer( PyDeepCL.FullyConnectedMaker().numPlanes(1).imageSize(1) )
    net.addLayer( PyDeepCL.SoftMaxMaker() )
    print( net.asString() )

    images = array.array( 'f', [0] * (batchSize*inputPlanes*inputSize*inputSize) )
    for i in range( batchSize*inputPlanes*inputSize*inputSize ):
        images[i] = random.random() - 0.5
#    grad = array.array('f',[0] * batchSize * outputPlanes * (inputSize - filterSize + 1) )
#    for i in range( batchSize * outputPlanes * (inputSize - filterSize + 1) ):
#        grad[i] = random.random() - 0.5
    labels = array.array('i',[0] * batchSize )
    
    print('warming up...')
    #try:
    net.setBatchSize(batchSize)

    # warm up forward
    for i in range(8):
        last = time.time()
        net.propagate( images )
        now = time.time()
        print('  warm up propagate all-layer time', now - last )
        last = now
    net.backPropFromLabels( 0.001, labels )
    now = time.time()
    print('   warm up backprop all-layer time', now - last )
    last = now

    layer = net.getLayer(2)
    print('running forward prop timings:')
    for i in range(numEpochs):
        layer.propagate()
    now = time.time()
    print('forward layer total time', now - last )
    print('forward layer average time', ( now - last ) / float(numEpochs) )
    writeResults( layer.asString() + ', forward: ' + str( ( now - last ) / float(numEpochs) * 1000 ) + 'ms' )

    print('warm up backwards again')
    layer.backProp(0.001)
    layer.backProp(0.001)
    print('warm up backwards done. start timings:')

    now = time.time()
    last = now
    for i in range(numEpochs):
        layer.backProp(0.001)
    now = time.time()
    print('backward layer total time', now - last )
    print('backward layer average time', ( now - last ) / float(numEpochs) )
    writeResults( layer.asString() + ', backward: ' + str( ( now - last ) / float(numEpochs) * 1000 ) + 'ms' )
    last = now
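
The benchmark fills its float buffer element by element; array.array can also be built directly from a generator expression, which does the allocation and fill in one pass. A sketch with sample dimensions of our own, not the project's:

import array
import random

batch, planes, size = 128, 1, 32            # illustrative dimensions
n = batch * planes * size * size
images = array.array('f', (random.random() - 0.5 for _ in range(n)))
labels = array.array('i', [0] * batch)
print(len(images), images.itemsize, len(labels))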

Example 6

Project: mhff
Source File: tex.py
def convert_tex(tex_file, png_file=None, ignore_alpha=False):
    tex = open(tex_file, 'rb')

    header = array.array('I', tex.read(16))
    if header[0] != 0x584554:
        raise ValueError('not a TEX file')

    constant = header[1] & 0xfff
    if constant not in [0xa5, 0xa6]:
        raise ValueError('unknown constant')
    unknown1 = (header[1] >> 12) & 0xfff
    size_shift = (header[1] >> 24) & 0xf # always == 0
    unknown2 = (header[1] >> 28) & 0xf # 2 = normal, 6 = cube map

    mipmap_count = header[2] & 0x3f
    width = (header[2] >> 6) & 0x1fff
    height = (header[2] >> 19) & 0x1fff

    texture_count = header[3] & 0xff
    color_type = (header[3] >> 8) & 0xff
    unknown3 = (header[3] >> 16) & 0x1fff # always == 1

    if unknown2 == 6:
        cube_map_junk = tex.read(0x6c) # data related to cube maps in some way
        height *= 6
    offsets = array.array('I', tex.read(4*mipmap_count*texture_count))
    pixel_data_start = tex.tell()
    pixel_data = None
    if mipmap_count > 1:
        pixel_data = tex.read(offsets[1] - offsets[0])
        if unknown2 == 6:
            for i in range(6):
                tex.seek(pixel_data_start + offsets[i*mipmap_count])
                pixel_data += tex.read(offsets[i*mipmap_count+1] - offsets[i*mipmap_count])
    else:
        pixel_data = tex.read()

    tex.close()

    if png_file is None:
        png_file = '{}.png'.format(tex_file)

    if color_type == 1:
        image = Image.frombytes('RGBA', (width, height), deblock(width, 4, decode_4444(pixel_data)), 'raw', 'ABGR')
        if ignore_alpha:
            image = image.convert('RGB')
        image.save(png_file)
    elif color_type == 2:
        image = Image.frombytes('RGBA', (width, height), deblock(width, 4, decode_1555(pixel_data)), 'raw', 'ABGR')
        if ignore_alpha:
            image = image.convert('RGB')
        image.save(png_file)
    elif color_type == 3:
        image = Image.frombytes('RGBA', (width, height), deblock(width, 4, pixel_data), 'raw', 'ABGR')
        if ignore_alpha:
            image = image.convert('RGB')
        image.save(png_file)
    elif color_type == 4:
        image = Image.frombytes('RGB', (width, height), deblock(width, 3, decode_565(pixel_data)), 'raw', 'BGR')
        image.save(png_file)
    elif color_type == 5: # format may not be correct
        image = Image.frombytes('L', (width, height), deblock(width, 1, pixel_data), 'raw', 'L')
        image.save(png_file)
    elif color_type == 7:
        pixel_data = array.array('H', pixel_data)
        pixel_data.byteswap()
        image = Image.frombytes('LA', (width, height), deblock(width, 2, pixel_data.tobytes()), 'raw', 'LA')
        image.save(png_file)
    elif color_type == 11:
        image = Image.frombytes('RGBA', (width, height), decode_etc1(pixel_data, width), 'raw', 'RGBA')
        if ignore_alpha:
            image = image.convert('RGB')
        image.save(png_file)
    elif color_type == 12:
        image = Image.frombytes('RGBA', (width, height), decode_etc1(pixel_data, width, True), 'raw', 'RGBA')
        if ignore_alpha:
            image = image.convert('RGB')
        image.save(png_file)
    elif color_type == 14: # format may not be correct
        image = Image.frombytes('L', (width, height), deblock(width, 1, decode_4444(pixel_data)), 'raw', 'L')
        image.save(png_file)
    elif color_type == 15: # format may not be correct
        image = Image.frombytes('L', (width, height), deblock(width, 1, decode_4444(pixel_data)), 'raw', 'L')
        image.save(png_file)
    elif color_type == 16: # format may not be correct
        image = Image.frombytes('L', (width, height), deblock(width, 1, pixel_data), 'raw', 'L')
        image.save(png_file)
    elif color_type == 17:
        image = Image.frombytes('RGB', (width, height), deblock(width, 3, pixel_data), 'raw', 'BGR')
        image.save(png_file)
    else:
        raise ValueError('unknown texture color type')
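
Reading the 16-byte header into array.array('I', ...) yields four native-endian 32-bit words, and the fields are then pulled out with shifts and masks. A self-contained sketch of the same unpacking against a synthetic header (field layout copied from the code above; assumes a little-endian host, as the format does):

import array

# synthetic header: magic, flags, packed dimensions, packed type fields
packed_dims = 1 | (256 << 6) | (128 << 19)      # 1 mipmap, 256 x 128
raw = array.array('I', [0x584554, 0, packed_dims, 0]).tobytes()

header = array.array('I', raw)                  # four native-endian 32-bit words
if header[0] != 0x584554:                       # b'TEX\x00' read little-endian
    raise ValueError('not a TEX file')
mipmap_count = header[2] & 0x3f
width = (header[2] >> 6) & 0x1fff
height = (header[2] >> 19) & 0x1fff
print(mipmap_count, width, height)              # 1 256 128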

Example 7

Project: mhff
Source File: tex_dds.py
def convert_tex(tex_file, dds_file=None):
    tex = open(tex_file, 'rb')
    tex_header = array.array('I', tex.read(16))
    if tex_header[0] != 0x584554:
        raise ValueError('unknown magic')
    constant = tex_header[1] & 0xfff
    if constant not in [0xa5, 0xa6]:
        raise ValueError('unknown constant')
    unknown1 = (tex_header[1] >> 12) & 0xfff
    size_shift = (tex_header[1] >> 24) & 0xf # always == 0
    cube_map = (tex_header[1] >> 28) & 0xf # 2 = normal, 6 = cube map
    mipmap_count = tex_header[2] & 0x3f
    width = (tex_header[2] >> 6) & 0x1fff
    height = (tex_header[2] >> 19) & 0x1fff
    texture_count = tex_header[3] & 0xff
    color_type = (tex_header[3] >> 8) & 0xff
    if color_type not in (1, 2, 3, 4, 5, 7, 11, 12, 14, 15, 16, 17):
        raise ValueError('unknown color type')
    unknown3 = (tex_header[3] >> 16) & 0x1fff # always == 1
    dds_header = array.array('I', [0x20534444, 124, 0x100f, height, width, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0x1000, 0, 0, 0, 0])
    if cube_map == 6:
        dds_header[27] |= 0x8
        dds_header[28] = 0xfe00
        tex.seek(0x6c, os.SEEK_CUR) # data related to cube maps in some way
    offsets = array.array('I', tex.read(mipmap_count * texture_count * 4))
    pixel_data_start = tex.tell()
    pixel_data = []
    if mipmap_count > 1:
        dds_header[2] |= 0x20000
        dds_header[7] = mipmap_count
        dds_header[27] |= 0x400008
    if color_type == 1:
        dds_header[20] = 0x41
        dds_header[22] = 16
        dds_header[23] = 0xf000
        dds_header[24] = 0xf00
        dds_header[25] = 0xf0
        dds_header[26] = 0xf
    elif color_type == 2:
        dds_header[20] = 0x41
        dds_header[22] = 16
        dds_header[23] = 0xf800
        dds_header[24] = 0x7c0
        dds_header[25] = 0x3e
        dds_header[26] = 0x1
    elif color_type in (3, 11, 12):
        dds_header[20] = 0x41
        dds_header[22] = 32
        dds_header[23] = 0xff000000
        dds_header[24] = 0xff0000
        dds_header[25] = 0xff00
        dds_header[26] = 0xff
    elif color_type == 4:
        dds_header[20] = 0x40
        dds_header[22] = 16
        dds_header[23] = 0xf800
        dds_header[24] = 0x7e0
        dds_header[25] = 0x1f
    elif color_type in (5, 14, 15, 16): # format may not be correct
        dds_header[20] = 0x20000
        dds_header[22] = 8
        dds_header[23] = 0xff
    elif color_type == 7:
        dds_header[20] = 0x20001
        dds_header[22] = 16
        dds_header[23] = 0xff00
        dds_header[26] = 0xff
    elif color_type == 17:
        dds_header[20] = 0x40
        dds_header[22] = 24
        dds_header[23] = 0xff0000
        dds_header[24] = 0xff00
        dds_header[25] = 0xff
    dds_header[5] = (width * dds_header[22] + 7) // 8
    main_data_size = width * height
    if color_type in (11, 14, 15):
        main_data_size //= 2
    if color_type in (1, 2, 4, 7):
        main_data_size *= 2
    if color_type == 17:
        main_data_size *= 3
    if color_type == 3:
        main_data_size *= 4
    for i in range(mipmap_count):
        for j in range(texture_count):
            tex.seek(pixel_data_start + offsets[i * texture_count + j])
            data = tex.read(main_data_size // (1 << (i * 2)))
            if color_type in (14, 15):
                data = decode_half_byte(data)
            if color_type in (11, 12):
                data = decode_etc1(data, width // (1 << i), color_type == 12)
            else:
                data = deblock(width // (1 << i), dds_header[22] // 8, data)
            pixel_data.append(data)
    tex.close()
    if dds_file is None:
        dds_file = '{}.dds'.format(tex_file)
    dds = open(dds_file, 'wb')
    dds.write(dds_header.tobytes())
    for data in pixel_data:
        dds.write(data)
    dds.close()
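
Here array.array('I') doubles as a mutable binary header builder: individual 32-bit fields are indexed and patched in place, then the whole header is serialized with tobytes(). A minimal sketch of that pattern on a fake four-word header (only the magic is real; the other fields are illustrative):

import array
import io

header = array.array('I', [0] * 4)      # four 32-bit fields, purely illustrative
header[0] = 0x20534444                  # real DDS magic: b'DDS ' little-endian
header[1] |= 0x100f                     # patch flag bits in place, as above
out = io.BytesIO()
out.write(header.tobytes())
print(out.getvalue()[:4])               # b'DDS ' on a little-endian host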

Example 8

Project: multiprocess
Source File: __init__.py
    def test_connection(self):
        conn, child_conn = self.Pipe()
        
        p = self.Process(target=self._echo, args=(child_conn,))
        p.setDaemon(True)
        p.start()

        seq = [1, 2.25, None]
        msg = 'hello world'
        longmsg = msg * 10
        arr = array.array('i', range(4))

        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)

        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)

        self.assertEqual(conn.sendBytes(msg), None)
        self.assertEqual(conn.recvBytes(), msg)

        if self.TYPE == 'processes':
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.sendBytes(arr), None)
            self.assertEqual(conn.recvBytesInto(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.sendBytes(arr), None)
            self.assertEqual(conn.recvBytesInto(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)

            buffer = array.array('c', ' ' * 40)
            self.assertEqual(conn.sendBytes(longmsg), None)
            try:
                res = conn.recvBytesInto(buffer)
            except processing.BufferTooShort, e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)

        poll = TimingWrapper(conn.poll)

        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)

        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)

        conn.send(None)

        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        
        self.assertEqual(conn.recv(), None)

        really_big_msg = 'X' * (1024 * 1024 * 16)       # 16 megabytes
        conn.sendBytes(really_big_msg)
        self.assertEqual(conn.recvBytes(), really_big_msg)
        
        conn.sendBytes('')                              # tell child to quit
        child_conn.close()

        if self.TYPE == 'processes':
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recvBytes)

        p.join()
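
The test above streams an array.array over a pipe as raw bytes and receives it into a preallocated array; the camelCase names (sendBytes, recvBytesInto) are the old processing-era API. A minimal sketch of the same round trip with the modern multiprocessing spelling:

import array
import multiprocessing

a, b = multiprocessing.Pipe()
src = array.array('i', range(4))
dst = array.array('i', [0] * 10)
a.send_bytes(src)                      # any buffer-protocol object works
n = b.recv_bytes_into(dst)             # fills dst in place, returns byte count
assert n == len(src) * src.itemsize
assert list(dst[:len(src)]) == [0, 1, 2, 3]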

Example 9

View license
	def __init__(self, rows, cols):
		self.rows = rows
		self.cols = cols

		# Initialize screen arrays
		self.screen = []
		self.rendition = []
		self.other_screen = []
		self.other_rendition = []
		self.alt_screen = False
		self.dirty = set()

		self.default_screen_line = array.array('u')
		self.default_rendition_line = array.array('I')
		for i in xrange(0, cols):
			self.default_screen_line.append(u' ')
			self.default_rendition_line.append(0)
		for i in xrange(0, rows):
			self.screen.append(array.array('u', self.default_screen_line))
			self.rendition.append(array.array('I', self.default_rendition_line))
			self.other_screen.append(array.array('u', self.default_screen_line))
			self.other_rendition.append(array.array('I', self.default_rendition_line))

		self.history_screen = []
		self.history_rendition = []

		self.active_rendition = 0

		self.cursor_row = 0
		self.cursor_col = 0
		self.cursor_visible = True
		self.tab_width = 8

		self.scroll_top = 0
		self.scroll_bottom = self.rows - 1

		self.saved_cursor_row = 0
		self.saved_cursor_col = 0

		self.saved_normal_cursor_row = 0
		self.saved_normal_cursor_col = 0
		self.saved_alt_cursor_row = 0
		self.saved_alt_cursor_col = 0

		self.escape_mode = False
		self.window_title_mode = False
		self.ignored_window_title = False
		self.line_draw = False
		self.utf8_buffer = ""
		self.utf8_len = 0
		self.unprocessed_input = u""
		self.application_cursor_keys = False
		self.insert_mode = False

		self.update_callback = None
		self.title_callback = None
		self.response_callback = None

		self.special_chars = {
			u'\x07': self.bell,
			u'\x08': self.backspace,
			u'\x09': self.horizontal_tab,
			u'\x0a': self.line_feed,
			u'\x0b': self.line_feed,
			u'\x0c': self.line_feed,
			u'\x0d': self.carriage_return
		}

		self.escape_sequences = {
			u'@': self.insert_chars,
			u'A': self.cursor_up,
			u'B': self.cursor_down,
			u'C': self.cursor_right,
			u'D': self.cursor_left,
			u'E': self.cursor_next_line,
			u'F': self.cursor_prev_line,
			u'G': self.set_cursor_col,
			u'`': self.set_cursor_col,
			u'd': self.set_cursor_row,
			u'H': self.move_cursor,
			u'f': self.move_cursor,
			u'I': self.cursor_right_tab,
			u'J': self.erase_screen,
			u'?J': self.erase_screen,
			u'K': self.erase_line,
			u'?K': self.erase_line,
			u'r': self.scroll_region,
			u'L': self.insert_lines,
			u'P': self.delete_chars,
			u'M': self.delete_lines,
			u'S': self.scroll_up_lines,
			u'T': self.scroll_down_lines,
			u'X': self.erase_chars,
			u'Z': self.cursor_left_tab,
			u'm': self.graphic_rendition,
			u'h': self.set_option,
			u'l': self.clear_option,
			u'?h': self.set_private_option,
			u'?l': self.clear_private_option,
			u'c': self.device_attr,
			u'>c': self.device_secondary_attr,
			u'n': self.device_status,
			u'?n': self.device_status,
			u'!p': self.soft_reset
		}

		self.charset_escapes = [u' ', u'#', u'%', u'(', u')', u'*', u'+']
		self.line_draw_map = {
			u'j': unicode('\xe2\x94\x98', 'utf8'),
			u'k': unicode('\xe2\x94\x90', 'utf8'),
			u'l': unicode('\xe2\x94\x8c', 'utf8'),
			u'm': unicode('\xe2\x94\x94', 'utf8'),
			u'n': unicode('\xe2\x94\xbc', 'utf8'),
			u'q': unicode('\xe2\x94\x80', 'utf8'),
			u't': unicode('\xe2\x94\x9c', 'utf8'),
			u'u': unicode('\xe2\x94\xa4', 'utf8'),
			u'v': unicode('\xe2\x94\xb4', 'utf8'),
			u'w': unicode('\xe2\x94\xac', 'utf8'),
			u'x': unicode('\xe2\x94\x82', 'utf8')
		}
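
Each screen row above is a 'u' array holding one Unicode character per cell, paired with an 'I' array of rendition words, so individual cells can be overwritten in place without reallocating the row. Note the 'u' typecode is deprecated in Python 3. A small sketch of one blank row:

import array

cols = 80
blank_row = array.array('u', ' ' * cols)        # one character per cell
blank_rendition = array.array('I', [0] * cols)  # one attribute word per cell
blank_row[0] = '$'                              # cells are mutable in place
assert blank_row.tounicode().startswith('$ ')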

Example 10

View license
	def resize(self, rows, cols):
		if rows > self.rows:
			# Adding rows
			for i in xrange(self.rows, rows):
				self.screen.append(array.array('u', self.default_screen_line))
				self.rendition.append(array.array('I', self.default_rendition_line))
				self.other_screen.append(array.array('u', self.default_screen_line))
				self.other_rendition.append(array.array('I', self.default_rendition_line))
		elif rows < self.rows:
			if self.alt_screen:
				# Alternate screen buffer is active
				normal_cursor_row = self.saved_normal_cursor_row
				if normal_cursor_row < rows:
					# Cursor is at top, remove lines from bottom
					self.other_screen = self.other_screen[:rows]
					self.other_rendition = self.other_rendition[:rows]
				else:
					# Cursor is at bottom, remove lines from top, and place them in the
					# history buffer
					for i in xrange(0, (normal_cursor_row + 1) - rows):
						screen_line = self.other_screen.pop(0)
						rendition_line = self.other_rendition.pop(0)
						self.history_screen.append(screen_line)
						self.history_rendition.append(rendition_line)
					self.other_screen = self.other_screen[:rows]
					self.other_rendition = self.other_rendition[:rows]
				self.screen = self.screen[:rows]
				self.rendition = self.rendition[:rows]
			else:
				# Normal screen buffer is active
				normal_cursor_row = self.cursor_row
				if normal_cursor_row < rows:
					# Cursor is at top, remove lines from bottom
					self.screen = self.screen[:rows]
					self.rendition = self.rendition[:rows]
				else:
					# Cursor is at bottom, remove lines from top, and place them in the
					# history buffer
					for i in xrange(0, (normal_cursor_row + 1) - rows):
						screen_line = self.screen.pop(0)
						rendition_line = self.rendition.pop(0)
						self.history_screen.append(screen_line)
						self.history_rendition.append(rendition_line)
					self.screen = self.screen[:rows]
					self.rendition = self.rendition[:rows]
				self.other_screen = self.other_screen[:rows]
				self.other_rendition = self.other_rendition[:rows]

		if cols > self.cols:
			# Adding columns
			for i in xrange(0, rows):
				for j in xrange(self.cols, cols):
					self.screen[i].append(u' ')
					self.rendition[i].append(0)
					self.other_screen[i].append(u' ')
					self.other_rendition[i].append(0)
			for j in xrange(self.cols, cols):
				self.default_screen_line.append(u' ')
				self.default_rendition_line.append(0)
		elif cols < self.cols:
			# Removing columns
			for i in xrange(0, rows):
				self.screen[i] = self.screen[i][0:cols]
				self.rendition[i] = self.rendition[i][0:cols]
				self.other_screen[i] = self.other_screen[i][0:cols]
				self.other_rendition[i] = self.other_rendition[i][0:cols]
			self.default_screen_line = self.default_screen_line[0:cols]
			self.default_rendition_line = self.default_rendition_line[0:cols]

		self.rows = rows
		self.cols = cols

		self.scroll_top = 0
		self.scroll_bottom = self.rows - 1

		# Ensure cursors are within bounds
		if self.cursor_col > cols:
			self.cursor_col = cols
		if self.cursor_row >= rows:
			self.cursor_row = rows - 1
		if self.saved_cursor_col > cols:
			self.saved_cursor_col = cols
		if self.saved_cursor_row >= rows:
			self.saved_cursor_row = rows - 1
		if self.saved_normal_cursor_col > cols:
			self.saved_normal_cursor_col = cols
		if self.saved_normal_cursor_row >= rows:
			self.saved_normal_cursor_row = rows - 1
		if self.saved_alt_cursor_col > cols:
			self.saved_alt_cursor_col = cols
		if self.saved_alt_cursor_row >= rows:
			self.saved_alt_cursor_row = rows - 1

		self.invalidate()
		if self.update_callback:
			self.update_callback()
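
resize() leans on two array.array behaviors: slicing returns a fresh array of the same typecode, so row[0:cols] truncates by copying, while append() grows the existing array in place. A tiny sketch:

import array

row = array.array('u', 'abcdef')
row = row[:4]            # shrink: slicing copies into a new 4-element array
for _ in range(2):
    row.append(' ')      # grow: in-place append
assert row.tounicode() == 'abcd  '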

Example 11

Project: python-periphery
Source File: spi.py
View license
    def _open(self, devpath, mode, max_speed, bit_order, bits_per_word, extra_flags):
        if not isinstance(devpath, str):
            raise TypeError("Invalid devpath type, should be string.")
        elif not isinstance(mode, int):
            raise TypeError("Invalid mode type, should be integer.")
        elif not isinstance(max_speed, (int, float)):
            raise TypeError("Invalid max_speed type, should be integer or float.")
        elif not isinstance(bit_order, str):
            raise TypeError("Invalid bit_order type, should be string.")
        elif not isinstance(bits_per_word, int):
            raise TypeError("Invalid bits_per_word type, should be integer.")
        elif not isinstance(extra_flags, int):
            raise TypeError("Invalid extra_flags type, should be integer.")

        if mode not in [0, 1, 2, 3]:
            raise ValueError("Invalid mode, can be 0, 1, 2, 3.")
        elif bit_order.lower() not in ["msb", "lsb"]:
            raise ValueError("Invalid bit_order, can be \"msb\" or \"lsb\".")
        elif bits_per_word < 0 or bits_per_word > 255:
            raise ValueError("Invalid bits_per_word, must be 0-255.")
        elif extra_flags < 0 or extra_flags > 255:
            raise ValueError("Invalid extra_flags, must be 0-255.")

        # Open spidev
        try:
            self._fd = os.open(devpath, os.O_RDWR)
        except OSError as e:
            raise SPIError(e.errno, "Opening SPI device: " + e.strerror)

        self._devpath = devpath

        bit_order = bit_order.lower()

        # Set mode, bit order, extra flags
        buf = array.array("B", [mode | (SPI._SPI_LSB_FIRST if bit_order == "lsb" else 0) | extra_flags])
        try:
            fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MODE, buf, False)
        except OSError as e:
            raise SPIError(e.errno, "Setting SPI mode: " + e.strerror)

        # Set max speed
        buf = array.array("I", [int(max_speed)])
        try:
            fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MAX_SPEED_HZ, buf, False)
        except OSError as e:
            raise SPIError(e.errno, "Setting SPI max speed: " + e.strerror)

        # Set bits per word
        buf = array.array("B", [bits_per_word])
        try:
            fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_BITS_PER_WORD, buf, False)
        except OSError as e:
            raise SPIError(e.errno, "Setting SPI bits per word: " + e.strerror)

Example 12

Project: BitTornado
Source File: test_piecebuffer.py
View license
    def test_buffer(self):
        teststring = b'teststringofsomelength'
        shorterstring = b'shorterstring'

        x = PieceBuffer()
        x.append(teststring)

        # Basic functionality
        self.assertEqual(len(x), len(teststring))
        self.assertEqual(x[:], array.array('B', teststring))
        self.assertEqual(x[0], teststring[0])
        self.assertEqual(x[1:-1], array.array('B', teststring[1:-1]))

        # Optimization
        self.assertIs(x.buf, x[:])
        self.assertIs(x.buf, x[:len(teststring)])

        # Bounds checking
        with self.assertRaises(IndexError):
            x[-len(teststring) * 2]
        with self.assertRaises(IndexError):
            x[len(teststring) * 2]

        # Test range of [a:b] combinations
        bounds = [-10, -5, -2, -1, 0, 1, 2, 5, 10]
        for start in bounds:
            for stop in bounds:
                self.assertEqual(x[start:stop], x.buf[start:stop])

        # Re-initializing PieceBuffer retains buf attribute
        # but acts empty
        x.init()
        self.assertEqual(len(x), 0)
        self.assertEqual(x[:], array.array('B'))
        self.assertEqual(x.buf, array.array('B', teststring))
        with self.assertRaises(IndexError):
            x[0]

        # Test equal behavior despite distinct buffer contents
        y = PieceBuffer()
        x.append(shorterstring)
        y.append(shorterstring)
        self.assertEqual(x.length, y.length)

        # Bounds checking
        with self.assertRaises(IndexError):
            x[len(shorterstring)]
        with self.assertRaises(IndexError):
            y[len(shorterstring)]
        with self.assertRaises(IndexError):
            x[-len(shorterstring) - 1]
        with self.assertRaises(IndexError):
            y[-len(shorterstring) - 1]

        # Test range of [a:b] combinations
        bounds = [-10, -5, -2, -1, 0, 1, 2, 5, 10]
        for start in bounds:
            for stop in bounds:
                self.assertEqual(x[start:stop], y[start:stop])
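
The behavior under test is that PieceBuffer.init() resets only the logical length while keeping the backing array('B') allocated for reuse. A minimal sketch of that pattern using a hypothetical MiniBuffer (not BitTornado's actual class):

import array

class MiniBuffer:
    def __init__(self):
        self.buf = array.array('B')
        self.length = 0

    def init(self):
        self.length = 0      # keep self.buf allocated for reuse

    def append(self, data):
        self.buf[self.length:self.length + len(data)] = array.array('B', data)
        self.length += len(data)

    def __len__(self):
        return self.length

b = MiniBuffer()
b.append(b'teststring')
b.init()
assert len(b) == 0 and b.buf.tobytes() == b'teststring'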

Example 13

View license
    def testImageData(self):
        width = 256
        height = 256

        rPlane = array.array('B')
        rPlane.fromlist( [y%256 for y in range(0,height) for x in range(0,width)] )
        if sys.version_info[0] == 3:
            buffer = memoryview
        else:
            from __builtin__ import buffer
        rPlane = buffer(rPlane)

        gPlane = array.array('B')
        gPlane.fromlist( [y%256 for y in range(0,height) for x in range(width,0,-1)] )
        gPlane = buffer(gPlane)

        bPlane = array.array('B')
        bPlane.fromlist( [x%256 for y in range(0,height) for x in range(0,width)] )
        bPlane = buffer(bPlane)

        dataPlanes = (rPlane, gPlane, bPlane, None, None)

        # test planar, pre-made buffer
        i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
        self.assertTrue(i1)

        singlePlane = objc.allocateBuffer(width*height*3)
        for i in range(0, width*height):
            si = i * 3
            if sys.version_info[0] == 2:
                singlePlane[si] = rPlane[i]
                singlePlane[si+1] = gPlane[i]
                singlePlane[si+2] = bPlane[i]
            else:
                def as_byte(v):
                    if isinstance(v, int):
                        return v
                    else:
                        return ord(v)
                singlePlane[si] = as_byte(rPlane[i])
                singlePlane[si+1] = as_byte(gPlane[i])
                singlePlane[si+2] = as_byte(bPlane[i])

        dataPlanes = (singlePlane, None, None, None, None)
        # test non-planar, premade buffer
        i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)

        # test grey scale
        greyPlane = array.array('B')
        greyPlane.fromlist( [x%256 for x in range(0,height) for x in range(0,width)] )
        greyPlanes = (greyPlane, None, None, None, None)
        greyImage = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(greyPlanes, width, height, 8, 1, NO, YES, NSCalibratedWhiteColorSpace, width, 8)

        # test planar, NSBIR allocated buffer
        i3 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)

        r,g,b,a,o = i3.getBitmapDataPlanes_()
        self.assertTrue(r)
        self.assertTrue(g)
        self.assertTrue(b)
        self.assertTrue(not a)
        self.assertTrue(not o)

        self.assertEqual(len(r), len(rPlane))
        self.assertEqual(len(g), len(gPlane))
        self.assertEqual(len(b), len(bPlane))

        r[0:len(r)] = rPlane[0:len(rPlane)]
        g[0:len(g)] = gPlane[0:len(gPlane)]
        b[0:len(b)] = bPlane[0:len(bPlane)]

        bitmapData = i2.bitmapData()

        self.assertEqual(len(bitmapData), len(singlePlane))
        try:
            memoryview
        except NameError:
            self.assertEqual(bitmapData, singlePlane)
        else:
            self.assertEqual(bitmapData.tobytes(),
                singlePlane)

        a = array.array('L', [255]*4)
        self.assertArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
        d = i2.getPixel_atX_y_(a, 1, 1)
        self.assertIs(a, d)
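
Each color plane above is an array('B') of width*height sample bytes, filled with fromlist() and handed to Cocoa through the buffer protocol. A standalone sketch of one small gradient plane, minus the PyObjC parts:

import array

width = height = 4
plane = array.array('B')
plane.fromlist([y % 256 for y in range(height) for x in range(width)])
assert len(plane) == width * height
assert plane.tolist()[:8] == [0, 0, 0, 0, 1, 1, 1, 1]
view = memoryview(plane)    # the buffer-protocol view a consumer would take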

Example 14

Project: laf-fabric
Source File: model.py
View license
def model(origin, data_items, stamp):
    '''Augment the results of XML parsing by precomputing additional data structures.'''

    osep = ':' if origin[0] == 'a' else ''

    def model_x():
        stamp.Imsg("XML-IDS (inverse mapping)")
        for kind in ('n', 'e'):
            xi = (origin + osep + 'X' + kind + 'f', ())
            xr = (origin + osep + 'X' + kind + 'b', ())
            Names.deliver(make_inverse(data_items[Names.comp(*xi)]), xr, data_items)

    def model_regions():
        stamp.Imsg("NODES AND REGIONS")
        node_region_list = data_items[Names.comp(origin + osep + 'T00', ('node_region_list',))]
        n_node = len(node_region_list)

        stamp.Imsg("NODES ANCHOR BOUNDARIES")
        node_anchor_min = array.array('I', (0 for i in range(n_node)))
        node_anchor_max = array.array('I', (0 for i in range(n_node)))
        node_linked = array.array('I')
        region_begin = data_items[Names.comp(origin + osep + 'T00', ('region_begin',))]
        region_end = data_items[Names.comp(origin + osep + 'T00', ('region_end',))]
        node_anchor_list = []
        for node in range(n_node):
            links = node_region_list[node]
            if len(links) == 0:
                node_anchor_list.append([])
                continue
            node_linked.append(node)
            ranges = []
            for r in links:
                this_anchor_begin = region_begin[r]
                this_anchor_end = region_end[r]
                ranges.append((this_anchor_begin, this_anchor_end))
            norm_ranges = normalize_ranges(ranges)
            node_anchor_list.append(norm_ranges)
            node_anchor_min[node] = min(norm_ranges) + 1
            node_anchor_max[node] = max(norm_ranges) + 1
        (node_anchor, node_anchor_items) = arrayify(node_anchor_list)
        Names.deliver(node_anchor_min, (origin + osep + 'G00', ('node_anchor_min',)), data_items)
        Names.deliver(node_anchor_max, (origin + osep + 'G00', ('node_anchor_max',)), data_items)
        Names.deliver(node_anchor, (origin + osep + 'P00', ('node_anchor',)), data_items)
        Names.deliver(node_anchor_items, (origin + osep + 'P00', ('node_anchor_items',)), data_items)

        node_region_list = None
        del data_items[Names.comp(origin + osep + 'T00', ('region_begin',))]
        del data_items[Names.comp(origin + osep + 'T00', ('region_end',))]
        del data_items[Names.comp(origin + osep + 'T00', ('node_region_list',))]

        def interval(node): return (node_anchor_min[node], -node_anchor_max[node])

        stamp.Imsg("NODES SORTING BY REGIONS")
        node_sort = array.array('I', sorted(node_linked, key=interval))
        node_sort_inv = make_array_inverse(node_sort)
        Names.deliver(node_sort, (origin + osep + 'G00', ('node_sort',)), data_items)
        Names.deliver(node_sort_inv, (origin + osep + 'G00', ('node_sort_inv',)), data_items)

        stamp.Imsg("NODES EVENTS")
        anchor_max = max(node_anchor_max) - 1
        node_events = list([([],[],[]) for n in range(anchor_max + 1)])
        for n in node_sort:
            ranges = node_anchor_list[n]
            amin = ranges[0]
            amax = ranges[len(ranges)-1] 
            for (r, (a_start, a_end)) in enumerate(grouper(ranges, 2)):
                is_first = r == 0
                is_last = r == (len(ranges) / 2) - 1
                start_kind = 0 if is_first else 1 # 0 = start,   1 = resume
                end_kind = 3 if is_last else 2    # 2 = suspend, 3 = end
                if amin == amax: node_events[a_start][1].extend([(n, 0), (n,3)])
                else:
                    node_events[a_start][0].append((n, start_kind))
                    node_events[a_end][2].append((n, end_kind))
        node_events_n = array.array('I')
        node_events_k = array.array('I')
        node_events_a = list([[] for a in range(anchor_max + 1)])
        e_index = 0
        for (anchor, events) in enumerate(node_events):
            events[2].reverse()
            for main_kind in (2, 1, 0):
                for (node, kind) in events[main_kind]:
                    node_events_n.append(node)
                    node_events_k.append(kind)
                    node_events_a[anchor].append(e_index)
                    e_index += 1
        node_events = None
        (node_events, node_events_items) = arrayify(node_events_a)
        node_events_a = None
        Names.deliver(node_events_n, (origin + osep + 'P00', ('node_events_n',)), data_items)
        Names.deliver(node_events_k, (origin + osep + 'P00', ('node_events_k',)), data_items)
        Names.deliver(node_events, (origin + osep + 'P00', ('node_events',)), data_items)
        Names.deliver(node_events_items, (origin + osep + 'P00', ('node_events_items',)), data_items)
        node_anchor_list = None

    def model_conn():
        node_anchor_min = data_items[Names.comp('mG00', ('node_anchor_min',))]
        node_anchor_max = data_items[Names.comp('mG00', ('node_anchor_max',))]

        def interval(elem): return (node_anchor_min[elem[0]], -node_anchor_max[elem[0]])

        stamp.Imsg("CONNECTIVITY")
        edges_from = data_items[Names.comp('mG00', ('edges_from',))]
        edges_to = data_items[Names.comp('mG00', ('edges_to',))]
        labeled_edges = set()
        efeatures = set()
        for dkey in data_items:
            (dorigin, dgroup, dkind, ddir, dcomps) = Names.decomp_full(dkey)
            if dgroup != 'F' or dorigin != origin or dkind != 'e': continue
            efeatures.add((dkey, dcomps))
        for (dkey, feat) in efeatures:
            feature_map = data_items[dkey]
            connections = {}
            connectionsi = {}
            for (edge, fvalue) in feature_map.items():
                labeled_edges.add(edge)
                node_from = edges_from[edge]
                node_to = edges_to[edge]
                connections.setdefault(node_from, {})[node_to] = fvalue
                connectionsi.setdefault(node_to, {})[node_from] = fvalue
            Names.deliver(connections, (origin + osep + 'C0f', feat), data_items)
            Names.deliver(connectionsi, (origin + osep + 'C0b', feat), data_items)

        connections = {}
        connectionsi = {}
        if origin == 'm':
            for edge in range(len(edges_from)):
                if edge in labeled_edges: continue
                node_from = edges_from[edge]
                node_to = edges_to[edge]
                connections.setdefault(node_from, {})[node_to] = ''
                connectionsi.setdefault(node_to, {})[node_from] = ''
        elif origin[0] == 'a':
            for edge in range(len(edges_from)):
                if edge not in labeled_edges: continue
                node_from = edges_from[edge]
                node_to = edges_to[edge]
                connections.setdefault(node_from, {})[node_to] = ''
                connectionsi.setdefault(node_to, {})[node_from] = ''
        sfeature = Names.E_ANNOT_NON if origin == 'm' else Names.E_ANNOT_YES if origin[0] == 'a' else ''
        Names.deliver(connections, (origin + osep + 'C0f', sfeature), data_items)
        Names.deliver(connectionsi, (origin + osep + 'C0b', sfeature), data_items)

    if origin == 'm':
        model_x()
        model_regions()
    model_conn()
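
Two array.array idioms carry most of this function: zero-filled 'I' arrays preallocated from a generator, and node_sort built by feeding sorted() output straight to the array constructor. A small sketch of both; repeating a one-element array is an equivalent and usually faster way to preallocate:

import array

n_node = 1000
a = array.array('I', (0 for i in range(n_node)))   # as in the source
b = array.array('I', [0]) * n_node                 # equivalent preallocation
assert a == b and len(b) == n_node

node_linked = array.array('I', [5, 2, 9])
node_sort = array.array('I', sorted(node_linked, key=lambda n: -n))
assert node_sort.tolist() == [9, 5, 2]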

Example 15

Project: gmvault
Source File: blowfish.py
View license
    def __init__(self, key):
        """
        Creates an instance of blowfish using 'key' as the encryption key.
 
        Key is a string of bytes, used to seed calculations.
        Once the instance of the object is created, the key is no longer necessary.
        """
        if not self.KEY_MIN_LEN <= len(key) <= self.KEY_MAX_LEN:
            raise ValueError("Attempted to initialize Blowfish cipher with key of invalid length: %(len)i" % {
             'len': len(key),
            })
 
        self._p_boxes = array.array('I', [
            0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
            0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
            0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
            0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917,
            0x9216D5D9, 0x8979FB1B
        ])
 
        self._s_boxes = (
            array.array('I', [
                0xD1310BA6, 0x98DFB5AC, 0x2FFD72DB, 0xD01ADFB7,
                0xB8E1AFED, 0x6A267E96, 0xBA7C9045, 0xF12C7F99,
                0x24A19947, 0xB3916CF7, 0x0801F2E2, 0x858EFC16,
                0x636920D8, 0x71574E69, 0xA458FEA3, 0xF4933D7E,
                0x0D95748F, 0x728EB658, 0x718BCD58, 0x82154AEE,
                0x7B54A41D, 0xC25A59B5, 0x9C30D539, 0x2AF26013,
                0xC5D1B023, 0x286085F0, 0xCA417918, 0xB8DB38EF,
                0x8E79DCB0, 0x603A180E, 0x6C9E0E8B, 0xB01E8A3E,
                0xD71577C1, 0xBD314B27, 0x78AF2FDA, 0x55605C60,
                0xE65525F3, 0xAA55AB94, 0x57489862, 0x63E81440,
                0x55CA396A, 0x2AAB10B6, 0xB4CC5C34, 0x1141E8CE,
                0xA15486AF, 0x7C72E993, 0xB3EE1411, 0x636FBC2A,
                0x2BA9C55D, 0x741831F6, 0xCE5C3E16, 0x9B87931E,
                0xAFD6BA33, 0x6C24CF5C, 0x7A325381, 0x28958677,
                0x3B8F4898, 0x6B4BB9AF, 0xC4BFE81B, 0x66282193,
                0x61D809CC, 0xFB21A991, 0x487CAC60, 0x5DEC8032,
                0xEF845D5D, 0xE98575B1, 0xDC262302, 0xEB651B88,
                0x23893E81, 0xD396ACC5, 0x0F6D6FF3, 0x83F44239,
                0x2E0B4482, 0xA4842004, 0x69C8F04A, 0x9E1F9B5E,
                0x21C66842, 0xF6E96C9A, 0x670C9C61, 0xABD388F0,
                0x6A51A0D2, 0xD8542F68, 0x960FA728, 0xAB5133A3,
                0x6EEF0B6C, 0x137A3BE4, 0xBA3BF050, 0x7EFB2A98,
                0xA1F1651D, 0x39AF0176, 0x66CA593E, 0x82430E88,
                0x8CEE8619, 0x456F9FB4, 0x7D84A5C3, 0x3B8B5EBE,
                0xE06F75D8, 0x85C12073, 0x401A449F, 0x56C16AA6,
                0x4ED3AA62, 0x363F7706, 0x1BFEDF72, 0x429B023D,
                0x37D0D724, 0xD00A1248, 0xDB0FEAD3, 0x49F1C09B,
                0x075372C9, 0x80991B7B, 0x25D479D8, 0xF6E8DEF7,
                0xE3FE501A, 0xB6794C3B, 0x976CE0BD, 0x04C006BA,
                0xC1A94FB6, 0x409F60C4, 0x5E5C9EC2, 0x196A2463,
                0x68FB6FAF, 0x3E6C53B5, 0x1339B2EB, 0x3B52EC6F,
                0x6DFC511F, 0x9B30952C, 0xCC814544, 0xAF5EBD09,
                0xBEE3D004, 0xDE334AFD, 0x660F2807, 0x192E4BB3,
                0xC0CBA857, 0x45C8740F, 0xD20B5F39, 0xB9D3FBDB,
                0x5579C0BD, 0x1A60320A, 0xD6A100C6, 0x402C7279,
                0x679F25FE, 0xFB1FA3CC, 0x8EA5E9F8, 0xDB3222F8,
                0x3C7516DF, 0xFD616B15, 0x2F501EC8, 0xAD0552AB,
                0x323DB5FA, 0xFD238760, 0x53317B48, 0x3E00DF82,
                0x9E5C57BB, 0xCA6F8CA0, 0x1A87562E, 0xDF1769DB,
                0xD542A8F6, 0x287EFFC3, 0xAC6732C6, 0x8C4F5573,
                0x695B27B0, 0xBBCA58C8, 0xE1FFA35D, 0xB8F011A0,
                0x10FA3D98, 0xFD2183B8, 0x4AFCB56C, 0x2DD1D35B,
                0x9A53E479, 0xB6F84565, 0xD28E49BC, 0x4BFB9790,
                0xE1DDF2DA, 0xA4CB7E33, 0x62FB1341, 0xCEE4C6E8,
                0xEF20CADA, 0x36774C01, 0xD07E9EFE, 0x2BF11FB4,
                0x95DBDA4D, 0xAE909198, 0xEAAD8E71, 0x6B93D5A0,
                0xD08ED1D0, 0xAFC725E0, 0x8E3C5B2F, 0x8E7594B7,
                0x8FF6E2FB, 0xF2122B64, 0x8888B812, 0x900DF01C,
                0x4FAD5EA0, 0x688FC31C, 0xD1CFF191, 0xB3A8C1AD,
                0x2F2F2218, 0xBE0E1777, 0xEA752DFE, 0x8B021FA1,
                0xE5A0CC0F, 0xB56F74E8, 0x18ACF3D6, 0xCE89E299,
                0xB4A84FE0, 0xFD13E0B7, 0x7CC43B81, 0xD2ADA8D9,
                0x165FA266, 0x80957705, 0x93CC7314, 0x211A1477,
                0xE6AD2065, 0x77B5FA86, 0xC75442F5, 0xFB9D35CF,
                0xEBCDAF0C, 0x7B3E89A0, 0xD6411BD3, 0xAE1E7E49,
                0x00250E2D, 0x2071B35E, 0x226800BB, 0x57B8E0AF,
                0x2464369B, 0xF009B91E, 0x5563911D, 0x59DFA6AA,
                0x78C14389, 0xD95A537F, 0x207D5BA2, 0x02E5B9C5,
                0x83260376, 0x6295CFA9, 0x11C81968, 0x4E734A41,
                0xB3472DCA, 0x7B14A94A, 0x1B510052, 0x9A532915,
                0xD60F573F, 0xBC9BC6E4, 0x2B60A476, 0x81E67400,
                0x08BA6FB5, 0x571BE91F, 0xF296EC6B, 0x2A0DD915,
                0xB6636521, 0xE7B9F9B6, 0xFF34052E, 0xC5855664,
                0x53B02D5D, 0xA99F8FA1, 0x08BA4799, 0x6E85076A
            ]),
            array.array('I', [
                0x4B7A70E9, 0xB5B32944, 0xDB75092E, 0xC4192623,
                0xAD6EA6B0, 0x49A7DF7D, 0x9CEE60B8, 0x8FEDB266,
                0xECAA8C71, 0x699A17FF, 0x5664526C, 0xC2B19EE1,
                0x193602A5, 0x75094C29, 0xA0591340, 0xE4183A3E,
                0x3F54989A, 0x5B429D65, 0x6B8FE4D6, 0x99F73FD6,
                0xA1D29C07, 0xEFE830F5, 0x4D2D38E6, 0xF0255DC1,
                0x4CDD2086, 0x8470EB26, 0x6382E9C6, 0x021ECC5E,
                0x09686B3F, 0x3EBAEFC9, 0x3C971814, 0x6B6A70A1,
                0x687F3584, 0x52A0E286, 0xB79C5305, 0xAA500737,
                0x3E07841C, 0x7FDEAE5C, 0x8E7D44EC, 0x5716F2B8,
                0xB03ADA37, 0xF0500C0D, 0xF01C1F04, 0x0200B3FF,
                0xAE0CF51A, 0x3CB574B2, 0x25837A58, 0xDC0921BD,
                0xD19113F9, 0x7CA92FF6, 0x94324773, 0x22F54701,
                0x3AE5E581, 0x37C2DADC, 0xC8B57634, 0x9AF3DDA7,
                0xA9446146, 0x0FD0030E, 0xECC8C73E, 0xA4751E41,
                0xE238CD99, 0x3BEA0E2F, 0x3280BBA1, 0x183EB331,
                0x4E548B38, 0x4F6DB908, 0x6F420D03, 0xF60A04BF,
                0x2CB81290, 0x24977C79, 0x5679B072, 0xBCAF89AF,
                0xDE9A771F, 0xD9930810, 0xB38BAE12, 0xDCCF3F2E,
                0x5512721F, 0x2E6B7124, 0x501ADDE6, 0x9F84CD87,
                0x7A584718, 0x7408DA17, 0xBC9F9ABC, 0xE94B7D8C,
                0xEC7AEC3A, 0xDB851DFA, 0x63094366, 0xC464C3D2,
                0xEF1C1847, 0x3215D908, 0xDD433B37, 0x24C2BA16,
                0x12A14D43, 0x2A65C451, 0x50940002, 0x133AE4DD,
                0x71DFF89E, 0x10314E55, 0x81AC77D6, 0x5F11199B,
                0x043556F1, 0xD7A3C76B, 0x3C11183B, 0x5924A509,
                0xF28FE6ED, 0x97F1FBFA, 0x9EBABF2C, 0x1E153C6E,
                0x86E34570, 0xEAE96FB1, 0x860E5E0A, 0x5A3E2AB3,
                0x771FE71C, 0x4E3D06FA, 0x2965DCB9, 0x99E71D0F,
                0x803E89D6, 0x5266C825, 0x2E4CC978, 0x9C10B36A,
                0xC6150EBA, 0x94E2EA78, 0xA5FC3C53, 0x1E0A2DF4,
                0xF2F74EA7, 0x361D2B3D, 0x1939260F, 0x19C27960,
                0x5223A708, 0xF71312B6, 0xEBADFE6E, 0xEAC31F66,
                0xE3BC4595, 0xA67BC883, 0xB17F37D1, 0x018CFF28,
                0xC332DDEF, 0xBE6C5AA5, 0x65582185, 0x68AB9802,
                0xEECEA50F, 0xDB2F953B, 0x2AEF7DAD, 0x5B6E2F84,
                0x1521B628, 0x29076170, 0xECDD4775, 0x619F1510,
                0x13CCA830, 0xEB61BD96, 0x0334FE1E, 0xAA0363CF,
                0xB5735C90, 0x4C70A239, 0xD59E9E0B, 0xCBAADE14,
                0xEECC86BC, 0x60622CA7, 0x9CAB5CAB, 0xB2F3846E,
                0x648B1EAF, 0x19BDF0CA, 0xA02369B9, 0x655ABB50,
                0x40685A32, 0x3C2AB4B3, 0x319EE9D5, 0xC021B8F7,
                0x9B540B19, 0x875FA099, 0x95F7997E, 0x623D7DA8,
                0xF837889A, 0x97E32D77, 0x11ED935F, 0x16681281,
                0x0E358829, 0xC7E61FD6, 0x96DEDFA1, 0x7858BA99,
                0x57F584A5, 0x1B227263, 0x9B83C3FF, 0x1AC24696,
                0xCDB30AEB, 0x532E3054, 0x8FD948E4, 0x6DBC3128,
                0x58EBF2EF, 0x34C6FFEA, 0xFE28ED61, 0xEE7C3C73,
                0x5D4A14D9, 0xE864B7E3, 0x42105D14, 0x203E13E0,
                0x45EEE2B6, 0xA3AAABEA, 0xDB6C4F15, 0xFACB4FD0,
                0xC742F442, 0xEF6ABBB5, 0x654F3B1D, 0x41CD2105,
                0xD81E799E, 0x86854DC7, 0xE44B476A, 0x3D816250,
                0xCF62A1F2, 0x5B8D2646, 0xFC8883A0, 0xC1C7B6A3,
                0x7F1524C3, 0x69CB7492, 0x47848A0B, 0x5692B285,
                0x095BBF00, 0xAD19489D, 0x1462B174, 0x23820E00,
                0x58428D2A, 0x0C55F5EA, 0x1DADF43E, 0x233F7061,
                0x3372F092, 0x8D937E41, 0xD65FECF1, 0x6C223BDB,
                0x7CDE3759, 0xCBEE7460, 0x4085F2A7, 0xCE77326E,
                0xA6078084, 0x19F8509E, 0xE8EFD855, 0x61D99735,
                0xA969A7AA, 0xC50C06C2, 0x5A04ABFC, 0x800BCADC,
                0x9E447A2E, 0xC3453484, 0xFDD56705, 0x0E1E9EC9,
                0xDB73DBD3, 0x105588CD, 0x675FDA79, 0xE3674340,
                0xC5C43465, 0x713E38D8, 0x3D28F89E, 0xF16DFF20,
                0x153E21E7, 0x8FB03D4A, 0xE6E39F2B, 0xDB83ADF7
            ]),
            array.array('I', [
                0xE93D5A68, 0x948140F7, 0xF64C261C, 0x94692934,
                0x411520F7, 0x7602D4F7, 0xBCF46B2E, 0xD4A20068,
                0xD4082471, 0x3320F46A, 0x43B7D4B7, 0x500061AF,
                0x1E39F62E, 0x97244546, 0x14214F74, 0xBF8B8840,
                0x4D95FC1D, 0x96B591AF, 0x70F4DDD3, 0x66A02F45,
                0xBFBC09EC, 0x03BD9785, 0x7FAC6DD0, 0x31CB8504,
                0x96EB27B3, 0x55FD3941, 0xDA2547E6, 0xABCA0A9A,
                0x28507825, 0x530429F4, 0x0A2C86DA, 0xE9B66DFB,
                0x68DC1462, 0xD7486900, 0x680EC0A4, 0x27A18DEE,
                0x4F3FFEA2, 0xE887AD8C, 0xB58CE006, 0x7AF4D6B6,
                0xAACE1E7C, 0xD3375FEC, 0xCE78A399, 0x406B2A42,
                0x20FE9E35, 0xD9F385B9, 0xEE39D7AB, 0x3B124E8B,
                0x1DC9FAF7, 0x4B6D1856, 0x26A36631, 0xEAE397B2,
                0x3A6EFA74, 0xDD5B4332, 0x6841E7F7, 0xCA7820FB,
                0xFB0AF54E, 0xD8FEB397, 0x454056AC, 0xBA489527,
                0x55533A3A, 0x20838D87, 0xFE6BA9B7, 0xD096954B,
                0x55A867BC, 0xA1159A58, 0xCCA92963, 0x99E1DB33,
                0xA62A4A56, 0x3F3125F9, 0x5EF47E1C, 0x9029317C,
                0xFDF8E802, 0x04272F70, 0x80BB155C, 0x05282CE3,
                0x95C11548, 0xE4C66D22, 0x48C1133F, 0xC70F86DC,
                0x07F9C9EE, 0x41041F0F, 0x404779A4, 0x5D886E17,
                0x325F51EB, 0xD59BC0D1, 0xF2BCC18F, 0x41113564,
                0x257B7834, 0x602A9C60, 0xDFF8E8A3, 0x1F636C1B,
                0x0E12B4C2, 0x02E1329E, 0xAF664FD1, 0xCAD18115,
                0x6B2395E0, 0x333E92E1, 0x3B240B62, 0xEEBEB922,
                0x85B2A20E, 0xE6BA0D99, 0xDE720C8C, 0x2DA2F728,
                0xD0127845, 0x95B794FD, 0x647D0862, 0xE7CCF5F0,
                0x5449A36F, 0x877D48FA, 0xC39DFD27, 0xF33E8D1E,
                0x0A476341, 0x992EFF74, 0x3A6F6EAB, 0xF4F8FD37,
                0xA812DC60, 0xA1EBDDF8, 0x991BE14C, 0xDB6E6B0D,
                0xC67B5510, 0x6D672C37, 0x2765D43B, 0xDCD0E804,
                0xF1290DC7, 0xCC00FFA3, 0xB5390F92, 0x690FED0B,
                0x667B9FFB, 0xCEDB7D9C, 0xA091CF0B, 0xD9155EA3,
                0xBB132F88, 0x515BAD24, 0x7B9479BF, 0x763BD6EB,
                0x37392EB3, 0xCC115979, 0x8026E297, 0xF42E312D,
                0x6842ADA7, 0xC66A2B3B, 0x12754CCC, 0x782EF11C,
                0x6A124237, 0xB79251E7, 0x06A1BBE6, 0x4BFB6350,
                0x1A6B1018, 0x11CAEDFA, 0x3D25BDD8, 0xE2E1C3C9,
                0x44421659, 0x0A121386, 0xD90CEC6E, 0xD5ABEA2A,
                0x64AF674E, 0xDA86A85F, 0xBEBFE988, 0x64E4C3FE,
                0x9DBC8057, 0xF0F7C086, 0x60787BF8, 0x6003604D,
                0xD1FD8346, 0xF6381FB0, 0x7745AE04, 0xD736FCCC,
                0x83426B33, 0xF01EAB71, 0xB0804187, 0x3C005E5F,
                0x77A057BE, 0xBDE8AE24, 0x55464299, 0xBF582E61,
                0x4E58F48F, 0xF2DDFDA2, 0xF474EF38, 0x8789BDC2,
                0x5366F9C3, 0xC8B38E74, 0xB475F255, 0x46FCD9B9,
                0x7AEB2661, 0x8B1DDF84, 0x846A0E79, 0x915F95E2,
                0x466E598E, 0x20B45770, 0x8CD55591, 0xC902DE4C,
                0xB90BACE1, 0xBB8205D0, 0x11A86248, 0x7574A99E,
                0xB77F19B6, 0xE0A9DC09, 0x662D09A1, 0xC4324633,
                0xE85A1F02, 0x09F0BE8C, 0x4A99A025, 0x1D6EFE10,
                0x1AB93D1D, 0x0BA5A4DF, 0xA186F20F, 0x2868F169,
                0xDCB7DA83, 0x573906FE, 0xA1E2CE9B, 0x4FCD7F52,
                0x50115E01, 0xA70683FA, 0xA002B5C4, 0x0DE6D027,
                0x9AF88C27, 0x773F8641, 0xC3604C06, 0x61A806B5,
                0xF0177A28, 0xC0F586E0, 0x006058AA, 0x30DC7D62,
                0x11E69ED7, 0x2338EA63, 0x53C2DD94, 0xC2C21634,
                0xBBCBEE56, 0x90BCB6DE, 0xEBFC7DA1, 0xCE591D76,
                0x6F05E409, 0x4B7C0188, 0x39720A3D, 0x7C927C24,
                0x86E3725F, 0x724D9DB9, 0x1AC15BB4, 0xD39EB8FC,
                0xED545578, 0x08FCA5B5, 0xD83D7CD3, 0x4DAD0FC4,
                0x1E50EF5E, 0xB161E6F8, 0xA28514D9, 0x6C51133C,
                0x6FD5C7E7, 0x56E14EC4, 0x362ABFCE, 0xDDC6C837,
                0xD79A3234, 0x92638212, 0x670EFA8E, 0x406000E0
            ]),
            array.array('I', [
                0x3A39CE37, 0xD3FAF5CF, 0xABC27737, 0x5AC52D1B,
                0x5CB0679E, 0x4FA33742, 0xD3822740, 0x99BC9BBE,
                0xD5118E9D, 0xBF0F7315, 0xD62D1C7E, 0xC700C47B,
                0xB78C1B6B, 0x21A19045, 0xB26EB1BE, 0x6A366EB4,
                0x5748AB2F, 0xBC946E79, 0xC6A376D2, 0x6549C2C8,
                0x530FF8EE, 0x468DDE7D, 0xD5730A1D, 0x4CD04DC6,
                0x2939BBDB, 0xA9BA4650, 0xAC9526E8, 0xBE5EE304,
                0xA1FAD5F0, 0x6A2D519A, 0x63EF8CE2, 0x9A86EE22,
                0xC089C2B8, 0x43242EF6, 0xA51E03AA, 0x9CF2D0A4,
                0x83C061BA, 0x9BE96A4D, 0x8FE51550, 0xBA645BD6,
                0x2826A2F9, 0xA73A3AE1, 0x4BA99586, 0xEF5562E9,
                0xC72FEFD3, 0xF752F7DA, 0x3F046F69, 0x77FA0A59,
                0x80E4A915, 0x87B08601, 0x9B09E6AD, 0x3B3EE593,
                0xE990FD5A, 0x9E34D797, 0x2CF0B7D9, 0x022B8B51,
                0x96D5AC3A, 0x017DA67D, 0xD1CF3ED6, 0x7C7D2D28,
                0x1F9F25CF, 0xADF2B89B, 0x5AD6B472, 0x5A88F54C,
                0xE029AC71, 0xE019A5E6, 0x47B0ACFD, 0xED93FA9B,
                0xE8D3C48D, 0x283B57CC, 0xF8D56629, 0x79132E28,
                0x785F0191, 0xED756055, 0xF7960E44, 0xE3D35E8C,
                0x15056DD4, 0x88F46DBA, 0x03A16125, 0x0564F0BD,
                0xC3EB9E15, 0x3C9057A2, 0x97271AEC, 0xA93A072A,
                0x1B3F6D9B, 0x1E6321F5, 0xF59C66FB, 0x26DCF319,
                0x7533D928, 0xB155FDF5, 0x03563482, 0x8ABA3CBB,
                0x28517711, 0xC20AD9F8, 0xABCC5167, 0xCCAD925F,
                0x4DE81751, 0x3830DC8E, 0x379D5862, 0x9320F991,
                0xEA7A90C2, 0xFB3E7BCE, 0x5121CE64, 0x774FBE32,
                0xA8B6E37E, 0xC3293D46, 0x48DE5369, 0x6413E680,
                0xA2AE0810, 0xDD6DB224, 0x69852DFD, 0x09072166,
                0xB39A460A, 0x6445C0DD, 0x586CDECF, 0x1C20C8AE,
                0x5BBEF7DD, 0x1B588D40, 0xCCD2017F, 0x6BB4E3BB,
                0xDDA26A7E, 0x3A59FF45, 0x3E350A44, 0xBCB4CDD5,
                0x72EACEA8, 0xFA6484BB, 0x8D6612AE, 0xBF3C6F47,
                0xD29BE463, 0x542F5D9E, 0xAEC2771B, 0xF64E6370,
                0x740E0D8D, 0xE75B1357, 0xF8721671, 0xAF537D5D,
                0x4040CB08, 0x4EB4E2CC, 0x34D2466A, 0x0115AF84,
                0xE1B00428, 0x95983A1D, 0x06B89FB4, 0xCE6EA048,
                0x6F3F3B82, 0x3520AB82, 0x011A1D4B, 0x277227F8,
                0x611560B1, 0xE7933FDC, 0xBB3A792B, 0x344525BD,
                0xA08839E1, 0x51CE794B, 0x2F32C9B7, 0xA01FBAC9,
                0xE01CC87E, 0xBCC7D1F6, 0xCF0111C3, 0xA1E8AAC7,
                0x1A908749, 0xD44FBD9A, 0xD0DADECB, 0xD50ADA38,
                0x0339C32A, 0xC6913667, 0x8DF9317C, 0xE0B12B4F,
                0xF79E59B7, 0x43F5BB3A, 0xF2D519FF, 0x27D9459C,
                0xBF97222C, 0x15E6FC2A, 0x0F91FC71, 0x9B941525,
                0xFAE59361, 0xCEB69CEB, 0xC2A86459, 0x12BAA8D1,
                0xB6C1075E, 0xE3056A0C, 0x10D25065, 0xCB03A442,
                0xE0EC6E0E, 0x1698DB3B, 0x4C98A0BE, 0x3278E964,
                0x9F1F9532, 0xE0D392DF, 0xD3A0342B, 0x8971F21E,
                0x1B0A7441, 0x4BA3348C, 0xC5BE7120, 0xC37632D8,
                0xDF359F8D, 0x9B992F2E, 0xE60B6F47, 0x0FE3F11D,
                0xE54CDA54, 0x1EDAD891, 0xCE6279CF, 0xCD3E7E6F,
                0x1618B166, 0xFD2C1D05, 0x848FD2C5, 0xF6FB2299,
                0xF523F357, 0xA6327623, 0x93A83531, 0x56CCCD02,
                0xACF08162, 0x5A75EBB5, 0x6E163697, 0x88D273CC,
                0xDE966292, 0x81B949D0, 0x4C50901B, 0x71C65614,
                0xE6C6C7BD, 0x327A140A, 0x45E1D006, 0xC3F27B9A,
                0xC9AA53FD, 0x62A80F00, 0xBB25BFE2, 0x35BDD2F6,
                0x71126905, 0xB2040222, 0xB6CBCF7C, 0xCD769C2B,
                0x53113EC0, 0x1640E3D3, 0x38ABBD60, 0x2547ADF0,
                0xBA38209C, 0xF746CE76, 0x77AFA1C5, 0x20756060,
                0x85CBFE4E, 0x8AE88DD8, 0x7AAAF9B0, 0x4CF9AA7E,
                0x1948C25C, 0x02FB8A8C, 0x01C36AE4, 0xD6EBE1F9,
                0x90D4F869, 0xA65CDEA0, 0x3F09252D, 0xC208E69F,
                0xB74E6132, 0xCE77E25B, 0x578FDFE3, 0x3AC372E6
            ])
        )
 
        # Cycle through the p-boxes and round-robin XOR the
        # key with the p-boxes
        key_len = len(key)
        index = 0
        for i in xrange(len(self._p_boxes)):
            self._p_boxes[i] = self._p_boxes[i] ^ (
             (ord(key[index % key_len]) << 24) +
             (ord(key[(index + 1) % key_len]) << 16) +
             (ord(key[(index + 2) % key_len]) << 8) +
             (ord(key[(index + 3) % key_len]))
            )
            index += 4
 
        # For the chaining process
        l = r = 0
 
        # Begin chain replacing the p-boxes
        for i in xrange(0, len(self._p_boxes), 2):
            (l, r) = self.cipher(l, r, self.ENCRYPT)
            self._p_boxes[i] = l
            self._p_boxes[i + 1] = r
 
        # Chain replace the s-boxes
        for i in xrange(len(self._s_boxes)):
            for j in xrange(0, len(self._s_boxes[i]), 2):
                (l, r) = self.cipher(l, r, self.ENCRYPT)
                self._s_boxes[i][j] = l
                self._s_boxes[i][j + 1] = r
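
The key-folding loop packs four key bytes big-endian into a 32-bit word and XORs it into each p-box, wrapping round-robin over the key. A Python 3 sketch of just that loop (with key as bytes, the ord() calls disappear):

import array

def fold_key(p_boxes, key):
    key_len = len(key)
    index = 0
    for i in range(len(p_boxes)):
        word = 0
        for k in range(4):
            word = (word << 8) | key[(index + k) % key_len]
        p_boxes[i] ^= word    # item read-modify-write on the 'I' array
        index += 4

p = array.array('I', [0x243F6A88, 0x85A308D3])
fold_key(p, b'\x01\x02\x03\x04')
assert p[0] == 0x243F6A88 ^ 0x01020304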

Example 16

Project: pyaxon
Source File: _loader.py
View license
    def __init__(self, fd, mode='safe', errto=None):
        '''
        .. py:function:: Loader(fd, readline, builder="safe", sbuilder="default", errto=None)

            :param fd:

                File-like object with `.readline()` and `.close()` method

            :param mode:

                Specifies the method of building python objects for complex values

            :param errto:

                Name of file for reporting errors
        '''
        self.fd = fd
        self.readline = fd.readline
        
        self.bc = 0
        self.bs = 0
        self.bq = 0
        self.ba = 0
        
        self.labeled_objects = {}

        self.builder = get_builder(mode)
        if self.builder is None:
            raise ValueError("Invalid mode: %s", mode)

        self.sbuilder = SimpleBuilder()

        self.c_constants = c_constants.copy()

        if errto is None:
            self.errto = sys.stderr
        else:
            self.errto = open(errto, 'wt')

        self.da = array.array(int_mode, (0,0,0))
        self.ta = array.array(int_mode, (0,0,0,0))
        self.to = array.array(int_mode, (0,0))

        self.is_nl = 0

        self.lnum = 0
        
        self.keyval = KeyVal('', None)
        
        self.next_line()
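
The three tiny arrays (da, ta, to) act as fixed-slot, mutable scratch records for date/time parsing: items are reassigned in place, with no per-parse allocation. A sketch, assuming int_mode resolves to the 'l' typecode (the source picks it at import time):

import array

int_mode = 'l'                          # assumed; chosen per-platform in the source
da = array.array(int_mode, (0, 0, 0))   # e.g. year/month/day slots
da[0], da[1], da[2] = 2024, 1, 31       # mutate in place, no reallocation
assert da.tolist() == [2024, 1, 31]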

Example 17

Project: DragonPy
Source File: wave2bitstream.py
View license
    def iter_wave_values(self):
        """
        yield frame number + volume value from the WAVE file
        """
        typecode = self.get_typecode(self.samplewidth)

        if log.level >= 5:
            if self.cfg.AVG_COUNT > 1:
                # merge samples -> log output in iter_avg_wave_values
                tlm = None
            else:
                tlm = TextLevelMeter(self.max_value, 79)

        # Use only a read size that is a whole multiple of the samplewidth,
        # otherwise array.array will raise: ValueError: string length not a multiple of item size
        divider = int(round(float(WAVE_READ_SIZE) / self.samplewidth))
        read_size = self.samplewidth * divider
        if read_size != WAVE_READ_SIZE:
            log.info("Real use wave read size: %i Bytes" % read_size)

        get_wave_block_func = functools.partial(self.wavefile.readframes, read_size)
        skip_count = 0

        manually_audioop_bias = self.samplewidth == 1 and audioop is None

        for frames in iter(get_wave_block_func, ""):

            if self.samplewidth == 1:
                if audioop is None:
                    log.warning("use audioop.bias() work-a-round for missing audioop.")
                else:
                    # 8 bit samples are unsigned, see:
                    # http://docs.python.org/2/library/audioop.html#audioop.lin2lin
                    frames = audioop.bias(frames, 1, 128)

            try:
                values = array.array(typecode, frames)
            except ValueError, err:
                # e.g.:
                #     ValueError: string length not a multiple of item size
                # Work-a-round: Skip the last frames of this block
                frame_count = len(frames)
                divider = int(math.floor(float(frame_count) / self.samplewidth))
                new_count = self.samplewidth * divider
                frames = frames[:new_count] # skip frames
                log.error(
                    "Can't make array from %s frames: Value error: %s (Skip %i and use %i frames)" % (
                        frame_count, err, frame_count - new_count, len(frames)
                ))
                values = array.array(typecode, frames)

            for value in values:
                self.wave_pos += 1 # Absolute position in the frame stream

                if manually_audioop_bias:
                    # audioop.bias can't be used.
                    # See: http://hg.python.org/cpython/file/482590320549/Modules/audioop.c#l957
                    value = value % 0xff - 128

#                 if abs(value) < self.min_volume:
# #                     log.log(5, "Ignore to lower amplitude")
#                     skip_count += 1
#                     continue

                yield (self.wave_pos, value)

        log.info("Skip %i samples that are lower than %i" % (
            skip_count, self.min_volume
        ))
        log.info("Last readed Frame is: %s" % self.pformat_pos())

Example 19

Project: Printrun
Source File: actors.py
View license
    def load_data(self, model_data, callback=None):
        t_start = time.time()
        self.gcode = model_data

        self.count_travel_indices = count_travel_indices = [0]
        self.count_print_indices = count_print_indices = [0]
        self.count_print_vertices = count_print_vertices = [0]

        # Some trivial computations, but that's mostly for documentation :)
        # Not like 10 multiplications are going to cost much time vs what's
        # about to happen :)

        # Max number of values which can be generated per gline
        # to store coordinates/colors/normals.
        # Nicely enough we have 3 per kind of thing for all kinds.
        coordspervertex = 3
        verticesperline = 8
        coordsperline = coordspervertex * verticesperline
        coords_count = lambda nlines: nlines * coordsperline

        travelverticesperline = 2
        travelcoordsperline = coordspervertex * travelverticesperline
        travel_coords_count = lambda nlines: nlines * travelcoordsperline

        trianglesperface = 2
        facesperbox = 4
        trianglesperbox = trianglesperface * facesperbox
        verticespertriangle = 3
        indicesperbox = verticespertriangle * trianglesperbox
        boxperline = 2
        indicesperline = indicesperbox * boxperline
        indices_count = lambda nlines: nlines * indicesperline

        nlines = len(model_data)
        ntravelcoords = travel_coords_count(nlines)
        ncoords = coords_count(nlines)
        nindices = indices_count(nlines)
        travel_vertices = self.travels = numpy.zeros(ntravelcoords, dtype = GLfloat)
        travel_vertex_k = 0
        vertices = self.vertices = numpy.zeros(ncoords, dtype = GLfloat)
        vertex_k = 0
        colors = self.colors = numpy.zeros(ncoords, dtype = GLfloat)
        color_k = 0
        normals = self.normals = numpy.zeros(ncoords, dtype = GLfloat)
        normal_k = 0
        indices = self.indices = numpy.zeros(nindices, dtype = GLuint)
        index_k = 0
        self.layer_idxs_map = {}
        self.layer_stops = [0]

        prev_is_extruding = False
        prev_move_normal_x = None
        prev_move_normal_y = None
        prev_move_angle = None

        prev_pos = (0, 0, 0)
        layer_idx = 0

        self.printed_until = 0
        self.only_current = False

        twopi = 2 * math.pi

        processed_lines = 0

        while layer_idx < len(model_data.all_layers):
            with self.lock:
                nlines = len(model_data)
                remaining_lines = nlines - processed_lines
                # Only reallocate memory which might be needed, not memory
                # for everything
                ntravelcoords = coords_count(remaining_lines) + travel_vertex_k
                ncoords = coords_count(remaining_lines) + vertex_k
                nindices = indices_count(remaining_lines) + index_k
                if ncoords > vertices.size:
                    self.travels.resize(ntravelcoords, refcheck = False)
                    self.vertices.resize(ncoords, refcheck = False)
                    self.colors.resize(ncoords, refcheck = False)
                    self.normals.resize(ncoords, refcheck = False)
                    self.indices.resize(nindices, refcheck = False)
                layer = model_data.all_layers[layer_idx]
                has_movement = False
                for gline_idx, gline in enumerate(layer):
                    if not gline.is_move:
                        continue
                    if gline.x is None and gline.y is None and gline.z is None:
                        continue
                    has_movement = True
                    current_pos = (gline.current_x, gline.current_y, gline.current_z)
                    if not gline.extruding:
                        travel_vertices[travel_vertex_k] = prev_pos[0]
                        travel_vertices[travel_vertex_k + 1] = prev_pos[1]
                        travel_vertices[travel_vertex_k + 2] = prev_pos[2]
                        travel_vertices[travel_vertex_k + 3] = current_pos[0]
                        travel_vertices[travel_vertex_k + 4] = current_pos[1]
                        travel_vertices[travel_vertex_k + 5] = current_pos[2]
                        travel_vertex_k += 6
                        prev_is_extruding = False
                    else:
                        gline_color = self.movement_color(gline)

                        next_move = get_next_move(model_data, layer_idx, gline_idx)
                        next_is_extruding = (next_move.extruding
                                             if next_move is not None else False)

                        delta_x = current_pos[0] - prev_pos[0]
                        delta_y = current_pos[1] - prev_pos[1]
                        norm = delta_x * delta_x + delta_y * delta_y
                        if norm == 0:  # Don't draw anything if this move is Z+E only
                            continue
                        norm = math.sqrt(norm)
                        move_normal_x = - delta_y / norm
                        move_normal_y = delta_x / norm
                        move_angle = math.atan2(delta_y, delta_x)

                        # FIXME: compute these dynamically
                        path_halfwidth = self.path_halfwidth * 1.2
                        path_halfheight = self.path_halfheight * 1.2

                        new_indices = []
                        new_vertices = []
                        new_normals = []
                        if prev_is_extruding:
                            # Store previous vertices indices
                            prev_id = vertex_k / 3 - 4
                            avg_move_normal_x = (prev_move_normal_x + move_normal_x) / 2
                            avg_move_normal_y = (prev_move_normal_y + move_normal_y) / 2
                            norm = avg_move_normal_x * avg_move_normal_x + avg_move_normal_y * avg_move_normal_y
                            if norm == 0:
                                avg_move_normal_x = move_normal_x
                                avg_move_normal_y = move_normal_y
                            else:
                                norm = math.sqrt(norm)
                                avg_move_normal_x /= norm
                                avg_move_normal_y /= norm
                            delta_angle = move_angle - prev_move_angle
                            delta_angle = (delta_angle + twopi) % twopi
                            fact = abs(math.cos(delta_angle / 2))
                            # If move is turning too much, avoid creating a big peak
                            # by adding an intermediate box
                            if fact < 0.5:
                                # FIXME: It looks like there's some heavy code duplication here...
                                hw = path_halfwidth
                                p1x = prev_pos[0] - hw * prev_move_normal_x
                                p2x = prev_pos[0] + hw * prev_move_normal_x
                                p1y = prev_pos[1] - hw * prev_move_normal_y
                                p2y = prev_pos[1] + hw * prev_move_normal_y
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                                new_vertices.extend((p1x, p1y, prev_pos[2]))
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                                new_vertices.extend((p2x, p2y, prev_pos[2]))
                                new_normals.extend((0, 0, 1))
                                new_normals.extend((-prev_move_normal_x, -prev_move_normal_y, 0))
                                new_normals.extend((0, 0, -1))
                                new_normals.extend((prev_move_normal_x, prev_move_normal_y, 0))
                                first = vertex_k // 3
                                # Link to previous
                                new_indices += triangulate_box(prev_id, prev_id + 1,
                                                               prev_id + 2, prev_id + 3,
                                                               first, first + 1,
                                                               first + 2, first + 3)
                                p1x = prev_pos[0] - hw * move_normal_x
                                p2x = prev_pos[0] + hw * move_normal_x
                                p1y = prev_pos[1] - hw * move_normal_y
                                p2y = prev_pos[1] + hw * move_normal_y
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                                new_vertices.extend((p1x, p1y, prev_pos[2]))
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                                new_vertices.extend((p2x, p2y, prev_pos[2]))
                                new_normals.extend((0, 0, 1))
                                new_normals.extend((-move_normal_x, -move_normal_y, 0))
                                new_normals.extend((0, 0, -1))
                                new_normals.extend((move_normal_x, move_normal_y, 0))
                                prev_id += 4
                                first += 4
                                # Link to previous
                                new_indices += triangulate_box(prev_id, prev_id + 1,
                                                               prev_id + 2, prev_id + 3,
                                                               first, first + 1,
                                                               first + 2, first + 3)
                            else:
                                hw = path_halfwidth / fact
                                # Compute vertices
                                p1x = prev_pos[0] - hw * avg_move_normal_x
                                p2x = prev_pos[0] + hw * avg_move_normal_x
                                p1y = prev_pos[1] - hw * avg_move_normal_y
                                p2y = prev_pos[1] + hw * avg_move_normal_y
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                                new_vertices.extend((p1x, p1y, prev_pos[2]))
                                new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                                new_vertices.extend((p2x, p2y, prev_pos[2]))
                                new_normals.extend((0, 0, 1))
                                new_normals.extend((-avg_move_normal_x, -avg_move_normal_y, 0))
                                new_normals.extend((0, 0, -1))
                                new_normals.extend((avg_move_normal_x, avg_move_normal_y, 0))
                                first = vertex_k // 3
                                # Link to previous
                                new_indices += triangulate_box(prev_id, prev_id + 1,
                                                               prev_id + 2, prev_id + 3,
                                                               first, first + 1,
                                                               first + 2, first + 3)
                        else:
                            # Compute vertices normal to the current move and cap it
                            p1x = prev_pos[0] - path_halfwidth * move_normal_x
                            p2x = prev_pos[0] + path_halfwidth * move_normal_x
                            p1y = prev_pos[1] - path_halfwidth * move_normal_y
                            p2y = prev_pos[1] + path_halfwidth * move_normal_y
                            new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] + path_halfheight))
                            new_vertices.extend((p1x, p1y, prev_pos[2]))
                            new_vertices.extend((prev_pos[0], prev_pos[1], prev_pos[2] - path_halfheight))
                            new_vertices.extend((p2x, p2y, prev_pos[2]))
                            new_normals.extend((0, 0, 1))
                            new_normals.extend((-move_normal_x, -move_normal_y, 0))
                            new_normals.extend((0, 0, -1))
                            new_normals.extend((move_normal_x, move_normal_y, 0))
                            first = vertex_k // 3
                            new_indices = triangulate_rectangle(first, first + 1,
                                                                first + 2, first + 3)

                        if not next_is_extruding:
                            # Compute caps and link everything
                            p1x = current_pos[0] - path_halfwidth * move_normal_x
                            p2x = current_pos[0] + path_halfwidth * move_normal_x
                            p1y = current_pos[1] - path_halfwidth * move_normal_y
                            p2y = current_pos[1] + path_halfwidth * move_normal_y
                            new_vertices.extend((current_pos[0], current_pos[1], current_pos[2] + path_halfheight))
                            new_vertices.extend((p1x, p1y, current_pos[2]))
                            new_vertices.extend((current_pos[0], current_pos[1], current_pos[2] - path_halfheight))
                            new_vertices.extend((p2x, p2y, current_pos[2]))
                            new_normals.extend((0, 0, 1))
                            new_normals.extend((-move_normal_x, -move_normal_y, 0))
                            new_normals.extend((0, 0, -1))
                            new_normals.extend((move_normal_x, move_normal_y, 0))
                            end_first = vertex_k // 3 + len(new_vertices) // 3 - 4
                            new_indices += triangulate_rectangle(end_first + 3, end_first + 2,
                                                                 end_first + 1, end_first)
                            new_indices += triangulate_box(first, first + 1,
                                                           first + 2, first + 3,
                                                           end_first, end_first + 1,
                                                           end_first + 2, end_first + 3)

                        for new_i, item in enumerate(new_indices):
                            indices[index_k + new_i] = item
                        index_k += len(new_indices)
                        for new_i, item in enumerate(new_vertices):
                            vertices[vertex_k + new_i] = item
                        vertex_k += len(new_vertices)
                        for new_i, item in enumerate(new_normals):
                            normals[normal_k + new_i] = item
                        normal_k += len(new_normals)
                        new_colors = list(gline_color)[:-1] * (len(new_vertices) // 3)
                        for new_i, item in enumerate(new_colors):
                            colors[color_k + new_i] = item
                        color_k += len(new_colors)

                        prev_is_extruding = True
                        prev_move_normal_x = move_normal_x
                        prev_move_normal_y = move_normal_y
                        prev_move_angle = move_angle

                    prev_pos = current_pos
                    count_travel_indices.append(travel_vertex_k // 3)
                    count_print_indices.append(index_k)
                    count_print_vertices.append(vertex_k // 3)
                    gline.gcview_end_vertex = len(count_print_indices) - 1

                if has_movement:
                    self.layer_stops.append(len(count_print_indices) - 1)
                    self.layer_idxs_map[layer_idx] = len(self.layer_stops) - 1
                    self.max_layers = len(self.layer_stops) - 1
                    self.num_layers_to_draw = self.max_layers + 1
                    self.initialized = False
                    self.loaded = True

            processed_lines += len(layer)

            if callback:
                callback(layer_idx + 1)

            yield layer_idx
            layer_idx += 1

        with self.lock:
            self.dims = ((model_data.xmin, model_data.xmax, model_data.width),
                         (model_data.ymin, model_data.ymax, model_data.depth),
                         (model_data.zmin, model_data.zmax, model_data.height))

            self.travels.resize(travel_vertex_k, refcheck = False)
            self.vertices.resize(vertex_k, refcheck = False)
            self.colors.resize(color_k, refcheck = False)
            self.normals.resize(normal_k, refcheck = False)
            self.indices.resize(index_k, refcheck = False)

            self.layer_stops = array.array('L', self.layer_stops)
            self.count_travel_indices = array.array('L', count_travel_indices)
            self.count_print_indices = array.array('L', count_print_indices)
            self.count_print_vertices = array.array('L', count_print_vertices)

            self.max_layers = len(self.layer_stops) - 1
            self.num_layers_to_draw = self.max_layers + 1
            self.initialized = False
            self.loaded = True
            self.fully_loaded = True

        t_end = time.time()

        logging.debug(_('Initialized 3D visualization in %.2f seconds') % (t_end - t_start))
        logging.debug(_('Vertex count: %d') % ((len(self.vertices) + len(self.travels)) // 3))
        yield None
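
The detail worth copying from this example is the two-phase buffer strategy: per-line counters are accumulated in plain Python lists while the final size is unknown, then frozen into compact array.array('L') buffers once loading completes. A minimal sketch of that trade-off, with illustrative names that are not part of Printrun:

import array

# Phase 1: append to a list; amortized O(1) growth, no fixed element type.
counts = []
for i in range(1000):
    counts.append(i * 3)

# Phase 2: freeze into a typed, contiguous buffer. array.array('L') stores
# unsigned longs natively, so it is far smaller than a list of Python ints
# and exposes the buffer protocol that OpenGL bindings can consume directly.
frozen = array.array('L', counts)
assert frozen[10] == 30 and frozen.itemsize >= 4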

Example 21

Project: Emoji-Tools
Source File: _c_m_a_p.py
View license
	def compile(self, ttFont):
		if self.data:
			return struct.pack(">HHH", self.format, self.length, self.language) + self.data

		charCodes = list(self.cmap.keys())
		lenCharCodes = len(charCodes)
		if lenCharCodes == 0:
			startCode = [0xffff]
			endCode = [0xffff]
		else:
			charCodes.sort()
			names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
			nameMap = ttFont.getReverseGlyphMap()
			try:
				gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
			except KeyError:
				nameMap = ttFont.getReverseGlyphMap(rebuild=True)
				try:
					gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
				except KeyError:
					# allow virtual GIDs in format 4 tables
					gids = []
					for name in names:
						try:
							gid = nameMap[name]
						except KeyError:
							try:
								if (name[:3] == 'gid'):
									gid = int(name[3:])
								else:
									gid = ttFont.getGlyphID(name)
							except Exception:
								raise KeyError(name)

						gids.append(gid)
			cmap = {}  # code:glyphID mapping
			list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))

			# Build startCode and endCode lists.
			# Split the char codes in ranges of consecutive char codes, then split
			# each range in more ranges of consecutive/not consecutive glyph IDs.
			# See splitRange().
			lastCode = charCodes[0]
			endCode = []
			startCode = [lastCode]
			for charCode in charCodes[1:]:  # skip the first code, it's the first start code
				if charCode == lastCode + 1:
					lastCode = charCode
					continue
				start, end = splitRange(startCode[-1], lastCode, cmap)
				startCode.extend(start)
				endCode.extend(end)
				startCode.append(charCode)
				lastCode = charCode
			start, end = splitRange(startCode[-1], lastCode, cmap)
			startCode.extend(start)
			endCode.extend(end)
			startCode.append(0xffff)
			endCode.append(0xffff)

		# build up rest of cruft
		idDelta = []
		idRangeOffset = []
		glyphIndexArray = []
		for i in range(len(endCode)-1):  # skip the closing codes (0xffff)
			indices = []
			for charCode in range(startCode[i], endCode[i] + 1):
				indices.append(cmap[charCode])
			if (indices == list(range(indices[0], indices[0] + len(indices)))):
				idDelta.append((indices[0] - startCode[i]) % 0x10000)
				idRangeOffset.append(0)
			else:
				# someone *definitely* needs to get killed.
				idDelta.append(0)
				idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
				glyphIndexArray.extend(indices)
		idDelta.append(1)  # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
		idRangeOffset.append(0)

		# Insane.
		segCount = len(endCode)
		segCountX2 = segCount * 2
		searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)

		charCodeArray = array.array("H", endCode + [0] + startCode)
		idDeltaArray = array.array("H", idDelta)
		restArray = array.array("H", idRangeOffset + glyphIndexArray)
		if sys.byteorder != "big":
			charCodeArray.byteswap()
			idDeltaArray.byteswap()
			restArray.byteswap()
		data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()

		length = struct.calcsize(cmap_format_4_format) + len(data)
		header = struct.pack(cmap_format_4_format, self.format, length, self.language,
				segCountX2, searchRange, entrySelector, rangeShift)
		return header + data
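
The portable part of this example is the endianness handling: array.array('H') stores values in native byte order, so producing the big-endian data a font file requires takes an explicit byteswap() on little-endian hosts. A standalone sketch of the same idiom:

import sys
import array

values = [0x0001, 0x00FF, 0xABCD]
buf = array.array('H', values)  # native-endian unsigned shorts
if sys.byteorder != 'big':
    buf.byteswap()              # flip to big-endian in place
data = buf.tobytes()            # tobytes() replaced tostring() in Python 3
assert data[:2] == b'\x00\x01'  # holds on either host byte order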

Example 23

View license
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.

    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)

    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
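
The reason array.array('i') fits here is that the three CSC components grow by appending while their final lengths are unknown, and scipy can consume the typed buffers directly. A small hand-built example of the same (data, indices, indptr) layout, independent of scikit-learn:

import array
import scipy.sparse as sp

data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])

# Column 0 holds 7 at row 1; column 1 holds 3 at row 0 and 5 at row 2.
for column in ([(1, 7)], [(0, 3), (2, 5)]):
    for row, value in column:
        indices.append(row)
        data.append(value)
    indptr.append(len(indices))  # one running total per finished column

m = sp.csc_matrix((data, indices, indptr), shape=(3, 2), dtype=int)
assert m[1, 0] == 7 and m[2, 1] == 5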

Example 25

Project: pika
Source File: forward_server.py
View license
    def _forward(self, src_sock, dest_sock): # pylint: disable=R0912
        """Forward from src_sock to dest_sock"""
        src_peername = src_sock.getpeername()

        _trace("%s forwarding from %s to %s", datetime.utcnow(),
               src_peername, dest_sock.getpeername())
        try:
            # NOTE: python 2.6 doesn't support bytearray with recv_into, so
            # we use array.array instead; this is only okay as long as the
            # array instance isn't shared across threads. See
            # http://bugs.python.org/issue7827 and
            # groups.google.com/forum/#!topic/comp.lang.python/M6Pqr-KUjQw
            rx_buf = array.array("B", [0] * self._SOCK_RX_BUF_SIZE)

            while True:
                try:
                    nbytes = src_sock.recv_into(rx_buf)
                except socket.error as exc:
                    if exc.errno == errno.EINTR:
                        continue
                    elif exc.errno == errno.ECONNRESET:
                        # Source peer forcibly closed connection
                        _trace("%s errno.ECONNRESET from %s",
                               datetime.utcnow(), src_peername)
                        break
                    else:
                        _trace("%s Unexpected errno=%s from %s\n%s",
                               datetime.utcnow(), exc.errno, src_peername,
                               "".join(traceback.format_stack()))
                        raise

                if not nbytes:
                    # Source input EOF
                    _trace("%s EOF on %s", datetime.utcnow(), src_peername)
                    break

                try:
                    dest_sock.sendall(buffer(rx_buf, 0, nbytes))
                except socket.error as exc:
                    if exc.errno == errno.EPIPE:
                        # Destination peer closed its end of the connection
                        _trace("%s Destination peer %s closed its end of "
                               "the connection: errno.EPIPE",
                               datetime.utcnow(), dest_sock.getpeername())
                        break
                    elif exc.errno == errno.ECONNRESET:
                        # Destination peer forcibly closed connection
                        _trace("%s Destination peer %s forcibly closed "
                               "connection: errno.ECONNRESET",
                               datetime.utcnow(), dest_sock.getpeername())
                        break
                    else:
                        _trace(
                            "%s Unexpected errno=%s in sendall to %s\n%s",
                            datetime.utcnow(), exc.errno,
                            dest_sock.getpeername(),
                            "".join(traceback.format_stack()))
                        raise
        except:
            _trace("forward failed\n%s", "".join(traceback.format_exc()))
            raise
        finally:
            _trace("%s done forwarding from %s", datetime.utcnow(),
                   src_peername)
            try:
                # Let source peer know we're done receiving
                _safe_shutdown_socket(src_sock, socket.SHUT_RD)
            finally:
                # Let destination peer know we're done sending
                _safe_shutdown_socket(dest_sock, socket.SHUT_WR)
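
The comment about recv_into is the durable lesson: a preallocated array.array('B') is a writable buffer-protocol object, so the kernel can copy received bytes straight into it without a fresh allocation per read. A toy sketch over a local socket pair (names are illustrative, not pika's API), assuming a platform where socket.socketpair is available:

import array
import socket

a, b = socket.socketpair()
rx_buf = array.array('B', [0] * 4096)  # preallocated, reused across reads

a.sendall(b'hello')
nbytes = b.recv_into(rx_buf)         # fills rx_buf in place
payload = rx_buf[:nbytes].tobytes()  # a slice of an array is a new array
assert payload == b'hello'
a.close()
b.close()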

Example 26

Project: pinguino-ide
Source File: intel_hex.py
View license
    def _decode_record(self, s, line=0):
        '''Decode one record of HEX file.

        @param  s       line with HEX record.
        @param  line    line number (for error messages).

        @raise  EndOfFile   if EOF record encountered.
        '''
        s = s.rstrip('\r\n')
        if not s:
            return          # empty line

        if s[0] == ':':
            try:
                bin = array('B', unhexlify(asbytes(s[1:])))
            except (TypeError, ValueError):
                # this might be raised by unhexlify when odd hexascii digits
                raise HexRecordError(line=line)
            length = len(bin)
            if length < 5:
                raise HexRecordError(line=line)
        else:
            raise HexRecordError(line=line)

        record_length = bin[0]
        if length != (5 + record_length):
            raise RecordLengthError(line=line)

        addr = bin[1]*256 + bin[2]

        record_type = bin[3]
        if not (0 <= record_type <= 5):
            raise RecordTypeError(line=line)

        crc = sum(bin)
        crc &= 0x0FF
        if crc != 0:
            raise RecordChecksumError(line=line)

        if record_type == 0:
            # data record
            addr += self._offset
            for i in range(4, 4+record_length):
                if not self._buf.get(addr, None) is None:
                    raise AddressOverlapError(address=addr, line=line)
                self._buf[addr] = bin[i]
                addr += 1   # FIXME: addr should be wrapped
                            # BUT after 02 record (at 64K boundary)
                            # and after 04 record (at 4G boundary)

        elif record_type == 1:
            # end of file record
            if record_length != 0:
                raise EOFRecordError(line=line)
            raise _EndOfFile

        elif record_type == 2:
            # Extended 8086 Segment Record
            if record_length != 2 or addr != 0:
                raise ExtendedSegmentAddressRecordError(line=line)
            self._offset = (bin[4]*256 + bin[5]) * 16

        elif record_type == 4:
            # Extended Linear Address Record
            if record_length != 2 or addr != 0:
                raise ExtendedLinearAddressRecordError(line=line)
            self._offset = (bin[4]*256 + bin[5]) * 65536

        elif record_type == 3:
            # Start Segment Address Record
            if record_length != 4 or addr != 0:
                raise StartSegmentAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'CS': bin[4]*256 + bin[5],
                               'IP': bin[6]*256 + bin[7],
                              }

        elif record_type == 5:
            # Start Linear Address Record
            if record_length != 4 or addr != 0:
                raise StartLinearAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'EIP': (bin[4]*16777216 +
                                       bin[5]*65536 +
                                       bin[6]*256 +
                                       bin[7]),
                              }
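
The checksum logic above is compact because array('B', unhexlify(...)) turns a record's hex digits straight into summable byte values. A quick standalone check against the one record every Intel HEX file ends with (:00000001FF):

from array import array
from binascii import unhexlify

record = ':00000001FF'  # Intel HEX end-of-file record
bin_rec = array('B', unhexlify(record[1:]))
assert (sum(bin_rec) & 0xFF) == 0        # bytes sum to 0 modulo 256

reclen, record_type = bin_rec[0], bin_rec[3]
assert reclen == 0 and record_type == 1  # type 1 is end-of-file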

Example 27

Project: pinguino-ide
Source File: intel_hex.py
View license
    def write_hex_file(self, f, write_start_addr=True):
        """Write data to file f in HEX format.

        @param  f                   filename or file-like object for writing
        @param  write_start_addr    enable or disable writing start address
                                    record to file (enabled by default).
                                    If there is no start address in obj, nothing
                                    will be written regardless of this setting.
        """
        fwrite = getattr(f, "write", None)
        if fwrite:
            fobj = f
            fclose = None
        else:
            fobj = open(f, 'w')
            fwrite = fobj.write
            fclose = fobj.close

        # Translation table for uppercasing hex ascii string.
        # timeit shows that using hexstr.translate(table)
        # is faster than hexstr.upper():
        # 0.452ms vs. 0.652ms (translate vs. upper)
        if sys.version_info[0] >= 3:
            table = bytes(range(256)).upper()
        else:
            table = ''.join(chr(i).upper() for i in range(256))

        # start address record if any
        if self.start_addr and write_start_addr:
            keys = sorted(self.start_addr.keys())
            bin = array('B', asbytes('\0'*9))
            if keys == ['CS','IP']:
                # Start Segment Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 3      # rectyp
                cs = self.start_addr['CS']
                bin[4] = (cs >> 8) & 0x0FF
                bin[5] = cs & 0x0FF
                ip = self.start_addr['IP']
                bin[6] = (ip >> 8) & 0x0FF
                bin[7] = ip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' +
                       asstr(hexlify(bin.tobytes()).translate(table)) +
                       '\n')
            elif keys == ['EIP']:
                # Start Linear Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 5      # rectyp
                eip = self.start_addr['EIP']
                bin[4] = (eip >> 24) & 0x0FF
                bin[5] = (eip >> 16) & 0x0FF
                bin[6] = (eip >> 8) & 0x0FF
                bin[7] = eip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' +
                       asstr(hexlify(bin.tobytes()).translate(table)) +
                       '\n')
            else:
                if fclose:
                    fclose()
                raise InvalidStartAddressValueError(start_addr=self.start_addr)

        # data
        addresses = sorted(self._buf.keys())
        addr_len = len(addresses)
        if addr_len:
            minaddr = addresses[0]
            maxaddr = addresses[-1]

            if maxaddr > 65535:
                need_offset_record = True
            else:
                need_offset_record = False
            high_ofs = 0

            cur_addr = minaddr
            cur_ix = 0

            while cur_addr <= maxaddr:
                if need_offset_record:
                    bin = array('B', asbytes('\0'*7))
                    bin[0] = 2      # reclen
                    bin[1] = 0      # offset msb
                    bin[2] = 0      # offset lsb
                    bin[3] = 4      # rectyp
                    high_ofs = int(cur_addr>>16)
                    b = divmod(high_ofs, 256)
                    bin[4] = b[0]   # msb of high_ofs
                    bin[5] = b[1]   # lsb of high_ofs
                    bin[6] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' +
                           asstr(hexlify(bin.tobytes()).translate(table)) +
                           '\n')

                while True:
                    # produce one record
                    low_addr = cur_addr & 0x0FFFF
                    # chain_len off by 1
                    chain_len = min(15, 65535-low_addr, maxaddr-cur_addr)

                    # search continuous chain
                    stop_addr = cur_addr + chain_len
                    if chain_len:
                        ix = bisect_right(addresses, stop_addr,
                                          cur_ix,
                                          min(cur_ix+chain_len+1, addr_len))
                        chain_len = ix - cur_ix     # real chain_len
                        # there could be small holes in the chain
                        # but we will catch them by try-except later
                        # so for big continuous files we will work
                        # at maximum possible speed
                    else:
                        chain_len = 1               # real chain_len

                    bin = array('B', asbytes('\0'*(5+chain_len)))
                    b = divmod(low_addr, 256)
                    bin[1] = b[0]   # msb of low_addr
                    bin[2] = b[1]   # lsb of low_addr
                    bin[3] = 0          # rectype
                    try:    # if there is small holes we'll catch them
                        for i in range(chain_len):
                            bin[4+i] = self._buf[cur_addr+i]
                    except KeyError:
                        # we catch a hole so we should shrink the chain
                        chain_len = i
                        bin = bin[:5+i]
                    bin[0] = chain_len
                    bin[4+chain_len] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' +
                           asstr(hexlify(bin.tobytes()).translate(table)) +
                           '\n')

                    # adjust cur_addr/cur_ix
                    cur_ix += chain_len
                    if cur_ix < addr_len:
                        cur_addr = addresses[cur_ix]
                    else:
                        cur_addr = maxaddr + 1
                        break
                    high_addr = int(cur_addr>>16)
                    if high_addr > high_ofs:
                        break

        # end-of-file record
        fwrite(":00000001FF\n")
        if fclose:
            fclose()
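
Every record the loop above emits shares the Intel HEX layout: a colon, a byte count, a 16-bit offset, a record type, the data bytes, and a two's-complement checksum. A minimal Python 3 sketch that builds one data record the same way (make_data_record is a hypothetical helper, not part of intelhex):

from array import array
from binascii import hexlify

def make_data_record(offset, data):
    # :LLAAAATT<data>CC -- reclen, 16-bit offset, rectype 0, data, checksum
    rec = array('B', [len(data), (offset >> 8) & 0xFF, offset & 0xFF, 0])
    rec.extend(data)
    rec.append((-sum(rec)) & 0xFF)  # checksum makes the byte sum 0 mod 256
    return ':' + hexlify(rec.tobytes()).decode('ascii').upper()

print(make_data_record(0x0100, b'\x01\x02\x03'))  # :03010000010203F6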

Example 28

Project: XDATA
Source File: scanner.py
View license
  def build(self, keywords):
    goto = dict()
    fail = dict()
    output = defaultdict(set)

    # Algorithm 2
    newstate = 0
    for a in keywords:
      state = 0
      j = 0
      while (j < len(a)) and (state, a[j]) in goto:
        state = goto[(state, a[j])]
        j += 1
      for p in range(j, len(a)):
        newstate += 1
        goto[(state, a[p])] = newstate
        #print "(%d, %s) -> %d" % (state, a[p], newstate)
        state = newstate
      output[state].add(a)
    for a in self.alphabet:
      if (0,a) not in goto: 
        goto[(0,a)] = 0

    # Algorithm 3
    queue = deque()
    for a in self.alphabet:
      if goto[(0,a)] != 0:
        s = goto[(0,a)]
        queue.append(s)
        fail[s] = 0
    while queue:
      r = queue.popleft()
      for a in self.alphabet:
        if (r,a) in goto:
          s = goto[(r,a)]
          queue.append(s)
          state = fail[r]
          while (state,a) not in goto:
            state = fail[state]
          fail[s] = goto[(state,a)]
          #print "f(%d) -> %d" % (s, goto[(state,a)]), output[fail[s]]
          if output[fail[s]]:
            output[s].update(output[fail[s]])

    # Algorithm 4
    self.nextmove = {}
    for a in self.alphabet:
      self.nextmove[(0,a)] = goto[(0,a)]
      if goto[(0,a)] != 0:
        queue.append(goto[(0,a)])
    while queue:
      r = queue.popleft()
      for a in self.alphabet:
        if (r,a) in goto:
          s = goto[(r,a)]
          queue.append(s)
          self.nextmove[(r,a)] = s
        else:
          self.nextmove[(r,a)] = self.nextmove[(fail[r],a)]

    # convert the output to tuples, as tuple iteration is faster
    # than set iteration
    self.output = dict((k, tuple(output[k])) for k in output)

    # Next move encoded as a single array. The index of the next state
    # is located at current state * alphabet size + ord(c).
    # The choice of 'H' array typecode limits us to 64k states.
    def generate_nm_arr(typecode):
      def nextstate_iter():
        # State count starts at 0, so the number of states is the index of
        # the last state (newstate) + 1
        for state in xrange(newstate+1):
          for letter in self.alphabet:
            yield self.nextmove[(state, letter)]
      return array.array(typecode, nextstate_iter())
    try:
      self.nm_arr = generate_nm_arr('H')
    except OverflowError:
      # Could not fit in an unsigned short array, let's try an unsigned long array.
      self.nm_arr = generate_nm_arr('L')
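
Once nm_arr is built, matching never touches the goto/fail dicts again: each input character costs one index computation. A minimal sketch of that scan loop, assuming the alphabet is indexed by ord() as in the comment above and output maps states to their matched keywords:

def scan(nm_arr, output, text, alphabet_size=256):
    # the next state lives at current_state * alphabet_size + ord(c)
    state = 0
    for pos, char in enumerate(text):
        state = nm_arr[state * alphabet_size + ord(char)]
        for keyword in output.get(state, ()):
            # report the start position of every keyword ending here
            yield pos - len(keyword) + 1, keyword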

Example 30

Project: python-messaging
Source File: mms_pdu.py
View license
    def encode_message_header(self):
        """
        Binary-encodes the MMS header data.

        The encoding used for the MMS header is specified in [4].
        All "constant" encoded values found/used in this method
        are also defined in [4]. For a good example, see [2].

        :return: the MMS PDU header, as an array of bytes
        :rtype: array.array('B')
        """
        # See [4], chapter 8 for info on how to use these
        # from_types = {'Address-present-token': 0x80,
        #               'Insert-address-token': 0x81}

        # content_types = {'application/vnd.wap.multipart.related': 0xb3}

        # Create an array of 8-bit values
        message_header = array.array('B')

        headers_to_encode = self._mms_message.headers

        # If the user added any of these to the message manually
        # (X- prefix) use those instead
        for hdr in ('X-Mms-Message-Type', 'X-Mms-Transaction-Id',
                    'X-Mms-Version'):
            if hdr in headers_to_encode:
                if hdr == 'X-Mms-Version':
                    clean_header = 'MMS-Version'
                else:
                    clean_header = hdr.replace('X-Mms-', '', 1)

                headers_to_encode[clean_header] = headers_to_encode[hdr]
                del headers_to_encode[hdr]

        # First 3 headers (in order), according to [4]:
        ################################################
        # - X-Mms-Message-Type
        # - X-Mms-Transaction-ID
        # - X-Mms-Version

        ### Start of Message-Type verification
        if 'Message-Type' not in headers_to_encode:
            # Default to 'm-retrieve-conf'; we don't need a To/CC field for
            # this (see WAP-209, section 6.3, table 5)
            headers_to_encode['Message-Type'] = 'm-retrieve-conf'

        # See if the chosen message type is valid, given the message's
        # other headers. NOTE: we only distinguish between 'm-send-req'
        # (requires a destination number) and 'm-retrieve-conf'
        # (requires no destination number) - if "Message-Type" is
        # something else, we assume the message creator knows
        # what she is doing
        if headers_to_encode['Message-Type'] == 'm-send-req':
            found_dest_address = False
            for address_type in ('To', 'Cc', 'Bc'):
                if address_type in headers_to_encode:
                    found_dest_address = True
                    break

            if not found_dest_address:
                headers_to_encode['Message-Type'] = 'm-retrieve-conf'
        ### End of Message-Type verification

        ### Start of Transaction-Id verification
        if 'Transaction-Id' not in headers_to_encode:
            trans_id = str(random.randint(1000, 9999))
            headers_to_encode['Transaction-Id'] = trans_id
        ### End of Transaction-Id verification

        ### Start of MMS-Version verification
        if 'MMS-Version' not in headers_to_encode:
            headers_to_encode['MMS-Version'] = '1.0'

        # Encode the first three headers, in correct order
        for hdr in ('Message-Type', 'Transaction-Id', 'MMS-Version'):
            message_header.extend(
                MMSEncoder.encode_header(hdr, headers_to_encode[hdr]))
            del headers_to_encode[hdr]

        # Encode all remaining MMS message headers, except "Content-Type"
        # -- this needs to be added last, according [2] and [4]
        for hdr in headers_to_encode:
            if hdr != 'Content-Type':
                message_header.extend(
                    MMSEncoder.encode_header(hdr, headers_to_encode[hdr]))

        # Ok, now only "Content-type" should be left
        content_type, ct_parameters = headers_to_encode['Content-Type']
        message_header.extend(MMSEncoder.encode_mms_field_name('Content-Type'))
        ret = MMSEncoder.encode_content_type_value(content_type, ct_parameters)
        message_header.extend(flatten_list(ret))

        return message_header
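
WSP encodes well-known field names and other small enumerated values as "short integers": the assigned 7-bit token with its high bit set, so a field name costs a single octet. A minimal sketch of that primitive (the token assignments themselves come from the WSP tables and are assumed inputs here):

def encode_short_integer(value):
    # WSP short-integer: one octet, high bit set, 7-bit payload
    if not 0 <= value <= 0x7F:
        raise ValueError("short-integer must fit in 7 bits")
    return value | 0x80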

Example 31

Project: python-messaging
Source File: mms_pdu.py
View license
    def encode_message_body(self):
        """
        Binary-encodes the MMS body data

        The MMS body's header should not be confused with the actual
        MMS header, as returned by :func:`encode_header`.

        The encoding used for the MMS body is specified in [5],
        section 8.5. It is only referenced in [4], however [2]
        provides a good example of how this ties in with the MMS
        header encoding.

        The MMS body is of type `application/vnd.wap.multipart` ``mixed``
        or ``related``. As such, its structure is divided into a header, and
        the data entries/parts::

            [ header ][ entries ]
            ^^^^^^^^^^^^^^^^^^^^^
                  MMS Body

        The MMS Body header consists of one entry[5]::

            name             type           purpose
            -------          -------        -----------
            num_entries      uint_var        num of entries in the multipart entity

        The MMS body's multipart entries structure::

            name             type                   purpose
            -------          -----                  -----------
            HeadersLen       uint_var                length of the ContentType and
                                                    Headers fields combined
            DataLen          uint_var                length of the Data field
            ContentType      Multiple octets        the content type of the data
            Headers          (<HeadersLen>
                              - length of
                             <ContentType>) octets  the part's headers
            Data             <DataLen> octets       the part's data

        :return: The binary-encoded MMS PDU body, as an array of bytes
        :rtype: array.array('B')
        """
        message_body = array.array('B')

        #TODO: enable encoding of MMSs without SMIL file
        ########## MMS body: header ##########
        # Parts: SMIL file + <number of data elements in each slide>
        num_entries = 1
        for page in self._mms_message._pages:
            num_entries += page.number_of_parts()

        for data_part in self._mms_message._data_parts:
            num_entries += 1

        message_body.extend(self.encode_uint_var(num_entries))

        ########## MMS body: entries ##########
        # For every data "part", we have to add the following sequence:
        # <length of content-type + other possible headers>,
        # <length of data>,
        # <content-type + other possible headers>,
        # <data>.

        # Gather the data parts, adding the MMS message's SMIL file
        smil_part = message.DataPart()
        smil = self._mms_message.smil()
        smil_part.set_data(smil, 'application/smil')
        #TODO: make this dynamic....
        smil_part.headers['Content-ID'] = '<0000>'
        parts = [smil_part]
        for slide in self._mms_message._pages:
            for part_tuple in (slide.image, slide.audio, slide.text):
                if part_tuple is not None:
                    parts.append(part_tuple[0])

        for part in parts:
            name, val_type = part.headers['Content-Type']
            part_content_type = self.encode_content_type_value(name, val_type)

            encoded_part_headers = []
            for hdr in part.headers:
                if hdr == 'Content-Type':
                    continue
                encoded_part_headers.extend(
                        wsp_pdu.Encoder.encode_header(hdr, part.headers[hdr]))

            # HeadersLen entry (length of the ContentType and
            #  Headers fields combined)
            headers_len = len(part_content_type) + len(encoded_part_headers)
            message_body.extend(self.encode_uint_var(headers_len))
            # DataLen entry (length of the Data field)
            message_body.extend(self.encode_uint_var(len(part)))
            # ContentType entry
            message_body.extend(part_content_type)
            # Headers
            message_body.extend(encoded_part_headers)
            # Data (note: we do not null-terminate this)
            for char in part.data:
                message_body.append(ord(char))

        return message_body
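
The num_entries, HeadersLen and DataLen fields above all use WSP's variable-length unsigned integer ("uintvar"): big-endian groups of 7 bits, with the continuation bit 0x80 set on every octet except the last. A minimal sketch of such an encoder, independent of the class's own encode_uint_var:

from array import array

def encode_uint_var(value):
    # uintvar: 7 payload bits per octet, 0x80 continuation flag
    octets = [value & 0x7F]
    value >>= 7
    while value:
        octets.append((value & 0x7F) | 0x80)
        value >>= 7
    return array('B', reversed(octets))

assert encode_uint_var(0x8001).tolist() == [0x82, 0x80, 0x01]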

Example 32

Project: pwn_plug_sources
Source File: intelhex.py
View license
    def _decode_record(self, s, line=0):
        '''Decode one record of HEX file.

        @param  s       line with HEX record.
        @param  line    line number (for error messages).

        @raise  EndOfFile   if EOF record encountered.
        '''
        s = s.rstrip('\r\n')
        if not s:
            return          # empty line

        if s[0] == ':':
            try:
                bin = array('B', unhexlify(s[1:]))
            except TypeError:
                # this might be raised by unhexlify when odd hexascii digits
                raise HexRecordError(line=line)
            length = len(bin)
            if length < 5:
                raise HexRecordError(line=line)
        else:
            raise HexRecordError(line=line)

        record_length = bin[0]
        if length != (5 + record_length):
            raise RecordLengthError(line=line)

        addr = bin[1]*256 + bin[2]

        record_type = bin[3]
        if not (0 <= record_type <= 5):
            raise RecordTypeError(line=line)

        crc = sum(bin)
        crc &= 0x0FF
        if crc != 0:
            raise RecordChecksumError(line=line)

        if record_type == 0:
            # data record
            addr += self._offset
            for i in xrange(4, 4+record_length):
                if self._buf.get(addr, None) is not None:
                    raise AddressOverlapError(address=addr, line=line)
                self._buf[addr] = bin[i]
                addr += 1   # FIXME: addr should be wrapped 
                            # BUT after 02 record (at 64K boundary)
                            # and after 04 record (at 4G boundary)

        elif record_type == 1:
            # end of file record
            if record_length != 0:
                raise EOFRecordError(line=line)
            raise _EndOfFile

        elif record_type == 2:
            # Extended 8086 Segment Record
            if record_length != 2 or addr != 0:
                raise ExtendedSegmentAddressRecordError(line=line)
            self._offset = (bin[4]*256 + bin[5]) * 16

        elif record_type == 4:
            # Extended Linear Address Record
            if record_length != 2 or addr != 0:
                raise ExtendedLinearAddressRecordError(line=line)
            self._offset = (bin[4]*256 + bin[5]) * 65536

        elif record_type == 3:
            # Start Segment Address Record
            if record_length != 4 or addr != 0:
                raise StartSegmentAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'CS': bin[4]*256 + bin[5],
                               'IP': bin[6]*256 + bin[7],
                              }

        elif record_type == 5:
            # Start Linear Address Record
            if record_length != 4 or addr != 0:
                raise StartLinearAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'EIP': (bin[4]*16777216 +
                                       bin[5]*65536 +
                                       bin[6]*256 +
                                       bin[7]),
                              }
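
The checksum test above exploits a property of Intel HEX records: the checksum octet is chosen so that all record bytes, checksum included, sum to zero modulo 256. A minimal standalone check of one record line (Python 3 sketch):

from array import array
from binascii import unhexlify

def checksum_ok(line):
    # sum of all record bytes, trailing checksum included, must be 0 mod 256
    rec = array('B', unhexlify(line.rstrip('\r\n')[1:]))
    return sum(rec) & 0xFF == 0

assert checksum_ok(':00000001FF')  # the end-of-file record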

Example 33

Project: pwn_plug_sources
Source File: intelhex.py
View license
    def write_hex_file(self, f, write_start_addr=True):
        """Write data to file f in HEX format.

        @param  f                   filename or file-like object for writing
        @param  write_start_addr    enable or disable writing start address
                                    record to file (enabled by default).
                                    If there is no start address in obj, nothing
                                    will be written regardless of this setting.
        """
        fwrite = getattr(f, "write", None)
        if fwrite:
            fobj = f
            fclose = None
        else:
            fobj = file(f, 'w')
            fwrite = fobj.write
            fclose = fobj.close

        # Translation table for uppercasing hex ascii string.
        # timeit shows that using hexstr.translate(table)
        # is faster than hexstr.upper():
        # 0.452ms vs. 0.652ms (translate vs. upper)
        table = ''.join(chr(i).upper() for i in range(256))

        # start address record if any
        if self.start_addr and write_start_addr:
            keys = self.start_addr.keys()
            keys.sort()
            bin = array('B', '\0'*9)
            if keys == ['CS','IP']:
                # Start Segment Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 3      # rectyp
                cs = self.start_addr['CS']
                bin[4] = (cs >> 8) & 0x0FF
                bin[5] = cs & 0x0FF
                ip = self.start_addr['IP']
                bin[6] = (ip >> 8) & 0x0FF
                bin[7] = ip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')
            elif keys == ['EIP']:
                # Start Linear Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 5      # rectyp
                eip = self.start_addr['EIP']
                bin[4] = (eip >> 24) & 0x0FF
                bin[5] = (eip >> 16) & 0x0FF
                bin[6] = (eip >> 8) & 0x0FF
                bin[7] = eip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')
            else:
                if fclose:
                    fclose()
                raise InvalidStartAddressValueError(start_addr=self.start_addr)

        # data
        addresses = self._buf.keys()
        addresses.sort()
        addr_len = len(addresses)
        if addr_len:
            minaddr = addresses[0]
            maxaddr = addresses[-1]
    
            if maxaddr > 65535:
                need_offset_record = True
            else:
                need_offset_record = False
            high_ofs = 0

            cur_addr = minaddr
            cur_ix = 0

            while cur_addr <= maxaddr:
                if need_offset_record:
                    bin = array('B', '\0'*7)
                    bin[0] = 2      # reclen
                    bin[1] = 0      # offset msb
                    bin[2] = 0      # offset lsb
                    bin[3] = 4      # rectyp
                    high_ofs = int(cur_addr/65536)
                    bytes = divmod(high_ofs, 256)
                    bin[4] = bytes[0]   # msb of high_ofs
                    bin[5] = bytes[1]   # lsb of high_ofs
                    bin[6] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')

                while True:
                    # produce one record
                    low_addr = cur_addr & 0x0FFFF
                    # chain_len off by 1
                    chain_len = min(15, 65535-low_addr, maxaddr-cur_addr)

                    # search continuous chain
                    stop_addr = cur_addr + chain_len
                    if chain_len:
                        ix = bisect_right(addresses, stop_addr,
                                          cur_ix,
                                          min(cur_ix+chain_len+1, addr_len))
                        chain_len = ix - cur_ix     # real chain_len
                        # there could be small holes in the chain
                        # but we will catch them by try-except later
                        # so for big continuous files we will work
                        # at maximum possible speed
                    else:
                        chain_len = 1               # real chain_len

                    bin = array('B', '\0'*(5+chain_len))
                    bytes = divmod(low_addr, 256)
                    bin[1] = bytes[0]   # msb of low_addr
                    bin[2] = bytes[1]   # lsb of low_addr
                    bin[3] = 0          # rectype
                    try:    # if there are small holes we'll catch them
                        for i in range(chain_len):
                            bin[4+i] = self._buf[cur_addr+i]
                    except KeyError:
                        # we caught a hole, so shrink the chain
                        chain_len = i
                        bin = bin[:5+i]
                    bin[0] = chain_len
                    bin[4+chain_len] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')

                    # adjust cur_addr/cur_ix
                    cur_ix += chain_len
                    if cur_ix < addr_len:
                        cur_addr = addresses[cur_ix]
                    else:
                        cur_addr = maxaddr + 1
                        break
                    high_addr = int(cur_addr/65536)
                    if high_addr > high_ofs:
                        break

        # end-of-file record
        fwrite(":00000001FF\n")
        if fclose:
            fclose()

Example 34

Project: raspberry_pwn
Source File: intelhex.py
View license
    def _decode_record(self, s, line=0):
        '''Decode one record of HEX file.

        @param  s       line with HEX record.
        @param  line    line number (for error messages).

        @raise  EndOfFile   if EOF record encountered.
        '''
        s = s.rstrip('\r\n')
        if not s:
            return          # empty line

        if s[0] == ':':
            try:
                bin = array('B', unhexlify(s[1:]))
            except TypeError:
                # this might be raised by unhexlify when odd hexascii digits
                raise HexRecordError(line=line)
            length = len(bin)
            if length < 5:
                raise HexRecordError(line=line)
        else:
            raise HexRecordError(line=line)

        record_length = bin[0]
        if length != (5 + record_length):
            raise RecordLengthError(line=line)

        addr = bin[1]*256 + bin[2]

        record_type = bin[3]
        if not (0 <= record_type <= 5):
            raise RecordTypeError(line=line)

        crc = sum(bin)
        crc &= 0x0FF
        if crc != 0:
            raise RecordChecksumError(line=line)

        if record_type == 0:
            # data record
            addr += self._offset
            for i in xrange(4, 4+record_length):
                if self._buf.get(addr, None) is not None:
                    raise AddressOverlapError(address=addr, line=line)
                self._buf[addr] = bin[i]
                addr += 1   # FIXME: addr should be wrapped 
                            # BUT after 02 record (at 64K boundary)
                            # and after 04 record (at 4G boundary)

        elif record_type == 1:
            # end of file record
            if record_length != 0:
                raise EOFRecordError(line=line)
            raise _EndOfFile

        elif record_type == 2:
            # Extended 8086 Segment Record
            if record_length != 2 or addr != 0:
                raise ExtendedSegmentAddressRecordError(line=line)
            self._offset = (bin[4]*256 + bin[5]) * 16

        elif record_type == 4:
            # Extended Linear Address Record
            if record_length != 2 or addr != 0:
                raise ExtendedLinearAddressRecordError(line=line)
            self._offset = (bin[4]*256 + bin[5]) * 65536

        elif record_type == 3:
            # Start Segment Address Record
            if record_length != 4 or addr != 0:
                raise StartSegmentAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'CS': bin[4]*256 + bin[5],
                               'IP': bin[6]*256 + bin[7],
                              }

        elif record_type == 5:
            # Start Linear Address Record
            if record_length != 4 or addr != 0:
                raise StartLinearAddressRecordError(line=line)
            if self.start_addr:
                raise DuplicateStartAddressRecordError(line=line)
            self.start_addr = {'EIP': (bin[4]*16777216 +
                                       bin[5]*65536 +
                                       bin[6]*256 +
                                       bin[7]),
                              }

Example 35

Project: raspberry_pwn
Source File: intelhex.py
View license
    def write_hex_file(self, f, write_start_addr=True):
        """Write data to file f in HEX format.

        @param  f                   filename or file-like object for writing
        @param  write_start_addr    enable or disable writing start address
                                    record to file (enabled by default).
                                    If there is no start address in obj, nothing
                                    will be written regardless of this setting.
        """
        fwrite = getattr(f, "write", None)
        if fwrite:
            fobj = f
            fclose = None
        else:
            fobj = file(f, 'w')
            fwrite = fobj.write
            fclose = fobj.close

        # Translation table for uppercasing hex ascii string.
        # timeit shows that using hexstr.translate(table)
        # is faster than hexstr.upper():
        # 0.452ms vs. 0.652ms (translate vs. upper)
        table = ''.join(chr(i).upper() for i in range(256))

        # start address record if any
        if self.start_addr and write_start_addr:
            keys = self.start_addr.keys()
            keys.sort()
            bin = array('B', '\0'*9)
            if keys == ['CS','IP']:
                # Start Segment Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 3      # rectyp
                cs = self.start_addr['CS']
                bin[4] = (cs >> 8) & 0x0FF
                bin[5] = cs & 0x0FF
                ip = self.start_addr['IP']
                bin[6] = (ip >> 8) & 0x0FF
                bin[7] = ip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')
            elif keys == ['EIP']:
                # Start Linear Address Record
                bin[0] = 4      # reclen
                bin[1] = 0      # offset msb
                bin[2] = 0      # offset lsb
                bin[3] = 5      # rectyp
                eip = self.start_addr['EIP']
                bin[4] = (eip >> 24) & 0x0FF
                bin[5] = (eip >> 16) & 0x0FF
                bin[6] = (eip >> 8) & 0x0FF
                bin[7] = eip & 0x0FF
                bin[8] = (-sum(bin)) & 0x0FF    # chksum
                fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')
            else:
                if fclose:
                    fclose()
                raise InvalidStartAddressValueError(start_addr=self.start_addr)

        # data
        addresses = self._buf.keys()
        addresses.sort()
        addr_len = len(addresses)
        if addr_len:
            minaddr = addresses[0]
            maxaddr = addresses[-1]
    
            if maxaddr > 65535:
                need_offset_record = True
            else:
                need_offset_record = False
            high_ofs = 0

            cur_addr = minaddr
            cur_ix = 0

            while cur_addr <= maxaddr:
                if need_offset_record:
                    bin = array('B', '\0'*7)
                    bin[0] = 2      # reclen
                    bin[1] = 0      # offset msb
                    bin[2] = 0      # offset lsb
                    bin[3] = 4      # rectyp
                    high_ofs = int(cur_addr/65536)
                    bytes = divmod(high_ofs, 256)
                    bin[4] = bytes[0]   # msb of high_ofs
                    bin[5] = bytes[1]   # lsb of high_ofs
                    bin[6] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')

                while True:
                    # produce one record
                    low_addr = cur_addr & 0x0FFFF
                    # chain_len off by 1
                    chain_len = min(15, 65535-low_addr, maxaddr-cur_addr)

                    # search continuous chain
                    stop_addr = cur_addr + chain_len
                    if chain_len:
                        ix = bisect_right(addresses, stop_addr,
                                          cur_ix,
                                          min(cur_ix+chain_len+1, addr_len))
                        chain_len = ix - cur_ix     # real chain_len
                        # there could be small holes in the chain
                        # but we will catch them by try-except later
                        # so for big continuous files we will work
                        # at maximum possible speed
                    else:
                        chain_len = 1               # real chain_len

                    bin = array('B', '\0'*(5+chain_len))
                    bytes = divmod(low_addr, 256)
                    bin[1] = bytes[0]   # msb of low_addr
                    bin[2] = bytes[1]   # lsb of low_addr
                    bin[3] = 0          # rectype
                    try:    # if there are small holes we'll catch them
                        for i in range(chain_len):
                            bin[4+i] = self._buf[cur_addr+i]
                    except KeyError:
                        # we caught a hole, so shrink the chain
                        chain_len = i
                        bin = bin[:5+i]
                    bin[0] = chain_len
                    bin[4+chain_len] = (-sum(bin)) & 0x0FF    # chksum
                    fwrite(':' + hexlify(bin.tostring()).translate(table) + '\n')

                    # adjust cur_addr/cur_ix
                    cur_ix += chain_len
                    if cur_ix < addr_len:
                        cur_addr = addresses[cur_ix]
                    else:
                        cur_addr = maxaddr + 1
                        break
                    high_addr = int(cur_addr/65536)
                    if high_addr > high_ofs:
                        break

        # end-of-file record
        fwrite(":00000001FF\n")
        if fclose:
            fclose()

Example 36

Project: pychess
Source File: PgnImport.py
View license
    def do_import(self, filename, info=None, progressbar=None):
        DB_MAXINT_SHIFT = get_maxint_shift(self.engine)
        self.progressbar = progressbar

        orig_filename = filename
        count_source = self.conn.execute(self.count_source.where(source.c.name == orig_filename)).scalar()
        if count_source > 0:
            print("%s is already imported" % filename)
            return

        # collect new names not in the dict yet
        self.event_data = []
        self.site_data = []
        self.player_data = []
        self.annotator_data = []
        self.source_data = []

        # collect new games and commit them in big chunks for speed
        self.game_data = []
        self.bitboard_data = []
        self.stat_ins_data = []
        self.stat_upd_data = []
        self.tag_game_data = []

        if filename.startswith("http"):
            filename = download_file(filename, progressbar=progressbar)
            if filename is None:
                return
        else:
            if not os.path.isfile(filename):
                print("Can't open %s" % filename)
                return

        if filename.lower().endswith(".zip") and zipfile.is_zipfile(filename):
            zf = zipfile.ZipFile(filename, "r")
            files = [f for f in zf.namelist() if f.lower().endswith(".pgn")]
        else:
            zf = None
            files = [filename]

        for pgnfile in files:
            basename = os.path.basename(pgnfile)
            if progressbar is not None:
                GLib.idle_add(progressbar.set_text, "Reading %s ..." % basename)
            else:
                print("Reading %s ..." % pgnfile)

            if zf is None:
                size = os.path.getsize(pgnfile)
                handle = protoopen(pgnfile)
            else:
                size = zf.getinfo(pgnfile).file_size
                handle = io.TextIOWrapper(zf.open(pgnfile), encoding=PGN_ENCODING, newline='')

            cf = PgnBase(handle, [])

            # estimated game count
            all_games = max(size / 840, 1)
            self.CHUNK = 1000 if all_games > 5000 else 100

            get_id = self.get_id
            # use transaction to avoid autocommit slowness
            trans = self.conn.begin()
            try:
                i = 0
                for tagtext, movetext in read_games(handle):
                    tags = defaultdict(str, tagre.findall(tagtext))
                    if not tags:
                        print("Empty game #%s" % (i + 1))
                        continue

                    if self.cancel:
                        trans.rollback()
                        return

                    fenstr = tags.get("FEN")

                    variant = tags.get("Variant")
                    if variant:
                        if "fischer" in variant.lower() or "960" in variant:
                            variant = "Fischerandom"
                        else:
                            variant = variant.lower().capitalize()

                    # Fixes for some non-standard Chess960 .pgn
                    if fenstr and variant == "Fischerandom":
                        parts = fenstr.split()
                        parts[0] = parts[0].replace(".", "/").replace("0", "")
                        if len(parts) == 1:
                            parts.append("w")
                            parts.append("-")
                            parts.append("-")
                        fenstr = " ".join(parts)

                    if variant:
                        if variant not in name2variant:
                            print("Unknown variant: %s" % variant)
                            continue
                        variant = name2variant[variant].variant
                        if variant == NORMALCHESS:
                            # lichess uses tag [Variant "Standard"]
                            variant = 0
                            board = START_BOARD.clone()
                        else:
                            board = LBoard(variant)
                    elif fenstr:
                        variant = 0
                        board = LBoard()
                    else:
                        variant = 0
                        board = START_BOARD.clone()

                    if fenstr:
                        try:
                            board.applyFen(fenstr)
                        except SyntaxError as e:
                            print(_(
                                "The game #%s can't be loaded, because of an error parsing FEN")
                                % (i + 1), e.args[0])
                            continue
                    elif variant:
                        board.applyFen(FEN_START)

                    movelist = array("H")
                    comments = []
                    cf.error = None

                    # First we try to use simple_parse_movetext()
                    # assuming most games in .pgn contains only moves
                    # without any comments/variations
                    simple = False
                    if not fenstr and not variant:
                        bitboards = []
                        simple = cf.simple_parse_movetext(movetext, board, movelist, bitboards)

                        if cf.error is not None:
                            print("ERROR in %s game #%s" % (pgnfile, i + 1), cf.error.args[0])
                            continue

                    # If simple_parse_movetext() find any comments/variations
                    # we restart parsing with full featured parse_movetext()
                    if not simple:
                        movelist = array("H")
                        bitboards = None

                        # in case simple_parse_movetext failed we have to reset our lboard
                        if not fenstr and not variant:
                            board = START_BOARD.clone()

                        # parse movetext to create boards tree structure
                        boards = [board]
                        boards = cf.parse_movetext(movetext, boards[0], -1, pgn_import=True)

                        if cf.error is not None:
                            print("ERROR in %s game #%s" % (pgnfile, i + 1), cf.error.args[0])
                            continue

                        # create movelist and comments from boards tree
                        walk(boards[0], movelist, comments)

                    white = tags.get('White')
                    black = tags.get('Black')

                    if not movelist:
                        if (not comments) and (not white) and (not black):
                            print("Empty game #%s" % (i + 1))
                            continue

                    event_id = get_id(tags.get('Event'), event, EVENT)

                    site_id = get_id(tags.get('Site'), site, SITE)

                    game_date = tags.get('Date').strip()
                    try:
                        if game_date and '?' not in game_date:
                            ymd = game_date.split('.')
                            if len(ymd) == 3:
                                game_year, game_month, game_day = map(int, ymd)
                            else:
                                game_year, game_month, game_day = int(game_date[:4]), None, None
                        elif game_date and '?' not in game_date[:4]:
                            game_year, game_month, game_day = int(game_date[:4]), None, None
                        else:
                            game_year, game_month, game_day = None, None, None
                    except:
                        game_year, game_month, game_day = None, None, None

                    game_round = tags.get('Round')

                    white_fide_id = tags.get('WhiteFideId')
                    black_fide_id = tags.get('BlackFideId')

                    white_id = get_id(unicode(white), player, PLAYER, fide_id=white_fide_id)
                    black_id = get_id(unicode(black), player, PLAYER, fide_id=black_fide_id)

                    result = tags.get("Result")
                    if result in pgn2Const:
                        result = pgn2Const[result]
                    else:
                        print("Invalid Result tag in game #%s: %s" % (i + 1, result))
                        continue

                    white_elo = tags.get('WhiteElo')
                    white_elo = int(white_elo) if white_elo and white_elo.isdigit() else None

                    black_elo = tags.get('BlackElo')
                    black_elo = int(black_elo) if black_elo and black_elo.isdigit() else None

                    time_control = tags.get("TimeControl")

                    eco = tags.get("ECO")
                    eco = eco[:3] if eco else None

                    fen = tags.get("FEN")

                    board_tag = tags.get("Board")

                    annotator_id = get_id(tags.get("Annotator"), annotator, ANNOTATOR)

                    source_id = get_id(unicode(orig_filename), source, SOURCE, info=info)

                    game_id = self.next_id[GAME]
                    self.next_id[GAME] += 1

                    # annotated game
                    if bitboards is None:
                        for ply, board in enumerate(boards):
                            if ply == 0:
                                continue
                            bb = board.friends[0] | board.friends[1]
                            # Avoid including "mate in x" .pgn collections and similar in the opening tree
                            if fen and "/pppppppp/8/8/8/8/PPPPPPPP/" not in fen:
                                ply = -1
                            self.bitboard_data.append({
                                'game_id': game_id,
                                'ply': ply,
                                'bitboard': bb - DB_MAXINT_SHIFT,
                            })

                            if ply <= STAT_PLY_MAX:
                                self.stat_ins_data.append({
                                    'ply': ply,
                                    'bitboard': bb - DB_MAXINT_SHIFT,
                                    'count': 0,
                                    'whitewon': 0,
                                    'blackwon': 0,
                                    'draw': 0,
                                    'white_elo_count': 0,
                                    'black_elo_count': 0,
                                    'white_elo': 0,
                                    'black_elo': 0,
                                })
                                self.stat_upd_data.append({
                                    '_ply': ply,
                                    '_bitboard': bb - DB_MAXINT_SHIFT,
                                    '_count': 1,
                                    '_whitewon': 1 if result == WHITEWON else 0,
                                    '_blackwon': 1 if result == BLACKWON else 0,
                                    '_draw': 1 if result == DRAW else 0,
                                    '_white_elo_count': 1 if white_elo is not None else 0,
                                    '_black_elo_count': 1 if black_elo is not None else 0,
                                    '_white_elo': white_elo if white_elo is not None else 0,
                                    '_black_elo': black_elo if black_elo is not None else 0,
                                })

                    # simple game
                    else:
                        for ply, bb in enumerate(bitboards):
                            if ply == 0:
                                continue
                            self.bitboard_data.append({
                                'game_id': game_id,
                                'ply': ply,
                                'bitboard': bb - DB_MAXINT_SHIFT,
                            })

                            if ply <= STAT_PLY_MAX:
                                self.stat_ins_data.append({
                                    'ply': ply,
                                    'bitboard': bb - DB_MAXINT_SHIFT,
                                    'count': 0,
                                    'whitewon': 0,
                                    'blackwon': 0,
                                    'draw': 0,
                                    'white_elo_count': 0,
                                    'black_elo_count': 0,
                                    'white_elo': 0,
                                    'black_elo': 0,
                                })
                                self.stat_upd_data.append({
                                    '_ply': ply,
                                    '_bitboard': bb - DB_MAXINT_SHIFT,
                                    '_count': 1,
                                    '_whitewon': 1 if result == WHITEWON else 0,
                                    '_blackwon': 1 if result == BLACKWON else 0,
                                    '_draw': 1 if result == DRAW else 0,
                                    '_white_elo_count': 1 if white_elo is not None else 0,
                                    '_black_elo_count': 1 if black_elo is not None else 0,
                                    '_white_elo': white_elo if white_elo is not None else 0,
                                    '_black_elo': black_elo if black_elo is not None else 0,
                                })

                    ply_count = tags.get("PlyCount")
                    if not ply_count and not fen:
                        ply_count = len(bitboards) if bitboards is not None else len(boards)

                    self.game_data.append({
                        'event_id': event_id,
                        'site_id': site_id,
                        'date_year': game_year,
                        'date_month': game_month,
                        'date_day': game_day,
                        'round': game_round,
                        'white_id': white_id,
                        'black_id': black_id,
                        'result': result,
                        'white_elo': white_elo,
                        'black_elo': black_elo,
                        'ply_count': ply_count,
                        'eco': eco,
                        'fen': fen,
                        'variant': variant,
                        'board': board_tag,
                        'time_control': time_control,
                        'annotator_id': annotator_id,
                        'source_id': source_id,
                        'movelist': movelist.tostring(),
                        'comments': unicode("|".join(comments)),
                    })

                    i += 1

                    if len(self.game_data) >= self.CHUNK:
                        if self.event_data:
                            self.conn.execute(self.ins_event, self.event_data)
                            self.event_data = []

                        if self.site_data:
                            self.conn.execute(self.ins_site, self.site_data)
                            self.site_data = []

                        if self.player_data:
                            self.conn.execute(self.ins_player,
                                              self.player_data)
                            self.player_data = []

                        if self.annotator_data:
                            self.conn.execute(self.ins_annotator,
                                              self.annotator_data)
                            self.annotator_data = []

                        if self.source_data:
                            self.conn.execute(self.ins_source, self.source_data)
                            self.source_data = []

                        self.conn.execute(self.ins_game, self.game_data)
                        self.game_data = []

                        if self.bitboard_data:
                            self.conn.execute(self.ins_bitboard, self.bitboard_data)
                            self.bitboard_data = []

                            self.conn.execute(self.ins_stat, self.stat_ins_data)
                            self.conn.execute(self.upd_stat, self.stat_upd_data)
                            self.stat_ins_data = []
                            self.stat_upd_data = []

                        if progressbar is not None:
                            GLib.idle_add(progressbar.set_fraction, i / float(all_games))
                            GLib.idle_add(progressbar.set_text, "%s games from %s imported" % (i, basename))
                        else:
                            print(pgnfile, i)

                if self.event_data:
                    self.conn.execute(self.ins_event, self.event_data)
                    self.event_data = []

                if self.site_data:
                    self.conn.execute(self.ins_site, self.site_data)
                    self.site_data = []

                if self.player_data:
                    self.conn.execute(self.ins_player, self.player_data)
                    self.player_data = []

                if self.annotator_data:
                    self.conn.execute(self.ins_annotator, self.annotator_data)
                    self.annotator_data = []

                if self.source_data:
                    self.conn.execute(self.ins_source, self.source_data)
                    self.source_data = []

                if self.game_data:
                    self.conn.execute(self.ins_game, self.game_data)
                    self.game_data = []

                if self.bitboard_data:
                    self.conn.execute(self.ins_bitboard, self.bitboard_data)
                    self.bitboard_data = []

                    self.conn.execute(self.ins_stat, self.stat_ins_data)
                    self.conn.execute(self.upd_stat, self.stat_upd_data)
                    self.stat_ins_data = []
                    self.stat_upd_data = []

                if progressbar is not None:
                    GLib.idle_add(progressbar.set_fraction, i / float(all_games))
                    GLib.idle_add(progressbar.set_text, "%s games from %s imported" % (i, basename))
                else:
                    print(pgnfile, i)
                trans.commit()

            except SQLAlchemyError as e:
                trans.rollback()
                print("Importing %s failed! \n%s" % (pgnfile, e))

Example 37

Project: pychess
Source File: database.py
View license
def save(path, model, position=None):
    movelist = array("H")
    comments = []
    walk(model.boards[0].board, movelist, comments)

    game_event = model.tags["Event"]
    game_site = model.tags["Site"]
    year, month, day = int(model.tags["Year"]), int(model.tags["Month"]), int(model.tags["Day"])
    game_round = model.tags.get("Round")
    white = repr(model.players[WHITE])
    black = repr(model.players[BLACK])
    result = model.status
    eco = model.tags.get("ECO")
    time_control = model.tags.get("TimeControl")
    board = int(model.tags.get("Board")) if model.tags.get("Board") else None
    white_elo = int(model.tags.get("WhiteElo")) if model.tags.get("WhiteElo") else None
    black_elo = int(model.tags.get("BlackElo")) if model.tags.get("BlackElo") else None
    variant = model.variant.variant
    fen = model.boards[0].board.asFen()
    fen = fen if fen != FEN_START else None
    game_annotator = model.tags.get("Annotator")
    ply_count = model.ply - model.lowply

    def get_id(table, name):
        if not name:
            return None

        selection = select([table.c.id], table.c.name == unicode(name))
        result = conn.execute(selection)
        id_ = result.scalar()
        if id_ is None:
            result = conn.execute(table.insert().values(name=unicode(name)))
            id_ = result.inserted_primary_key[0]
        return id_

    engine = dbmodel.get_engine(path)
    DB_MAXINT_SHIFT = get_maxint_shift(engine)

    conn = engine.connect()
    trans = conn.begin()
    try:
        event_id = get_id(event, game_event)
        site_id = get_id(site, game_site)
        white_id = get_id(player, white)
        black_id = get_id(player, black)
        annotator_id = get_id(annotator, game_annotator)

        new_values = {
            'event_id': event_id,
            'site_id': site_id,
            'date_year': year,
            'date_month': month,
            'date_day': day,
            'round': game_round,
            'white_id': white_id,
            'black_id': black_id,
            'result': result,
            'white_elo': white_elo,
            'black_elo': black_elo,
            'ply_count': ply_count,
            'eco': eco,
            'time_control': time_control,
            'board': board,
            'fen': fen,
            'variant': variant,
            'annotator_id': annotator_id,
            'movelist': movelist.tostring(),
            'comments': "|".join(comments),
        }

        if hasattr(model, "game_id") and model.game_id is not None:
            result = conn.execute(game.update().where(
                game.c.id == model.game_id).values(new_values))

            # TODO: ?
            result = conn.execute(bitboard.delete().where(
                bitboard.c.game_id == model.game_id))
        else:
            result = conn.execute(game.insert().values(new_values))
            game_id = model.game_id = result.inserted_primary_key[0]

            if not fen:
                bitboard_data = []
                stat_ins_data = []
                stat_upd_data = []

                result = model.status

                for ply, board in enumerate(model.boards):
                    if ply == 0:
                        continue
                    bb = board.board.friends[0] | board.board.friends[1]
                    bitboard_data.append({
                        'game_id': game_id,
                        'ply': ply,
                        'bitboard': bb - DB_MAXINT_SHIFT,
                    })

                    if ply <= STAT_PLY_MAX:
                        stat_ins_data.append({
                            'ply': ply,
                            'bitboard': bb - DB_MAXINT_SHIFT,
                            'count': 0,
                            'whitewon': 0,
                            'blackwon': 0,
                            'draw': 0,
                            'white_elo_count': 0,
                            'black_elo_count': 0,
                            'white_elo': 0,
                            'black_elo': 0,
                        })
                        stat_upd_data.append({
                            '_ply': ply,
                            '_bitboard': bb - DB_MAXINT_SHIFT,
                            '_count': 1,
                            '_whitewon': 1 if result == WHITEWON else 0,
                            '_blackwon': 1 if result == BLACKWON else 0,
                            '_draw': 1 if result == DRAW else 0,
                            '_white_elo_count': 1 if white_elo is not None else 0,
                            '_black_elo_count': 1 if black_elo is not None else 0,
                            '_white_elo': white_elo if white_elo is not None else 0,
                            '_black_elo': black_elo if black_elo is not None else 0,
                        })

                result = conn.execute(bitboard.insert(), bitboard_data)
                conn.execute(insert_or_ignore(engine, stat.insert()), stat_ins_data)
                conn.execute(upd_stat, stat_upd_data)

        trans.commit()
    except:
        trans.rollback()
        raise
    conn.close()
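
The recurring bb - DB_MAXINT_SHIFT subtraction maps an unsigned 64-bit bitboard into the signed range a database integer column can hold. A minimal sketch of the same shift, assuming get_maxint_shift returns 2**63:

DB_MAXINT_SHIFT = 2 ** 63  # assumed; the real value comes from get_maxint_shift(engine)

def to_db(bb):
    # unsigned 64-bit occupancy -> signed 64-bit column value
    return bb - DB_MAXINT_SHIFT

def from_db(value):
    return value + DB_MAXINT_SHIFT

bb = (1 << 63) | 0xFF
assert from_db(to_db(bb)) == bb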

Example 38

Project: pymo
Source File: test_array.py
View license
    def test_setslice(self):
        a = array.array(self.typecode, self.example)
        a[:1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[1:])
        )

        a = array.array(self.typecode, self.example)
        a[:-1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[-1:])
        )

        a = array.array(self.typecode, self.example)
        a[-1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1] + self.example)
        )

        a = array.array(self.typecode, self.example)
        a[1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example)
        )

        a = array.array(self.typecode, self.example)
        a[1:-1] = a
        self.assertEqual(
            a,
            array.array(
                self.typecode,
                self.example[:1] + self.example + self.example[-1:]
            )
        )

        a = array.array(self.typecode, self.example)
        a[1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )

        a = array.array(self.typecode, self.example)
        a[-1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )

        a = array.array(self.typecode, self.example)
        a[:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )

        a = array.array(self.typecode, self.example)
        a[:-1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )

        a = array.array(self.typecode, self.example)
        a[1:0] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
        )

        a = array.array(self.typecode, self.example)
        a[2000:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )

        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.__setslice__, 0, 0, None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)

        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.__setslice__, 0, 0, b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
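
The self-assignment cases above rely on array's slice assignment copying the source before splicing it in. A small standalone illustration of the two most surprising cases, using an 'i' array (sample data, not the test's self.example):

import array

a = array.array('i', [1, 2, 3])
a[:1] = a                       # replace the first element with a copy of a
assert a.tolist() == [1, 2, 3, 2, 3]

b = array.array('i', [1, 2, 3])
b[1:0] = b                      # empty slice: pure insertion at index 1
assert b.tolist() == [1, 1, 2, 3, 2, 3]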

Example 39

Project: pymo
Source File: test_file.py
View license
    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods.
        dataoffset = 16384
        filler = b"ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            b"spam, spam and eggs\n",
            b"eggs, spam, ham and spam\n",
            b"saussages, spam, spam and eggs\n",
            b"spam, ham, spam and eggs\n",
            b"spam, spam, spam, spam, spam, ham, spam\n",
            b"wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("b", b" "*100),))]

        try:
            # Prepare the testfile
            bag = self.open(TESTFN, "wb")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = self.open(TESTFN, 'rb')
                if next(f) != filler:
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                meth(*args)  # This simply shouldn't fail
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192), but we can test it in a
            # flexible manner.  Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = self.open(TESTFN, 'rb')
            for i in range(nchunks):
                next(f)
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("b", b"\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tostring()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))

            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            # Reading after iteration hit EOF shouldn't hurt either
            f = self.open(TESTFN, 'rb')
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)
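
The readinto calls above fill a preallocated array('b') buffer in place. A self-contained sketch of that pattern against an in-memory file (note that array.tostring(), used in the test, is a deprecated alias of tobytes() on Python 3):

import array
import io

f = io.BytesIO(b"spam and eggs\n")
buf = array.array('b', b'\x00' * 4)   # 4-byte writable buffer
n = f.readinto(buf)                   # fills the buffer, returns bytes read
assert n == 4
assert buf.tobytes() == b"spam"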

Example 40

Project: pymo
Source File: test_file2k.py
View license
    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods. Ostensibly, the mixture could just be tested
        # to work when it should work according to the Python language,
        # instead of fail when it should fail according to the current CPython
        # implementation.  People don't always program Python the way they
        # should, though, and the implementation might change in subtle ways,
        # so we explicitly test for errors, too; the test will just have to
        # be updated when the implementation changes.
        dataoffset = 16384
        filler = "ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            "spam, spam and eggs\n",
            "eggs, spam, ham and spam\n",
            "saussages, spam, spam and eggs\n",
            "spam, ham, spam and eggs\n",
            "spam, spam, spam, spam, spam, ham, spam\n",
            "wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("c", " "*100),))]

        try:
            # Prepare the testfile
            bag = open(TESTFN, "w")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = open(TESTFN)
                if f.next() != filler:
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                try:
                    meth(*args)
                except ValueError:
                    pass
                else:
                    self.fail("%s%r after next() didn't raise ValueError" %
                                     (methodname, args))
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192), but we can test it in a
            # flexible manner.  Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = open(TESTFN)
            for i in range(nchunks):
                f.next()
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("c", "\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tostring()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))

            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            # Reading after iteration hit EOF shouldn't hurt either
            f = open(TESTFN)
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)

Example 41

Project: babel
Source File: mofile.py
View license
def write_mo(fileobj, catalog, use_fuzzy=False):
    """Write a catalog to the specified file-like object using the GNU MO file
    format.

    >>> import sys
    >>> from babel.messages import Catalog
    >>> from gettext import GNUTranslations
    >>> from babel._compat import BytesIO

    >>> catalog = Catalog(locale='en_US')
    >>> catalog.add('foo', 'Voh')
    <Message ...>
    >>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
    <Message ...>
    >>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
    <Message ...>
    >>> catalog.add('Fizz', '')
    <Message ...>
    >>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
    <Message ...>
    >>> buf = BytesIO()

    >>> write_mo(buf, catalog)
    >>> x = buf.seek(0)
    >>> translations = GNUTranslations(fp=buf)
    >>> if sys.version_info[0] >= 3:
    ...     translations.ugettext = translations.gettext
    ...     translations.ungettext = translations.ngettext
    >>> translations.ugettext('foo')
    u'Voh'
    >>> translations.ungettext('bar', 'baz', 1)
    u'Bahr'
    >>> translations.ungettext('bar', 'baz', 2)
    u'Batz'
    >>> translations.ugettext('fuz')
    u'fuz'
    >>> translations.ugettext('Fizz')
    u'Fizz'
    >>> translations.ugettext('Fuzz')
    u'Fuzz'
    >>> translations.ugettext('Fuzzes')
    u'Fuzzes'

    :param fileobj: the file-like object to write to
    :param catalog: the `Catalog` instance
    :param use_fuzzy: whether translations marked as "fuzzy" should be included
                      in the output
    """
    messages = list(catalog)
    if not use_fuzzy:
        messages[1:] = [m for m in messages[1:] if not m.fuzzy]
    messages.sort()

    ids = strs = b''
    offsets = []

    for message in messages:
        # For each string, we need size and file offset.  Each string is NUL
        # terminated; the NUL does not count into the size.
        if message.pluralizable:
            msgid = b'\x00'.join([
                msgid.encode(catalog.charset) for msgid in message.id
            ])
            msgstrs = []
            for idx, string in enumerate(message.string):
                if not string:
                    msgstrs.append(message.id[min(int(idx), 1)])
                else:
                    msgstrs.append(string)
            msgstr = b'\x00'.join([
                msgstr.encode(catalog.charset) for msgstr in msgstrs
            ])
        else:
            msgid = message.id.encode(catalog.charset)
            if not message.string:
                msgstr = message.id.encode(catalog.charset)
            else:
                msgstr = message.string.encode(catalog.charset)
        if message.context:
            msgid = b'\x04'.join([message.context.encode(catalog.charset),
                                  msgid])
        offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
        ids += msgid + b'\x00'
        strs += msgstr + b'\x00'

    # The header is 7 32-bit unsigned integers.  We don't use hash tables, so
    # the keys start right after the index tables.
    keystart = 7 * 4 + 16 * len(messages)
    valuestart = keystart + len(ids)

    # The string table first has the list of keys, then the list of values.
    # Each entry has first the size of the string, then the file offset.
    koffsets = []
    voffsets = []
    for o1, l1, o2, l2 in offsets:
        koffsets += [l1, o1 + keystart]
        voffsets += [l2, o2 + valuestart]
    offsets = koffsets + voffsets

    fileobj.write(struct.pack('Iiiiiii',
                              LE_MAGIC,                   # magic
                              0,                          # version
                              len(messages),              # number of entries
                              7 * 4,                      # start of key index
                              7 * 4 + len(messages) * 8,  # start of value index
                              0, 0                        # size and offset of hash table
                              ) + array_tobytes(array.array("i", offsets)) + ids + strs)
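
The final write above serializes the (length, offset) index pairs by building an array('i') and converting it to bytes; array_tobytes is presumably a Py2/Py3 compatibility shim around that conversion. A minimal sketch showing that the array route matches a plain struct.pack of the same native ints (assuming a platform where C int is 32 bits):

import array
import struct

offsets = [3, 28, 5, 40]    # flattened (length, offset) pairs, sample values
index_block = array.array('i', offsets).tobytes()
assert index_block == struct.pack('%di' % len(offsets), *offsets)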

Example 42

Project: stock
Source File: analysis.py
View license
    def __init__(self, data_obj, af_inc=0.02, af_max=0.2):
        heights = data_obj.high
        lows = data_obj.low
        datalen = len(heights)

        assert datalen == len(lows)

        # Main object data
        self.trend = array.array('h') # Signed short
        self.sar = array.array('d') # Double float

        # Init trend based on first two positions
        if heights[1] > heights[0] or lows[0] < lows[1]:
            # Trending up
            self.trend.append(1)
        else:
            self.trend.append(-1)

        # Init first SAR value based on trend to first position low or high
        # and extreme point value (highest on uptrend, lowest on downtrend)
        if self.trend[0] > 0:
            self.sar.append(lows[0])
            xp = heights[0]
        else:
            self.sar.append(heights[0])
            xp = lows[0]

        # Init acceleration factor
        af = af_inc

        #print(dt_date(data_obj.time[0]), "Trend: %d Hi: %.2f Lo: %.2f SAR: %.2f XP: %.2f AF: %.2f" %
        #    (self.trend[0], heights[0], lows[0], self.sar[0], xp, af))
        #sleep(0.1)

        for i in range(1, datalen):

            # If trend was up
            if self.trend[i-1] > 0:
                # If new high is higher than extreme
                if heights[i] > xp:
                    # Record new extreme
                    xp = heights[i]
                    # Set new acceleration factor
                    af = min(af_max, af+af_inc)

                # Calculate this period's SAR
                current_sar = self.sar[i-1] + af * (xp - self.sar[i-1])

                # SAR can't be higher than previous low
                if current_sar > lows[i-1]:
                    current_sar = lows[i-1]

                # Check if we have reversal
                # (current period low is lower than SAR)
                if lows[i] < current_sar:
                    # Reverse trend
                    self.trend.append(-1)
                    # Set current SAR to current extreme point
                    self.sar.append(xp)
                    # Reset acceleration factor
                    af = af_inc
                    # Reset extreme point
                    xp = lows[i]
                # If trend hasn't reversed, record current values
                else:
                    self.trend.append(1)
                    self.sar.append(current_sar)

                #print(dt_date(data_obj.time[i]), "Trend: %d Hi: %.2f Lo: %.2f SAR: %.2f XP: %.2f AF: %.2f" %
                #    (self.trend[i], heights[i], lows[i], self.sar[i], xp, af))
                #sleep(0.1)
            # End if uptrend

            elif self.trend[i-1] < 0:
                # If new low is lower than extreme
                if lows[i] < xp:
                    # Record new extreme
                    xp = lows[i]
                    # Set new acceleration factor
                    af = min(af_max, af+af_inc)

                # Calculate this period's SAR
                current_sar = self.sar[i-1] + af * (xp - self.sar[i-1])

                # SAR can't be lower than previous high
                if current_sar < heights[i-1]:
                    current_sar = heights[i-1]

                # Check if we have reversal
                # (current period high is higher than SAR)
                if heights[i] > current_sar:
                    # Reverse trend
                    self.trend.append(1)
                    # Set current SAR to current extreme point
                    self.sar.append(xp)
                    # Reset acceleration factor
                    af = af_inc
                    # Reset extreme point
                    xp = heights[i]
                # If trend hasn't reversed, record current values
                else:

                    self.trend.append(-1)
                    self.sar.append(current_sar)

                #print(dt_date(data_obj.time[i]), "Trend: %d Hi: %.2f Lo: %.2f SAR: %.2f XP: %.2f AF: %.2f" %
                #    (self.trend[i], heights[i], lows[i], self.sar[i], xp, af))
                #sleep(0.1)
            # End elif downtrend
        # End for loop through datapoints

        assert len(self.trend) == len(self.sar) == datalen
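
The two output series above use typed arrays instead of lists: 'h' holds the +1/-1 trend flags as signed shorts and 'd' holds SAR prices as doubles, so each element costs 2 and 8 bytes respectively rather than a full Python object. A quick check of those typecodes:

import array

trend = array.array('h')    # signed short: 2 bytes per trend flag
sar = array.array('d')      # double: 8 bytes per SAR value
trend.append(-1)
sar.append(101.25)
assert trend.itemsize == 2 and sar.itemsize == 8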

Example 43

Project: pygame_sdl2
Source File: image_test.py
View license
    def test_fromstring__and_tostring(self):
        """ see if fromstring, and tostring methods are symmetric.
        """
        
        def AreSurfacesIdentical(surf_a, surf_b):
            if surf_a.get_width() != surf_b.get_width() or surf_a.get_height() != surf_b.get_height():
                return False
            for y in xrange_(surf_a.get_height()):
                for x in xrange_(surf_b.get_width()):
                    if surf_a.get_at((x,y)) != surf_b.get_at((x,y)):
                        return False
            return True

        ####################################################################
        def RotateRGBAtoARGB(str_buf):
            byte_buf = array.array("B", str_buf)
            num_quads = len(byte_buf)//4
            for i in xrange_(num_quads):
                alpha = byte_buf[i*4 + 3]
                byte_buf[i*4 + 3] = byte_buf[i*4 + 2]
                byte_buf[i*4 + 2] = byte_buf[i*4 + 1]
                byte_buf[i*4 + 1] = byte_buf[i*4 + 0]
                byte_buf[i*4 + 0] = alpha
            return byte_buf.tostring()

        ####################################################################
        def RotateARGBtoRGBA(str_buf):
            byte_buf = array.array("B", str_buf)
            num_quads = len(byte_buf)//4
            for i in xrange_(num_quads):
                alpha = byte_buf[i*4 + 0]
                byte_buf[i*4 + 0] = byte_buf[i*4 + 1]
                byte_buf[i*4 + 1] = byte_buf[i*4 + 2]
                byte_buf[i*4 + 2] = byte_buf[i*4 + 3]
                byte_buf[i*4 + 3] = alpha
            return byte_buf.tostring()
                
        ####################################################################
        test_surface = pygame.Surface((64, 256), flags=pygame.SRCALPHA, depth=32)
        for i in xrange_(256):
            for j in xrange_(16):
                intensity = j*16 + 15
                test_surface.set_at((j + 0, i), (intensity, i, i, i))
                test_surface.set_at((j + 16, i), (i, intensity, i, i))
                test_surface.set_at((j + 32, i), (i, i, intensity, i))
                test_surface.set_at((j + 48, i), (i, i, i, intensity))
            
        self.assert_(AreSurfacesIdentical(test_surface, test_surface))

        rgba_buf = pygame.image.tostring(test_surface, "RGBA")
        rgba_buf = RotateARGBtoRGBA(RotateRGBAtoARGB(rgba_buf))
        test_rotate_functions = pygame.image.fromstring(rgba_buf, test_surface.get_size(), "RGBA")

        self.assert_(AreSurfacesIdentical(test_surface, test_rotate_functions))

        rgba_buf = pygame.image.tostring(test_surface, "RGBA")
        argb_buf = RotateRGBAtoARGB(rgba_buf)
        test_from_argb_string = pygame.image.fromstring(argb_buf, test_surface.get_size(), "ARGB")

        self.assert_(AreSurfacesIdentical(test_surface, test_from_argb_string))
        #"ERROR: image.fromstring with ARGB failed"


        argb_buf = pygame.image.tostring(test_surface, "ARGB")
        rgba_buf = RotateARGBtoRGBA(argb_buf)
        test_to_argb_string = pygame.image.fromstring(rgba_buf, test_surface.get_size(), "RGBA")

        self.assert_(AreSurfacesIdentical(test_surface, test_to_argb_string))
        #"ERROR: image.tostring with ARGB failed"


        argb_buf = pygame.image.tostring(test_surface, "ARGB")
        test_to_from_argb_string = pygame.image.fromstring(argb_buf, test_surface.get_size(), "ARGB")

        self.assert_(AreSurfacesIdentical(test_surface, test_to_from_argb_string))
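
The two rotation helpers above move one channel byte per pixel in a Python loop. The same RGBA-to-ARGB shuffle can be written with extended slices over the array('B') view, which is shorter and avoids the per-pixel indexing (a sketch, not part of the original test):

import array

def rgba_to_argb(str_buf):
    buf = array.array('B', str_buf)
    out = array.array('B', bytearray(len(buf)))  # zero-filled output
    out[0::4] = buf[3::4]   # alpha moves to the front
    out[1::4] = buf[0::4]   # red
    out[2::4] = buf[1::4]   # green
    out[3::4] = buf[2::4]   # blue
    return out.tobytes()

assert rgba_to_argb(b'RGBA') == b'ARGB'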

Example 44

Project: meza
Source File: convert.py
View license
def records2array(records, types, native=False, silent=False):
    """Converts records into either a numpy.recarray or a nested array.array

    Args:
        records (Iter[dict]): Rows of data whose keys are the field names.
            E.g., output from any `meza.io` read function.

        types (Iter[dict]):

        native (bool): Return a native array (default: False).

        silent (bool): Suppress the warning message (default: False).

    Returns:
        numpy.recarray

    See also:
        `meza.convert.records2df`

    Examples:
        >>> records = [{'alpha': 'aa', 'beta': 2}, {'alpha': 'bee', 'beta': 3}]
        >>> types = [
        ...     {'id': 'alpha', 'type': 'text'}, {'id': 'beta', 'type': 'int'}]
        >>>
        >>> arr = records2array(records, types, silent=True)
        >>> u, i = get_native_str('u'), get_native_str('i')
        >>> native_resp = [
        ...     [array(u, 'alpha'), array(u, 'beta')],
        ...     [array(u, 'aa'), array(u, 'bee')],
        ...     array(i, [2, 3])]
        >>>
        >>> if np:
        ...     arr.alpha.tolist() == ['aa', 'bee']
        ...     arr.beta.tolist() == [2, 3]
        ... else:
        ...     True
        ...     True
        True
        True
        >>> True if np else arr == native_resp
        True
        >>> records2array(records, types, native=True) == native_resp
        True
    """
    numpy = np and not native
    dialect = 'numpy' if numpy else 'array'
    _dtype = [ft.get_dtype(t['type'], dialect) for t in types]
    dtype = [get_native_str(d) for d in _dtype]
    ids = [t['id'] for t in types]

    if numpy:
        data = [tuple(r.get(id_) for id_ in ids) for r in records]
        ndtype = [tuple(map(get_native_str, z)) for z in zip(ids, dtype)]
        ndarray = np.array(data, dtype=ndtype)
        converted = ndarray.view(np.recarray)
    else:
        if not (native or silent):
            msg = (
                "It looks like you don't have numpy installed. This function"
                " will return a native array instead.")

            logger.warning(msg)

        header = [array(get_native_str('u'), t['id']) for t in types]
        data = (zip_longest(*([r.get(i) for i in ids] for r in records)))

        # array.array can't have nulls, so convert to an appropriate equivalent
        clean = lambda t, d: (x if x else ft.ARRAY_NULL_TYPE[t] for x in d)
        cleaned = (it.starmap(clean, zip(dtype, data)))

        values = [
            [array(t, x) for x in d] if t in {'c', 'u'} else array(t, d)
            for t, d in zip(dtype, cleaned)]

        converted = [header] + values

    return converted
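
The clean lambda above exists because array.array cannot store None: missing (falsy) values are swapped for a per-typecode placeholder before the array is built. A reduced sketch of that step, with a stand-in for meza's ARRAY_NULL_TYPE table:

from array import array

ARRAY_NULL_TYPE = {'i': 0, 'f': float('nan'), 'u': ''}   # assumed mapping

raw = [2, None, 3]
clean = [x if x else ARRAY_NULL_TYPE['i'] for x in raw]   # mirrors the falsy check above
assert array('i', clean).tolist() == [2, 0, 3]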

Example 45

Project: promise
Source File: byteplay.py
View license
    def to_code(self):
        """Assemble a Python code object from a Code object."""
        co_argcount = len(self.args) - self.varargs - self.varkwargs
        co_stacksize = self._compute_stacksize()
        co_flags = self._compute_flags()

        co_consts = [self.docstring]
        co_names = []
        co_varnames = list(self.args)

        co_freevars = tuple(self.freevars)

        # We find all cellvars beforehand, for two reasons:
        # 1. We need the number of them to construct the numeric argument
        #    for ops in "hasfree".
        # 2. We need to put arguments which are cell vars in the beginning
        #    of co_cellvars
        cellvars = set(arg for op, arg in self.code
                       if isopcode(op) and op in hasfree
                       and arg not in co_freevars)
        co_cellvars = [x for x in self.args if x in cellvars]

        def index(seq, item, eq=operator.eq, can_append=True):
            """Find the index of item in a sequence and return it.
            If it is not found in the sequence, and can_append is True,
            it is appended to the sequence.

            eq is the equality operator to use.
            """
            for i, x in enumerate(seq):
                if eq(x, item):
                    return i
            else:
                if can_append:
                    seq.append(item)
                    return len(seq) - 1
                else:
                    raise IndexError, "Item not found"

        # List of tuples (pos, label) to be filled later
        jumps = []
        # A mapping from a label to its position
        label_pos = {}
        # Last SetLineno
        lastlineno = self.firstlineno
        lastlinepos = 0

        co_code = array('B')
        co_lnotab = array('B')
        for i, (op, arg) in enumerate(self.code):
            if isinstance(op, Label):
                label_pos[op] = len(co_code)

            elif op is SetLineno:
                incr_lineno = arg - lastlineno
                incr_pos = len(co_code) - lastlinepos
                lastlineno = arg
                lastlinepos = len(co_code)

                if incr_lineno == 0 and incr_pos == 0:
                    co_lnotab.append(0)
                    co_lnotab.append(0)
                else:
                    while incr_pos > 255:
                        co_lnotab.append(255)
                        co_lnotab.append(0)
                        incr_pos -= 255
                    while incr_lineno > 255:
                        co_lnotab.append(incr_pos)
                        co_lnotab.append(255)
                        incr_pos = 0
                        incr_lineno -= 255
                    if incr_pos or incr_lineno:
                        co_lnotab.append(incr_pos)
                        co_lnotab.append(incr_lineno)

            elif op == opcode.EXTENDED_ARG:
                raise ValueError, "EXTENDED_ARG not supported in Code objects"

            elif not op in hasarg:
                co_code.append(op)

            else:
                if op in hasconst:
                    if isinstance(arg, Code) and i < len(self.code)-1 and \
                       self.code[i+1][0] in hascode:
                        arg = arg.to_code()
                    arg = index(co_consts, arg, operator.is_)
                elif op in hasname:
                    arg = index(co_names, arg)
                elif op in hasjump:
                    # arg will be filled later
                    jumps.append((len(co_code), arg))
                    arg = 0
                elif op in haslocal:
                    arg = index(co_varnames, arg)
                elif op in hascompare:
                    arg = index(cmp_op, arg, can_append=False)
                elif op in hasfree:
                    try:
                        arg = index(co_freevars, arg, can_append=False) \
                              + len(cellvars)
                    except IndexError:
                        arg = index(co_cellvars, arg)
                else:
                    # arg is ok
                    pass

                if arg > 0xFFFF:
                    co_code.append(opcode.EXTENDED_ARG)
                    co_code.append((arg >> 16) & 0xFF)
                    co_code.append((arg >> 24) & 0xFF)
                co_code.append(op)
                co_code.append(arg & 0xFF)
                co_code.append((arg >> 8) & 0xFF)

        for pos, label in jumps:
            jump = label_pos[label]
            if co_code[pos] in hasjrel:
                jump -= pos+3
            if jump > 0xFFFF:
                raise NotImplementedError, "Extended jumps not implemented"
            co_code[pos+1] = jump & 0xFF
            co_code[pos+2] = (jump >> 8) & 0xFF

        co_code = co_code.tostring()
        co_lnotab = co_lnotab.tostring()

        co_consts = tuple(co_consts)
        co_names = tuple(co_names)
        co_varnames = tuple(co_varnames)
        co_nlocals = len(co_varnames)
        co_cellvars = tuple(co_cellvars)

        return types.CodeType(co_argcount, co_nlocals, co_stacksize, co_flags,
                              co_code, co_consts, co_names, co_varnames,
                              self.filename, self.name, self.firstlineno, co_lnotab,
                              co_freevars, co_cellvars)
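
Note the assembly pattern above: both co_code and co_lnotab are built up as array('B'), one opcode or argument byte at a time, then snapshotted with tostring() (this is Python 2 code; on Python 3 the conversion is tobytes()). A tiny sketch of the same idea, using what I believe are the CPython 2.x opcode values for LOAD_CONST (100) and RETURN_VALUE (83):

from array import array

co_code = array('B')
LOAD_CONST, RETURN_VALUE = 100, 83   # assumed CPython 2.x opcode numbers
arg = 0                              # index into co_consts
co_code.append(LOAD_CONST)
co_code.append(arg & 0xFF)           # 16-bit little-endian argument
co_code.append((arg >> 8) & 0xFF)
co_code.append(RETURN_VALUE)
assert co_code.tobytes() == b'd\x00\x00S'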

Example 46

Project: withhacks
Source File: byteplay.py
View license
    def to_code(self):
        """Assemble a Python code object from a Code object."""
        co_argcount = len(self.args) - self.varargs - self.varkwargs
        co_stacksize = self._compute_stacksize()
        co_flags = self._compute_flags()

        co_consts = [self.docstring]
        co_names = []
        co_varnames = list(self.args)

        co_freevars = tuple(self.freevars)

        # We find all cellvars beforehand, for two reasons:
        # 1. We need the number of them to construct the numeric argument
        #    for ops in "hasfree".
        # 2. We need to put arguments which are cell vars in the beginning
        #    of co_cellvars
        cellvars = set(arg for op, arg in self.code
                       if isopcode(op) and op in hasfree
                       and arg not in co_freevars)
        co_cellvars = [x for x in self.args if x in cellvars]

        def index(seq, item, eq=operator.eq, can_append=True):
            """Find the index of item in a sequence and return it.
            If it is not found in the sequence, and can_append is True,
            it is appended to the sequence.

            eq is the equality operator to use.
            """
            for i, x in enumerate(seq):
                if eq(x, item):
                    return i
            else:
                if can_append:
                    seq.append(item)
                    return len(seq) - 1
                else:
                    raise IndexError, "Item not found"

        # List of tuples (pos, label) to be filled later
        jumps = []
        # A mapping from a label to its position
        label_pos = {}
        # Last SetLineno
        lastlineno = self.firstlineno
        lastlinepos = 0

        co_code = array('B')
        co_lnotab = array('B')
        for i, (op, arg) in enumerate(self.code):
            if isinstance(op, Label):
                label_pos[op] = len(co_code)

            elif op is SetLineno:
                incr_lineno = arg - lastlineno
                incr_pos = len(co_code) - lastlinepos
                lastlineno = arg
                lastlinepos = len(co_code)

                if incr_lineno == 0 and incr_pos == 0:
                    co_lnotab.append(0)
                    co_lnotab.append(0)
                else:
                    while incr_pos > 255:
                        co_lnotab.append(255)
                        co_lnotab.append(0)
                        incr_pos -= 255
                    while incr_lineno > 255:
                        co_lnotab.append(incr_pos)
                        co_lnotab.append(255)
                        incr_pos = 0
                        incr_lineno -= 255
                    if incr_pos or incr_lineno:
                        co_lnotab.append(incr_pos)
                        co_lnotab.append(incr_lineno)

            elif op == opcode.EXTENDED_ARG:
                raise ValueError, "EXTENDED_ARG not supported in Code objects"

            elif not op in hasarg:
                co_code.append(op)

            else:
                if op in hasconst:
                    if isinstance(arg, Code) and i < len(self.code)-1 and \
                       self.code[i+1][0] in hascode:
                        arg = arg.to_code()
                    arg = index(co_consts, arg, operator.is_)
                elif op in hasname:
                    arg = index(co_names, arg)
                elif op in hasjump:
                    # arg will be filled later
                    jumps.append((len(co_code), arg))
                    arg = 0
                elif op in haslocal:
                    arg = index(co_varnames, arg)
                elif op in hascompare:
                    arg = index(cmp_op, arg, can_append=False)
                elif op in hasfree:
                    try:
                        arg = index(co_freevars, arg, can_append=False) \
                              + len(cellvars)
                    except IndexError:
                        arg = index(co_cellvars, arg)
                else:
                    # arg is ok
                    pass

                if arg > 0xFFFF:
                    co_code.append(opcode.EXTENDED_ARG)
                    co_code.append((arg >> 16) & 0xFF)
                    co_code.append((arg >> 24) & 0xFF)
                co_code.append(op)
                co_code.append(arg & 0xFF)
                co_code.append((arg >> 8) & 0xFF)

        for pos, label in jumps:
            jump = label_pos[label]
            if co_code[pos] in hasjrel:
                jump -= pos+3
            if jump > 0xFFFF:
                raise NotImplementedError, "Extended jumps not implemented"
            co_code[pos+1] = jump & 0xFF
            co_code[pos+2] = (jump >> 8) & 0xFF

        co_code = co_code.tostring()
        co_lnotab = co_lnotab.tostring()

        co_consts = tuple(co_consts)
        co_names = tuple(co_names)
        co_varnames = tuple(co_varnames)
        co_nlocals = len(co_varnames)
        co_cellvars = tuple(co_cellvars)

        return new.code(co_argcount, co_nlocals, co_stacksize, co_flags,
                        co_code, co_consts, co_names, co_varnames,
                        self.filename, self.name, self.firstlineno, co_lnotab,
                        co_freevars, co_cellvars)

Example 47

Project: imagrium
Source File: test_array.py
View license
    def test_setslice(self):
        a = array.array(self.typecode, self.example)
        a[:1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[1:])
        )

        a = array.array(self.typecode, self.example)
        a[:-1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[-1:])
        )

        a = array.array(self.typecode, self.example)
        a[-1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1] + self.example)
        )

        a = array.array(self.typecode, self.example)
        a[1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example)
        )

        a = array.array(self.typecode, self.example)
        a[1:-1] = a
        self.assertEqual(
            a,
            array.array(
                self.typecode,
                self.example[:1] + self.example + self.example[-1:]
            )
        )

        a = array.array(self.typecode, self.example)
        a[1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )

        a = array.array(self.typecode, self.example)
        a[-1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )

        a = array.array(self.typecode, self.example)
        a[:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )

        a = array.array(self.typecode, self.example)
        a[:-1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )

        a = array.array(self.typecode, self.example)
        a[1:0] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
        )

        a = array.array(self.typecode, self.example)
        a[2000:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )

        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.__setslice__, 0, 0, None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)

        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.__setslice__, 0, 0, b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)

Example 48

Project: imagrium
Source File: test_file.py
View license
    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods. Ostensibly, the mixture could just be tested
        # to work when it should work according to the Python language,
        # instead of fail when it should fail according to the current CPython
        # implementation.  People don't always program Python the way they
        # should, though, and the implementation might change in subtle ways,
        # so we explicitly test for errors, too; the test will just have to
        # be updated when the implementation changes.
        dataoffset = 16384
        filler = "ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            "spam, spam and eggs\n",
            "eggs, spam, ham and spam\n",
            "saussages, spam, spam and eggs\n",
            "spam, ham, spam and eggs\n",
            "spam, spam, spam, spam, spam, ham, spam\n",
            "wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("c", " "*100),))]

        try:
            # Prepare the testfile
            bag = open(TESTFN, "w")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = open(TESTFN)
                if f.next() != filler:
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                try:
                    meth(*args)
                except ValueError:
                    pass
                else:
                    self.fail("%s%r after next() didn't raise ValueError" %
                                     (methodname, args))
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192), but we can test it in a
            # flexible manner.  Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = open(TESTFN)
            for i in range(nchunks):
                f.next()
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("c", "\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tostring()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))

            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            # Reading after iteration hit EOF shouldn't hurt either
            f = open(TESTFN)
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)

Example 49

Project: imagrium
Source File: test_file2k.py
View license
    @unittest.skipIf(test_support.is_jython, "FIXME: Not working on Jython")
    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods. Ostensibly, the mixture could just be tested
        # to work when it should work according to the Python language,
        # instead of fail when it should fail according to the current CPython
        # implementation.  People don't always program Python the way they
        # should, though, and the implementation might change in subtle ways,
        # so we explicitly test for errors, too; the test will just have to
        # be updated when the implementation changes.
        dataoffset = 16384
        filler = "ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            "spam, spam and eggs\n",
            "eggs, spam, ham and spam\n",
            "saussages, spam, spam and eggs\n",
            "spam, ham, spam and eggs\n",
            "spam, spam, spam, spam, spam, ham, spam\n",
            "wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("c", " "*100),))]

        try:
            # Prepare the testfile
            bag = open(TESTFN, "w")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = open(TESTFN)
                if f.next() != filler:
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                try:
                    meth(*args)
                except ValueError:
                    pass
                else:
                    self.fail("%s%r after next() didn't raise ValueError" %
                                     (methodname, args))
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192), but we can test it in a
            # flexible manner.  Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = open(TESTFN)
            for i in range(nchunks):
                f.next()
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("c", "\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tostring()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))

            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            # Reading after iteration hit EOF shouldn't hurt either
            f = open(TESTFN)
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)

Example 50

Project: python-uncompyle6
Source File: scanner3.py
View license
    def ingest(self, co, classname=None, code_objects={}, show_asm=None):
        """
        Pick out tokens from an uncompyle6 code object, and transform them,
        returning a list of uncompyle6 'Token's.

        The transformations are made to assist the deparsing grammar.
        Specifically:
           -  various types of LOAD_CONST's are categorized in terms of what they load
           -  COME_FROM instructions are added to assist parsing control structures
           -  MAKE_FUNCTION and FUNCTION_CALLS append the number of positional arguments

        Also, when we encounter certain tokens, we add them to a set which will cause custom
        grammar rules. Specifically, variable arg tokens like MAKE_FUNCTION or BUILD_LIST
        cause specific rules for the specific number of arguments they take.
        """

        show_asm = self.show_asm if not show_asm else show_asm
        # show_asm = 'after'
        if show_asm in ('both', 'before'):
            bytecode = Bytecode(co, self.opc)
            for instr in bytecode.get_instructions(co):
                print(instr._disassemble())

        # Container for tokens
        tokens = []

        customize = {}
        if self.is_pypy:
            customize['PyPy'] = 1

        self.code = array('B', co.co_code)
        self.build_lines_data(co)
        self.build_prev_op()

        bytecode = Bytecode(co, self.opc)

        # FIXME: put as its own method?
        # Scan for assertions. Later we will
        # turn 'LOAD_GLOBAL' to 'LOAD_ASSERT'.
        # 'LOAD_ASSERT' is used in assert statements.
        self.load_asserts = set()
        bs = list(bytecode)
        n = len(bs)
        for i in range(n):
            inst = bs[i]

            # We need to detect the difference between
            # "raise AssertionError" and "assert"
            # If we have a JUMP_FORWARD after the
            # RAISE_VARARGS then we have a "raise" statement
            # else we have an "assert" statement.
            if inst.opname == 'POP_JUMP_IF_TRUE' and i+1 < n:
                next_inst = bs[i+1]
                if (next_inst.opname == 'LOAD_GLOBAL' and
                    next_inst.argval == 'AssertionError'):
                    for j in range(i+2, n):
                        raise_inst = bs[j]
                        if raise_inst.opname.startswith('RAISE_VARARGS'):
                            if j+1 >= n or bs[j+1].opname != 'JUMP_FORWARD':
                                self.load_asserts.add(next_inst.offset)
                                pass
                            break
                    pass
                pass

        # Get jump targets
        # Format: {target offset: [jump offsets]}
        jump_targets = self.find_jump_targets()

        for inst in bytecode:

            argval = inst.argval
            if inst.offset in jump_targets:
                jump_idx = 0
                # We want COME_FROMs targeting the same offset to be processed in
                # *descending* offset order, so that the largest range (the biggest
                # instruction interval) comes last. (They are probably already sorted
                # in increasing order, but we sort for safety.) That way, specific
                # COME_FROM tags will match up
                # properly. For example, a "loop" with an "if" nested in it should have the
                # "loop" tag last so the grammar rule matches that properly.
                for jump_offset in sorted(jump_targets[inst.offset], reverse=True):
                    come_from_name = 'COME_FROM'
                    opname = self.opName(jump_offset)
                    if opname.startswith('SETUP_'):
                        come_from_type = opname[len('SETUP_'):]
                        come_from_name = 'COME_FROM_%s' % come_from_type
                        pass
                    tokens.append(Token(come_from_name,
                                        None, repr(jump_offset),
                                        offset='%s_%s' % (inst.offset, jump_idx),
                                        has_arg = True, opc=self.opc))
                    jump_idx += 1
                    pass
                pass

            pattr = inst.argrepr
            opname = inst.opname
            op = inst.opcode

            if opname in ['LOAD_CONST']:
                const = inst.argval
                if iscode(const):
                    if const.co_name == '<lambda>':
                        opname = 'LOAD_LAMBDA'
                    elif const.co_name == '<genexpr>':
                        opname = 'LOAD_GENEXPR'
                    elif const.co_name == '<dictcomp>':
                        opname = 'LOAD_DICTCOMP'
                    elif const.co_name == '<setcomp>':
                        opname = 'LOAD_SETCOMP'
                    elif const.co_name == '<listcomp>':
                        opname = 'LOAD_LISTCOMP'
                    # verify() uses 'pattr' for comparison, since 'attr'
                    # now holds Code(const) and thus can not be used
                    # for comparison (todo: think about changing this)
                    # pattr = 'code_object @ 0x%x %s->%s' %\
                    # (id(const), const.co_filename, const.co_name)
                    pattr = '<code_object ' + const.co_name + '>'
                else:
                    pattr = const
                    pass
            elif opname in ('MAKE_FUNCTION', 'MAKE_CLOSURE'):
                pos_args, name_pair_args, annotate_args = parse_fn_counts(inst.argval)
                if name_pair_args > 0:
                    opname = '%s_N%d' % (opname, name_pair_args)
                    pass
                if annotate_args > 0:
                    opname = '%s_A_%d' % (opname, annotate_args)
                    pass
                opname = '%s_%d' % (opname, pos_args)
                pattr = ("%d positional, %d keyword pair, %d annotated" %
                             (pos_args, name_pair_args, annotate_args))
                tokens.append(
                    Token(
                        type_ = opname,
                        attr = (pos_args, name_pair_args, annotate_args),
                        pattr = pattr,
                        offset = inst.offset,
                        linestart = inst.starts_line,
                        op = op,
                        has_arg = op_has_argument(op, op3),
                        opc = self.opc
                    )
                )
                continue
            elif op in self.varargs_ops:
                pos_args = inst.argval
                if self.is_pypy and not pos_args and opname == 'BUILD_MAP':
                    opname = 'BUILD_MAP_n'
                else:
                    opname = '%s_%d' % (opname, pos_args)
            elif self.is_pypy and opname in ('CALL_METHOD', 'JUMP_IF_NOT_DEBUG'):
                # The value in the dict is in special cases in semantic actions, such
                # as CALL_FUNCTION. The value is not used in these cases, so we put
                # in arbitrary value 0.
                customize[opname] = 0
            elif opname == 'UNPACK_EX':
                # FIXME: try with scanner and parser by
                # changing inst.argval
                before_args = inst.argval & 0xFF
                after_args = (inst.argval >> 8) & 0xff
                pattr = "%d before vararg, %d after" % (before_args, after_args)
                argval = (before_args, after_args)
                opname = '%s_%d+%d' % (opname, before_args, after_args)

            elif op == self.opc.JUMP_ABSOLUTE:
                # Further classify JUMP_ABSOLUTE into backward jumps
                # which are used in loops, and "CONTINUE" jumps which
                # may appear in a "continue" statement.  The loop-type
                # and continue-type jumps will help us classify loop
                # boundaries. The continue-type jumps help us get
                # "continue" statements which would otherwise be turned
                # into a "pass" statement because JUMPs are sometimes
                # ignored in rules as just boundary overhead. In
                # comprehensions we might sometimes classify JUMP_BACK
                # as CONTINUE, but that's okay since we add a grammar
                # rule for that.
                pattr = inst.argval
                target = self.get_target(inst.offset)
                if target <= inst.offset:
                    next_opname = self.opname[self.code[inst.offset+3]]
                    if (inst.offset in self.stmts and
                        next_opname not in ('END_FINALLY', 'POP_BLOCK')
                        and inst.offset not in self.not_continue):
                        opname = 'CONTINUE'
                    else:
                        opname = 'JUMP_BACK'
                        # FIXME: this is a hack to catch stuff like:
                        #   if x: continue
                        # the "continue" is not on a new line.
                        # There are other situations where we don't catch
                        # CONTINUE as well.
                        if tokens[-1].type == 'JUMP_BACK':
                            tokens[-1].type = intern('CONTINUE')

            elif op == self.opc.RETURN_VALUE:
                if inst.offset in self.return_end_ifs:
                    opname = 'RETURN_END_IF'
            elif inst.offset in self.load_asserts:
                opname = 'LOAD_ASSERT'

            tokens.append(
                Token(
                    type_ = opname,
                    attr = argval,
                    pattr = pattr,
                    offset = inst.offset,
                    linestart = inst.starts_line,
                    op = op,
                    has_arg = (op >= op3.HAVE_ARGUMENT),
                    opc = self.opc
                    )
                )
            pass

        if show_asm in ('both', 'after'):
            for t in tokens:
                print(t)
            print()
        return tokens, customize
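
The key array.array use here is self.code = array('B', co.co_code), which exposes the raw bytecode as indexable small integers (the self.code[inst.offset+3] lookup above depends on it). A standalone sketch of that wrapping, assuming CPython 3.6+ where each instruction is an (opcode, argument) byte pair:

import dis
from array import array

def f():
    return 42

code = array('B', f.__code__.co_code)
ops = [dis.opname[b] for b in code[::2]]   # every other byte is an opcode
print(ops)   # e.g. ['RESUME', 'LOAD_CONST', 'RETURN_VALUE'] on CPython 3.11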