cStringIO.StringIO

Here are examples of the Python API cStringIO.StringIO taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

137 Examples
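
Before the project examples, here is a minimal sketch of the two typical ways the constructor is used (the variable names below are illustrative). Calling it with no argument gives a writable in-memory buffer; passing an initial string gives a read-only file-like object. Unlike StringIO.StringIO, cStringIO buffers byte strings, so non-ASCII unicode text generally has to be encoded (for example to UTF-8) before it is written.

import cStringIO

# Writable buffer: create it empty, write() pieces, then take getvalue()
out = cStringIO.StringIO()
out.write('hello ')
out.write('world')
assert out.getvalue() == 'hello world'

# Readable buffer: pass an initial byte string; the result is read-only
inp = cStringIO.StringIO('some bytes')
assert inp.read(4) == 'some'
inp.seek(0)                      # rewind to read from the start again
assert inp.read() == 'some bytes'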

Example 51

Project: grow Source File: catalog_holder.py
Function: extract
    def extract(self, include_obsolete=None, localized=None, paths=None,
                include_header=None, locales=None, use_fuzzy_matching=None):
        include_obsolete, localized, include_header, use_fuzzy_matching, = \
            self.get_extract_config(include_header=include_header,
                include_obsolete=include_obsolete, localized=localized,
                use_fuzzy_matching=use_fuzzy_matching)

        env = self.pod.get_jinja_env()
        # {
        #    locale1: locale1_catalog,
        #    locale2: locale2_catalog,
        #    ...
        # }
        # This is built up as we extract
        localized_catalogs = {}
        unlocalized_catalog = catalogs.Catalog()  # for localized=False case

        comment_tags = [
            ':',
        ]
        options = {
            'extensions': ','.join(env.extensions.keys()),
            'silent': 'false',
        }

        def _add_to_catalog(message, locales):
            # Add to all relevant catalogs
            for locale in locales:
                if locale not in localized_catalogs:
                    # Start with a new catalog so we can track what's obsolete:
                    # we'll merge it with existing translations later.
                    # *NOT* setting `locale` kwarg here b/c that will load existing
                    # translations.
                    localized_catalogs[locale] = catalogs.Catalog(pod=self.pod)
                localized_catalogs[locale][message.id] = message
            unlocalized_catalog[message.id] = message

        def _handle_field(path, locales, msgid, key, node):
            if (not key
                    or not isinstance(key, basestring)
                    or not key.endswith('@')):
                return
            # Support gettext "extracted comments" on tagged fields:
            #   field@: Message.
            #   field@#: Extracted comment for field@.
            auto_comments = []
            if isinstance(node, dict):
                auto_comment = node.get('{}#'.format(key))
                if auto_comment:
                    auto_comments.append(auto_comment)
            message = catalog.Message(
                msgid,
                None,
                auto_comments=auto_comments,
                locations=[(path, 0)])
            if msgid:
                _add_to_catalog(message, locales)

        def _babel_extract(fp, locales, path):
            try:
                all_parts = extract.extract(
                    'jinja2.ext.babel_extract',
                    fp,
                    options=options,
                    comment_tags=comment_tags)
                for parts in all_parts:
                    lineno, msgid, comments, context = parts
                    message = catalog.Message(
                        msgid,
                        None,
                        auto_comments=comments,
                        locations=[(path, lineno)])
                    _add_to_catalog(message, locales)
            except tokenize.TokenError:
                self.pod.logger.error('Problem extracting body: {}'.format(path))
                raise

        # Extract from collections in /content/:
        # Strings only extracted for relevant locales, determined by locale
        # scope (pod > collection > document > document part)
        last_pod_path = None
        for collection in self.pod.list_collections():
            text = 'Extracting collection: {}'.format(collection.pod_path)
            self.pod.logger.info(text)
            # Extract from blueprint.
            utils.walk(collection.tagged_fields,
                       lambda *args: _handle_field(collection.pod_path,
                                                   collection.locales, *args))
            # Extract from docs in collection.
            for doc in collection.list_docs(include_hidden=True):
                if not self._should_extract_as_babel(paths, doc.pod_path):
                    continue
                if doc.pod_path != last_pod_path:
                    self.pod.logger.info(
                        'Extracting: {} ({} locale{})'.format(
                            doc.pod_path,
                            len(doc.locales),
                            's' if len(doc.locales) != 1 else '',
                        )
                    )
                    last_pod_path = doc.pod_path
                # If doc.locale is set, this is a doc part: only extract for
                # its own locales (not those of base doc).
                if doc.locale:
                    doc_locales = [doc.locale]
                # If doc.locale is not set, this is a base doc (first or only
                # part): extract for all locales declared for this doc
                elif doc.locales:
                    doc_locales = doc.locales
                # Otherwise only include in template (--no-localized)
                else:
                    doc_locales = [None]

                # Extract yaml fields: `foo@: Extract me`
                # ("tagged" = prior to stripping `@` suffix from field names)
                tagged_fields = doc.get_tagged_fields()
                utils.walk(tagged_fields,
                           lambda *args: _handle_field(doc.pod_path, doc_locales, *args))

                # Extract body: {{_('Extract me')}}
                if doc.body:
                    doc_body = cStringIO.StringIO(doc.body.encode('utf-8'))
                    _babel_extract(doc_body, doc_locales, doc.pod_path)

            # Extract from CSVs for this collection's locales
            for filepath in self.pod.list_dir(collection.pod_path):
                if filepath.endswith('.csv'):
                    pod_path = os.path.join(collection.pod_path, filepath.lstrip('/'))
                    self.pod.logger.info('Extracting: {}'.format(pod_path))
                    rows = self.pod.read_csv(pod_path)
                    for i, row in enumerate(rows):
                        for key, msgid in row.iteritems():
                            _handle_field(pod_path, collection.locales, msgid, key, row)

        # Extract from root of /content/:
        for path in self.pod.list_dir('/content/', recursive=False):
            if path.endswith(('.yaml', '.yml')):
                pod_path = os.path.join('/content/', path)
                self.pod.logger.info('Extracting: {}'.format(pod_path))
                utils.walk(
                    self.pod.get_doc(pod_path).get_tagged_fields(),
                    lambda *args: _handle_field(pod_path, self.pod.list_locales(), *args)
                )

        # Extract from /views/:
        # Not discriminating by file extension, because people use all sorts
        # (htm, html, tpl, dtml, jtml, ...)
        for path in self.pod.list_dir('/views/'):
            if path.startswith('.'):
                continue
            pod_path = os.path.join('/views/', path)
            self.pod.logger.info('Extracting: {}'.format(pod_path))
            with self.pod.open_file(pod_path) as f:
                _babel_extract(f, self.pod.list_locales(), pod_path)

        # Extract from podspec.yaml:
        self.pod.logger.info('Extracting: podspec.yaml')
        utils.walk(
            self.pod.get_podspec().get_config(),
            lambda *args: _handle_field('/podspec.yaml', self.pod.list_locales(), *args)
        )

        # Save it out: behavior depends on --localized and --locale flags
        if localized:
            # Save each localized catalog
            for locale, new_catalog in localized_catalogs.items():
                # Skip if `locales` defined but doesn't include this locale
                if locales and locale not in locales:
                    continue
                existing_catalog = self.get(locale)
                existing_catalog.update_using_catalog(
                    new_catalog,
                    include_obsolete=include_obsolete)
                existing_catalog.save(include_header=include_header)
                missing = existing_catalog.list_untranslated()
                num_messages = len(existing_catalog)
                self.pod.logger.info(
                    'Saved: /{path} ({num_translated}/{num_messages})'.format(
                        path=existing_catalog.pod_path,
                        num_translated=num_messages - len(missing),
                        num_messages=num_messages)
                    )
        else:
            # --localized omitted / --no-localized
            template_catalog = self.get_template()
            template_catalog.update_using_catalog(
                unlocalized_catalog,
                include_obsolete=include_obsolete)
            template_catalog.save(include_header=include_header)
            text = 'Saved: {} ({} messages)'
            self.pod.logger.info(
                text.format(template_catalog.pod_path, len(template_catalog))
            )
            return template_catalog
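
The cStringIO call in this example is near the end of the document loop: the document body is a unicode string, but the babel extractor expects a file-like object of bytes, so the body is encoded to UTF-8 and wrapped in cStringIO.StringIO before being handed to _babel_extract. A minimal sketch of that pattern, with a stand-in consumer in place of babel (consume and body are illustrative names, not part of grow):

import cStringIO

def consume(fp):
    # stand-in for an extractor that reads a file-like object of bytes
    return fp.read()

body = u'{{_("Extract me")}} caf\xe9'
fp = cStringIO.StringIO(body.encode('utf-8'))   # encode first: cStringIO holds bytes
assert consume(fp) == body.encode('utf-8')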

Example 52

Project: report-ng Source File: yamled.py
    def __init__(self, parent=None, title='', content=None, size=(800, 600,), *args, **kwargs):
        if title:
            self.title = title
        wx.Frame.__init__(self, parent, title=self.title, size=size, *args, **kwargs)
        self.parent = parent
        self.application = Version()
        # icon
        myStream = cStringIO.StringIO(base64.b64decode(icon))
        myImage = wx.ImageFromStream(myStream)
        myBitmap = wx.BitmapFromImage(myImage)
        self.icon = wx.EmptyIcon()
        self.icon.CopyFromBitmap(myBitmap)
        self.SetIcon(self.icon)
        # tree image list
        if self.T:
            self.tree_image_list = wx.ImageList(16, 16)
            self.dotlist = self.tree_image_list.Add(wx.Image('resources/dotlist.png', wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap())
        # Menu arrangement
        menu = wx.MenuBar()
        class Index(object):
            def __init__(self, current):
                self.__current = current - 1
            @property
            def current(self):
                return self.__current
            @current.setter
            def current(self, x):
                self.__current = x
            def next(self):
                self.__current += 1
                return self.__current
        index = Index(100)
        menu_file = wx.Menu()
        self.menu_file_open = menu_file.Append(index.next(), '&Open...')
        self.menu_file_open.Enable(True)
        self.Bind(wx.EVT_MENU, self.File_Open, id=index.current)
        self.menu_file_close = menu_file.Append(index.next(), '&Close')
        self.menu_file_close.Enable(False)
        self.Bind(wx.EVT_MENU, self.File_Close, id=index.current)
        self.menu_file_save = menu_file.Append(index.next(), '&Save\tCtrl+S')
        self.menu_file_save.Enable(False)
        self.Bind(wx.EVT_MENU, self.File_Save, id=index.current)
        self.menu_file_save_as = menu_file.Append(index.next(), '&Save As...')
        self.menu_file_save_as.Enable(False)
        self.Bind(wx.EVT_MENU, self.File_Save_As, id=index.current)
        menu_file.AppendSeparator()
        menu_file.Append(wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit application')
        self.Bind(wx.EVT_MENU, self.__Exit, id=wx.ID_EXIT)
        menu.Append(menu_file, '&File')
        menu_edit = wx.Menu()
        self.menu_edit_find = menu_edit.Append(wx.ID_FIND, '&Find...')
        self.Bind(wx.EVT_MENU, self.Find, id=wx.ID_FIND)
        self.menu_edit_find.Enable(False)
        menu.Append(menu_edit, '&Edit')
        menu_help = wx.Menu()
        menu_help.Append(wx.ID_ABOUT, '&About')
        self.Bind(wx.EVT_MENU, self.About, id=wx.ID_ABOUT)
        menu.Append(menu_help, '&Help')
        self.SetMenuBar(menu)
        # Layout
        self.splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
        #def splitter_dclick(e):
        #    e.Veto()
        #self.Bind(wx.EVT_SPLITTER_DCLICK, splitter_dclick, self.splitter)
        self.splitter.SetMinimumPaneSize(200)
        #self.gray = self.splitter.GetBackgroundColour()
        self.left = wx.Panel(self.splitter, style=wx.BORDER_SIMPLE)
        self.tree = wx.TreeCtrl(self.left, style=wx.TR_HAS_BUTTONS|wx.TR_HIDE_ROOT|wx.TR_LINES_AT_ROOT|wx.TR_MULTIPLE|wx.TR_EDIT_LABELS|wx.BORDER_NONE) #|wx.TR_NO_LINES
        if self.T:
            self.tree.AssignImageList(self.tree_image_list)
        def splitter_repaint(e):
            self._tree_adjust()
        self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGING, splitter_repaint, self.splitter)
        #self.splitter.Bind(wx.EVT_PAINT, splitter_repaint)
        #self.white = self.tree.GetBackgroundColour()
        #print self.stack.GetBackgroundColour() --> (240, 240, 240, 255)
        self.stack = sp.ScrolledPanel(self.splitter, style=wx.BORDER_SIMPLE)
        self.stack_sizer = wx.BoxSizer(wx.VERTICAL)
        self.stack.SetSizer(self.stack_sizer)
        self.stack.Layout()
        self.stack.SetupScrolling(scroll_x=False)
        self.stack_sizer.Fit(self.stack)
        self.splitter.SplitVertically(self.left, self.stack, 300)
        self.root = self.tree.AddRoot('')
        self.n=[]
        self.t=[]
        self.d=[]
        self.r=[]
        node = self.tree.AppendItem(self.root, '')
        self.item_height = self.tree.GetBoundingRect(node)[-1]-1
        self.tree.Delete(node)
        del node
        # tree popupmenu
        self.tree_popupmenu = wx.Menu()
        self.tree_popupmenu_newchildnode = self.tree_popupmenu.Append(-1, 'New child node')
        self.tree_popupmenu_newchildnode.Enable(False)
        self.Bind(wx.EVT_MENU, self.__tree_OnPopupMenu_NewChildNode, self.tree_popupmenu_newchildnode)
        self.tree_popupmenu_delnode = self.tree_popupmenu.Append(-1, 'Delete node')
        self.Bind(wx.EVT_MENU, self.__tree_OnPopupMenu_DelNode, self.tree_popupmenu_delnode)
        self.tree_popupmenu.AppendSeparator()
        tree_popupmenu_collapse_all = self.tree_popupmenu.Append(-1, 'Collapse all')
        self.Bind(wx.EVT_MENU, self.__tree_OnPopupMenu_CollapseAll, tree_popupmenu_collapse_all)
        tree_popupmenu_expand_children = self.tree_popupmenu.Append(-1, 'Expand children')
        self.Bind(wx.EVT_MENU, self.__tree_OnPopupMenu_ExpandChildren, tree_popupmenu_expand_children)
        tree_popupmenu_expand_all = self.tree_popupmenu.Append(-1, 'Expand all')
        self.Bind(wx.EVT_MENU, self.__tree_OnPopupMenu_ExpandAll, tree_popupmenu_expand_all)
        self.tree.Bind(wx.EVT_CONTEXT_MENU, self.__tree_OnPopupMenu)
        def tree_empty_OnPopupMenu(e):
            if self.tree.GetCount() == 0:
                self.__tree_OnPopupMenu(e)
        self.tree.Bind(wx.EVT_RIGHT_DOWN, tree_empty_OnPopupMenu)
        #for i in range(50):
        #    self.n += [self.tree.AppendItem(self.root, str(i+1))]
        #    ctrl = self.yTextCtrl(self.stack, self, size=(-1, self.item_height), style=wx.BORDER_NONE)
        #    self.stack_sizer.Add(ctrl, flag=wx.LEFT|wx.RIGHT|wx.EXPAND)
        #    ctrl.SetValue(str(i+1))
        #    self.t += [ctrl]
        #    del ctrl
        #self.AppendNode('abc','def',dict(a=2))
        #self.AppendNode('cgi','har',dict(a='frai'))
        #for i in range(1,50):
        #    self.AppendNode(str(i),str(i))

        #if self.perf:
        #    self.perf_load = Perf('Load')
        #    self.perf_load.start()
        #    self.perf_stack_adjust = Perf('_stack_adjust')
        #    self.perf_tree_adjust = Perf('_tree_adjust')

        if content != None:
            self.Load(content)

        #if self.perf:
        #    self.perf_load.end().result()
        #    self.perf_stack_adjust.result()
        #    self.perf_tree_adjust.result()
            
        #self._stack_adjust()
        self.tree.Bind(wx.EVT_TREE_ITEM_COLLAPSED, self.__tree_OnCollapse)
        self.tree.Bind(wx.EVT_TREE_ITEM_EXPANDED, self.__tree_OnExpand)
        self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.__tree_BeginLabelEdit)
        self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.__tree_EndLabelEdit)
        # show
        #self.SetDoubleBuffered(True)
        self.CenterOnScreen()
        self.Show()
        self.SetMinSize(tuple(map(lambda x: x*2/3, self.GetSize())))
        self.Bind(wx.EVT_SIZE, self.__OnResize, self)
        self.Bind(wx.EVT_PAINT, self.__OnRepaint, self)
        self.Bind(wx.EVT_CLOSE, self.__OnClose)
        #self._tree_adjust()
        self.stack.SetScrollRate(16, self.item_height)
        self.tree.Bind(wx.EVT_PAINT, self.__tree_OnScroll)
        self.stack.Bind(wx.EVT_PAINT, self.__stack_OnScroll)
        #self.stack.Bind(wx.EVT_SCROLLWIN, self.__stack_OnScroll)
        def stack_focus_release(e):
            self.SetFocus()
            self.tree.UnselectAll()
        self.stack.Bind(wx.EVT_LEFT_UP, stack_focus_release)
        class FileDropTarget(wx.FileDropTarget):
            def __init__(self, target, handler):
                wx.FileDropTarget.__init__(self)
                self.target = target
                self.handler = handler
            def OnDropFiles(self, x, y, filenames):
                self.handler(filenames)
        def onDropFiles(filenames):
            if len(filenames) != 1:
                wx.MessageBox('Single file is expected!', 'Error', wx.OK | wx.ICON_ERROR)
                return
            #if self.file_changed:
            #    dlg = wx.MessageDialog(self, 'You have unsaved changes. Do you want to discard them before opening new file?', 'Question', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
            if self._File_Close():
                self.Load(filenames[0])
        dt = FileDropTarget(self.splitter, onDropFiles)
        self.splitter.SetDropTarget(dt)
        #self.t[2].Hide()
        #self.stack.Layout()
        #self.t[2].Show()
        #self.stack.Layout()
        #self.t[12].SetBackgroundColour(self.white)
        self.splitter.SetDoubleBuffered(True)
        #self.stack.SetBackgroundColour((240,255,255,255))
        def tree_OnKey(e):
            keyCode = None
            try:
                keyCode = e.GetKeyCode()
            except:
                pass
            e.Skip()
            if keyCode == wx.WXK_RETURN:
                self.tree_selected_edit()
            if keyCode == 65 and e.ControlDown():
                self.tree_selected_edit(True)
        self.tree.Bind(wx.EVT_CHAR_HOOK, tree_OnKey)
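
The cStringIO use here turns a base64-encoded icon string embedded in the source into a file-like stream that wx.ImageFromStream can read, so no icon file has to ship alongside the program. A small round-trip sketch of the same idea without wx (the payload below is made up):

import base64
import cStringIO

png_bytes = '\x89PNG\r\n\x1a\n fake payload'    # pretend image data
icon_b64 = base64.b64encode(png_bytes)          # what would be embedded in the module

stream = cStringIO.StringIO(base64.b64decode(icon_b64))
# any consumer that accepts a file-like object can now read the bytes,
# e.g. wx.ImageFromStream(stream) in the example above
assert stream.read() == png_bytes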

Example 53

Project: ijd8 Source File: io_test.py
Function: test
	def test(self):
		def test_put():
			key = "test_%s" % r(9)
			params = "op=3"
			data = "hello bubby!"
			extra.check_crc = 2
			extra.crc32 = binascii.crc32(data) & 0xFFFFFFFF
			ret, err = io.put(policy.token(), key, data, extra)
			assert err is None
			assert ret['key'] == key

		def test_put_same_crc():
			key = "test_%s" % r(9)
			data = "hello bubby!"
			extra.check_crc = 2
			ret, err = io.put(policy.token(), key, data, extra)
			assert err is None
			assert ret['key'] == key

		def test_put_no_key():
			data = r(100)
			extra.check_crc = 0
			ret, err = io.put(policy.token(), key=None, data=data, extra=extra)
			assert err is None
			assert ret['hash'] == ret['key']

		def test_put_quote_key():
			data = r(100)
			key = 'a\\b\\c"你好' + r(9)
			ret, err = io.put(policy.token(), key, data)
			print err
			assert err is None
			assert ret['key'].encode('utf8') == key

			data = r(100)
			key = u'a\\b\\c"你好' + r(9)
			ret, err = io.put(policy.token(), key, data)
			assert err is None
			assert ret['key'] == key

		def test_put_unicode1():
			key = "test_%s" % r(9) + '你好'
			data = key
			ret, err = io.put(policy.token(), key, data, extra)
			assert err is None
			assert ret[u'key'].endswith(u'你好')

		def test_put_unicode2():
			key = "test_%s" % r(9) + '你好'
			data = key
			data = data.decode('utf8')
			ret, err = io.put(policy.token(), key, data)
			assert err is None
			assert ret[u'key'].endswith(u'你好')

		def test_put_unicode3():
			key = "test_%s" % r(9) + '你好'
			data = key
			key = key.decode('utf8')
			ret, err = io.put(policy.token(), key, data)
			assert err is None
			assert ret[u'key'].endswith(u'你好')

		def test_put_unicode4():
			key = "test_%s" % r(9) + '你好'
			data = key
			key = key.decode('utf8')
			data = data.decode('utf8')
			ret, err = io.put(policy.token(), key, data)
			assert err is None
			assert ret[u'key'].endswith(u'你好')

		def test_put_StringIO():
			key = "test_%s" % r(9)
			data = cStringIO.StringIO('hello buddy!')
			ret, err = io.put(policy.token(), key, data)
			assert err is None
			assert ret['key'] == key

		def test_put_urlopen():
			key = "test_%s" % r(9)
			data = urllib.urlopen('http://cheneya.qiniudn.com/hello_jpg')
			ret, err = io.put(policy.token(), key, data)
			assert err is None
			assert ret['key'] == key

		def test_put_no_length():
			class test_reader(object):
				def __init__(self):
					self.data = 'abc'
					self.pos = 0
				def read(self, n=None):
					if n is None or n < 0:
						newpos = len(self.data)
					else:
						newpos = min(self.pos+n, len(self.data))
					r = self.data[self.pos: newpos]
					self.pos = newpos
					return r
			key = "test_%s" % r(9)
			data = test_reader()

			extra.check_crc = 2
			extra.crc32 = binascii.crc32('abc') & 0xFFFFFFFF
			ret, err = io.put(policy.token(), key, data, extra)
			assert err is None
			assert ret['key'] == key

		test_put()
		test_put_same_crc()
		test_put_no_key()
		test_put_quote_key()
		test_put_unicode1()
		test_put_unicode2()
		test_put_unicode3()
		test_put_unicode4()
		test_put_StringIO()
		test_put_urlopen()
		test_put_no_length()
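
test_put_StringIO works because the uploader treats any object with a read() method as a data source, so an in-memory cStringIO buffer is interchangeable with an open file or a urllib.urlopen response. A hedged sketch of that duck-typing (put_data is a hypothetical helper, not the qiniu API):

import cStringIO

def put_data(data):
    # hypothetical uploader: accept either a byte string or a file-like object
    if hasattr(data, 'read'):
        payload = data.read()
    else:
        payload = data
    return len(payload)

assert put_data('hello buddy!') == 12
assert put_data(cStringIO.StringIO('hello buddy!')) == 12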

Example 54

Project: ldpush Source File: sshclient.py
Function: connect
def Connect(hostname, username, password=None, port=22, ssh_keys=(),
            timeout=TIMEOUT_DEFAULT):
  """Makes a paramiko SSH connection to a device.

  Args:
    hostname: A string, the hostname or IP address to connect to.
    username: A string, the username to use on the connection.
    password: A string, the password to use on the connection.
    port: An int, the port number to connect to.
    ssh_keys: A tuple of strings, SSH private keys (optional; may be None).
    timeout: A float, the number of seconds before a connection times out.

  Returns:
    A paramiko.SSHClient() instance
  """

  options = SshOptions()
  hostname, port, username = options.Lookup(hostname, port, username)
  ssh_client = None

  def RaiseError(e, msg):
    """Raises an exception, disconnecting the SSH client.

    Args:
      e: An Exception.
      msg: An object, exception arguments.
    """
    raise e(msg)

  try:
    ssh_client = paramiko.SSHClient()
    # Always auto-add remote SSH host keys.
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.load_system_host_keys()
    # Connect using paramiko with a timeout parameter (requires paramiko 1.7)
    if ssh_keys:
      pkeys = []
      for key in ssh_keys:
        logging.debug('Using SSH private key for device authentication.')
        # Use a virtual temporary file to store the key.
        ssh_key_fileobj = cStringIO.StringIO()
        ssh_key_fileobj.write(key)
        ssh_key_fileobj.reset()
        try:
          pkeys.append(paramiko.DSSKey(file_obj=ssh_key_fileobj))
          logging.debug('Using SSH DSA key for %r', hostname)
        except (IndexError, paramiko.SSHException) as e:
          if (isinstance(e, IndexError) or
              'not a valid DSA private key file' in str(e)):
            ssh_key_fileobj.reset()
            try:
              logging.debug('Using SSH RSA key for %r', hostname)
              pkeys.append(paramiko.RSAKey(file_obj=ssh_key_fileobj))
            except (IndexError, paramiko.SSHException) as e:
              raise exceptions.AuthenticationError(str(e))
          else:
            raise exceptions.ConnectError('SSHException: %s' % str(e))
    else:
      logging.debug('Using password for %r', hostname)
      pkeys = [None]
    for pkey in pkeys:
      saved_exception = None
      try:
        ssh_client.connect(hostname=hostname,
                           port=port,
                           username=username,
                           password=password,
                           pkey=pkey,
                           timeout=timeout,
                           allow_agent=FLAGS.use_ssh_agent,
                           look_for_keys=False)
        break
      except (paramiko.AuthenticationException, paramiko.SSHException) as e:
        saved_exception = e
    if saved_exception is not None:
      raise saved_exception  # pylint: disable=raising-bad-type
    transport = ssh_client.get_transport()
    # Sometimes we have to authenticate a second time, eg. on Force10
    # we always fail the first authentication (if we try pkey + pass,
    # the pass succeeds; but if we do pass only, we have to do it
    # twice).  connect() above will have authenticated once.
    if not transport.is_authenticated():
      if pkeys != [None]:
        for pkey in pkeys:
          try:
            transport.auth_publickey(username, pkey)
            break
          except paramiko.SSHException:
            pass
    if not transport.is_authenticated():
      if password is not None:
        try:
          transport.auth_password(username, password)
        except paramiko.SSHException:
          pass
    if not transport.is_authenticated():
      msg = 'Not authenticated after two attempts on %r' % hostname
      RaiseError(exceptions.ConnectError, msg)
  except EOFError:
    msg = 'EOFError connecting to: %r' % hostname
    RaiseError(exceptions.ConnectError, msg)
  except paramiko.AuthenticationException as e:
    msg = 'Authentication error connecting to %s: %s' % (hostname, str(e))
    RaiseError(exceptions.AuthenticationError, msg)
  except paramiko.SSHException as e:
    msg = 'SSHException connecting to %s: %s' % (hostname, str(e))
    RaiseError(exceptions.ConnectError, msg)
  except socket.timeout as e:
    msg = 'Timed-out while connecting to %s: %s' % (hostname, str(e))
    RaiseError(exceptions.ConnectError, msg)
  except socket.error as e:
    msg = 'Socket error connecting to %r: %s %s' % (hostname, e.__class__, e)
    RaiseError(exceptions.ConnectError, msg)

  return ssh_client
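
The SSH private key never touches disk: it is written into a cStringIO buffer, rewound with reset() (equivalent to seek(0)), and passed to paramiko as file_obj; the same buffer is rewound again before retrying with a different key class. A minimal sketch of that rewind-and-reuse pattern (parse_key stands in for paramiko.DSSKey / paramiko.RSAKey):

import cStringIO

def parse_key(file_obj):
    # stand-in for paramiko.DSSKey(file_obj=...) or RSAKey(file_obj=...)
    return file_obj.read().strip()

key_text = '-----BEGIN FAKE KEY-----\nabc\n-----END FAKE KEY-----\n'
buf = cStringIO.StringIO()
buf.write(key_text)
buf.reset()                      # rewind so the next reader starts at byte 0
first_try = parse_key(buf)
buf.reset()                      # rewind again before handing it to another parser
second_try = parse_key(buf)
assert first_try == second_try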

Example 55

Project: OmicsIntegrator Source File: nib.py
def get_nib_seq_batch(nib,queries,mask=NOMASK) :
    '''Extract subsequence from .nib file like Jim Kent's nibFrag utility.

    Extract the nucleotide substrings defined by the closed intervals in *queries*
    from the sequence found in *nib*.  *nib* argument is either a filename or
    an open file object.  Entries in *queries* are 3-tuples defining (start,end,strand)
    sequence coordinates. Sequences are returned in order in a list as
    strings.  *mask* parameter has the following possible values:

    chipsequtil.nib.NOMASK -- masked positions are not indicated (default)
    chipsequtil.nib.MASK -- masked positions are capitalized, normal bases lower case
    chipsequtil.nib.HARDMASK -- masked positions are replaced with Ns
    '''

    nib_fn, nib_f = _nib_fd(nib)

    nbases = validate_nib_file(nib_f)

    # rest of file is sequence, with each nibble (4 bits) being a base as \
    # follows (from http://genome.ucsc.edu/FAQ/FAQformat.html#format8) :
    #
    # 0 - T
    # 1 - C
    # 2 - A
    # 3 - G
    # 4 - N
    #
    # The most significant bit in a nibble is set if the base is masked
    trans_nuc = 'tcagn'

    # start translating the nibbles into nucleotides
    def trans_nib(nib) :
        nuc = trans_nuc[nib&7]
        mask_bit = nib & 8
        if mask in [MASK,HARDMASK] and mask_bit == 0 :
            return nuc.upper()
        if mask == HARDMASK and mask_bit != 0 :
            return 'N'
        return nuc

    headers = [] # stores headers
    seqs = [] # stores sequences

    # sort the coords so we can walk most efficiently through the file
    queries.sort()

    for start, end, strand in queries :

        if start < 0 :
            raise NibException('Received negative start coordinate, this may '\
                               'indicate a region on mitochondrial DNA that '\
                               'spans reference sequence start and end.  This '\
                               'utility cannot handle these cases, aborting. '\
                               'Requested interval: %s (%d,%d)'%(nib_fn,start,end))

        start, end = map(int,(start,end))

        # end == -1 means caller wants entire sequence
        if end == -1  :
            end = nbases

        if any([nbases < c for c in [start,end]]) :
            raise NibException(('Requested slice (%(start)d,%(end)d) not compatible ' \
            'with sequence of length %(nbases)d in %(nib_fn)s, aborting\n\nnibFrag '\
            'style error: nib read past end of file (%(start)d %(end)d) in file: '\
            '%(nib_fn)s')%{'start':start,'end':end,'nbases':nbases,'nib_fn':nib_fn})

        # figure out how many bytes to read through
        start_byte,rem_byte = start/2,start%2

        # calculate where we need to move to in the file from the current location
        # + 8 is from the 2*4 bytes header info in the .nib format
        byte_offset = start_byte-nib_f.tell() + 8
        nib_f.seek(byte_offset,1) # seek forward to the beginning byte from current location
        seq_bytes,seq_rem_byte = int(math.ceil((end-start+rem_byte)/2.)),(end+1)%2
        seq_bytes = nib_f.read(seq_bytes+seq_rem_byte)

        # start translating the bytes
        seq = StringIO() # we use StringIO because it is more efficient than concatenating strings
        for c in seq_bytes :
            c_byte = struct.unpack('=b',c)[0]

            # higher nibble
            c_nib = (c_byte & (15<<4))>>4
            nuc = trans_nib(c_nib)
            seq.write(nuc)

            # lower nibble
            c_nib = int(c_byte) & 15
            nuc = trans_nib(c_nib)
            seq.write(nuc)

        # final nucleotide sequence
        seq_str = seq.getvalue()

        # if we're reading to the end, don't clip anything
        if end != nbases :
            # if the coordinate requested was not on a byte boundary, adjust
            if rem_byte == 1 :
                seq_str = seq_str[1:]
            if seq_rem_byte == 1 :
                seq_str = seq_str[:-1]

            # nibFrag apparently uses zero-based indexing, clip off one base
            seq_str = seq_str[:-1]
        seq.close()

        # adjust strand
        if strand == '-' :
            seq_str = reverse_complement(seq_str)
        seqs.append(seq_str)

    return seqs
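
Here StringIO is used purely as a string builder: each decoded nucleotide is written into the buffer and the final sequence is taken with getvalue(), which is generally cheaper in Python 2 than building the sequence with repeated string concatenation. A small sketch of that pattern using the same nibble-to-base table:

from cStringIO import StringIO

def decode(nibbles):
    trans_nuc = 'tcagn'
    seq = StringIO()             # cheaper than accumulating with str +=
    for nib in nibbles:
        seq.write(trans_nuc[nib & 7])
    out = seq.getvalue()
    seq.close()
    return out

assert decode([2, 1, 3, 0, 4]) == 'acgtn'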

Example 56

Project: changes Source File: build_index.py
    def post(self):
        """
        Create a new commit or diff build. The API roughly goes like this:

        1. Identify the project(s) to build for. This can be done by specifying
        ``project``, ``repository``, or ``repository[callsign]``. If a repository is
        specified somehow, then all projects for that repository are considered
        for building.

        2. Using the ``sha``, find the appropriate revision object. This may
        involve updating the repo.

        3. If ``patch`` is given, then apply the patch and mark this as a diff build.
        Otherwise, this is a commit build.

        4. If ``snapshot_id`` is given, verify that the snapshot can be used by all
        projects.

        5. If provided, apply project_whitelist, filtering out projects not in
        this whitelist.

        6. Based on the flag ``apply_project_files_trigger`` (see comment on the argument
        itself for default values), decide whether or not to filter out projects
        by file blacklist and whitelist.

        7. Attach metadata and create/ensure existence of a build for each project,
        depending on the flag ``ensure_only``.

        NOTE: In ensure-only mode, the collection_ids of the returned builds are
        not necessarily identical, as we give new builds new collection IDs
        and preserve the existing builds' collection IDs.

        NOTE: If ``patch`` is specified ``sha`` is assumed to be the original
        base revision to apply the patch.

        Not relevant until we fix TODO: ``sha`` is **not** guaranteed to be the rev
        used to apply the patch. See ``find_green_parent_sha`` for the logic of
        identifying the correct revision.
        """
        args = self.parser.parse_args()

        if args.patch_file and args.ensure_only:
            return error("Ensure-only mode does not work with a diff build yet.",
                         problems=["patch", "ensure_only"])

        if not (args.project or args.repository or args['repository[phabricator.callsign]']):
            return error("Project or repository must be specified",
                         problems=["project", "repository", "repository[phabricator.callsign]"])

        # read arguments
        if args.patch_data:
            try:
                patch_data = json.loads(args.patch_data)
            except Exception:
                return error("Invalid patch data (must be JSON dict)",
                             problems=["patch[data]"])

            if not isinstance(patch_data, dict):
                return error("Invalid patch data (must be JSON dict)",
                             problems=["patch[data]"])
        else:
            patch_data = None

        # 1. identify project(s)
        projects, repository = try_get_projects_and_repository(args)

        if not projects:
            return error("Unable to find project(s).")

        # read arguments
        label = args.label
        author = args.author
        message = args.message
        tag = args.tag
        snapshot_id = args.snapshot_id
        no_snapshot = args.no_snapshot

        cause = Cause[args.cause]

        if no_snapshot and snapshot_id:
            return error("Cannot specify snapshot with no_snapshot option")

        if not tag and args.patch_file:
            tag = 'patch'

        # 2. validate snapshot
        if snapshot_id:
            snapshot = Snapshot.query.get(snapshot_id)
            if not snapshot:
                return error("Unable to find snapshot.")
            if snapshot.status != SnapshotStatus.active:
                return error("Snapshot is in an invalid state: %s" % snapshot.status)
            for project in projects:
                plans = get_build_plans(project)
                for plan in plans:
                    plan_options = plan.get_item_options()
                    allow_snapshot = '1' == plan_options.get('snapshot.allow', '1') or plan.snapshot_plan
                    if allow_snapshot and not SnapshotImage.get(plan, snapshot_id):
                        # We want to create a build using a specific snapshot but no image
                        # was found for this plan so fail.
                        return error("Snapshot cannot be applied to %s's %s" % (project.slug, plan.label))

        # 3. find revision
        try:
            revision = identify_revision(repository, args.sha)
        except MissingRevision:
            # if the default fails, we absolutely can't continue and the
            # client should send a valid revision
            return error("Unable to find commit %s in %s." % (args.sha, repository.url),
                         problems=['sha', 'repository'])

        # get default values for arguments
        if revision:
            if not author:
                author = revision.author
            if not label:
                label = revision.subject
            # only default the message if it's absolutely not set
            if message is None:
                message = revision.message
            sha = revision.sha
        else:
            sha = args.sha

        if not args.target:
            target = sha[:12]
        else:
            target = args.target[:128]

        if not label:
            if message:
                label = message.splitlines()[0]
            if not label:
                label = 'A homeless build'
        label = label[:128]

        # 4. Check for patch
        if args.patch_file:
            fp = StringIO()
            for line in args.patch_file:
                fp.write(line)
            patch_file = fp
        else:
            patch_file = None

        if patch_file:
            patch = Patch(
                repository=repository,
                parent_revision_sha=sha,
                diff=patch_file.getvalue(),
            )
            db.session.add(patch)
        else:
            patch = None

        project_options = ProjectOptionsHelper.get_options(projects, ['build.file-whitelist'])

        # mark as commit or diff build
        if not patch:
            is_commit_build = True
        else:
            is_commit_build = False

        apply_project_files_trigger = args.apply_project_files_trigger
        if apply_project_files_trigger is None:
            apply_project_files_trigger = args.apply_file_whitelist
        if apply_project_files_trigger is None:
            if is_commit_build:
                apply_project_files_trigger = False
            else:
                apply_project_files_trigger = True

        if apply_project_files_trigger:
            if patch:
                diff_parser = DiffParser(patch.diff)
                files_changed = diff_parser.get_changed_files()
            elif revision:
                try:
                    files_changed = _get_revision_changed_files(repository, revision)
                except MissingRevision:
                    return error("Unable to find commit %s in %s." % (args.sha, repository.url),
                                 problems=['sha', 'repository'])
            else:
                # the only way that revision can be null is if this repo does not have a vcs backend
                logging.warning('Revision and patch are both None for sha %s. This is because the repo %s does not have a VCS backend.', sha, repository.url)
                files_changed = None
        else:
            # we won't be applying file whitelist, so there is no need to get the list of changed files.
            files_changed = None

        collection_id = uuid.uuid4()

        builds = []
        for project in projects:
            plan_list = get_build_plans(project)
            if not plan_list:
                logging.warning('No plans defined for project %s', project.slug)
                continue
            # 5. apply project whitelist as appropriate
            if args.project_whitelist is not None and project.slug not in args.project_whitelist:
                logging.info('Project %s is not in the supplied whitelist', project.slug)
                continue
            forced_sha = sha
            # TODO(dcramer): find_green_parent_sha needs to take branch
            # into account
            # if patch_file:
            #     forced_sha = find_green_parent_sha(
            #         project=project,
            #         sha=sha,
            #     )

            # 6. apply file whitelist as appropriate
            diff = None
            if patch is not None:
                diff = patch.diff
            if (
                apply_project_files_trigger and
                files_changed is not None and
                not files_changed_should_trigger_project(
                    files_changed, project, project_options[project.id], sha, diff)
            ):
                logging.info('Changed files do not trigger build for project %s', project.slug)
                continue
            # 7. create/ensure build
            build_message = None
            selective_testing_policy = SelectiveTestingPolicy.disabled
            if args.selective_testing and project_lib.contains_active_autogenerated_plan(project):
                if is_commit_build:
                    selective_testing_policy, reasons = get_selective_testing_policy(project, sha, diff)
                    if reasons:
                        if selective_testing_policy is SelectiveTestingPolicy.disabled:
                            reasons = ["Selective testing was requested but not done because:"] + ['    ' + m for m in reasons]
                        build_message = '\n'.join(reasons)
                else:
                    # NOTE: for diff builds, it makes sense to just do selective testing,
                    # since it will never become a parent build and will never be used to
                    # calculate revision results.
                    selective_testing_policy = SelectiveTestingPolicy.enabled
            if args.ensure_only:
                potentials = list(Build.query.filter(
                    Build.project_id == project.id,
                    Build.source.has(revision_sha=sha, patch=patch),
                ).order_by(
                    Build.date_created.desc()  # newest first
                ).limit(1))
                if len(potentials) == 0:
                    builds.append(create_build(
                        project=project,
                        collection_id=collection_id,
                        sha=forced_sha,
                        target=target,
                        label=label,
                        message=message,
                        author=author,
                        patch=patch,
                        source_data=patch_data,
                        tag=tag,
                        cause=cause,
                        snapshot_id=snapshot_id,
                        no_snapshot=no_snapshot,
                        selective_testing_policy=selective_testing_policy,
                    ))
                else:
                    builds.append(potentials[0])
            else:
                builds.append(create_build(
                    project=project,
                    collection_id=collection_id,
                    sha=forced_sha,
                    target=target,
                    label=label,
                    message=message,
                    author=author,
                    patch=patch,
                    source_data=patch_data,
                    tag=tag,
                    cause=cause,
                    snapshot_id=snapshot_id,
                    no_snapshot=no_snapshot,
                    selective_testing_policy=selective_testing_policy,
                ))

            if build_message:
                message = BuildMessage(
                    build=builds[-1],
                    text=build_message,
                )
                db.session.add(message)
                db.session.commit()

        return self.respond(builds)
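
The cStringIO use is in step 4: the uploaded patch file is consumed line by line into a StringIO buffer so the complete diff text can later be pulled out once with getvalue() and stored on the Patch row. A minimal sketch of that buffering step (patch_file here is just any iterable of lines, not the real upload object):

from cStringIO import StringIO

patch_file = ['--- a/foo.py\n', '+++ b/foo.py\n', '@@ -1 +1 @@\n', '-x = 1\n', '+x = 2\n']

fp = StringIO()
for line in patch_file:
    fp.write(line)               # accumulate the uploaded diff in memory

diff_text = fp.getvalue()        # what would be stored as Patch.diff
assert diff_text.startswith('--- a/foo.py')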

Example 57

Project: hitch Source File: testing.py
    @contextlib.contextmanager
    def isolation(self, input=None, env=None, color=False):
        """A context manager that sets up the isolation for invoking of a
        command line tool.  This sets up stdin with the given input data
        and `os.environ` with the overrides from the given dictionary.
        This also rebinds some internals in Click to be mocked (like the
        prompt functionality).

        This is automatically done in the :meth:`invoke` method.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param input: the input stream to put into sys.stdin.
        :param env: the environment overrides as dictionary.
        :param color: whether the output should contain color codes. The
                      application can still override this explicitly.
        """
        input = make_input_stream(input, self.charset)

        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr

        env = self.make_env(env)

        if PY2:
            sys.stdout = sys.stderr = bytes_output = StringIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
        else:
            bytes_output = io.BytesIO()
            if self.echo_stdin:
                input = EchoingStdin(input, bytes_output)
            input = io.TextIOWrapper(input, encoding=self.charset)
            sys.stdout = sys.stderr = io.TextIOWrapper(
                bytes_output, encoding=self.charset)

        sys.stdin = input

        def visible_input(prompt=None):
            sys.stdout.write(prompt or '')
            val = input.readline().rstrip('\r\n')
            sys.stdout.write(val + '\n')
            sys.stdout.flush()
            return val

        def hidden_input(prompt=None):
            sys.stdout.write((prompt or '') + '\n')
            sys.stdout.flush()
            return input.readline().rstrip('\r\n')

        def _getchar(echo):
            char = sys.stdin.read(1)
            if echo:
                sys.stdout.write(char)
                sys.stdout.flush()
            return char

        default_color = color
        def should_strip_ansi(stream=None, color=None):
            if color is None:
                return not default_color
            return not color

        old_visible_prompt_func = clickpkg.termui.visible_prompt_func
        old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
        old__getchar_func = clickpkg.termui._getchar
        old_should_strip_ansi = clickpkg.utils.should_strip_ansi
        clickpkg.termui.visible_prompt_func = visible_input
        clickpkg.termui.hidden_prompt_func = hidden_input
        clickpkg.termui._getchar = _getchar
        clickpkg.utils.should_strip_ansi = should_strip_ansi

        old_env = {}
        try:
            for key, value in iteritems(env):
                old_env[key] = os.environ.get(value)
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            yield bytes_output
        finally:
            for key, value in iteritems(old_env):
                if value is None:
                    try:
                        del os.environ[key]
                    except Exception:
                        pass
                else:
                    os.environ[key] = value
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            sys.stdin = old_stdin
            clickpkg.termui.visible_prompt_func = old_visible_prompt_func
            clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
            clickpkg.termui._getchar = old__getchar_func
            clickpkg.utils.should_strip_ansi = old_should_strip_ansi
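
On Python 2 the isolation context simply rebinds sys.stdout and sys.stderr to a single cStringIO buffer (byte strings only, hence the PY2 branch) and restores the originals in the finally block. A stripped-down sketch of that capture, using only the standard library (capture_stdout is an illustrative helper, not part of Click):

import sys
from cStringIO import StringIO

def capture_stdout(func):
    # run func() with sys.stdout redirected to an in-memory buffer
    old_stdout = sys.stdout
    sys.stdout = buf = StringIO()
    try:
        func()
    finally:
        sys.stdout = old_stdout  # always restore, as the finally block above does
    return buf.getvalue()

output = capture_stdout(lambda: sys.stdout.write('captured\n'))
assert output == 'captured\n'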

Example 58

Project: datapusher Source File: jobs.py
@job.async
def push_to_datastore(task_id, input, dry_run=False):
    '''Download and parse a resource push its data into CKAN's DataStore.

    An asynchronous job that gets a resource from CKAN, downloads the
    resource's data file and, if the data file has changed since last time,
    parses the data and posts it into CKAN's DataStore.

    :param dry_run: Fetch and parse the data file but don't actually post the
        data to the DataStore, instead return the data headers and rows that
        would have been posted.
    :type dry_run: boolean

    '''
    handler = util.StoringHandler(task_id, input)
    logger = logging.getLogger(task_id)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    validate_input(input)

    data = input['metadata']

    ckan_url = data['ckan_url']
    resource_id = data['resource_id']
    api_key = input.get('api_key')

    try:
        resource = get_resource(resource_id, ckan_url, api_key)
    except util.JobError, e:
        # try again in 5 seconds just in case CKAN is slow at adding the resource
        time.sleep(5)
        resource = get_resource(resource_id, ckan_url, api_key)

    # fetch the resource data
    logger.info('Fetching from: {0}'.format(resource.get('url')))
    try:
        request = urllib2.Request(resource.get('url'))

        if resource.get('url_type') == 'upload':
            # If this is an uploaded file to CKAN, authenticate the request,
            # otherwise we won't get the file from private resources
            request.add_header('Authorization', api_key)

        response = urllib2.urlopen(request, timeout=DOWNLOAD_TIMEOUT)
    except urllib2.HTTPError as e:
        raise HTTPError(
            "DataPusher received a bad HTTP response when trying to download "
            "the data file", status_code=e.code,
            request_url=resource.get('url'), response=e.read())
    except urllib2.URLError as e:
        if isinstance(e.reason, socket.timeout):
            raise util.JobError('Connection timed out after %ss' %
                                DOWNLOAD_TIMEOUT)
        else:
            raise HTTPError(
                message=str(e.reason), status_code=None,
                request_url=resource.get('url'), response=None)

    cl = response.info().getheader('content-length')
    if cl and int(cl) > MAX_CONTENT_LENGTH:
        raise util.JobError(
            'Resource too large to download: {cl} > max ({max_cl}).'.format(
            cl=cl, max_cl=MAX_CONTENT_LENGTH))

    ct = response.info().getheader('content-type').split(';', 1)[0]

    f = cStringIO.StringIO(response.read())
    file_hash = hashlib.md5(f.read()).hexdigest()
    f.seek(0)

    if (resource.get('hash') == file_hash
            and not data.get('ignore_hash')):
        logger.info("The file hash hasn't changed: {hash}.".format(
            hash=file_hash))
        return

    resource['hash'] = file_hash

    try:
        table_set = messytables.any_tableset(f, mimetype=ct, extension=ct)
    except messytables.ReadError as e:
        ## try again with format
        f.seek(0)
        try:
            format = resource.get('format')
            table_set = messytables.any_tableset(f, mimetype=format, extension=format)
        except:
            raise util.JobError(e)

    row_set = table_set.tables.pop()
    offset, headers = messytables.headers_guess(row_set.sample)

    # Some headers might have been converted from strings to floats and such.
    headers = [unicode(header) for header in headers]

    row_set.register_processor(messytables.headers_processor(headers))
    row_set.register_processor(messytables.offset_processor(offset + 1))
    types = messytables.type_guess(row_set.sample, types=TYPES, strict=True)
    row_set.register_processor(messytables.types_processor(types))

    headers = [header.strip() for header in headers if header.strip()]
    headers_set = set(headers)

    def row_iterator():
        for row in row_set:
            data_row = {}
            for index, cell in enumerate(row):
                column_name = cell.column.strip()
                if column_name not in headers_set:
                    continue
                data_row[column_name] = cell.value
            yield data_row
    result = row_iterator()

    '''
    Delete existing datastore resource before proceeding. Otherwise
    'datastore_create' will append to the existing datastore. And if
    the fields have significantly changed, it may also fail.
    '''
    if datastore_resource_exists(resource_id, api_key, ckan_url):
        logger.info('Deleting "{res_id}" from datastore.'.format(
            res_id=resource_id))
        delete_datastore_resource(resource_id, api_key, ckan_url)

    headers_dicts = [dict(id=field[0], type=TYPE_MAPPING[str(field[1])])
                     for field in zip(headers, types)]

    logger.info('Determined headers and types: {headers}'.format(
        headers=headers_dicts))

    if dry_run:
        return headers_dicts, result

    count = 0
    for i, records in enumerate(chunky(result, 250)):
        count += len(records)
        logger.info('Saving chunk {number}'.format(number=i))
        send_resource_to_datastore(resource, headers_dicts,
                                   records, api_key, ckan_url)

    logger.info('Successfully pushed {n} entries to "{res_id}".'.format(
        n=count, res_id=resource_id))

    if data.get('set_url_type', False):
        update_resource(resource, api_key, ckan_url)
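
The downloaded body is copied into a cStringIO buffer so it can be consumed twice: once by hashlib.md5 to decide whether the file changed since the last push, and again (after seek(0)) by messytables for parsing. A minimal sketch of that hash-then-rewind pattern, with a plain string standing in for response.read():

import hashlib
import cStringIO

body = 'id,name\n1,alice\n2,bob\n'               # stand-in for response.read()
f = cStringIO.StringIO(body)

file_hash = hashlib.md5(f.read()).hexdigest()    # first pass: fingerprint the bytes
f.seek(0)                                        # rewind so the parser sees the whole file
assert f.readline() == 'id,name\n'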

Example 59

Project: opentuner Source File: halidetuner.py
  def cfg_to_schedule(self, cfg):
    """
    Produce a Halide schedule from a configuration dictionary
    """
    o = StringIO()
    cnt = 0
    temp_vars = list()
    schedule = ComputeAtStoreAtParser(cfg['schedule'], self.post_dominators)
    compute_at = schedule.compute_at
    store_at = schedule.store_at

    # build list of all used variable names
    var_names = dict()
    var_name_order = dict()
    for func in self.settings['functions']:
      name = func['name']
      compute_order = cfg['{0}_compute_order'.format(name)]
      for var in func['vars']:
        var_names[(name, var, 0)] = var
        for nesting in xrange(1, self.args.nesting):
          split_factor = cfg.get('{0}_splitfactor_{1}_{2}'.format(
            name, nesting, var), 0)
          if split_factor > 1 and (name, var, nesting - 1) in var_names:
            var_names[(name, var, nesting)] = '_{var}{cnt}'.format(
              func=name, var=var, nesting=nesting, cnt=cnt)
            temp_vars.append(var_names[(name, var, nesting)])
          cnt += 1
      var_name_order[name] = [var_names[(name, v, n)] for v, n in compute_order
                              if (name, v, n) in var_names]

    # set a schedule for each function
    for func in self.settings['functions']:
      name = func['name']
      inner_var_name = var_name_order[name][-1] # innermost variable in the reordered list for this func
      vectorize = cfg['{0}_vectorize'.format(name)]
      if self.args.enable_unroll:
        unroll = cfg['{0}_unroll'.format(name)]
      else:
        unroll = 1

      print >> o, 'Halide::Func(funcs["%s"])' % name

      for var in func['vars']:
        # handle all splits
        for nesting in xrange(1, self.args.nesting):
          split_factor = cfg.get('{0}_splitfactor_{1}_{2}'.format(
            name, nesting, var), 0)
          if split_factor <= 1:
            break

          for nesting2 in xrange(nesting + 1, self.args.nesting):
            split_factor2 = cfg.get('{0}_splitfactor_{1}_{2}'.format(
              name, nesting2, var), 0)
            if split_factor2 <= 1:
              break
            split_factor *= split_factor2
          var_name = var_names[(name, var, nesting)]
          last_var_name = var_names[(name, var, nesting - 1)]
          
          # apply unroll, vectorize factors to all surrounding splits iff we're the innermost var
          if var_name == inner_var_name:
            split_factor *= unroll
            split_factor *= vectorize

          print >> o, '.split({0}, {0}, {1}, {2})'.format(
            last_var_name, var_name, split_factor)

      # drop unused variables and truncate (Halide supports only 10 reorders)
      if len(var_name_order[name]) > 1:
        print >> o, '.reorder({0})'.format(
            ', '.join(reversed(var_name_order[name][:10])))

      # reorder_storage
      store_order_enabled = cfg['{0}_store_order_enabled'.format(name)]
      if store_order_enabled or not self.args.gated_store_reorder:
        store_order = cfg['{0}_store_order'.format(name)]
        if len(store_order) > 1:
          print >> o, '.reorder_storage({0})'.format(', '.join(store_order))

      if unroll > 1:
        # apply unrolling to innermost var
        print >> o, '.unroll({0}, {1})'.format(
          var_name_order[name][-1], unroll * vectorize)

      if vectorize > 1:
        # apply vectorization to innermost var
        print >> o, '.vectorize({0}, {1})'.format(
          var_name_order[name][-1], vectorize)
      
      # compute_at(not root)
      if (compute_at[name] is not None and
              len(var_name_order[compute_at[name][0]]) >= compute_at[name][1]):
        at_func, at_idx = compute_at[name]
        try:
          at_var = var_name_order[at_func][-at_idx]
          print >> o, '.compute_at(Halide::Func(funcs["{0}"]), {1})'.format(at_func, at_var)
          if not self.args.enable_store_at:
            pass  # disabled
          elif store_at[name] is None:
            print >> o, '.store_root()'
          elif store_at[name] != compute_at[name]:
            at_func, at_idx = store_at[name]
            at_var = var_name_order[at_func][-at_idx]
            print >> o, '.store_at(Halide::Func(funcs["{0}"]), {1})'.format(at_func, at_var)
        except IndexError:
          # this is expected when at_idx is too large
          # TODO: implement a cleaner fix
          pass
      # compute_root
      else:
        parallel = cfg['{0}_parallel'.format(name)]
        if parallel:
          # only apply parallelism to outermost var of root funcs
          print >> o, '.parallel({0})'.format(var_name_order[name][0])
        print >> o, '.compute_root()'

      print >> o, ';'

    if temp_vars:
      return 'Halide::Var {0};\n{1}'.format(
        ', '.join(temp_vars), o.getvalue())
    else:
      return o.getvalue()
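
A minimal, self-contained sketch (editorial, not part of the project above) of the pattern this example relies on: cStringIO.StringIO acts as the target of Python 2's "print >> file" redirection, collecting generated source text line by line, and getvalue() returns the whole thing at the end. The function name and emitted strings below are invented for illustration.

import cStringIO

def build_schedule_snippet(func_name, vectorize=4):
    # Collect generated C++-like text in an in-memory buffer.
    o = cStringIO.StringIO()
    print >> o, 'Halide::Func(funcs["%s"])' % func_name
    if vectorize > 1:
        print >> o, '.vectorize(x, %d)' % vectorize
    print >> o, ';'
    return o.getvalue()          # the full generated fragment as one str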

Example 60

Project: odoo Source File: custom.py
    def _create_lines(self, cr, uid, ids, report, fields, results, context):
        pool = openerp.registry(cr.dbname)
        pdf_string = cStringIO.StringIO()
        can = canvas.init(fname=pdf_string, format='pdf')
        
        can.show(80,380,'/16/H'+report['title'])
        
        ar = area.T(size=(350,350),
        #x_coord = category_coord.T(['2005-09-01','2005-10-22'],0),
        x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
        y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))
        
        process_date = {
            'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
            'M': lambda x: x.split('-')[1],
            'Y': lambda x: x.split('-')[0]
        }

        order_date = {
            'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
            'M': lambda x: x,
            'Y': lambda x: x
        }

        abscissa = []
        
        idx = 0 
        date_idx = None
        fct = {}
        for f in fields:
            field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0])
            if field_id:
                type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype'])
                if type[0]['ttype'] == 'date':
                    date_idx = idx
                    fct[idx] = process_date[report['frequency']] 
                else:
                    fct[idx] = lambda x : x
            else:
                fct[idx] = lambda x : x
            idx+=1

        # plots are usually displayed year by year
        # so we do so if the first field is a date
        data_by_year = {}
        if date_idx is not None:
            for r in results:
                key = process_date['Y'](r[date_idx])
                if key not in data_by_year:
                    data_by_year[key] = []
                for i in range(len(r)):
                    r[i] = fct[i](r[i])
                data_by_year[key].append(r)
        else:
            data_by_year[''] = results

        idx0 = 0
        nb_bar = len(data_by_year)*(len(fields)-1)
        colors = map(lambda x:line_style.T(color=x), misc.choice_colors(nb_bar))
        abscissa = {}
        for line in data_by_year.keys():
            fields_bar = []
            # sum data and save it in a list, one item per field
            for d in data_by_year[line]:
                for idx in range(len(fields)-1):
                    fields_bar.append({})
                    if d[0] in fields_bar[idx]:
                        fields_bar[idx][d[0]] += d[idx+1]
                    else:
                        fields_bar[idx][d[0]] = d[idx+1]
            for idx  in range(len(fields)-1):
                data = {}
                for k in fields_bar[idx].keys():
                    if k in data:
                        data[k] += fields_bar[idx][k]
                    else:
                        data[k] = fields_bar[idx][k]
                data_cum = []
                prev = 0.0
                keys = data.keys()
                keys.sort()
                # cumulate if necessary
                for k in keys:
                    data_cum.append([k, float(data[k])+float(prev)])
                    if fields[idx+1]['cumulate']:
                        prev += data[k]
                idx0 = 0
                plot = line_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cum, line_style=colors[idx0*(len(fields)-1)+idx])
                ar.add_plot(plot)
                abscissa.update(fields_bar[idx])
                idx0 += 1
        
        abscissa = map(lambda x : [x, None], abscissa)
        ar.x_coord = category_coord.T(abscissa,0)
        ar.draw(can)

        can.close()
        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()
        pdf_string.close()
        return True
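
A minimal sketch (editorial; the writer function is hypothetical) of the idiom above: code that normally renders to a file on disk, such as the pychart canvas, is pointed at a cStringIO buffer instead, and the finished bytes are pulled out with getvalue() before the buffer is closed.

import cStringIO

def render_report(out):
    # Stand-in for a library writer that only needs a file-like object.
    out.write('%PDF-1.4\n...')

pdf_string = cStringIO.StringIO()
render_report(pdf_string)          # the writer never knows this is not a real file
pdf_bytes = pdf_string.getvalue()  # the rendered document as a str
pdf_string.close()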

Example 61

Project: Flask Source File: pyopenssl.py
    def readline(self, size=-1):
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith('\n') or len(bline) == size:
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != "\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except OpenSSL.SSL.WantReadError:
                        self._wait_for_sock()
                        continue
                    break
                return "".join(buffers)

            buf.seek(0, 2)  # seek end
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except OpenSSL.SSL.WantReadError:
                    self._wait_for_sock()
                    continue
                if not data:
                    break
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    buf.write(data[:nl])
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2)  # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except OpenSSL.SSL.WantReadError:
                    self._wait_for_sock()
                    continue
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut.  Avoid data copy through buf when returning
                        # a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut.  Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()
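
A minimal sketch (editorial, not from the project above) of the buffer-reset idiom readline() uses: unread bytes live in a StringIO, the caller drains what it needs after seek(0), and a fresh StringIO replaces the old one holding only the leftover data.

from cStringIO import StringIO

rbuf = StringIO()
rbuf.write('first line\nleft over bytes')

rbuf.seek(0)
line = rbuf.readline()           # 'first line\n'
leftover = rbuf.read()           # 'left over bytes'

rbuf = StringIO()                # reset: start a fresh buffer
rbuf.write(leftover)             # keep only what was not consumed yet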

Example 62

Project: kaa-metadata Source File: jpg.py
    def __init__(self,file):
        core.Image.__init__(self)
        self.mime = 'image/jpeg'
        self.type = 'jpeg image'

        if file.read(2) != '\xff\xd8':
            raise core.ParseError()

        file.seek(-2,2)
        if file.read(2) != '\xff\xd9':
            # Normally a JPEG should end in ffd9. This one does not; however,
            # we assume it's a JPEG for now
            log.info("Wrong encode found for jpeg")

        file.seek(2)
        app = file.read(4)
        self.meta = {}

        while (len(app) == 4):
            (ff,segtype,seglen) = struct.unpack(">BBH", app)
            if ff != 0xff: break
            if segtype == 0xd9:
                break

            elif SOF.has_key(segtype):
                data = file.read(seglen-2)
                (precision,self.height,self.width,\
                 num_comp) = struct.unpack('>BHHB', data[:6])

            elif segtype == 0xe1:
                data = file.read(seglen-2)
                type = data[:data.find('\0')]
                if type == 'Exif':
                    # create a fake file from the data we have to
                    # pass it to the EXIF parser
                    fakefile = cStringIO.StringIO()
                    fakefile.write('\xFF\xD8')
                    fakefile.write(app)
                    fakefile.write(data)
                    fakefile.seek(0)
                    exif = EXIF.process_file(fakefile)
                    fakefile.close()
                    if exif:
                        self.thumbnail = exif.get('JPEGThumbnail', None)
                        if self.thumbnail:
                            self.thumbnail = str(self.thumbnail)
                        self._appendtable('EXIF', exif)

                        if 'Image Orientation' in exif:
                            orientation = str(exif['Image Orientation'])
                            if orientation.find('90 CW') > 0:
                                self.rotation = 90
                            elif orientation.find('90') > 0:
                                self.rotation = 270
                            elif orientation.find('180') > 0:
                                self.rotation = 180
                        t = exif.get('Image DateTimeOriginal')
                        if not t:
                            # sometimes it is called this way
                            t = exif.get('EXIF DateTimeOriginal')
                        if not t:
                            t = exif.get('Image DateTime')
                        if t:
                            try:
                                t = time.strptime(str(t), '%Y:%m:%d %H:%M:%S')
                                self.timestamp = int(time.mktime(t))
                            except ValueError:
                                # Malformed time string.
                                pass
                elif type == 'http://ns.adobe.com/xap/1.0/':
                    # FIXME: parse XMP data (xml)
                    doc = data[data.find('\0')+1:]
                else:
                    pass

            elif segtype == 0xed:
                iptc = IPTC.parseiptc(file.read(seglen-2))
                if iptc:
                    self._appendtable('IPTC', iptc)

            elif segtype == 0xe7:
                # information created by libs like epeg
                data = file.read(seglen-2)
                if data.count('\n') == 1:
                    key, value = data.split('\n')
                    self.meta[key] = value

            elif segtype == 0xfe:
                self.comment = file.read(seglen-2)
                if self.comment.startswith('<?xml'):
                    # This could be a comment based on
                    # http://www.w3.org/TR/photo-rdf/
                    log.error('xml comment parser not integrated')
                    self.comment = ''
            else:
                # Huffman table marker (FFC4)
                # Start of Scan marker (FFDA)
                # Quantization table marker (FFDB)
                # Restart Interval (FFDD) ???
                if not segtype in (0xc4, 0xda, 0xdb, 0xdd):
                    log.info("SEGMENT: 0x%x%x, len=%d" % (ff,segtype,seglen))
                file.seek(seglen-2,1)
            app = file.read(4)

        if len(self.meta.keys()):
            self._appendtable( 'JPGMETA', self.meta )

        for key, value in self.meta.items():
            if key.startswith('Thumb:') or key == 'Software':
                self._set(key, value)
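
A minimal sketch (editorial; parse_header is a made-up stand-in for EXIF.process_file) of the "fake file" trick above: bytes already in memory are written to a cStringIO object, the buffer is rewound with seek(0), and it is handed to a parser that expects a readable file.

import cStringIO

def parse_header(fp):
    # Placeholder for a real parser that reads from a file object.
    return fp.read(2)

fakefile = cStringIO.StringIO()
fakefile.write('\xFF\xD8')       # fabricate the JPEG SOI marker
fakefile.write('segment data already read from the real file')
fakefile.seek(0)                 # rewind before handing it to the parser
marker = parse_header(fakefile)  # '\xff\xd8'
fakefile.close()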

Example 63

Project: dopey Source File: document.py
    def load_ora(self, filename, feedback_cb=None):
        """Loads from an OpenRaster file"""
        logger.info('load_ora: %r', filename)
        t0 = time.time()
        tempdir = tempfile.mkdtemp('mypaint')
        if not isinstance(tempdir, unicode):
            tempdir = tempdir.decode(sys.getfilesystemencoding())
        z = zipfile.ZipFile(filename)
        logger.debug('mimetype: %r', z.read('mimetype').strip())
        xml = z.read('stack.xml')
        image = ET.fromstring(xml)
        stack = image.find('stack')

        image_w = int(image.attrib['w'])
        image_h = int(image.attrib['h'])

        def get_pixbuf(filename):
            t1 = time.time()

            try:
                fp = z.open(filename, mode='r')
            except KeyError:
                # support for bad zip files (saved by old versions of the GIMP ORA plugin)
                fp = z.open(filename.encode('utf-8'), mode='r')
                logger.warning('Bad OpenRaster ZIP file. There is a utf-8 '
                               'encoded filename that does not have the '
                               'utf-8 flag set: %r', filename)

            res = self._pixbuf_from_stream(fp, feedback_cb)
            fp.close()
            logger.debug('%.3fs loading pixbuf %s', time.time() - t1, filename)
            return res

        def get_layers_list(root, x=0,y=0):
            res = []
            for item in root:
                if item.tag == 'layer':
                    if 'x' in item.attrib:
                        item.attrib['x'] = int(item.attrib['x']) + x
                    if 'y' in item.attrib:
                        item.attrib['y'] = int(item.attrib['y']) + y
                    res.append(item)
                elif item.tag == 'stack':
                    stack_x = int( item.attrib.get('x', 0) )
                    stack_y = int( item.attrib.get('y', 0) )
                    res += get_layers_list(item, stack_x, stack_y)
                else:
                    logger.warning('ignoring unsupported tag %r', item.tag)
            return res

        self.clear() # this leaves one empty layer
        no_background = True

        selected_layer = None
        for layer in get_layers_list(stack):
            a = layer.attrib

            if 'background_tile' in a:
                assert no_background
                try:
                    logger.debug("background tile: %r", a['background_tile'])
                    self.set_background(get_pixbuf(a['background_tile']))
                    no_background = False
                    continue
                except tiledsurface.BackgroundError, e:
                    logger.warning('ORA background tile not usable: %r', e)

            src = a.get('src', '')
            if not src.lower().endswith('.png'):
                logger.warning('Ignoring non-png layer %r', src)
                continue
            name = a.get('name', '')
            x = int(a.get('x', '0'))
            y = int(a.get('y', '0'))
            opac = float(a.get('opacity', '1.0'))
            compositeop = str(a.get('composite-op', DEFAULT_COMPOSITE_OP))
            if compositeop not in VALID_COMPOSITE_OPS:
                compositeop = DEFAULT_COMPOSITE_OP
            selected = self.__xsd2bool(a.get("selected", 'false'))
            locked = self.__xsd2bool(a.get("edit-locked", 'false'))

            visible = not 'hidden' in a.get('visibility', 'visible')
            self.add_layer(insert_idx=0, name=name)
            t1 = time.time()

            # extract the png from the zip into a file first
            # the overhead for doing so seems to be negligible (around 5%)
            z.extract(src, tempdir)
            tmp_filename = join(tempdir, src)
            self.load_layer_from_png(tmp_filename, x, y, feedback_cb)
            os.remove(tmp_filename)

            layer = self.layers[0]

            self.set_layer_opacity(helpers.clamp(opac, 0.0, 1.0), layer)
            self.set_layer_compositeop(compositeop, layer)
            self.set_layer_visibility(visible, layer)
            self.set_layer_locked(locked, layer)
            if selected:
                selected_layer = layer
            logger.debug('%.3fs loading and converting layer png',
                         time.time() - t1)
            # strokemap
            fname = a.get('mypaint_strokemap_v2', None)
            if fname:
                sio = StringIO(z.read(fname))
                layer.load_strokemap_from_file(sio, x, y)
                sio.close()

        if len(self.layers) == 1:
            # no assertion (allow empty documents)
            logger.error('Could not load any layer, document is empty.')

        if len(self.layers) > 1:
            # remove the still present initial empty top layer
            self.select_layer(len(self.layers)-1)
            self.remove_layer()
            # this leaves the topmost layer selected

        try:
            ani_data = z.read('animation.xsheet')
            self.ani.str_to_xsheet(ani_data)
        except KeyError:
            self.ani.load_xsheet(filename)

        if selected_layer is not None:
            for i, layer in zip(range(len(self.layers)), self.layers):
                if layer is selected_layer:
                    self.select_layer(i)
                    break

        # Set the frame size to that saved in the image.
        self.update_frame(x=0, y=0, width=image_w, height=image_h,
                          user_initiated=False)

        # Enable frame if the saved image size is something other than the
        # calculated bounding box. Goal: if the user saves an "infinite
        # canvas", it loads as an infinite canvas.
        bbox_c = helpers.Rect(x=0, y=0, w=image_w, h=image_h)
        bbox = self.get_bbox()
        frame_enab = not (bbox_c==bbox or bbox.empty() or bbox_c.empty())
        self.set_frame_enabled(frame_enab, user_initiated=False)

        z.close()

        # remove empty directories created by zipfile's extract()
        for root, dirs, files in os.walk(tempdir, topdown=False):
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(tempdir)

        logger.info('%.3fs load_ora total', time.time() - t0)
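
A minimal sketch (editorial; the archive and member names are assumptions) of the pattern used for the strokemap above: zipfile.ZipFile.read() returns a plain byte string, and wrapping it in StringIO gives downstream code a seekable, readable file object.

import zipfile
from cStringIO import StringIO

z = zipfile.ZipFile('image.ora')              # assumed archive name
sio = StringIO(z.read('data/strokemap.dat'))  # assumed member name
header = sio.read(4)                          # consumers can read/seek as usual
sio.close()
z.close()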

Example 64

Project: simian Source File: blob_upload.py
  def store_and_build_forward_message(self, form, boundary=None,
                                      max_bytes_per_blob=None,
                                      max_bytes_total=None,
                                      bucket_name=None):
    """Reads form data, stores blobs data and builds the forward request.

    This finds all of the file uploads in a set of form fields, converting them
    into blobs and storing them in the blobstore. It also generates the HTTP
    request to forward to the user's application.

    Args:
      form: cgi.FieldStorage instance representing the whole form derived from
        original POST data.
      boundary: The optional boundary to use for the resulting form. If omitted,
        one is randomly generated.
      max_bytes_per_blob: The maximum size in bytes that any single blob
        in the form is allowed to be.
      max_bytes_total: The maximum size in bytes that the total of all blobs
        in the form is allowed to be.
      bucket_name: The name of the Google Storage bucket to store the uploaded
                   files.

    Returns:
      A tuple (content_type, content_text), where content_type is the value of
      the Content-Type header, and content_text is a string containing the body
      of the HTTP request to forward to the application.

    Raises:
      webob.exc.HTTPException: The upload failed.
    """
    message = multipart.MIMEMultipart('form-data', boundary)

    creation = self._now_func()
    total_bytes_uploaded = 0
    created_blobs = []
    mime_type_error = None
    too_many_conflicts = False
    upload_too_large = False
    filename_too_large = False
    content_type_too_large = False

    # Extract all of the individual form items out of the FieldStorage.
    form_items = []
    # Sorting of forms is done merely to make testing a little easier since
    # it means blob-keys are generated in a predictable order.
    for key in sorted(form):
      form_item = form[key]
      if isinstance(form_item, list):
        form_items.extend(form_item)
      else:
        form_items.append(form_item)

    for form_item in form_items:
      disposition_parameters = {'name': form_item.name}

      variable = email.message.Message()

      if form_item.filename is None:
        # Copy as is
        variable.add_header('Content-Type', 'text/plain')
        variable.set_payload(form_item.value)
      else:
        # If there is no filename associated with this field it means that the
        # file form field was not filled in.  This blob should not be created
        # and forwarded to success handler.
        if not form_item.filename:
          continue

        disposition_parameters['filename'] = form_item.filename

        try:
          main_type, sub_type = _split_mime_type(form_item.type)
        except _InvalidMIMETypeFormatError, ex:
          mime_type_error = str(ex)
          break

        # Seek to the end of file and use the pos as the length.
        form_item.file.seek(0, os.SEEK_END)
        content_length = form_item.file.tell()
        form_item.file.seek(0)

        total_bytes_uploaded += content_length

        if max_bytes_per_blob is not None:
          if content_length > max_bytes_per_blob:
            upload_too_large = True
            break
        if max_bytes_total is not None:
          if total_bytes_uploaded > max_bytes_total:
            upload_too_large = True
            break
        if form_item.filename is not None:
          if len(form_item.filename) > _MAX_STRING_NAME_LENGTH:
            filename_too_large = True
            break
        if form_item.type is not None:
          if len(form_item.type) > _MAX_STRING_NAME_LENGTH:
            content_type_too_large = True
            break

        # Compute the MD5 hash of the upload.
        digester = hashlib.md5()
        while True:
          block = form_item.file.read(1 << 20)
          if not block:
            break
          digester.update(block)
        form_item.file.seek(0)

        # Create the external body message containing meta-data about the blob.
        external = email.message.Message()
        external.add_header('Content-Type', '%s/%s' % (main_type, sub_type),
                            **form_item.type_options)
        # NOTE: This is in violation of RFC 2616 (Content-MD5 should be the
        # base-64 encoding of the binary hash, not the hex digest), but it is
        # consistent with production.
        content_md5 = base64.urlsafe_b64encode(digester.hexdigest())
        # Create header MIME message
        headers = dict(form_item.headers)
        for name in _STRIPPED_FILE_HEADERS:
          if name in headers:
            del headers[name]
        headers['Content-Length'] = str(content_length)
        headers[blobstore.UPLOAD_INFO_CREATION_HEADER] = (
            blobstore._format_creation(creation))
        headers['Content-MD5'] = content_md5
        gs_filename = None
        if bucket_name:
          random_key = str(self._generate_blob_key())
          gs_filename = '%s/fake-%s' % (bucket_name, random_key)
          headers[blobstore.CLOUD_STORAGE_OBJECT_HEADER] = (
              blobstore.GS_PREFIX + gs_filename)
        for key, value in headers.iteritems():
          external.add_header(key, value)
        # Add disposition parameters (a clone of the outer message's field).
        if not external.get('Content-Disposition'):
          external.add_header('Content-Disposition', 'form-data',
                              **disposition_parameters)

        base64_encoding = (form_item.headers.get('Content-Transfer-Encoding') ==
                           'base64')
        content_type, blob_file, filename = self._preprocess_data(
            external['content-type'],
            form_item.file,
            form_item.filename,
            base64_encoding)

        # Store the actual contents to storage.
        if gs_filename:
          info_entity = self.store_gs_file(
              content_type, gs_filename, blob_file, filename)
        else:
          try:
            info_entity = self.store_blob(content_type, filename,
                                          digester, blob_file, creation)
          except _TooManyConflictsError:
            too_many_conflicts = True
            break

        # Track created blobs in case we need to roll them back.
        created_blobs.append(info_entity)

        variable.add_header('Content-Type', 'message/external-body',
                            access_type=blobstore.BLOB_KEY_HEADER,
                            blob_key=info_entity.key().name())
        variable.set_payload([external])

      # Set common information.
      variable.add_header('Content-Disposition', 'form-data',
                          **disposition_parameters)
      message.attach(variable)

    if (mime_type_error or too_many_conflicts or upload_too_large or
        filename_too_large or content_type_too_large):
      for blob in created_blobs:
        datastore.Delete(blob)
      if mime_type_error:
        self.abort(400, detail=mime_type_error)
      elif too_many_conflicts:
        self.abort(500, detail='Could not generate a blob key.')
      elif upload_too_large:
        self.abort(413)
      else:
        if filename_too_large:
          invalid_field = 'filename'
        elif content_type_too_large:
          invalid_field = 'Content-Type'
        detail = 'The %s exceeds the maximum allowed length of %s.' % (
            invalid_field, _MAX_STRING_NAME_LENGTH)
        self.abort(400, detail=detail)

    message_out = cStringIO.StringIO()
    gen = email.generator.Generator(message_out, maxheaderlen=0)
    gen.flatten(message, unixfrom=False)

    # Get the content text out of the message.
    message_text = message_out.getvalue()
    content_start = message_text.find('\n\n') + 2
    content_text = message_text[content_start:]
    content_text = content_text.replace('\n', '\r\n')

    return message.get('Content-Type'), content_text
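
A minimal sketch (editorial, not from the project above) of the final step in the handler: email.generator.Generator flattens a MIME message into a cStringIO buffer, and getvalue() yields the complete message text, headers and body together.

import cStringIO
import email.generator
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

message = MIMEMultipart('form-data')
message.attach(MIMEText('hello', 'plain'))

message_out = cStringIO.StringIO()
gen = email.generator.Generator(message_out, maxheaderlen=0)
gen.flatten(message, unixfrom=False)
body = message_out.getvalue()    # headers + payload as a single str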

Example 65

Project: openmoltools Source File: forcefield_generators.py
def generateResidueTemplate(molecule, residue_atoms=None):
    """
    Generate a residue template for simtk.openmm.app.ForceField using GAFF/AM1-BCC.

    This requires the OpenEye toolkit.

    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to be parameterized.
        The molecule must have explicit hydrogens.
        Net charge will be inferred from the net formal charge on each molecule.
        Partial charges will be determined automatically using oequacpac and canonical AM1-BCC charging rules.
    residue_atoms : set of OEAtom, optional, default=None
        If not None, only the atoms in this set will be used to construct the residue template

    Returns
    -------
    template : simtk.openmm.app.forcefield._TemplateData
        Residue template for ForceField using atom types and parameters from `gaff.xml`.
    additional_parameters_ffxml : str
        Contents of ForceField `ffxml` file defining additional parameters from parmchk(2).

    Notes
    -----
    The residue template will be named after the molecule title.
    This method preserves stereochemistry during AM1-BCC charge parameterization.
    Atom names in molecules will be assigned Tripos atom names if any are blank or not unique.

    """
    # Set the template name based on the molecule title plus a globally unique UUID.
    from uuid import uuid4
    template_name = molecule.GetTitle() + '-' + str(uuid4())

    # If any atom names are blank or not unique, assign new unique atom names.
    _ensureUniqueAtomNames(molecule)

    # Compute net formal charge.
    net_charge = _computeNetCharge(molecule)

    # Generate canonical AM1-BCC charges and a reference conformation.
    molecule = get_charges(molecule, strictStereo=False, keep_confs=1)

    # DEBUG: This may be necessary.
    molecule.SetTitle('MOL')

    # Create temporary directory for running antechamber.
    import tempfile
    tmpdir = tempfile.mkdtemp()
    prefix = 'molecule'
    input_mol2_filename = os.path.join(tmpdir, prefix + '.tripos.mol2')
    gaff_mol2_filename = os.path.join(tmpdir, prefix + '.gaff.mol2')
    frcmod_filename = os.path.join(tmpdir, prefix + '.frcmod')

    # Write Tripos mol2 file as antechamber input.
    _writeMolecule(molecule, input_mol2_filename)

    # Parameterize the molecule with antechamber.
    run_antechamber(template_name, input_mol2_filename, charge_method=None, net_charge=net_charge, gaff_mol2_filename=gaff_mol2_filename, frcmod_filename=frcmod_filename)

    # Read the resulting GAFF mol2 file to pick up the GAFF atom types.
    from openeye import oechem
    ifs = oechem.oemolistream(gaff_mol2_filename)
    ifs.SetFlavor(oechem.OEFormat_MOL2, oechem.OEIFlavor_MOL2_DEFAULT | oechem.OEIFlavor_MOL2_M2H | oechem.OEIFlavor_MOL2_Forcefield)
    m2h = True
    oechem.OEReadMolecule(ifs, molecule)
    ifs.close()

    # If residue_atoms = None, add all atoms to the residues
    if residue_atoms == None:
        residue_atoms = [ atom for atom in molecule.GetAtoms() ]

    # Modify partial charges so that charge on residue atoms is integral.
    residue_charge = 0.0
    sum_of_absolute_charge = 0.0
    for atom in residue_atoms:
        charge = atom.GetPartialCharge()
        residue_charge += charge
        sum_of_absolute_charge += abs(charge)
    excess_charge = residue_charge - net_charge
    if sum_of_absolute_charge == 0.0:
        sum_of_absolute_charge = 1.0
    for atom in residue_atoms:
        charge = atom.GetPartialCharge()
        atom.SetPartialCharge( charge + excess_charge * (abs(charge) / sum_of_absolute_charge) )

    # Create residue template.
    template = ForceField._TemplateData(template_name)
    for (index, atom) in enumerate(molecule.GetAtoms()):
        atomname = atom.GetName()
        typename = atom.GetType()
        element = Element.getByAtomicNumber(atom.GetAtomicNum())
        charge = atom.GetPartialCharge()
        parameters = { 'charge' : charge }
        atom_template = ForceField._TemplateAtomData(atomname, typename, element, parameters)
        template.atoms.append(atom_template)
    for bond in molecule.GetBonds():
        if (bond.GetBgn() in residue_atoms) and (bond.GetEnd() in residue_atoms):
            template.addBondByName(bond.GetBgn().GetName(), bond.GetEnd().GetName())
        elif (bond.GetBgn() in residue_atoms) and (bond.GetEnd() not in residue_atoms):
            template.addExternalBondByName(bond.GetBgn().GetName())
        elif (bond.GetBgn() not in residue_atoms) and (bond.GetEnd() in residue_atoms):
            template.addExternalBondByName(bond.GetEnd().GetName())

    # Generate ffxml file contents for parmchk-generated frcmod output.
    leaprc = StringIO('parm = loadamberparams %s' % frcmod_filename)
    params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params)
    ffxml = StringIO()
    params.write(ffxml)

    return template, ffxml.getvalue()
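
A minimal sketch (editorial; the reader and writer functions are hypothetical) of the two directions used above: a StringIO preloaded with text serves as an input file (like the leaprc buffer), while an empty one collects output written by another call (like the ffxml buffer).

from cStringIO import StringIO

def read_first_line(fp):         # hypothetical consumer expecting a readable file
    return fp.readline()

def write_greeting(fp):          # hypothetical producer expecting a writable file
    fp.write('hello\n')

leaprc = StringIO('parm = loadamberparams molecule.frcmod\n')  # preloaded input
first = read_first_line(leaprc)

ffxml = StringIO()               # empty buffer used as an output file
write_greeting(ffxml)
contents = ffxml.getvalue()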

Example 66

Project: sonospy Source File: sqlhtml.py
Function: accepts
    def accepts(
        self,
        request_vars,
        session=None,
        formname='%(tablename)s_%(record_id)s',
        keepvalues=False,
        onvalidation=None,
        dbio=True,
        ):
        """
        same as FORM.accepts but also does insert, update or delete in SQLDB.
        """

        keyed = hasattr(self.table,'_primarykey')
        if self.record:
            if keyed:
                formname_id = '.'.join([str(self.record[k]) for k in self.table._primarykey if hasattr(self.record,k)])
                record_id = dict([(k,request_vars[k]) for k in self.table._primarykey])
            else:
                (formname_id, record_id) = \
                    (self.record.id, request_vars.get('id', None))
            keepvalues = True
        else:
            if keyed:
                formname_id = 'create'
                record_id = dict([(k,None) for k in self.table._primarykey])
            else:
                (formname_id, record_id) = ('create', None)

        if not keyed and isinstance(record_id, (list, tuple)):
            record_id = record_id[0]

        if formname:
            formname = formname % dict(tablename = self.table._tablename,
                                       record_id = formname_id)

        # ## THIS IS FOR UNIQUE RECORDS, read IS_NOT_IN_DB

        for fieldname in self.fields:
            field = self.table[fieldname]
            requires = field.requires or []
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            [item.set_self_id(self.record_id) for item in requires
            if hasattr(item, 'set_self_id') and self.record_id]

        # ## END

        fields = {}
        for key in self.vars:
            fields[key] = self.vars[key]
        ret = FORM.accepts(
            self,
            request_vars,
            session,
            formname,
            keepvalues,
            onvalidation,
            )

        if not ret and self.record and self.errors:
            for key in self.errors.keys():
                if not request_vars.get(key, None) \
                        and not key == 'captcha' \
                        and self.table[key].type=='upload' \
                        and self.record[key] \
                        and not key+UploadWidget.ID_DELETE_SUFFIX in \
                            request_vars:
                    del self.errors[key]
            if not self.errors:
                ret = True

        requested_delete = \
            request_vars.get(self.FIELDNAME_REQUEST_DELETE, False)

        self.custom.end = TAG[''](self.hidden_fields(), self.custom.end)

        auch = record_id and self.errors and requested_delete

        # auch is true when user tries to delete a record
        # that does not pass validation, yet it should be deleted

        if not ret and not auch:
            for fieldname in self.fields:
                field = self.table[fieldname]                
                if fieldname in self.vars:
                    value = self.vars[fieldname]
                elif self.record:
                    value = self.record[fieldname]
                else:
                    value = self.table[fieldname].default
                #was value = request_vars[fieldname]
                if hasattr(field, 'widget') and field.widget\
                    and fieldname in request_vars:
                    self.trows[fieldname][1].components = \
                        [field.widget(field, value)]
                    self.trows[fieldname][1]._traverse(False)
            return ret

        if record_id and record_id != self.record_id:
            raise SyntaxError, 'user is tampering with form\'s record_id: ' \
                               '%s != %s' % (record_id, self.record_id)

        if requested_delete:
            if keyed:
                qry = reduce(lambda x,y: x & y, [self.table[k]==record_id[k] for k in self.table._primarykey])
                if self.table._db(qry).delete():
                    self.vars.update(record_id)
            else:
                self.table._db(self.table.id == self.record.id).delete()
                self.vars.id = self.record.id
            return True

        for fieldname in self.fields:
            if not fieldname in self.table:
                continue

            if not self.ignore_rw and not self.table[fieldname].writable:
                continue

            field = self.table[fieldname]
            if field.type == 'id':
                continue
            if field.type == 'boolean':
                if self.vars.get(fieldname, False):
                    self.vars[fieldname] = fields[fieldname] = True
                else:
                    self.vars[fieldname] = fields[fieldname] = False
            elif field.type == 'password' and self.record\
                and request_vars.get(fieldname, None) == \
                    PasswordWidget.DEFAULT_PASSWORD_DISPLAY:
                continue  # do not update if password was not changed
            elif field.type == 'upload':
                f = self.vars[fieldname]
                fd = fieldname + '__delete'
                if f == '' or f == None:
                    if self.vars.get(fd, False) or not self.record:
                        fields[fieldname] = ''
                    else:
                        fields[fieldname] = self.record[fieldname]
                    continue
                elif hasattr(f,'file'):
                    (source_file, original_filename) = (f.file, f.filename)
                elif isinstance(f, (str, unicode)):
                    ### do not know why this happens, it should not
                    (source_file, original_filename) = \
                        (cStringIO.StringIO(f), 'file.txt')
                newfilename = field.store(source_file, original_filename)
                self.vars['%s_newfilename' % fieldname] = \
                    fields[fieldname] = newfilename
                if field.uploadfield and not field.uploadfield==True:
                    fields[field.uploadfield] = source_file.read()
                continue
            elif fieldname in self.vars:
                fields[fieldname] = self.vars[fieldname]
            elif field.default == None and field.type!='blob':
                self.errors[fieldname] = 'no data'
                return False

            if field.type == 'integer':
                if fields[fieldname] != None:
                    fields[fieldname] = int(fields[fieldname])
            elif str(field.type).startswith('reference'):
                if fields[fieldname] != None and isinstance(self.table,Table) and not keyed:
                    fields[fieldname] = int(fields[fieldname])
            elif field.type == 'double':
                if fields[fieldname] != None:
                    fields[fieldname] = float(fields[fieldname])

        for fieldname in self.vars:
            if fieldname != 'id' and fieldname in self.table.fields\
                 and not fieldname in fields and not fieldname\
                 in request_vars:
                fields[fieldname] = self.vars[fieldname]

        if dbio:
            if keyed:
                if reduce(lambda x,y: x and y, record_id.values()): # if record_id
                    if fields:
                        qry = reduce(lambda x,y: x & y, [self.table[k]==self.record[k] for k in self.table._primarykey])
                        self.table._db(qry).update(**fields)
                else:
                    pk = self.table.insert(**fields)
                    if pk:
                        self.vars.update(pk)
                    else:
                        ret = False
            else:
                if record_id:
                    self.vars.id = self.record.id
                    if fields:
                        self.table._db(self.table.id == self.record.id).update(**fields)
                else:
                    self.vars.id = self.table.insert(**fields)

        return ret
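
A minimal sketch (editorial; store() here is a made-up stand-in for field.store()) of the fallback above: when an upload arrives as a bare string rather than a file object, wrapping it in cStringIO.StringIO lets the same file-based storage path handle it unchanged.

import cStringIO

def store(source_file, original_filename):
    # Stand-in for storage code that expects a readable file object.
    return original_filename, source_file.read()

value = 'raw text that arrived instead of a real upload object'
source_file, original_filename = cStringIO.StringIO(value), 'file.txt'
name, payload = store(source_file, original_filename)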

Example 67

Project: edx-platform Source File: xml.py
    def load_course(self, course_dir, course_ids, tracker, target_course_id=None):
        """
        Load a course into this module store
        course_path: Course directory name

        returns a CourseDescriptor for the course
        """
        log.debug('========> Starting courselike import from %s', course_dir)
        with open(self.data_dir / course_dir / self.parent_xml) as course_file:

            # VS[compat]
            # TODO (cpennington): Remove this once all fall 2012 courses have
            # been imported into the cms from xml
            course_file = StringIO(clean_out_mako_templating(course_file.read()))

            course_data = etree.parse(course_file, parser=edx_xml_parser).getroot()

            org = course_data.get('org')

            if org is None:
                msg = ("No 'org' attribute set for courselike in {dir}. "
                       "Using default 'edx'".format(dir=course_dir))
                log.warning(msg)
                tracker(msg)
                org = 'edx'

            # Parent XML should be something like 'library.xml' or 'course.xml'
            courselike_label = self.parent_xml.split('.')[0]

            course = course_data.get(courselike_label)

            if course is None:
                msg = (
                    "No '{courselike_label}' attribute set for course in {dir}."
                    " Using default '{default}'".format(
                        courselike_label=courselike_label,
                        dir=course_dir,
                        default=course_dir
                    )
                )
                log.warning(msg)
                tracker(msg)
                course = course_dir

            url_name = course_data.get('url_name', course_data.get('slug'))

            if url_name:
                policy_dir = self.data_dir / course_dir / 'policies' / url_name
                policy_path = policy_dir / 'policy.json'

                policy = self.load_policy(policy_path, tracker)

                # VS[compat]: remove once courses use the policy dirs.
                if policy == {}:

                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xml_load_course_policy_dir",
                            u"course:{}".format(course),
                        )
                    )

                    old_policy_path = self.data_dir / course_dir / 'policies' / '{0}.json'.format(url_name)
                    policy = self.load_policy(old_policy_path, tracker)
            else:
                policy = {}
                # VS[compat] : 'name' is deprecated, but support it for now...
                if course_data.get('name'):

                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:xml_load_course_course_data_name",
                            u"course:{}".format(course_data.get('course')),
                            u"org:{}".format(course_data.get('org')),
                            u"name:{}".format(course_data.get('name')),
                        )
                    )

                    url_name = Location.clean(course_data.get('name'))
                    tracker("'name' is deprecated for module xml.  Please use "
                            "display_name and url_name.")
                else:
                    url_name = None

            course_id = self.get_id(org, course, url_name)

            if course_ids is not None and course_id not in course_ids:
                return None

            def get_policy(usage_id):
                """
                Return the policy dictionary to be applied to the specified XBlock usage
                """
                return policy.get(policy_key(usage_id), {})

            services = {}
            if self.i18n_service:
                services['i18n'] = self.i18n_service

            if self.fs_service:
                services['fs'] = self.fs_service

            if self.user_service:
                services['user'] = self.user_service

            system = ImportSystem(
                xmlstore=self,
                course_id=course_id,
                course_dir=course_dir,
                error_tracker=tracker,
                load_error_modules=self.load_error_modules,
                get_policy=get_policy,
                mixins=self.xblock_mixins,
                default_class=self.default_class,
                select=self.xblock_select,
                field_data=self.field_data,
                services=services,
                target_course_id=target_course_id,
            )
            course_descriptor = system.process_xml(etree.tostring(course_data, encoding='unicode'))
            # If we fail to load the course, then skip the rest of the loading steps
            if isinstance(course_descriptor, ErrorDescriptor):
                return course_descriptor

            self.content_importers(system, course_descriptor, course_dir, url_name)

            log.debug('========> Done with courselike import from %s', course_dir)
            return course_descriptor
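
A minimal sketch (editorial, not edx-platform code) of the parsing step above: XML already held as a string is wrapped in StringIO because etree.parse() wants a file object, not a string.

from cStringIO import StringIO
import xml.etree.ElementTree as ET

xml_text = '<course org="edx" url_name="demo"/>'
root = ET.parse(StringIO(xml_text)).getroot()
org = root.get('org')            # 'edx'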

Example 68

Project: temoa Source File: pformat_results.py
def pformat_results ( pyomo_instance, pyomo_result ):
	from pyomo.core import Objective, Var, Constraint

	output = StringIO()

	m = pyomo_instance            # lazy typist
	result = pyomo_result

	soln = result['Solution']
	solv = result['Solver']      # currently unused, but may want it later
	prob = result['Problem']     # currently unused, but may want it later

	optimal_solutions = (
	  'feasible', 'globallyOptimal', 'locallyOptimal', 'optimal'
	)
	if str(soln.Status) not in optimal_solutions:
		output.write( 'No solution found.' )
		return output

	objs = m.active_components( Objective )
	if len( objs ) > 1:
		msg = '\nWarning: More than one objective.  Using first objective.\n'
		SE.write( msg )

	# This is an awkward workaround so as to be generic.  Unfortunately, I don't
	# know how else to automatically discover the objective name
	objs = objs.items()[0]
	obj_name, obj_value = objs[0], value( objs[1]() )

	Cons = soln.Constraint

	def collect_result_data( cgroup, clist, epsilon):
		# cgroup = "Component group"; i.e., Vars or Cons
		# clist = "Component list"; i.e., where to store the data
		# epsilon = absolute value below which to ignore a result
		results = defaultdict(list)
		for name, data in cgroup.iteritems():
			if not (abs( data['Value'] ) > epsilon ): continue

			# name looks like "Something[some,index]"
			group, index = name[:-1].split('[')
			results[ group ].append( (name.replace("'", ''), data['Value']) )
		clist.extend( t for i in sorted( results ) for t in sorted(results[i]))

	svars = defaultdict( lambda: defaultdict( float ))    # "solved" vars
	psvars = defaultdict( lambda: defaultdict( float ))   # "post-solve" vars
	con_info = list()

	epsilon = 1e-9   # threshold for "so small it's zero"

	emission_keys = { (i, t, v, o) : e for e, i, t, v, o in m.EmissionActivity }
	P_0 = min( m.time_optimize )
	GDR = value( m.GlobalDiscountRate )
	MLL = m.ModelLoanLife
	MPL = m.ModelProcessLife
	x   = 1 + GDR    # convenience variable, nothing more

	for p, s, d, t, v in m.V_Activity:
		val = value( m.V_Activity[p, s, d, t, v] )
		if abs(val) < epsilon: continue

		svars['V_Activity'][p, s, d, t, v] = val

	for p, t, v in m.V_ActivityByPeriodAndProcess:
		val = value( m.V_ActivityByPeriodAndProcess[p, t, v] )
		if abs(val) < epsilon: continue

		svars['V_ActivityByPeriodAndProcess'][p, t, v] = val

	for t, v in m.V_Capacity:
		val = value( m.V_Capacity[t, v] )
		if abs(val) < epsilon: continue

		svars['V_Capacity'][t, v] = val

	for p, t in m.V_CapacityAvailableByPeriodAndTech:
		val = value( m.V_CapacityAvailableByPeriodAndTech[p, t] )
		if abs(val) < epsilon: continue

		svars['V_CapacityAvailableByPeriodAndTech'][p, t] = val

	for p, s, d, i, t, v, o in m.V_FlowIn:
		val = value( m.V_FlowIn[p, s, d, i, t, v, o] )
		if abs(val) < epsilon: continue

		svars['V_FlowIn'][p, s, d, i, t, v, o] = val

		psvars['V_EnergyConsumptionByTech'               ][ t ]     += val
		psvars['V_EnergyConsumptionByPeriodAndTech'      ][p, t]    += val
		psvars['V_EnergyConsumptionByTechAndOutput'      ][t, o]    += val
		psvars['V_EnergyConsumptionByPeriodAndProcess'   ][p, t, v] += val
		psvars['V_EnergyConsumptionByPeriodInputAndTech' ][p, i, t] += val
		psvars['V_EnergyConsumptionByPeriodTechAndOutput'][p, t, o] += val

	for p, s, d, i, t, v, o in m.V_FlowOut:
		val = value( m.V_FlowOut[p, s, d, i, t, v, o] )
		if abs(val) < epsilon: continue

		svars['V_FlowOut'][p, s, d, i, t, v, o] = val
		psvars['V_ActivityByInputAndTech'          ][i, t]       += val
		psvars['V_ActivityByPeriodAndTech'         ][p, t]       += val
		psvars['V_ActivityByTechAndOutput'         ][t, o]       += val
		psvars['V_ActivityByProcess'               ][t, v]       += val
		psvars['V_ActivityByPeriodInputAndTech'    ][p, i, t]    += val
		psvars['V_ActivityByPeriodTechAndOutput'   ][p, t, o]    += val
		psvars['V_ActivityByPeriodInputAndProcess' ][p, i, t, v] += val
		psvars['V_ActivityByPeriodProcessAndOutput'][p, t, v, o] += val

		if (i, t, v, o) not in emission_keys: continue

		e = emission_keys[i, t, v, o]
		evalue = val * m.EmissionActivity[e, i, t, v, o]

		psvars[ 'V_EmissionActivityByPeriod'        ][ p ]  += evalue
		psvars[ 'V_EmissionActivityByTech'          ][ t ]  += evalue
		psvars[ 'V_EmissionActivityByPeriodAndTech' ][p, t] += evalue
		psvars[ 'V_EmissionActivityByProcess'       ][t, v] += evalue

	for t, v in m.CostInvest.sparse_iterkeys():
		# CostInvest guaranteed not 0

		icost = value( m.V_Capacity[t, v] )
		if abs(icost) < epsilon: continue

		icost *= value( m.CostInvest[t, v] )
		psvars[ 'V_UndiscountedInvestmentByPeriod'  ][ v ]  += icost
		psvars[ 'V_UndiscountedInvestmentByTech'    ][ t ]  += icost
		psvars[ 'V_UndiscountedInvestmentByProcess' ][t, v] += icost
		psvars[ 'V_UndiscountedPeriodCost'          ][ v ]  += icost


		icost *= value( m.LoanAnnualize[t, v] )
		icost *= (
		  value( MLL[t, v] ) if not GDR else
		    (x **(P_0 - v + 1) * (1 - x **(-value( MLL[t, v] ))) / GDR)
		)

		psvars[ 'V_DiscountedInvestmentByPeriod'  ][ v ]  += icost
		psvars[ 'V_DiscountedInvestmentByTech'    ][ t ]  += icost
		psvars[ 'V_DiscountedInvestmentByProcess' ][t, v] += icost
		psvars[ 'V_DiscountedPeriodCost'          ][ v ]  += icost

	for p, t, v in m.CostFixed.sparse_iterkeys():
		fcost = value( m.V_Capacity[t, v] )
		if abs(fcost) < epsilon: continue

		fcost *= value( m.CostFixed[p, t, v] )
		psvars[ 'V_UndiscountedFixedCostsByPeriod'  ][ p ]  += fcost
		psvars[ 'V_UndiscountedFixedCostsByTech'    ][ t ]  += fcost
		psvars[ 'V_UndiscountedFixedCostsByVintage' ][ v ]  += fcost
		psvars[ 'V_UndiscountedFixedCostsByProcess' ][t, v] += fcost
		psvars[ 'V_UndiscountedFixedCostsByPeriodAndProcess' ][p, t, v] = fcost
		psvars[ 'V_UndiscountedPeriodCost'          ][ p ]  += fcost

		fcost *= (
		  value( MPL[p, t, v] ) if not GDR else
		    (x **(P_0 - p + 1) * (1 - x **(-value( MPL[p, t, v] ))) / GDR)
		)

		psvars[ 'V_DiscountedFixedCostsByPeriod'  ][ p ]  += fcost
		psvars[ 'V_DiscountedFixedCostsByTech'    ][ t ]  += fcost
		psvars[ 'V_DiscountedFixedCostsByVintage' ][ v ]  += fcost
		psvars[ 'V_DiscountedFixedCostsByProcess' ][t, v] += fcost
		psvars[ 'V_DiscountedFixedCostsByPeriodAndProcess' ][p, t, v] = fcost
		psvars[ 'V_DiscountedPeriodCost'          ][ p ]  += fcost

	for p, t, v in m.CostVariable.sparse_iterkeys():
		vcost = value( m.V_ActivityByPeriodAndProcess[p, t, v] )
		if abs(vcost) < epsilon: continue

		vcost *= value( m.CostVariable[p, t, v] )
		psvars[ 'V_UndiscountedVariableCostsByPeriod'  ][ p ]  += vcost
		psvars[ 'V_UndiscountedVariableCostsByTech'    ][ t ]  += vcost
		psvars[ 'V_UndiscountedVariableCostsByVintage' ][ v ]  += vcost
		psvars[ 'V_UndiscountedVariableCostsByProcess' ][t, v] += vcost
		psvars[ 'V_UndiscountedVariableCostsByPeriodAndProcess' ][p, t, v] = vcost
		psvars[ 'V_UndiscountedPeriodCost'             ][ p ]  += vcost

		vcost *= value( m.PeriodRate[ p ])
		psvars[ 'V_DiscountedVariableCostsByPeriod'  ][ p ]  += vcost
		psvars[ 'V_DiscountedVariableCostsByTech'    ][ t ]  += vcost
		psvars[ 'V_DiscountedVariableCostsByVintage' ][ v ]  += vcost
		psvars[ 'V_DiscountedVariableCostsByProcess' ][t, v] += vcost
		psvars[ 'V_DiscountedPeriodCost'             ][ p ]  += vcost

	collect_result_data( Cons, con_info, epsilon=1e-9 )

	msg = ( 'Model name: %s\n'
	   'Objective function value (%s): %s\n'
	   'Non-zero variable values:\n'
	)
	output.write( msg % (m.name, obj_name, obj_value) )

	def make_var_list ( variables ):
		var_list = []
		for vgroup, values in sorted( variables.iteritems() ):
			for vindex, val in sorted( values.iteritems() ):
				if isinstance( vindex, tuple ):
					vindex = ','.join( str(i) for i in vindex )
				var_list.append(( '{}[{}]'.format(vgroup, vindex), val ))
		return var_list

	if svars:
		stringify_data( make_var_list(svars), output )
	else:
		output.write( '\nAll variables have a zero (0) value.\n' )

	if psvars:
		output.write('\n"Reporting Variables" (calculated after solve)\n')
		stringify_data( make_var_list(psvars), output )

	if len( con_info ) > 0:
		output.write( '\nBinding constraint values:\n' )
		stringify_data( con_info, output )
		del con_info
	else:
		# Since not all Coopr solvers give constraint results, must check
		msg = '\nSelected Coopr solver plugin does not give constraint data.\n'
		output.write( msg )

	output.write( '\n\nIf you use these results for a published article, '
	  "please run Temoa with the '--how_to_cite' command line argument for "
	  'citation information.\n')

	return output
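
A minimal sketch (editorial; the report contents are invented) of the structure above: the function accumulates its report in a StringIO and returns the buffer itself, leaving the caller free to print it or write it to disk.

from cStringIO import StringIO

def format_results(rows):
    output = StringIO()
    output.write('Non-zero variable values:\n')
    for name, val in rows:
        output.write('  %-25s %g\n' % (name, val))
    return output

report = format_results([('V_Capacity[coal,2020]', 12.5)])
print report.getvalue()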

Example 69

Project: FriendlyTorrent Source File: track.py
    def get_infopage(self):
        try:
            if not self.config['show_infopage']:
                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
            red = self.config['infopage_redirect']
            if red:
                return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
                        '<A HREF="'+red+'">Click Here</A>')
            
            s = StringIO()
            s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
                '<html><head><title>BitTorrent download info</title>\n')
            if self.favicon is not None:
                s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
            s.write('</head>\n<body>\n' \
                '<h3>BitTorrent download info</h3>\n'\
                '<ul>\n'
                '<li><strong>tracker version:</strong> %s</li>\n' \
                '<li><strong>server time:</strong> %s</li>\n' \
                '</ul>\n' % (version, isotime()))
            if self.config['allowed_dir']:
                if self.show_names:
                    names = [ (self.allowed[hash]['name'],hash)
                              for hash in self.allowed.keys() ]
                else:
                    names = [ (None,hash)
                              for hash in self.allowed.keys() ]
            else:
                names = [ (None,hash) for hash in self.downloads.keys() ]
            if not names:
                s.write('<p>not tracking any files yet...</p>\n')
            else:
                names.sort()
                tn = 0
                tc = 0
                td = 0
                tt = 0  # Total transferred
                ts = 0  # Total size
                nf = 0  # Number of files displayed
                if self.config['allowed_dir'] and self.show_names:
                    s.write('<table summary="files" border="1">\n' \
                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
                else:
                    s.write('<table summary="files">\n' \
                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
                for name,hash in names:
                    l = self.downloads[hash]
                    n = self.completed.get(hash, 0)
                    tn = tn + n
                    c = self.seedcount[hash]
                    tc = tc + c
                    d = len(l) - c
                    td = td + d
                    if self.config['allowed_dir'] and self.show_names:
                        if self.allowed.has_key(hash):
                            nf = nf + 1
                            sz = self.allowed[hash]['length']  # size
                            ts = ts + sz
                            szt = sz * n   # Transferred for this torrent
                            tt = tt + szt
                            if self.allow_get == 1:
                                linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>'
                            else:
                                linkname = name
                            s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
                                % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
                    else:
                        s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
                            % (b2a_hex(hash), c, d, n))
                if self.config['allowed_dir'] and self.show_names:
                    s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n'
                            % (nf, size_format(ts), tc, td, tn, size_format(tt)))
                else:
                    s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td></tr>\n'
                            % (nf, tc, td, tn))
                s.write('</table>\n' \
                    '<ul>\n' \
                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
                    '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
                    '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
                    '<li><em>downloaded:</em> reported complete downloads</li>\n' \
                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
                    '</ul>\n')

            s.write('</body>\n' \
                '</html>\n')
            return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
        except:
            print_exc()
            return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
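
The get_infopage method above assembles the entire HTML info page in a cStringIO buffer and returns s.getvalue() as the response body. A minimal sketch of that build-then-getvalue pattern, detached from the tracker internals (the table rows below are illustrative, not taken from the project):

from cStringIO import StringIO

def render_rows(rows):
    # Accumulate markup in memory instead of concatenating strings.
    s = StringIO()
    s.write('<table>\n')
    for name, count in rows:
        s.write('<tr><td>%s</td><td align="right">%i</td></tr>\n' % (name, count))
    s.write('</table>\n')
    return s.getvalue()

print render_rows([('alpha.torrent', 3), ('beta.torrent', 0)])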

Example 70

Project: simplemonitor Source File: file.py
    def process_batch(self):
        """Save the HTML file."""
        ok_count = 0
        fail_count = 0
        old_count = 0
        remote_count = 0

        try:
            my_host = socket.gethostname().split(".")[0]
        except:
            my_host = socket.gethostname()

        try:
            temp_file = tempfile.mkstemp()
            file_handle = os.fdopen(temp_file[0], "w")
            file_name = temp_file[1]
        except:
            sys.stderr.write("Couldn't create temporary file for HTML output\n")
            return

        output_ok = cStringIO.StringIO()
        output_fail = cStringIO.StringIO()

        keys = self.batch_data.keys()
        keys.sort()
        for entry in keys:
            if self.batch_data[entry]["age"] > 120:
                status = "OLD"
                old_count += 1
            elif self.batch_data[entry]["status"]:
                status = "OK"
                ok_count += 1
            else:
                status = "FAIL"
                fail_count += 1
            if self.batch_data[entry]["host"] != my_host:
                remote_count += 1
            try:
                monitor_name = entry.split("/")[1]
            except:
                monitor_name = entry
            if status == "FAIL":
                output = output_fail
            else:
                output = output_ok
            output.write("<tr class=\"%srow\">" % status.lower())
            output.write("""
            <td class="monitor_name">%s</td>
            <td class="status %s">%s</td>
            <td>%s</td>
            <td>%s</td>
            """ % (
                monitor_name,
                status.lower(), status,
                self.batch_data[entry]["host"],
                self.batch_data[entry]["fail_time"],
            )
            )
            if self.batch_data[entry]["fail_count"] == 0:
                output.write("<td class=\"vfc\">&nbsp;</td>")
            else:
                output.write("<td class=\"vfc\">%s</td>" % self.batch_data[entry]["fail_count"])
            try:
                output.write("<td>%d+%02d:%02d:%02d</td>" % (self.batch_data[entry]["downtime"][0], self.batch_data[entry]["downtime"][1], self.batch_data[entry]["downtime"][2], self.batch_data[entry]["downtime"][3]))
            except:
                output.write("<td>&nbsp;</td>")
            output.write("<td>%s &nbsp;</td>" % (self.batch_data[entry]["fail_data"]))
            if self.batch_data[entry]["failures"] == 0:
                output.write("<td></td><td></td>")
            else:
                output.write("""<td>%s</td>
                <td>%s</td>""" % (
                    self.batch_data[entry]["failures"],
                    self.format_datetime(self.batch_data[entry]["last_failure"])
                )
                )
            if self.batch_data[entry]["host"] == my_host:
                output.write("<td></td>")
            else:
                output.write("<td>%d</td>" % self.batch_data[entry]["age"])
            output.write("</tr>\n")
        count_data = "<div id=\"summary\""
        if old_count > 0:
            cls = "old"
        elif fail_count > 0:
            cls = "fail"
        else:
            cls = "ok"

        count_data = count_data + " class=\"%s\">%s" % (cls, cls.upper())
        self.count_data = count_data + "<div id=\"details\"><span class=\"ok\">%d OK</span> <span class=\"fail\">%d FAIL</span> <span class=\"old\">%d OLD</span> <span class=\"remote\">%d remote</span></div></div>" % (ok_count, fail_count, old_count, remote_count)

        self.status = cls.upper()

        with open(os.path.join(self.folder, self.header), "r") as file_input:
            file_handle.writelines(self.parse_file(file_input))

        file_handle.write(output_fail.getvalue())
        file_handle.write(output_ok.getvalue())

        with open(os.path.join(self.folder, self.footer), "r") as file_input:
            file_handle.writelines(self.parse_file(file_input))

        try:
            file_handle.flush()
            file_handle.close()
            os.chmod(file_name, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IROTH)
            shutil.move(file_name, os.path.join(self.folder, self.filename))
        except Exception, e:
            print "problem closing temporary file for HTML output", e
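
process_batch above writes failing and passing monitors into two separate cStringIO buffers (output_fail and output_ok) and only fixes the final order when it copies them into the report file, failures first. A short sketch of that two-buffer ordering trick, assuming made-up monitor data:

import cStringIO

def build_report(entries):
    # One buffer per category; failures are emitted ahead of successes at the end.
    output_ok = cStringIO.StringIO()
    output_fail = cStringIO.StringIO()
    for name, ok in entries:
        target = output_ok if ok else output_fail
        target.write('<tr><td>%s</td><td>%s</td></tr>\n' % (name, 'OK' if ok else 'FAIL'))
    return output_fail.getvalue() + output_ok.getvalue()

print build_report([('disk', True), ('ping', False), ('http', True)])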

Example 71

Project: p2ptv-pi Source File: WebUI.py
    def doget(self, urlpath):
        if not urlpath.startswith(URLPATH_WEBIF_PREFIX):
            return streaminfo404()
        else:
            self.lastreqtime = time.time()
            try:
                fakeurl = 'http://127.0.0.1' + urlpath[len(URLPATH_WEBIF_PREFIX):]
                if DEBUG:
                    log('webui::doget: fakeurl', fakeurl)
                request_url = urlparse.urlparse(fakeurl)
            except:
                print_exc()
                return

            path = request_url[2]
            query_string = request_url[4]
            query_params = urlparse.parse_qs(query_string)
            if DEBUG:
                log('webui::doget: urlpath', urlpath, 'request_url', request_url, 'path', path, 'query_params', query_params)
            if len(path) == 0:
                if DEBUG:
                    log('webui::doget: show status page')
                page = self.statusPage()
                pageStream = StringIO(page)
                return {'statuscode': 200,
                 'mimetype': 'text/html',
                 'stream': pageStream,
                 'length': len(page)}
            if path == 'permid.js':
                try:
                    permid = encodestring(self.bgApp.s.get_permid()).replace('\n', '')
                    txt = "var permid = '%s';" % permid
                    dataStream = StringIO(txt)
                except:
                    print_exc()
                    return {'statuscode': 500,
                     'statusmsg': 'Bad permid'}

                return {'statuscode': 200,
                 'mimetype': 'text/javascript',
                 'stream': dataStream,
                 'length': len(txt)}
            if path == '/createstream':
                if DEBUG:
                    log('webui::doget: show create stream page')
                page = self.createStreamPage()
                pageStream = StringIO(page)
                return {'statuscode': 200,
                 'mimetype': 'text/html',
                 'stream': pageStream,
                 'length': len(page)}
            if path == '/dispatch':
                if 'url' not in query_params:
                    if DEBUG:
                        log('webui::doget:dispatch: missing url')
                    return streaminfo404()
                url = query_params['url'][0]
                redirect_url = 'http://127.0.0.1:6878/webui/' + url
                params = []
                for name, val in query_params.iteritems():
                    if name != 'url':
                        params.append(urllib.quote_plus(name) + '=' + urllib.quote_plus(val[0]))

                if len(params):
                    redirect_url += '?' + '&'.join(params)
                if DEBUG:
                    log('webui::doget:dispatch: redirect_url', redirect_url)
                page = '<!DOCTYPE html><html><head><script type="text/javascript">'
                page += 'parent.location.href = "' + redirect_url + '";'
                page += '</script></head><body></body></html>'
                pageStream = StringIO(page)
                return {'statuscode': 200,
                 'mimetype': 'text/html',
                 'stream': pageStream,
                 'length': len(page)}
            if path.startswith('/player/') and query_params.has_key('a') and query_params['a'][0] == 'check':
                player_id = path.split('/')[2]
                redirect_url = 'http://127.0.0.1:6878/webui/player/' + player_id
                params = []
                for name, val in query_params.iteritems():
                    if name != 'a':
                        params.append(urllib.quote_plus(name) + '=' + urllib.quote_plus(val[0]))

                if len(params):
                    redirect_url += '?' + '&'.join(params)
                if DEBUG:
                    log('webui::doget:dispatch: redirect_url', redirect_url)
                page = '<!DOCTYPE html><html><head><script type="text/javascript">'
                page += 'parent.location.href = "' + redirect_url + '";'
                page += '</script></head><body></body></html>'
                pageStream = StringIO(page)
                return {'statuscode': 200,
                 'mimetype': 'text/html',
                 'stream': pageStream,
                 'length': len(page)}
            if path.startswith('/player/'):
                player_id = path.split('/')[2]
                if DEBUG:
                    log('webui::doget: show player page: id', player_id)
                params = {}
                for name, val in query_params.iteritems():
                    params[name] = val[0]

                page = self.playerPage(player_id, params)
                pageStream = StringIO(page)
                return {'statuscode': 200,
                 'mimetype': 'text/html',
                 'stream': pageStream,
                 'length': len(page)}
            static_path = None
            json_query = None
            if path.startswith('/json/'):
                json_query = request_url[4]
            else:
                static_path = os.path.join(self.webUIPath, path[1:])
            if DEBUG:
                log('webui::doget: request parsed: static_path', static_path, 'json_query', json_query)
            if static_path is not None:
                if not os.path.isfile(static_path):
                    if DEBUG:
                        log('webui::doget: file not found:', static_path)
                    return streaminfo404()
                extension = os.path.splitext(static_path)[1]
                if extension in self.binaryExtensions:
                    mode = 'rb'
                else:
                    mode = 'r'
                fp = open(static_path, mode)
                data = fp.read()
                fp.close()
                dataStream = StringIO(data)
                return {'statuscode': 200,
                 'mimetype': self.getContentType(extension),
                 'stream': dataStream,
                 'length': len(data)}
            if json_query is not None:
                params = {}
                for s in json_query.split('&'):
                    name, value = s.split('=')
                    params[name] = value

                if DEBUG:
                    log('webui:doget: got json request:', json_query, 'params', params)
                if 'q' not in params:
                    return
                try:
                    req = urllib.unquote(params['q'])
                    if DEBUG:
                        log('webui::doget: parse json: req', req)
                    jreq = json.loads(req)
                    if DEBUG:
                        log('webui::doget: parse json done: jreq', jreq)
                except:
                    print_exc()
                    return

                try:
                    method = jreq['method']
                except:
                    return {'statuscode': 504,
                     'statusmsg': 'Json request in wrong format! At least a method has to be specified!'}

                try:
                    args = jreq['arguments']
                    if DEBUG:
                        print >> sys.stderr, 'webUI: Got JSON request: ', jreq, '; method: ', method, '; arguments: ', args
                except:
                    args = None
                    if DEBUG:
                        print >> sys.stderr, 'webUI: Got JSON request: ', jreq, '; method: ', method

                if args is None:
                    data = self.process_json_request(method)
                    if DEBUG:
                        print >> sys.stderr, 'WebUI: response to JSON ', method, ' request: ', data
                else:
                    data = self.process_json_request(method, args)
                    if DEBUG:
                        print >> sys.stderr, 'WebUI: response to JSON ', method, ' request: ', data, ' arguments: ', args
                if data == 'Args missing':
                    return {'statuscode': 504,
                     'statusmsg': 'Json request in wrong format! Arguments have to be specified!'}
                dataStream = StringIO(data)
                return {'statuscode': 200,
                 'mimetype': 'application/json',
                 'stream': dataStream,
                 'length': len(data)}
            if DEBUG:
                log('webui::doget: unknown request format: request_url', request_url)
            return streaminfo404()
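
The successful branches of doget above all follow the same idiom: build a string, wrap it in StringIO so the HTTP layer can treat it as a readable stream, and report the length alongside it. A hedged sketch of that idiom (the dict keys mirror the ones used above; the helper name is invented):

from cStringIO import StringIO

def make_stream_response(body, mimetype='text/html'):
    # Wrap an in-memory string so the caller can .read() it like a file.
    return {'statuscode': 200,
            'mimetype': mimetype,
            'stream': StringIO(body),
            'length': len(body)}

resp = make_stream_response('<html><body>status</body></html>')
print resp['length'], resp['stream'].read(12)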

Example 72

Project: PyBabe Source File: base.py
def push(instream, filename=None, filename_template = None, directory = None, stream = None, format=None, encoding=None, protocol=None, compress=None, stream_dict=None, **kwargs):
    outstream = None
    compress_format = None
    fileExtension = None
    fileBaseName = None
    to_close = []


    ## Guess format from file extensions .. 
    filename_for_guess = filename if filename else filename_template

    if filename_for_guess: 
        fileBaseName, fileExtension = split_ext(filename_for_guess) 

    if fileExtension in BabeBase.pushCompressExtensions:
        if not compress_format:
            compress_format = BabeBase.pushCompressExtensions[fileExtension]
        fileBaseName, fileExtension = split_ext(fileBaseName)

    if not format and fileExtension in BabeBase.pushExtensions:
        format = BabeBase.pushExtensions[fileExtension] 
            
    if not format: 
        format = "csv"
    
    if not format in BabeBase.pushFormats: 
        raise Exception('Unsupported format %s' % format) 
    if compress_format and not compress_format in BabeBase.pushCompressFormats:
        raise Exception('Unsupported compression format %s' % compress_format)
                
    if protocol and not (protocol in BabeBase.pushProtocols):
        raise Exception('Unsupported protocol %s' % protocol)

    if protocol and kwargs.get('protocol_early_check', True):
        early_check = BabeBase.pushProtocols[protocol][0]
        if early_check:
            early_check(**kwargs)

    if filename: 
        if protocol and kwargs.get("ignore_if_exists", False):
            check_exists = BabeBase.pushProtocols[protocol][2]
            if check_exists:
                if check_exists(filename, **kwargs):
                    logging.info("Skipping push for existing file %s" %  filename)
                    return 

    it = iter(instream)
    while True:
        this_filename = None
        try: 
            header = it.next()
        except StopIteration: 
            break 

        if not filename and filename_template:
            d = header.__dict__.copy()
            if header.partition:
                d.update(header.partition)
            this_filename = Template(filename_template).substitute(d)

        if directory and filename:
            this_filename = os.path.join(directory, this_filename if this_filename else filename)

        if this_filename == None:
            this_filename = filename 

        # If external protocol or compression, write to a temporary file. 
        if protocol or compress_format:
            outstream = tempfile.NamedTemporaryFile()
            to_close.append(outstream)
        elif stream_dict != None: 
            n = filename if filename else header.get_stream_name()
            if not n  in stream_dict:
                stream_dict[n] = StringIO()
            outstream = stream_dict[n]
        elif stream: 
            outstream = stream
        else: 
            outstream = open(this_filename, 'wb')
            to_close.append(outstream)
            
        # Actually write the file. 
        BabeBase.pushFormats[format](format, header, it, outstream, encoding, **kwargs)
        outstream.flush()
        
        if compress_format:
            # Apply file compression. If output protocol, use a temporary file name 
            if protocol:
                n = tempfile.NamedTemporaryFile()
                compress_file = n.name
            else:
                compress_file = this_filename
            name_in_archive = os.path.splitext(os.path.basename(this_filename))[0] + '.' + format
            BabeBase.pushCompressFormats[compress_format](compress_file, outstream.name, name_in_archive)
            if protocol:
                outstream = n 
                
        # Apply protocol 
        if protocol:
            BabeBase.pushProtocols[protocol][1](outstream.name, this_filename, **kwargs)
        
        for s in to_close:
            s.close()

Example 73

Project: teuthology Source File: syslog.py
@contextlib.contextmanager
def syslog(ctx, config):
    """
    start syslog / stop syslog on exit.
    """
    if ctx.archive is None:
        # disable this whole feature if we're not going to archive the data
        # anyway
        yield
        return

    log.info('Starting syslog monitoring...')

    archive_dir = misc.get_archive_dir(ctx)
    log_dir = '{adir}/syslog'.format(adir=archive_dir)
    run.wait(
        ctx.cluster.run(
            args=['mkdir', '-p', '-m0755', '--', log_dir],
            wait=False,
        )
    )

    CONF = '/etc/rsyslog.d/80-cephtest.conf'
    kern_log = '{log_dir}/kern.log'.format(log_dir=log_dir)
    misc_log = '{log_dir}/misc.log'.format(log_dir=log_dir)
    conf_lines = [
        'kern.* -{kern_log};RSYSLOG_FileFormat'.format(kern_log=kern_log),
        '*.*;kern.none -{misc_log};RSYSLOG_FileFormat'.format(
            misc_log=misc_log),
    ]
    conf_fp = StringIO('\n'.join(conf_lines))
    try:
        for rem in ctx.cluster.remotes.iterkeys():
            log_context = 'system_u:object_r:var_log_t:s0'
            for log_path in (kern_log, misc_log):
                rem.run(args='touch %s' % log_path)
                rem.chcon(log_path, log_context)
            misc.sudo_write_file(
                remote=rem,
                path=CONF,
                data=conf_fp,
            )
            conf_fp.seek(0)
        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo',
                    'service',
                    # a mere reload (SIGHUP) doesn't seem to make
                    # rsyslog open the files
                    'rsyslog',
                    'restart',
                ],
                wait=False,
            ),
        )

        yield
    finally:
        log.info('Shutting down syslog monitoring...')

        run.wait(
            ctx.cluster.run(
                args=[
                    'sudo',
                    'rm',
                    '-f',
                    '--',
                    CONF,
                    run.Raw('&&'),
                    'sudo',
                    'service',
                    'rsyslog',
                    'restart',
                ],
                wait=False,
            ),
        )
        # race condition: nothing actually says rsyslog had time to
        # flush the file fully. oh well.

        log.info('Checking logs for errors...')
        for rem in ctx.cluster.remotes.iterkeys():
            log.debug('Checking %s', rem.name)
            r = rem.run(
                args=[
                    'egrep', '--binary-files=text',
                    '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
                    run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
                    run.Raw('|'),
                    'grep', '-v', 'task .* blocked for more than .* seconds',
                    run.Raw('|'),
                    'grep', '-v', 'lockdep is turned off',
                    run.Raw('|'),
                    'grep', '-v', 'trying to register non-static key',
                    run.Raw('|'),
                    'grep', '-v', 'DEBUG: fsize',  # xfs_fsr
                    run.Raw('|'),
                    'grep', '-v', 'CRON',  # ignore cron noise
                    run.Raw('|'),
                    'grep', '-v', 'BUG: bad unlock balance detected',  # #6097
                    run.Raw('|'),
                    'grep', '-v', 'inconsistent lock state',  # FIXME see #2523
                    run.Raw('|'),
                    'grep', '-v', '*** DEADLOCK ***',  # part of lockdep output
                    run.Raw('|'),
                    'grep', '-v',
                    # FIXME see #2590 and #147
                    'INFO: possible irq lock inversion dependency detected',
                    run.Raw('|'),
                    'grep', '-v',
                    'INFO: NMI handler (perf_event_nmi_handler) took too long to run',  # noqa
                    run.Raw('|'),
                    'grep', '-v', 'INFO: recovery required on readonly',
                    run.Raw('|'),
                    'grep', '-v', 'ceph-create-keys: INFO',
                    run.Raw('|'),
                    'head', '-n', '1',
                ],
                stdout=StringIO(),
            )
            stdout = r.stdout.getvalue()
            if stdout != '':
                log.error('Error in syslog on %s: %s', rem.name, stdout)
                set_status(ctx.summary, 'fail')
                if 'failure_reason' not in ctx.summary:
                    ctx.summary['failure_reason'] = \
                        "'{error}' in syslog".format(error=stdout)

        log.info('Compressing syslogs...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'find',
                    '{adir}/syslog'.format(adir=archive_dir),
                    '-name',
                    '*.log',
                    '-print0',
                    run.Raw('|'),
                    'sudo',
                    'xargs',
                    '-0',
                    '--no-run-if-empty',
                    '--',
                    'gzip',
                    '--',
                ],
                wait=False,
            ),
        )
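
Two StringIO habits appear in the syslog task above: the rsyslog config is kept in a StringIO that is handed to sudo_write_file for every remote, with seek(0) rewinding it between consumers, and an empty StringIO() is passed to run() as a stdout sink that is read back with getvalue(). A stand-alone sketch of the rewind-and-reuse half (consume_file is a hypothetical stand-in for any routine that reads a file-like object to the end):

from cStringIO import StringIO

def consume_file(fp):
    # Stand-in for an API that reads a file-like object to exhaustion.
    return fp.read()

conf_fp = StringIO('\n'.join(['kern.* /var/log/kern.log',
                              '*.*;kern.none /var/log/misc.log']))
for host in ['host-a', 'host-b']:
    data = consume_file(conf_fp)
    print host, len(data)   # without the rewind, the second read would be empty
    conf_fp.seek(0)         # rewind so the next consumer sees the same bytes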

Example 74

Project: securedrop Source File: test_unit_integration.py
    def helper_test_reply(self, test_reply, expected_success=True):
        test_msg = "This is a test message."

        with self.source_app as source_app:
            rv = source_app.get('/generate')
            rv = source_app.post('/create', follow_redirects=True)
            codename = session['codename']
            sid = g.sid
            # redirected to submission form
            rv = source_app.post('/submit', data=dict(
                msg=test_msg,
                fh=(StringIO(''), ''),
            ), follow_redirects=True)
            self.assertEqual(rv.status_code, 200)
            self.assertFalse(g.source.flagged)
            common.logout(source_app)

        rv = self.journalist_app.get('/')
        self.assertEqual(rv.status_code, 200)
        self.assertIn("Sources", rv.data)
        soup = BeautifulSoup(rv.data)
        col_url = soup.select('ul#cols > li a')[0]['href']

        rv = self.journalist_app.get(col_url)
        self.assertEqual(rv.status_code, 200)

        with self.source_app as source_app:
            rv = source_app.post('/login', data=dict(
                codename=codename), follow_redirects=True)
            self.assertEqual(rv.status_code, 200)
            self.assertFalse(g.source.flagged)
            common.logout(source_app)

        with self.journalist_app as journalist_app:
            rv = journalist_app.post('/flag', data=dict(
                sid=sid))
            self.assertEqual(rv.status_code, 200)

        with self.source_app as source_app:
            rv = source_app.post('/login', data=dict(
                codename=codename), follow_redirects=True)
            self.assertEqual(rv.status_code, 200)
            self.assertTrue(g.source.flagged)
            source_app.get('/lookup')
            self.assertTrue(g.source.flagged)
            common.logout(source_app)

        # Block until the reply keypair has been generated, so we can test
        # sending a reply
        _block_on_reply_keypair_gen(codename)

        # Create 2 replies to test deleting on journalist and source interface
        for i in range(2):
            rv = self.journalist_app.post('/reply', data=dict(
                sid=sid,
                msg=test_reply
            ), follow_redirects=True)
            self.assertEqual(rv.status_code, 200)

        if not expected_success:
            pass
        else:
            self.assertIn("Thanks! Your reply has been stored.", rv.data)

        with self.journalist_app as journalist_app:
            rv = journalist_app.get(col_url)
            self.assertIn("reply-", rv.data)

        soup = BeautifulSoup(rv.data)

        # Download the reply and verify that it can be decrypted with the
        # journalist's key as well as the source's reply key
        sid = soup.select('input[name="sid"]')[0]['value']
        checkbox_values = [
            soup.select('input[name="doc_names_selected"]')[1]['value']]
        rv = self.journalist_app.post('/bulk', data=dict(
            sid=sid,
            action='download',
            doc_names_selected=checkbox_values
        ), follow_redirects=True)
        self.assertEqual(rv.status_code, 200)

        zf = zipfile.ZipFile(StringIO(rv.data), 'r')
        data = zf.read(zf.namelist()[0])
        self._can_decrypt_with_key(data, config.JOURNALIST_KEY)
        self._can_decrypt_with_key(data, crypto_util.getkey(sid), codename)

        # Test deleting reply on the journalist interface
        last_reply_number = len(
            soup.select('input[name="doc_names_selected"]')) - 1
        self.helper_filenames_delete(soup, last_reply_number)

        with self.source_app as source_app:
            rv = source_app.post('/login', data=dict(codename=codename),
                                 follow_redirects=True)
            self.assertEqual(rv.status_code, 200)
            rv = source_app.get('/lookup')
            self.assertEqual(rv.status_code, 200)

            if not expected_success:
                # there should be no reply
                self.assertNotIn("You have received a reply.", rv.data)
            else:
                self.assertIn(
                    "You have received a reply. For your security, please delete all replies when you're done with them.",
                    rv.data)
                self.assertIn(test_reply, rv.data)
                soup = BeautifulSoup(rv.data)
                msgid = soup.select(
                    'form.message > input[name="reply_filename"]')[0]['value']
                rv = source_app.post('/delete', data=dict(
                    sid=sid,
                    reply_filename=msgid
                ), follow_redirects=True)
                self.assertEqual(rv.status_code, 200)
                self.assertIn("Reply deleted", rv.data)

                # Make sure the reply is deleted from the filesystem
                self._wait_for(
                    lambda: self.assertFalse(
                        os.path.exists(
                            store.path(
                                sid,
                                msgid))))

                common.logout(source_app)
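
The test above leans on StringIO twice: StringIO('') stands in for an empty file upload in the test client, and ZipFile(StringIO(rv.data)) opens the downloaded archive without touching disk. A small self-contained sketch of that in-memory zip round trip (member name and payload are illustrative):

import zipfile
from cStringIO import StringIO

# Write a zip archive entirely in memory...
buf = StringIO()
zf = zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED)
zf.writestr('reply-1.gpg', 'ciphertext goes here')
zf.close()

# ...then read it back the way the test reads rv.data.
zf = zipfile.ZipFile(StringIO(buf.getvalue()), 'r')
print zf.namelist(), zf.read(zf.namelist()[0])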

Example 75

Project: cstar_perf Source File: cluster_api.py
@sockets.route('/api/cluster_comms')
def cluster_comms(ws):
    """Websocket to communicate with the test clusters

    Commands are logical actions one end of the socket wishes the
    other end to take. Responses are follow-ups to a command, of which
    there can be multiple, back and forth between client and server
    until the receiving end marks a response as Done.

    Command structure:
     {type:'command',
      command_id:'some unique string for this command',
      message:'some message to the other end',
      action:'some action for the receiver to take',
      // Extra parameters:
      foo: ...,
      bar: ...,
     }

    Response structure:
     {type:'response',
      command_id:'the command id this is a response to',
      message:'some message to the other end',
      done: true/false (the responder considers the command complete)
      // Extra parameters:
      foo: ...,
      bar: ...,
     }

    Possible commands:
      * authenticate - server asks client to authenticate
      * get_work - client asks for a test
      * test_done - client is done with a test, and sending artifacts
      * cancel_test - server asks client to cancel test
      * shutdown - server asks client to shutdown service

    Protocol:
     Authentication:
      * client initiates connection to this server
      * server sends client a random challenge token
        {type:'command', command_id='zzzz', action:'authenticate', token:'xxxxxxx'}
      * client signs the challenge token with its private key and sends the signature
        {type:'response', command_id='zzz', cluster:'bdplab', signature:'xxxxxxxx'}
      * server verifies the signature against the token it sent and the public
        key it has on file for the cluster.
      * server sends a 'you are authenticated' response.
        {type:'response', command_id='zzz', authenticated: true, done:true}

     Task loop:
      * client sends a 'give me work' request.
        {type:'command', command_id='yyy', action:'get_work'}
      * server sends a 'ok, wait for work' response.
        {type:'response', command_id='yyy', action:'wait'}
      * server sends a single test to the cluster
        {type:'response', command_id='yyy', test:{...}}
      * client responds 'ok, received test' response
        {type:'response', command_id:'yyy', test_id:'xxxxxxx'}
      * server updates status of test to in_progress in database
        {type:'response', command_id:'yyy', message:'test_updated', done:true}
      * client sends artifacts via streaming protocol (See below)
      * client sends 'ok, test done, artifacts sent.' request.
        {type:'command', command_id:'llll', action:'test_done', test_id:'xxxxxxx'}
      * server updates status of test to completed
      * server sends a 'ok, test updated' response
        {type:'response', command_id:'llll', test_id:'xxxxxx', message='test_update', done:true}

     Streaming:
      protocol for streaming raw data: console output, binary artifacts etc.
      * Sending peer sends a "I'm going to send binary data to you" request:
        {type:'command', command_id='xxx', action:'stream', test_id='xxxxx', 
         kind:"[console|failure|chart|system_logs|stress_logs]", name='name', 
         eof='$$$EOF$$$', keepalive='$$$KEEPALIVE$$$'}
      * Receiving peer sends response indicating it's ready to receive the stream:
        {type:'response', command_id='xxx', action='ready'}
      * Peer starts sending arbitrary binary data messages.
      * The receiving peer reads binary data. If it encounters $$$KEEPALIVE$$$ as its own message, it will
        omit that data, as it's only meant to keep the socket open.
      * Once $$$EOF$$$ is seen by the receiving peer, in its own message, the receiving peer can respond:
        {type:'response', command_id='xxx', message:'stream_received', done:true}

    """
    context = {'apikey': APIKey.load(SERVER_KEY_PATH),
               'cluster': None}

    def authenticate():
        token_to_sign = random_token()
        cmd = Command.new(ws, action='authenticate', token=token_to_sign)
        response = cmd.send()
        context['cluster'] = cluster = response['cluster']
        client_pubkey = db.get_pub_key(cluster)
        client_apikey = APIKey(client_pubkey['pubkey'])
        
        # Verify the client correctly signed the token:
        try:
            client_apikey.verify_message(token_to_sign, response.get('signature'))
        except:
            response.respond(message='Bad Signature of token for authentication', done=True)
            log.error('client provided bad signature for auth token')
            raise

        response.respond(authenticated=True, done=True)

        # Client will ask us to authenticate too:
        command = receive_data(ws)
        assert command.get('action') == 'authenticate'
        data = {'signature' :context['apikey'].sign_message(command['token'])}
        response = command.respond(**data)
        if response.get('authenticated') != True:
            raise UnauthenticatedError("Our peer could not validate our signed auth token")

    def get_work(command):
        # Mark any existing in_process jobs for this cluster as
        # failed. If the cluster is asking for new work, then these
        # got dropped on the floor:
        for test in db.get_in_progress_tests(context['cluster']):
            db.update_test_status(test['test_id'], 'failed')

        # Find the next test scheduled for the client's cluster:
        tests = db.get_scheduled_tests(context['cluster'], limit=1)
        if len(tests) > 0:
            test_id = tests[0]['test_id']
        else:
            # No tests are currently scheduled.
            # Register a zmq listener of notifications of incoming tests, with a timeout.
            # When we see any test scheduled notification for our cluster, redo the query.
            # If timeout reached, redo the query anyway in case we missed the notification.
            def setup_zmq():
                zmq_context = zmq.Context()
                zmq_socket = zmq_context.socket(zmq.SUB)
                zmq_socket.connect('tcp://127.0.0.1:5557')
                zmq_socket.setsockopt_string(
                    zmq.SUBSCRIBE, 
                    unicode('scheduled {cluster} '.format(cluster=context['cluster'])))
                zmq_socket.setsockopt(zmq.RCVTIMEO, 15000)
                return zmq_socket
            zmq_socket = setup_zmq()
            while True:
                try:
                    cluster, test_id = zmq_socket.recv_string().split()
                except zmq.error.Again:
                    pass
                except zmq.error.ZMQError, e:
                    if e.errno == zmq.POLLERR:
                        log.error(e)
                        # Interrupted zmq socket code, reinitialize:
                        # I get this when I resize my terminal.. WTF?
                        zmq_socket = setup_zmq()
                finally:
                    tests = db.get_scheduled_tests(context['cluster'], limit=1)
                    if len(tests) > 0:
                        test_id = tests[0]['test_id']
                        break
                    else:
                        # Send no-work-yet message:
                        console_publish(context['cluster'], {'ctl':'WAIT'})
                        command.respond(action='wait', follow_up=False)
        test = db.get_test(test_id)
        # Give the test to the client:
        response = command.respond(test=test)
        # Expect a prepared status message back:
        assert response['test_id'] == test['test_id'] and \
            response['status'] == 'prepared'
        # Update the test status:
        db.update_test_status(test['test_id'], 'in_progress')
        # Let the client know they can start it:
        response.respond(test_id=test['test_id'], status="in_progress", done=True)

    def test_done(command):
        """Receive completed test artifacts from client"""
        db.update_test_status(command['test_id'], command['status'])
        # Record test failure message, if any:
        if command['status'] == 'failed':
            msg = (command.get('message','') + "\n" + command.get('stacktrace','')).strip()
            db.update_test_artifact(command['test_id'], 'failure', msg)
        # Send response:
        command.respond(test_id=command['test_id'], message='test_update', done=True)

    def receive_artifact_chunk_object(command):
        command.respond(message="ready", follow_up=False, done=False)
        tmp = cStringIO.StringIO()
        chunk_sha = hashlib.sha256()

        def frame_callback(frame, binary):
            if not binary:
                frame = frame.encode("utf-8")
            chunk_sha.update(frame)
            tmp.write(frame)

        socket_comms.receive_stream(ws, command, frame_callback)
        # save chunk to db
        db.insert_artifact_chunk(command['object_id'], command['chunk_id'], command['chunk_size'],
                                 chunk_sha.hexdigest(), tmp, command['num_of_chunks'], command['file_size'],
                                 command['object_sha'])
        # respond with current sha
        command.respond(message='chunk_received', done=True, chunk_id=command['chunk_id'], chunk_sha=chunk_sha.hexdigest())

    def receive_artifact_chunk_complete(command):
        db.update_test_artifact(command['test_id'], command['kind'], None, command['name'],
                                available=command['successful'], object_id=command['object_id'])
        command.respond(message='ok', stored_chunk_shas=_get_stored_chunks(command['object_id']), done=True)

    def receive_artifact_chunk_query(command):
        command.respond(message='ok', stored_chunk_shas=_get_stored_chunks(command['object_id']), done=True)

    def _get_stored_chunks(object_id):
        """
        This is super lame, but....
        currently returning a list as a value on commands breaks the assertion functionality on the client
        """
        chunk_info = db.get_chunk_info(object_id)
        return ','.join(["{}:{}".format(hsh['chunk_id'], hsh['chunk_sha']) for hsh in chunk_info])

    def receive_stream(command):
        """Receive a stream of data"""
        command.respond(message="ready", follow_up=False)
        log.debug("Receiving data stream ....")
        if command['kind'] == 'console':
            console_dir = os.path.join(os.path.expanduser("~"), ".cstar_perf", "console_out")
            try:
                os.makedirs(console_dir)
            except OSError:
                pass
            console = open(os.path.join(console_dir, command['test_id']), "w")
        tmp = cStringIO.StringIO()
        sha = hashlib.sha256()
        try:
            def frame_callback(frame, binary):
                if not binary:
                    frame = frame.encode("utf-8")
                if command['kind'] == 'console':
                    console.write(frame)
                    console_publish(context['cluster'], {'job_id':command['test_id'], 'msg':frame})
                    console.flush()
                else:
                    console_publish(context['cluster'], {'job_id':command['test_id'], 'ctl':'IN_PROGRESS'})
                sha.update(frame)
                tmp.write(frame)
            socket_comms.receive_stream(ws, command, frame_callback)
            if command['kind'] == 'console':
                console.close()
            # TODO: confirm with the client that the sha is correct
            # before storing
        finally:
            # In the event of a socket error, we always want to commit
            # what we have of the artifact to the database. Better to
            # have something than nothing. It's the client's
            # responsibility to resend artifacts that failed.

            db.update_test_artifact(command['test_id'], command['kind'], tmp, command['name'])

        command.respond(message='stream_received', done=True, sha256=sha.hexdigest())
        
    # Client and Server both authenticate to each other:
    authenticate()
    
    try:
        # Dispatch on client commands:
        while True:
            command = receive_data(ws)
            assert command['type'] == 'command'
            if command['action'] == 'get_work':
                console_publish(context['cluster'], {'ctl':'WAIT'})
                get_work(command)
            elif command['action'] == 'test_done':
                console_publish(context['cluster'], {'ctl':'DONE'})
                test_done(command)
            elif command['action'] == 'stream':
                receive_stream(command)
            elif command['action'] == 'chunk-stream-query':
                receive_artifact_chunk_query(command)
            elif command['action'] == 'chunk-stream':
                receive_artifact_chunk_object(command)
            elif command['action'] == 'chunk-stream-complete':
                receive_artifact_chunk_complete(command)
            elif command['action'] == 'good_bye':
                log.info("client said good_bye. Closing socket.")
                break
    finally:
        console_publish(context['cluster'], {'ctl':'GOODBYE'})
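
The streaming part of the protocol described in the docstring boils down to: collect arbitrary frames into a cStringIO buffer, hash them as they arrive, skip keepalive markers, and stop at the EOF marker. The sketch below mimics that loop over a plain list of frames instead of a websocket (the marker values come from the docstring; the function name and frame data are invented):

import hashlib
import cStringIO

EOF = '$$$EOF$$$'
KEEPALIVE = '$$$KEEPALIVE$$$'

def collect_stream(frames):
    # Accumulate frames in memory and fingerprint them, as receive_stream does.
    tmp = cStringIO.StringIO()
    sha = hashlib.sha256()
    for frame in frames:
        if frame == EOF:
            break
        if frame == KEEPALIVE:
            continue            # only keeps the socket open, not part of the data
        sha.update(frame)
        tmp.write(frame)
    return tmp.getvalue(), sha.hexdigest()

data, digest = collect_stream(['chunk-1 ', KEEPALIVE, 'chunk-2', EOF])
print repr(data), digest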

Example 76

Project: ganga Source File: Remote.py
    def preparejob(self, jobconfig, master_input_sandbox):
        """Prepare the script to create the job on the remote host"""

        import tempfile

        workdir = tempfile.mkdtemp()
        job = self.getJobObject()

        script = """#!/usr/bin/env python
from __future__ import print_function
#-----------------------------------------------------
# This job wrapper script is automatically created by
# GANGA Remote backend handler.
#
# It controls:
# 1. unpack input sandbox
# 2. create the new job
# 3. submit it
#-----------------------------------------------------
import os,os.path,shutil,tempfile
import sys,popen2,time,traceback
import tarfile

############################################################################################

###INLINEMODULES###

############################################################################################

j = Job()

output_sandbox = ###OUTPUTSANDBOX###
input_sandbox = ###INPUTSANDBOX###
appexec = ###APPLICATIONEXEC###
appargs = ###APPLICATIONARGS###
back_end = ###BACKEND###
ganga_dir = ###GANGADIR###
code = ###CODE###
environment = ###ENVIRONMENT###
user_env = ###USERENV###

if user_env != None:
   for env_var in user_env:
      environment[env_var] = user_env[env_var]

j.outputsandbox = output_sandbox
j.backend = back_end

# Unpack the input sandboxes
shutil.move(os.path.expanduser(ganga_dir + "/__subjob_input_sbx__" + code), j.inputdir+"/__subjob_input_sbx__")
shutil.move(os.path.expanduser(ganga_dir + "/__master_input_sbx__" + code), j.inputdir+"/__master_input_sbx__")

# Add the files in the sandbox to the job
inputsbx = []
fullsbxlist = []
try:
   tar = tarfile.open(j.inputdir+"/__master_input_sbx__")
   filelist = tar.getnames()
   print(filelist)
   
   for f in filelist:
      fullsbxlist.append( f )
      inputsbx.append( j.inputdir + "/" + f )

except:
   print("Unable to open master input sandbox")

try:
   tar = tarfile.open(j.inputdir+"/__subjob_input_sbx__")
   filelist = tar.getnames()

   for f in filelist:
      fullsbxlist.append( f )
      inputsbx.append( j.inputdir + "/" + f )

except:
   print("Unable to open subjob input sandbox")

# sort out the path of the exe
if appexec in fullsbxlist:
   j.application = Executable ( exe = File(os.path.join(j.inputdir, appexec)), args = appargs, env = environment )
   print("Script found: %s" % appexec)
else:
   j.application = Executable ( exe = appexec, args = appargs, env = environment )

   
j.inputsandbox = inputsbx

getPackedInputSandbox(j.inputdir+"/__subjob_input_sbx__", j.inputdir + "/.")
getPackedInputSandbox(j.inputdir+"/__master_input_sbx__", j.inputdir + "/.")

# submit the job
j.submit()

# Start pickle token
print("***_START_PICKLE_***")

# pickle the job
import pickle
print(j.outputdir)
print(pickle.dumps(j._impl))

# print a finished token
print("***_END_PICKLE_***")
print("***_FINISHED_***")
"""
        import inspect
        import Ganga.Core.Sandbox as Sandbox
        script = script.replace('###ENVIRONMENT###', repr(jobconfig.env))
        script = script.replace('###USERENV###', repr(self.environment))
        script = script.replace(
            '###INLINEMODULES###', inspect.getsource(Sandbox.WNSandbox))
        script = script.replace(
            '###OUTPUTSANDBOX###', repr(jobconfig.outputbox))
        script = script.replace(
            '###APPLICATIONEXEC###', repr(os.path.basename(jobconfig.getExeString())))
        script = script.replace(
            '###APPLICATIONARGS###', repr(jobconfig.getArgStrings()))

        # get a string describing the required backend
        import cStringIO
        be_out = cStringIO.StringIO()
        job.backend.remote_backend.printTree(be_out, "copyable")
        be_str = be_out.getvalue()
        script = script.replace('###BACKEND###', be_str)

        script = script.replace('###GANGADIR###', repr(self.ganga_dir))
        script = script.replace('###CODE###', repr(self._code))

        sandbox_list = jobconfig.getSandboxFiles()

        str_list = "[ "
        for fname in sandbox_list:
            str_list += "j.inputdir + '/' + " + \
                repr(os.path.basename(fname.name))
            str_list += ", "

        str_list += "j.inputdir + '/__master_input_sbx__' ]"

        script = script.replace('###INPUTSANDBOX###', str_list)
        return job.getInputWorkspace().writefile(FileBuffer('__jobscript__.py', script), executable=0)

Example 77

Project: gae-init-debug Source File: line_profiler.py
def magic_lprun(self, parameter_s=''):
    """ Execute a statement under the line-by-line profiler from the
    line_profiler module.

    Usage:
      %lprun -f func1 -f func2 <statement>

    The given statement (which doesn't require quote marks) is run via the
    LineProfiler. Profiling is enabled for the functions specified by the -f
    options. The statistics will be shown side-by-side with the code through the
    pager once the statement has completed.

    Options:
    
    -f <function>: LineProfiler only profiles functions and methods it is told
    to profile.  This option tells the profiler about these functions. Multiple
    -f options may be used. The argument may be any expression that gives
    a Python function or method object. However, one must be careful to avoid
    spaces that may confuse the option parser. Additionally, functions defined
    in the interpreter at the In[] prompt or via %run currently cannot be
    displayed.  Write these functions out to a separate file and import them.

    One or more -f options are required to get any useful results.

    -D <filename>: dump the raw statistics out to a pickle file on disk. The
    usual extension for this is ".lprof". These statistics may be viewed later
    by running line_profiler.py as a script.

    -T <filename>: dump the text-formatted statistics with the code side-by-side
    out to a text file.

    -r: return the LineProfiler object after it has completed profiling.
    """
    # Local imports to avoid hard dependency.
    from distutils.version import LooseVersion
    import IPython
    ipython_version = LooseVersion(IPython.__version__)
    if ipython_version < '0.11':
        from IPython.genutils import page
        from IPython.ipstruct import Struct
        from IPython.ipapi import UsageError
    else:
        from IPython.core.page import page
        from IPython.utils.ipstruct import Struct
        from IPython.core.error import UsageError

    # Escape quote markers.
    opts_def = Struct(D=[''], T=[''], f=[])
    parameter_s = parameter_s.replace('"',r'\"').replace("'",r"\'")
    opts, arg_str = self.parse_options(parameter_s, 'rf:D:T:', list_all=True)
    opts.merge(opts_def)

    global_ns = self.shell.user_global_ns
    local_ns = self.shell.user_ns

    # Get the requested functions.
    funcs = []
    for name in opts.f:
        try:
            funcs.append(eval(name, global_ns, local_ns))
        except Exception, e:
            raise UsageError('Could not find function %r.\n%s: %s' % (name, 
                e.__class__.__name__, e))

    profile = LineProfiler(*funcs)

    # Add the profiler to the builtins for @profile.
    import __builtin__
    if 'profile' in __builtin__.__dict__:
        had_profile = True
        old_profile = __builtin__.__dict__['profile']
    else:
        had_profile = False
        old_profile = None
    __builtin__.__dict__['profile'] = profile

    try:
        try:
            profile.runctx(arg_str, global_ns, local_ns)
            message = ''
        except SystemExit:
            message = """*** SystemExit exception caught in code being profiled."""
        except KeyboardInterrupt:
            message = ("*** KeyboardInterrupt exception caught in code being "
                "profiled.")
    finally:
        if had_profile:
            __builtin__.__dict__['profile'] = old_profile

    # Trap text output.
    stdout_trap = StringIO()
    profile.print_stats(stdout_trap)
    output = stdout_trap.getvalue()
    output = output.rstrip()

    if ipython_version < '0.11':
        page(output, screen_lines=self.shell.rc.screen_length)
    else:
        page(output)
    print message,

    dump_file = opts.D[0]
    if dump_file:
        profile.dump_stats(dump_file)
        print '\n*** Profile stats pickled to file',\
              `dump_file`+'.',message

    text_file = opts.T[0]
    if text_file:
        pfile = open(text_file, 'w')
        pfile.write(output)
        pfile.close()
        print '\n*** Profile printout saved to text file',\
              `text_file`+'.',message

    return_value = None
    if opts.has_key('r'):
        return_value = profile

    return return_value
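
After profiling, the magic traps the textual report by handing a StringIO to print_stats() instead of letting it go to stdout, then pages or saves getvalue(). The same trick works for any API that writes to a file-like stream; the sketch below uses the standard library's cProfile/pstats pair rather than line_profiler so it runs on its own:

import cProfile
import pstats
from cStringIO import StringIO

def work():
    return sum(i * i for i in xrange(10000))

profiler = cProfile.Profile()
profiler.runcall(work)

# Trap the text report instead of printing it to stdout.
stdout_trap = StringIO()
stats = pstats.Stats(profiler, stream=stdout_trap)
stats.sort_stats('cumulative').print_stats(5)

output = stdout_trap.getvalue().rstrip()
print output.splitlines()[0]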

Example 78

Project: dopey Source File: document.py
    def save_ora(self, filename, options=None, **kwargs):
        logger.info('save_ora: %r (%r, %r)', filename, options, kwargs)
        t0 = time.time()
        tempdir = tempfile.mkdtemp('mypaint')
        if not isinstance(tempdir, unicode):
            tempdir = tempdir.decode(sys.getfilesystemencoding())
        # use .tmp extension, so we don't overwrite a valid file if there is an exception
        z = zipfile.ZipFile(filename + '.tmpsave', 'w', compression=zipfile.ZIP_STORED)
        # work around a permission bug in the zipfile library: http://bugs.python.org/issue3394
        def write_file_str(filename, data):
            zi = zipfile.ZipInfo(filename)
            zi.external_attr = 0100644 << 16
            z.writestr(zi, data)
        write_file_str('mimetype', 'image/openraster') # must be the first file
        image = ET.Element('image')
        stack = ET.SubElement(image, 'stack')
        x0, y0, w0, h0 = self.get_effective_bbox()
        a = image.attrib
        a['w'] = str(w0)
        a['h'] = str(h0)

        def store_pixbuf(pixbuf, name):
            tmp = join(tempdir, 'tmp.png')
            t1 = time.time()
            pixbuf.savev(tmp, 'png', [], [])
            logger.debug('%.3fs pixbuf saving %s', time.time() - t1, name)
            z.write(tmp, name)
            os.remove(tmp)

        def store_surface(surface, name, rect=[]):
            tmp = join(tempdir, 'tmp.png')
            t1 = time.time()
            surface.save_as_png(tmp, *rect, **kwargs)
            logger.debug('%.3fs surface saving %s', time.time() - t1, name)
            z.write(tmp, name)
            os.remove(tmp)

        def add_layer(x, y, opac, surface, name, layer_name, visible=True,
                      locked=False, selected=False,
                      compositeop=DEFAULT_COMPOSITE_OP, rect=[]):
            layer = ET.Element('layer')
            stack.append(layer)
            store_surface(surface, name, rect)
            a = layer.attrib
            if layer_name:
                a['name'] = layer_name
            a['src'] = name
            a['x'] = str(x)
            a['y'] = str(y)
            a['opacity'] = str(opac)
            if compositeop not in VALID_COMPOSITE_OPS:
                compositeop = DEFAULT_COMPOSITE_OP
            a['composite-op'] = compositeop
            if visible:
                a['visibility'] = 'visible'
            else:
                a['visibility'] = 'hidden'
            if locked:
                a['edit-locked'] = 'true'
            if selected:
                a['selected'] = 'true'
            return layer

        for idx, l in enumerate(reversed(self.layers)):
            if l.is_empty():
                continue
            opac = l.opacity
            x, y, w, h = l.get_bbox()
            sel = (idx == self.layer_idx)
            el = add_layer(x-x0, y-y0, opac, l._surface,
                           'data/layer%03d.png' % idx, l.name, l.visible,
                           locked=l.locked, selected=sel,
                           compositeop=l.compositeop, rect=(x, y, w, h))

            # strokemap
            sio = StringIO()
            l.save_strokemap_to_file(sio, -x, -y)
            data = sio.getvalue(); sio.close()
            name = 'data/layer%03d_strokemap.dat' % idx
            el.attrib['mypaint_strokemap_v2'] = name
            write_file_str(name, data)

        ani_data = self.ani.xsheet_as_str()
        write_file_str('animation.xsheet', ani_data)

        # save background as layer (solid color or tiled)
        bg = self.background
        # save as fully rendered layer
        x, y, w, h = self.get_bbox()
        l = add_layer(x-x0, y-y0, 1.0, bg, 'data/background.png', 'background',
                      locked=True, selected=False,
                      compositeop=DEFAULT_COMPOSITE_OP,
                      rect=(x,y,w,h))
        x, y, w, h = bg.get_bbox()
        # save as single pattern (with corrected origin)
        store_surface(bg, 'data/background_tile.png', rect=(x+x0, y+y0, w, h))
        l.attrib['background_tile'] = 'data/background_tile.png'

        # preview (256x256)
        t2 = time.time()
        logger.debug('starting to render full image for thumbnail...')

        thumbnail_pixbuf = self.render_thumbnail()
        store_pixbuf(thumbnail_pixbuf, 'Thumbnails/thumbnail.png')
        logger.debug('total %.3fs spent on thumbnail', time.time() - t2)

        helpers.indent_etree(image)
        xml = ET.tostring(image, encoding='UTF-8')

        write_file_str('stack.xml', xml)
        z.close()
        os.rmdir(tempdir)
        if os.path.exists(filename):
            os.remove(filename) # windows needs that
        os.rename(filename + '.tmpsave', filename)

        logger.info('%.3fs save_ora total', time.time() - t0)

        return thumbnail_pixbuf
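
The strokemap above is first rendered into an in-memory StringIO and only afterwards written into the .ora zip. A minimal sketch of that pattern, assuming Python 2; save_records and the file names below are hypothetical stand-ins for l.save_strokemap_to_file and the real layer entries:

# A minimal sketch (Python 2): render into an in-memory buffer first,
# then store the bytes as a zip entry.
import zipfile
from cStringIO import StringIO

def save_records(fileobj, records):
    # Hypothetical stand-in for l.save_strokemap_to_file(): any callable
    # that writes to a file-like object fits here.
    for rec in records:
        fileobj.write(rec + "\n")

sio = StringIO()
save_records(sio, ["stroke 1", "stroke 2"])
data = sio.getvalue()
sio.close()

z = zipfile.ZipFile("bundle.zip", "w", zipfile.ZIP_DEFLATED)
z.writestr("data/layer000_strokemap.dat", data)
z.close()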

Example 79

Project: grr Source File: collects.py
  @utils.Synchronized
  def Compact(self, callback=None, timestamp=None):
    """Compacts versioned attributes into the collection stream.

    Versioned attributes come from the datastore sorted by the timestamp
    in the decreasing order. This is the opposite of what we want in
    the collection (as items in the collection should be in chronological
    order).

    Compact's implementation can handle very large collections that can't
    be reversed in memory. It reads them in batches, reverses every batch
    individually, and then reads the batches back in reversed order and
    writes their contents to the collection stream.

    Args:
      callback: An optional function without arguments that gets called
                periodically while processing is done. Useful in flows
                that have to heartbeat.
      timestamp: Only items added before this timestamp will be compacted.

    Raises:
      RuntimeError: if problems are encountered when reading back temporary
                    saved data.

    Returns:
      Number of compacted results.
    """
    if not self.locked:
      raise aff4.LockError("Collection must be locked before compaction.")

    compacted_count = 0

    batches_urns = []
    current_batch = []

    # This timestamp will be used to delete attributes. We don't want
    # to delete anything that was added after we started the compaction.
    freeze_timestamp = timestamp or rdfvalue.RDFDatetime.Now()

    def UpdateIndex():
      seek_index = self.Get(self.Schema.SEEK_INDEX, SeekIndex())

      prev_index_pair = seek_index.checkpoints and seek_index.checkpoints[-1]
      if (not prev_index_pair or
          self.size - prev_index_pair.index_offset >= self.INDEX_INTERVAL):
        new_index_pair = SeekIndexPair(
            index_offset=self.size, byte_offset=self.fd.Tell())
        seek_index.checkpoints.Append(new_index_pair)
        self.Set(self.Schema.SEEK_INDEX, seek_index)

    def DeleteVersionedDataAndFlush():
      """Removes versioned attributes and flushes the stream."""
      data_store.DB.DeleteAttributes(
          self.urn, [self.Schema.DATA.predicate],
          end=freeze_timestamp,
          token=self.token,
          sync=True)
      if self.IsJournalingEnabled():
        journal_entry = self.Schema.COMPACTION_JOURNAL(
            compacted_count, age=freeze_timestamp)
        attrs_to_set = {self.Schema.COMPACTION_JOURNAL: [journal_entry]}
        aff4.FACTORY.SetAttributes(
            self.urn,
            attrs_to_set,
            set(),
            add_child_index=False,
            sync=True,
            token=self.token)

      if self.Schema.DATA in self.synced_attributes:
        del self.synced_attributes[self.Schema.DATA]

      self.Flush(sync=True)

    def HeartBeat():
      """Update the lock lease if needed and call the callback."""
      lease_time = config_lib.CONFIG["Worker.compaction_lease_time"]
      if self.CheckLease() < lease_time / 2:
        logging.info("%s: Extending compaction lease.", self.urn)
        self.UpdateLease(lease_time)
        stats.STATS.IncrementCounter("packed_collection_lease_extended")

      if callback:
        callback()

    HeartBeat()

    # We iterate over all versioned attributes. If we get more than
    # self.COMPACTION_BATCH_SIZE, we write the data to temporary
    # stream in the reversed order.
    for _, value, _ in data_store.DB.ResolvePrefix(
        self.urn,
        self.Schema.DATA.predicate,
        token=self.token,
        timestamp=(0, freeze_timestamp)):

      HeartBeat()

      current_batch.append(value)
      compacted_count += 1

      if len(current_batch) >= self.COMPACTION_BATCH_SIZE:
        batch_urn = rdfvalue.RDFURN("aff4:/tmp").Add("%X" %
                                                     utils.PRNG.GetULong())
        batches_urns.append(batch_urn)

        buf = cStringIO.StringIO()
        for data in reversed(current_batch):
          buf.write(struct.pack("<i", len(data)))
          buf.write(data)

        # We use AFF4Image to avoid serializing/deserializing data stored
        # in versioned attributes.
        with aff4.FACTORY.Create(
            batch_urn, aff4.AFF4Image, mode="w",
            token=self.token) as batch_stream:
          batch_stream.Write(buf.getvalue())

        current_batch = []

    # If there are no versioned attributes, we have nothing to do.
    if not current_batch and not batches_urns:
      return 0

    # The last batch of results can be written to our collection's stream
    # immediately, because we have to reverse the order of all the data
    # stored in versioned attributes.
    if current_batch:
      buf = cStringIO.StringIO()
      for data in reversed(current_batch):
        buf.write(struct.pack("<i", len(data)))
        buf.write(data)

      self.fd.Seek(0, 2)
      self.fd.Write(buf.getvalue())
      self.stream_dirty = True
      self.size += len(current_batch)
      UpdateIndex()

      # If current_batch was the only available batch, just write everything
      # and return.
      if not batches_urns:
        DeleteVersionedDataAndFlush()
        return compacted_count

    batches = {}
    for batch in aff4.FACTORY.MultiOpen(
        batches_urns, aff4_type=aff4.AFF4Image, token=self.token):
      batches[batch.urn] = batch

    if len(batches_urns) != len(batches):
      raise RuntimeError("Internal inconsistency can't read back all the "
                         "temporary batches.")

    # We read all the temporary batches in reverse order (batches itself
    # were reversed when they were written).
    self.fd.Seek(0, 2)
    for batch_urn in reversed(batches_urns):
      batch = batches[batch_urn]

      HeartBeat()

      data = batch.Read(len(batch))
      self.fd.Write(data)
      self.stream_dirty = True
      self.size += self.COMPACTION_BATCH_SIZE
      UpdateIndex()

      aff4.FACTORY.Delete(batch_urn, token=self.token)

    DeleteVersionedDataAndFlush()

    # Update system-wide stats.
    stats.STATS.IncrementCounter(
        "packed_collection_compacted", delta=compacted_count)

    return compacted_count
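
The batches above are serialized as length-prefixed records: each value is written into a cStringIO buffer behind a 4-byte little-endian length header. A minimal sketch of writing and re-reading that framing, assuming Python 2; the record values are invented for illustration:

# Pack length-prefixed records into one buffer, then read them back.
import struct
from cStringIO import StringIO

records = ["first", "second", "third"]

buf = StringIO()
for data in reversed(records):
    buf.write(struct.pack("<i", len(data)))
    buf.write(data)
blob = buf.getvalue()

# Read the stream back one record at a time.
reader = StringIO(blob)
out = []
while True:
    header = reader.read(4)
    if len(header) < 4:
        break
    (length,) = struct.unpack("<i", header)
    out.append(reader.read(length))

assert out == list(reversed(records))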

Example 80

Project: cgstudiomap Source File: custom.py
    def _create_bars(self, cr, uid, ids, report, fields, results, context):
        pool = openerp.registry(cr.dbname)
        pdf_string = cStringIO.StringIO()
        can = canvas.init(fname=pdf_string, format='pdf')
        
        can.show(80,380,'/16/H'+report['title'])
        
        process_date = {
            'D': lambda x: reduce(lambda xx, yy: xx + '-' + yy, x.split('-')[1:3]),
            'M': lambda x: x.split('-')[1],
            'Y': lambda x: x.split('-')[0]
        }

        order_date = {
            'D': lambda x: time.mktime((2005, int(x.split('-')[0]), int(x.split('-')[1]), 0, 0, 0, 0, 0, 0)),
            'M': lambda x: x,
            'Y': lambda x: x
        }

        ar = area.T(size=(350,350),
            x_axis = axis.X(label = fields[0]['name'], format="/a-30{}%s"),
            y_axis = axis.Y(label = ', '.join(map(lambda x : x['name'], fields[1:]))))

        idx = 0 
        date_idx = None
        fct = {}
        for f in fields:
            field_id = (f['field_child3'] and f['field_child3'][0]) or (f['field_child2'] and f['field_child2'][0]) or (f['field_child1'] and f['field_child1'][0]) or (f['field_child0'] and f['field_child0'][0])
            if field_id:
                type = pool['ir.model.fields'].read(cr, uid, [field_id],['ttype'])
                if type[0]['ttype'] == 'date':
                    date_idx = idx
                    fct[idx] = process_date[report['frequency']] 
                else:
                    fct[idx] = lambda x : x
            else:
                fct[idx] = lambda x : x
            idx+=1
        
        # Plots are usually displayed year by year,
        # so group the data by year if the first field is a date.
        data_by_year = {}
        if date_idx is not None:
            for r in results:
                key = process_date['Y'](r[date_idx])
                if key not in data_by_year:
                    data_by_year[key] = []
                for i in range(len(r)):
                    r[i] = fct[i](r[i])
                data_by_year[key].append(r)
        else:
            data_by_year[''] = results


        nb_bar = len(data_by_year)*(len(fields)-1)
        colors = map(lambda x:fill_style.Plain(bgcolor=x), misc.choice_colors(nb_bar))
        
        abscissa = {}
        for line in data_by_year.keys():
            fields_bar = []
            # Sum the data and save it in a list, one item per field.
            for d in data_by_year[line]:
                for idx in range(len(fields)-1):
                    fields_bar.append({})
                    if d[0] in fields_bar[idx]:
                        fields_bar[idx][d[0]] += d[idx+1]
                    else:
                        fields_bar[idx][d[0]] = d[idx+1]
            for idx  in range(len(fields)-1):
                data = {}
                for k in fields_bar[idx].keys():
                    if k in data:
                        data[k] += fields_bar[idx][k]
                    else:
                        data[k] = fields_bar[idx][k]
                data_cuem = []
                prev = 0.0
                keys = data.keys()
                keys.sort()
                # cuemulate if necessary
                for k in keys:
                    data_cuem.append([k, float(data[k])+float(prev)])
                    if fields[idx+1]['cuemulate']:
                        prev += data[k]
                        
                idx0 = 0
                plot = bar_plot.T(label=fields[idx+1]['name']+' '+str(line), data = data_cuem, cluster=(idx0*(len(fields)-1)+idx,nb_bar), fill_style=colors[idx0*(len(fields)-1)+idx])
                ar.add_plot(plot)
                abscissa.update(fields_bar[idx])
            idx0 += 1
        abscissa = map(lambda x : [x, None], abscissa)
        abscissa.sort()
        ar.x_coord = category_coord.T(abscissa,0)
        ar.draw(can)

        can.close()
        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()
        pdf_string.close()
        return True
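
The report above is rendered directly into a cStringIO buffer by passing it as the output file to the PyChart canvas. The same trick works with any library that accepts a file object; a minimal sketch using gzip from the standard library as a stand-in for the PDF canvas, assuming Python 2:

# Any writer that targets a file object can target cStringIO instead,
# so the finished bytes never touch the filesystem.
import gzip
from cStringIO import StringIO

sink = StringIO()
gz = gzip.GzipFile(fileobj=sink, mode="wb")
gz.write("report body goes here\n")
gz.close()                      # flush the compressed stream into the buffer

compressed_bytes = sink.getvalue()
sink.close()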

Example 81

Project: rekall Source File: runplugin.py
    @classmethod
    def PlugManageDocuement(cls, app):
        sockets = Sockets(app)

        @sockets.route("/rekall/docuement/upload")
        def upload_docuement(ws):  # pylint: disable=unused-variable
            cells = json.loads(ws.receive())
            if not cells:
                return

            worksheet = app.config['worksheet']
            new_data = worksheet.Encoder(cells)
            old_data = worksheet.GetData("notebook_cells", raw=True)
            if old_data != new_data:
                worksheet.StoreData("notebook_cells", new_data, raw=True)


        @app.route("/worksheet/load_nodes")
        def rekall_load_nodes():  # pylint: disable=unused-variable
            worksheet = app.config["worksheet"]
            cells = worksheet.GetData("notebook_cells") or []
            result = dict(filename=worksheet.location,
                          sessions=worksheet.GetSessionsAsJson(),
                          cells=cells)

            return json.dumps(result), 200

        @app.route("/worksheet/load_file")
        def load_new_worksheet():  # pylint: disable=unused-variable
            session = app.config['worksheet'].session
            worksheet_dir = session.GetParameter("notebook_dir", ".")
            path = os.path.normpath(request.args.get("path", ""))
            full_path = os.path.join(worksheet_dir, "./" + path)

            # First check that this is a valid Rekall file.
            try:
                fd = io_manager.ZipFileManager(full_path, mode="a")
                if not fd.GetData("notebook_cells"):
                    raise IOError
            except IOError:
                return "File is not a valid Rekall File.", 500

            old_worksheet = app.config["worksheet"]
            old_worksheet.Close()

            app.config["worksheet"] = fd

            return "Worksheet is updated", 200

        @app.route("/worksheet/save_file")
        def save_current_worksheet():  # pylint: disable=unused-variable
            """Save the current worksheet into worksheet directory."""
            worksheet = app.config['worksheet']
            session = app.config['worksheet'].session
            worksheet_dir = session.GetParameter("notebook_dir", ".")
            path = os.path.normpath(request.args.get("path", ""))
            full_path = os.path.join(worksheet_dir, "./" + path)

            with open(full_path, "wb") as out_zip:
                with zipfile.ZipFile(
                    out_zip, mode="w",
                    compression=zipfile.ZIP_DEFLATED) as out_fd:
                    cells = worksheet.GetData("notebook_cells") or []
                    out_fd.writestr("notebook_cells", json.dumps(cells))

                    for cell in cells:
                        # Copy all the files under this cell id:
                        path = "%s/" % cell["id"]
                        for filename in worksheet.ListFiles():
                            if filename.startswith(path):
                                with worksheet.Open(filename) as in_fd:
                                    # Limit reading to a reasonable size (100 MB here).
                                    out_fd.writestr(
                                        filename, in_fd.read(100000000))

            worksheet.Close()

            app.config["worksheet"] = io_manager.ZipFileManager(
                full_path, mode="a")

            return "Worksheet is saved", 200

        @app.route("/worksheet/list_files")
        def list_files_in_worksheet_dir():  # pylint: disable=unused-variable
            worksheet = app.config['worksheet']
            session = worksheet.session

            try:
                worksheet_dir = os.path.abspath(worksheet.location or ".")
                full_path = os.path.abspath(os.path.join(
                    worksheet_dir, request.args.get("path", "")))

                if not os.path.isdir(full_path):
                    full_path = os.path.dirname(full_path)

                result = []
                for filename in sorted(os.listdir(full_path)):
                    if filename.startswith("."):
                        continue

                    file_stat = os.stat(os.path.join(full_path, filename))
                    file_type = "file"
                    if stat.S_ISDIR(file_stat.st_mode):
                        file_type = "directory"

                    full_file_path = os.path.join(full_path, filename)

                    # If the path is within the worksheet - make it relative to
                    # the worksheet.
                    relative_file_path = os.path.relpath(
                        full_file_path, worksheet_dir)

                    if not relative_file_path.startswith(".."):
                        full_file_path = relative_file_path

                    result.append(
                        dict(name=filename,
                             path=full_file_path,
                             type=file_type,
                             size=file_stat.st_size))

                # If the path is within the worksheet - make it relative
                # to the worksheet.
                relative_path = os.path.relpath(full_path, worksheet_dir)
                if not relative_path.startswith(".."):
                    full_path = relative_path

                return jsonify(files=result, path=full_path)
            except (IOError, OSError) as e:
                return str(e), 500

        @app.route("/uploads/worksheet", methods=["POST"])
        def upload_new_worksheet():  # pylint: disable=unused-variable
            """Replace worksheet with uploaded file."""
            worksheet = app.config['worksheet']
            session = app.config['worksheet'].session
            worksheet_dir = session.GetParameter("notebook_dir", ".")

            for in_fd in request.files.itervalues():
                path = os.path.normpath(in_fd.filename)
                full_path = os.path.join(worksheet_dir, "./" + path)

                with open(full_path, "wb") as out_fd:
                    utils.CopyFDs(in_fd, out_fd)

            return "OK", 200

        @app.route("/downloads/worksheet")
        def download_worksheet():  # pylint: disable=unused-variable
            worksheet = app.config["worksheet"]
            data = cStringIO.StringIO()
            with zipfile.ZipFile(
                data, mode="w", compression=zipfile.ZIP_DEFLATED) as out_fd:
                cells = worksheet.GetData("notebook_cells") or []
                out_fd.writestr("notebook_cells", json.dumps(cells))

                for cell in cells:
                    # Copy all the files under this cell id:
                    path = "%s/" % cell["id"]
                    for filename in worksheet.ListFiles():
                        if filename.startswith(path):
                            with worksheet.Open(filename) as in_fd:
                                # Limit reading to a reasonable size (100 MB here).
                                out_fd.writestr(filename, in_fd.read(100000000))

            return data.getvalue(), 200, {
                "content-type": 'binary/octet-stream',
                'content-disposition': "attachment; filename='rekall_file.zip'"
                }
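
download_worksheet() above builds the entire zip archive in memory and returns its bytes as the HTTP response body. A minimal sketch of that pattern, assuming Python 2.7 and hypothetical cell data:

# Build a zip archive entirely in memory and hand its bytes to a response.
import json
import zipfile
from cStringIO import StringIO

cells = [{"id": "cell1", "source": "print 1"}]

data = StringIO()
with zipfile.ZipFile(data, mode="w",
                     compression=zipfile.ZIP_DEFLATED) as out_fd:
    out_fd.writestr("notebook_cells", json.dumps(cells))

payload = data.getvalue()       # bytes of a complete .zip file
headers = {
    "content-type": "binary/octet-stream",
    "content-disposition": "attachment; filename='archive.zip'",
}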

Example 82

Project: burp_extended Source File: DrivebySqlInjection.py
	def makeInjection(self, messageInfo, currentCount, mCallBacks, payload, outputFile):
		output = open(outputFile, "ab")
		originalRequestString = messageInfo.getRequest().tostring()
		requestMethod = BurpCommonClasses.GetMethodArray(messageInfo.getRequest())

		if requestMethod == "GET":
			baseRequestString = StringIO()
			baseRequestString.write('GET ')
		elif requestMethod == "POST":
			baseRequestString = StringIO()
			baseRequestString.write('POST ')
		else:
			return baseRequestString.write(messageInfo.getUrl().getPath())

		if messageInfo.getUrl().getQuery() == None:
			newRequestString = StringIO()
			newRequestString.write(baseRequestString.getvalue())
			if (messageInfo.getUrl().getPath()[-1:] != "/") and not messageInfo.getUrl().getPath().count(".",-4):
				newRequestString.write('/')
			## The original code called payload.stip(), which does not exist; strip() is used instead:
			#newRequestString.write(payload.stip() + str(currentCount) + " HTTP/1.1\r\n")
			newRequestString.write(payload.strip() + str(currentCount) + " HTTP/1.1\r\n")
			###
			newRequestString.write(originalRequestString[originalRequestString.index("\n")+1:])
			currentCount = currentCount + 1
			newResponse = mCallBacks.makeHttpRequest(messageInfo.getHost(), messageInfo.getPort(), messageInfo.getProtocol()=='https', newRequestString.getvalue())
			output.write(newRequestString.getvalue())
			self.parent.printLogTab(newRequestString.getvalue())
			newRequestString.close()
			baseRequestString.write(" HTTP/1.1\r\n")
		else:
			baseRequestString.write("?")
			query = messageInfo.getUrl().getQuery()
			count = query.count("=")
			oldEqualsIndex = 0
			oldAmpIndex = 0
			for c in range(count):
				equalsIndex = query.index("=", oldEqualsIndex)
				if query.count("&", equalsIndex):
					ampIndex = query.index("&", equalsIndex)
				else:
					ampIndex = len(query)
				#f.write(query[oldAmpIndex:equalsIndex] + "\n")
				#f.write(query[equalsIndex+1:ampIndex] + "\n")
				newRequestString = StringIO()
				newRequestString.write(baseRequestString.getvalue())
				currentCount = currentCount + 1
				newRequestString.write(query[:ampIndex])
				newRequestString.write(payload.strip() + str(currentCount) + query[ampIndex:] + " HTTP/1.1\r\n")
				newRequestString.write(originalRequestString[originalRequestString.index("\n")+1:])
				newResponse = mCallBacks.makeHttpRequest(messageInfo.getHost(), messageInfo.getPort(), messageInfo.getProtocol()=='https', newRequestString.getvalue())
				output.write(newRequestString.getvalue())
				self.parent.printLogTab(newRequestString.getvalue())
				newRequestString.close()
				newRequestString = StringIO()
				newRequestString.write(baseRequestString.getvalue())
				currentCount = currentCount + 1
				newRequestString.write(query[:equalsIndex])
				newRequestString.write(payload.strip() + str(currentCount) + query[equalsIndex:] + " HTTP/1.1\r\n")
				newRequestString.write(originalRequestString[originalRequestString.index("\n")+1:])
				newResponse = mCallBacks.makeHttpRequest(messageInfo.getHost(), messageInfo.getPort(), messageInfo.getProtocol()=='https', newRequestString.getvalue())
				output.write(newRequestString.getvalue())
				self.parent.printLogTab(newRequestString.getvalue())
				newRequestString.close()
				oldEqualsIndex = equalsIndex + 1
				oldAmpIndex = ampIndex +1
			baseRequestString.write(messageInfo.getUrl().getQuery() + " HTTP/1.1\r\n")

		if requestMethod == "POST":
			bodyIndex = originalRequestString.find("\r\n\r\n")
			if bodyIndex != -1:
				contentLengthIndex = originalRequestString.index("Content-Length: ")
				contentLengthEnd = originalRequestString.index("\n", contentLengthIndex)
				contentLength = int(originalRequestString[contentLengthIndex+16:+contentLengthEnd])
				contentLength = contentLength + len(payload.strip()) + len(str(currentCount))
				baseRequestString.write(originalRequestString[originalRequestString.index("\n")+1:contentLengthIndex+16] + str(contentLength) + originalRequestString[contentLengthIndex+16+contentLengthEnd:bodyIndex] + "\r\n\r\n")
				originalBody = originalRequestString[bodyIndex +4:]
				count = originalBody.count("=")
				oldEqualsIndex = 0
				oldAmpIndex = 0
				for c in range(count):
					equalsIndex = originalBody.index("=", oldEqualsIndex)
					if originalBody.count("&", equalsIndex):
						ampIndex = originalBody.index("&", equalsIndex)
					else:
						ampIndex = len(originalBody[:originalBody.find("\r")])
					newRequestString = StringIO()
					newRequestString.write(baseRequestString.getvalue())
					currentCount = currentCount + 1
					newRequestString.write(originalBody[:ampIndex])
					newRequestString.write(payload.strip() + str(currentCount) + originalBody[ampIndex:] + "\r\n\r\n")
					newResponse = mCallBacks.makeHttpRequest(messageInfo.getHost(), messageInfo.getPort(), messageInfo.getProtocol()=='https', newRequestString.getvalue())
					output.write(newRequestString.getvalue())
					self.parent.printLogTab(newRequestString.getvalue())
					newRequestString.close()
					newRequestString = StringIO()
					newRequestString.write(baseRequestString.getvalue())
					currentCount = currentCount + 1
					newRequestString.write(originalBody[:equalsIndex])
					newRequestString.write(payload.strip() + str(currentCount) + originalBody[equalsIndex:] + "\r\n\r\n")
					newResponse = mCallBacks.makeHttpRequest(messageInfo.getHost(), messageInfo.getPort(), messageInfo.getProtocol()=='https', newRequestString.getvalue())
					output.write(newRequestString.getvalue())
					self.parent.printLogTab(newRequestString.getvalue())
					newRequestString.close()
					oldEqualsIndex = equalsIndex + 1
					oldAmpIndex = ampIndex +1
			else:
				baseRequestString.write(originalRequestString[originalRequestString.index("\n")+1:] + "\r\n\r\n")
		baseRequestString.close()
		output.close()

		return currentCount
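
Each injected request above is assembled by writing fragments into a StringIO buffer and reading the finished request back with getvalue(). A minimal sketch of that incremental build, assuming Python 2; the path, host and payload are made-up placeholders:

# Grow a request string piece by piece instead of repeated concatenation.
from StringIO import StringIO

payload = "' OR '1'='1"          # hypothetical marker, for illustration only

req = StringIO()
req.write("GET ")
req.write("/index.php")
req.write(payload.strip())
req.write(" HTTP/1.1\r\n")
req.write("Host: example.com\r\n\r\n")

raw_request = req.getvalue()
req.close()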

Example 83

Project: transitfeed Source File: loader.py
  def _ReadCsvDict(self, file_name, cols, required, deprecated):
    """Reads lines from file_name, yielding a dict of unicode values."""
    assert file_name.endswith(".txt")
    table_name = file_name[0:-4]
    contents = self._GetUtf8Contents(file_name)
    if not contents:
      return

    eol_checker = util.EndOfLineChecker(StringIO.StringIO(contents),
                                   file_name, self._problems)
    # The csv module doesn't provide a way to skip trailing space, but when I
    # checked 15/675 feeds had trailing space in a header row and 120 had spaces
    # after fields. Space after header fields can cause a serious parsing
    # problem, so warn. Space after body fields can cause a problem for time,
    # integer and id fields; they will be validated at higher levels.
    reader = csv.reader(eol_checker, skipinitialspace=True)

    raw_header = reader.next()
    header_occurrences = util.defaultdict(lambda: 0)
    header = []
    valid_columns = []  # Index into raw_header and raw_row
    for i, h in enumerate(raw_header):
      h_stripped = h.strip()
      if not h_stripped:
        self._problems.CsvSyntax(
            description="The header row should not contain any blank values. "
                        "The corresponding column will be skipped for the "
                        "entire file.",
            context=(file_name, 1, [''] * len(raw_header), raw_header),
            type=problems.TYPE_ERROR)
        continue
      elif h != h_stripped:
        self._problems.CsvSyntax(
            description="The header row should not contain any "
                        "space characters.",
            context=(file_name, 1, [''] * len(raw_header), raw_header),
            type=problems.TYPE_WARNING)
      header.append(h_stripped)
      valid_columns.append(i)
      header_occurrences[h_stripped] += 1

    for name, count in header_occurrences.items():
      if count > 1:
        self._problems.DuplicateColumn(
            header=name,
            file_name=file_name,
            count=count)

    self._schedule._table_columns[table_name] = header

    # check for unrecognized columns, which are often misspellings
    header_context = (file_name, 1, [''] * len(header), header)
    valid_cols = cols + [deprecated_name for (deprecated_name, _) in deprecated]
    unknown_cols = set(header) - set(valid_cols)
    if len(unknown_cols) == len(header):
      self._problems.CsvSyntax(
            description="The header row did not contain any known column "
                        "names. The file is most likely missing the header row "
                        "or not in the expected CSV format.",
            context=(file_name, 1, [''] * len(raw_header), raw_header),
            type=problems.TYPE_ERROR)
    else:
      for col in unknown_cols:
        # this is provided in order to create a nice colored list of
        # columns in the validator output
        self._problems.UnrecognizedColumn(file_name, col, header_context)

    # check for missing required columns
    missing_cols = set(required) - set(header)
    for col in missing_cols:
      # this is provided in order to create a nice colored list of
      # columns in the validator output
      self._problems.MissingColumn(file_name, col, header_context)

    # check for deprecated columns
    for (deprecated_name, new_name) in deprecated:
      if deprecated_name in header:
        self._problems.DeprecatedColumn(file_name, deprecated_name, new_name,
                                        header_context)

    line_num = 1  # First line read by reader.next() above
    for raw_row in reader:
      line_num += 1
      if len(raw_row) == 0:  # skip extra empty lines in file
        continue

      if len(raw_row) > len(raw_header):
        self._problems.OtherProblem('Found too many cells (commas) in line '
                                    '%d of file "%s".  Every row in the file '
                                    'should have the same number of cells as '
                                    'the header (first line) does.' %
                                    (line_num, file_name),
                                    (file_name, line_num),
                                    type=problems.TYPE_WARNING)

      if len(raw_row) < len(raw_header):
        self._problems.OtherProblem('Found missing cells (commas) in line '
                                    '%d of file "%s".  Every row in the file '
                                    'should have the same number of cells as '
                                    'the header (first line) does.' %
                                    (line_num, file_name),
                                    (file_name, line_num),
                                    type=problems.TYPE_WARNING)

      # raw_row is a list of raw bytes which should be valid utf-8. Convert each
      # valid_columns of raw_row into Unicode.
      valid_values = []
      unicode_error_columns = []  # index of valid_values elements with an error
      for i in valid_columns:
        try:
          valid_values.append(raw_row[i].decode('utf-8'))
        except UnicodeDecodeError:
          # Replace all invalid characters with REPLACEMENT CHARACTER (U+FFFD)
          valid_values.append(codecs.getdecoder("utf8")
                              (raw_row[i], errors="replace")[0])
          unicode_error_columns.append(len(valid_values) - 1)
        except IndexError:
          break

      # The error report may contain a dump of all values in valid_values so
      # problems can not be reported until after converting all of raw_row to
      # Unicode.
      for i in unicode_error_columns:
        self._problems.InvalidValue(header[i], valid_values[i],
                                    'Unicode error',
                                    (file_name, line_num,
                                     valid_values, header))

      # We strip ALL whitespace from around values.  This matches the behavior
      # of both the Google and OneBusAway GTFS parser.
      valid_values = [value.strip() for value in valid_values]

      d = dict(zip(header, valid_values))
      yield (d, line_num, header, valid_values)
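
The loader above turns the decoded file contents back into a file-like object so csv.reader can parse them (here additionally wrapped in an EndOfLineChecker). A minimal sketch of the underlying pattern without the checker, assuming Python 2 and invented CSV content:

# Wrap an in-memory string so csv.reader can treat it as a file.
import csv
import StringIO

contents = "stop_id,stop_name\nS1,Main St\n"

reader = csv.reader(StringIO.StringIO(contents), skipinitialspace=True)
header = reader.next()           # ['stop_id', 'stop_name']
for row in reader:
    print row                    # ['S1', 'Main St']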

Example 84

Project: nzbget-subliminal Source File: upload.py
    def upload_file(self, command, pyversion, filename):
        # Sign if requested
        if self.sign:
            gpg_args = ["gpg", "--detach-sign", "-a", filename]
            if self.identity:
                gpg_args[2:2] = ["--local-user", self.identity]
            spawn(gpg_args,
                  dry_run=self.dry_run)

        # Fill in the data
        f = open(filename,'rb')
        content = f.read()
        f.close()
        basename = os.path.basename(filename)
        comment = ''
        if command=='bdist_egg' and self.distribution.has_ext_modules():
            comment = "built on %s" % platform.platform(terse=1)
        data = {
            ':action':'file_upload',
            'protcol_version':'1',
            'name':self.distribution.get_name(),
            'version':self.distribution.get_version(),
            'content':(basename,content),
            'filetype':command,
            'pyversion':pyversion,
            'md5_digest':md5(content).hexdigest(),
            }
        if command == 'bdist_rpm':
            dist, version, id = platform.dist()
            if dist:
                comment = 'built for %s %s' % (dist, version)
        elif command == 'bdist_dumb':
            comment = 'built for %s' % platform.platform(terse=1)
        data['comment'] = comment

        if self.sign:
            data['gpg_signature'] = (os.path.basename(filename) + ".asc",
                                     open(filename+".asc").read())

        # set up the authentication
        auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip()

        # Build up the MIME payload for the POST data
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = '\n--' + boundary
        end_boundary = sep_boundary + '--'
        body = StringIO.StringIO()
        for key, value in data.items():
            # handle multiple entries for the same name
            if type(value) != type([]):
                value = [value]
            for value in value:
                if type(value) is tuple:
                    fn = ';filename="%s"' % value[0]
                    value = value[1]
                else:
                    fn = ""
                value = str(value)
                body.write(sep_boundary)
                body.write('\nContent-Disposition: form-data; name="%s"'%key)
                body.write(fn)
                body.write("\n\n")
                body.write(value)
                if value and value[-1] == '\r':
                    body.write('\n')  # write an extra newline (lurve Macs)
        body.write(end_boundary)
        body.write("\n")
        body = body.getvalue()

        self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urlparse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            http = httplib.HTTPConnection(netloc)
        elif schema == 'https':
            http = httplib.HTTPSConnection(netloc)
        else:
            raise AssertionError, "unsupported schema "+schema

        data = ''
        loglevel = log.INFO
        try:
            http.connect()
            http.putrequest("POST", url)
            http.putheader('Content-type',
                           'multipart/form-data; boundary=%s'%boundary)
            http.putheader('Content-length', str(len(body)))
            http.putheader('Authorization', auth)
            http.endheaders()
            http.send(body)
        except socket.error, e:
            self.announce(str(e), log.ERROR)
            return

        r = http.getresponse()
        if r.status == 200:
            self.announce('Server response (%s): %s' % (r.status, r.reason),
                          log.INFO)
        else:
            self.announce('Upload failed (%s): %s' % (r.status, r.reason),
                          log.ERROR)
        if self.show_response:
            print '-'*75, r.read(), '-'*75
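
The upload command above assembles the multipart/form-data body in a StringIO.StringIO buffer, one boundary-separated part per field. A minimal sketch of just that assembly step, assuming Python 2; the field values are placeholders:

# Assemble a multipart/form-data body in an in-memory buffer.
import StringIO

boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
end_boundary = sep_boundary + '--'

fields = {'name': 'example-package', 'version': '1.0'}

body = StringIO.StringIO()
for key, value in fields.items():
    body.write(sep_boundary)
    body.write('\nContent-Disposition: form-data; name="%s"' % key)
    body.write("\n\n")
    body.write(str(value))
body.write(end_boundary)
body.write("\n")
body = body.getvalue()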

Example 85

Project: ZeroNet Source File: StatsPlugin.py
    def actionBenchmark(self):
        import sys
        import gc
        from contextlib import contextmanager

        output = self.sendHeader()

        if "Multiuser" in PluginManager.plugin_manager.plugin_names and not config.multiuser_local:
            yield "This function is disabled on this proxy"
            raise StopIteration

        @contextmanager
        def benchmark(name, standard):
            s = time.time()
            output("- %s" % name)
            try:
                yield 1
            except Exception, err:
                output("<br><b>! Error: %s</b><br>" % err)
            taken = time.time() - s
            multipler = standard / taken
            if multipler < 0.3:
                speed = "Sloooow"
            elif multipler < 0.5:
                speed = "Ehh"
            elif multipler < 0.8:
                speed = "Goodish"
            elif multipler < 1.2:
                speed = "OK"
            elif multipler < 1.7:
                speed = "Fine"
            elif multipler < 2.5:
                speed = "Fast"
            elif multipler < 3.5:
                speed = "WOW"
            else:
                speed = "Insane!!"
            output("%.3fs [x%.2f: %s]<br>" % (taken, multipler, speed))
            time.sleep(0.01)

        yield """
        <style>
         * { font-family: monospace }
         table * { text-align: right; padding: 0px 10px }
        </style>
        """

        yield "Benchmarking ZeroNet %s (rev%s) Python %s on: %s...<br>" % (config.version, config.rev, sys.version, sys.platform)

        t = time.time()

        # CryptBitcoin
        yield "<br>CryptBitcoin:<br>"
        from Crypt import CryptBitcoin

        # seed = CryptBitcoin.newSeed()
        # yield "- Seed: %s<br>" % seed
        seed = "e180efa477c63b0f2757eac7b1cce781877177fe0966be62754ffd4c8592ce38"

        with benchmark("hdPrivatekey x 10", 0.7):
            for i in range(10):
                privatekey = CryptBitcoin.hdPrivatekey(seed, i * 10)
                yield "."
            valid = "5JsunC55XGVqFQj5kPGK4MWgTL26jKbnPhjnmchSNPo75XXCwtk"
            assert privatekey == valid, "%s != %s" % (privatekey, valid)

        data = "Hello" * 1024  # 5k
        with benchmark("sign x 10", 0.35):
            for i in range(10):
                yield "."
                sign = CryptBitcoin.sign(data, privatekey)
            valid = "HFGXaDauZ8vX/N9Jn+MRiGm9h+I94zUhDnNYFaqMGuOi+4+BbWHjuwmx0EaKNV1G+kP0tQDxWu0YApxwxZbSmZU="
            assert sign == valid, "%s != %s" % (sign, valid)

        address = CryptBitcoin.privatekeyToAddress(privatekey)
        if CryptBitcoin.opensslVerify:  # OpenSSL available
            with benchmark("openssl verify x 100", 0.37):
                for i in range(100):
                    if i % 10 == 0:
                        yield "."
                    ok = CryptBitcoin.verify(data, address, sign)
                assert ok, "does not verify from %s" % address
        else:
            yield " - openssl verify x 100...not avalible :(<br>"

        openssl_verify_bk = CryptBitcoin.opensslVerify  # Emulate openssl not found in any way
        CryptBitcoin.opensslVerify = None
        with benchmark("pure-python verify x 10", 1.6):
            for i in range(10):
                yield "."
                ok = CryptBitcoin.verify(data, address, sign)
            assert ok, "does not verify from %s" % address
        CryptBitcoin.opensslVerify = openssl_verify_bk

        # CryptHash
        yield "<br>CryptHash:<br>"
        from Crypt import CryptHash
        from cStringIO import StringIO

        data = StringIO("Hello" * 1024 * 1024)  # 5m
        with benchmark("sha256 5M x 10", 0.6):
            for i in range(10):
                data.seek(0)
                hash = CryptHash.sha256sum(data)
                yield "."
            valid = "8cd629d9d6aff6590da8b80782a5046d2673d5917b99d5603c3dcb4005c45ffa"
            assert hash == valid, "%s != %s" % (hash, valid)

        data = StringIO("Hello" * 1024 * 1024)  # 5m
        with benchmark("sha512 5M x 10", 0.6):
            for i in range(10):
                data.seek(0)
                hash = CryptHash.sha512sum(data)
                yield "."
            valid = "9ca7e855d430964d5b55b114e95c6bbb114a6d478f6485df93044d87b108904d"
            assert hash == valid, "%s != %s" % (hash, valid)

        with benchmark("os.urandom(256) x 100 000", 0.65):
            for i in range(10):
                for y in range(10000):
                    data = os.urandom(256)
                yield "."

        # Msgpack
        yield "<br>Msgpack:<br>"
        import msgpack
        binary = 'fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv'
        data = {"int": 1024*1024*1024, "float": 12345.67890, "text": "hello"*1024, "binary": binary}
        with benchmark("pack 5K x 10 000", 0.78):
            for i in range(10):
                for y in range(1000):
                    data_packed = msgpack.packb(data)
                yield "."
            valid = """\x84\xa3int\xce@\x00\x00\x00\xa4text\xda\x14\x00hellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohell
ohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohellohello\xa5float\xcb@\xc8\x1c\xd6\xe61\xf8\xa1\xa6binary\xda\x01\x00fqv\xf0\x1a"e\x10,\xbe\x9cT\x9e(\xa5]u\x072C\x8c\x15\xa2\xa8\x93Sw)\x19\x02\xdd\t\xfb\xf67\x88\xd9\xee\x86\xa1\xe4\xb6,\xc6\x14\xbb\xd7$z\x1d\xb2\xda\x85\xf5\xa0\x97^\x01*\xaf\xd3\xb0!\xb7\x9d\xea\x89\xbbh8\xa1"\xa7]e(@\xa2\xa5g\xb7[\xae\x8eE\xc2\x9fL\xb6s\x19\x19\r\xc8\x04S\xd0N\xe4]?/\x01\xea\xf6\xec\xd1\xb3\xc2\x91\x86\xd7\xf4K\xdf\xc2lV\xf4\xe8\x80\xfc\x8ep\xbb\x82\xb3\x86\x98F\x1c\xecS\xc8\x15\xcf\xdc\xf1\xed\xfc\xd8\x18r\xf9\x80\x0f\xfa\x8cO\x97(\x0b]\xf1\xdd\r\xe7\xbf\xed\x06\xbd\x1b?\xc5\xa0\xd7a\x82\xf3\xa8\xe6@\xf3\ri\xa1\xb10\xf6\xd4W\xbc\x86\x1a\xbb\xfd\x94!bS\xdb\xaeM\x92\x00#\x0b\xf7\xad\xe9\xc2\x8e\x86\xbfi![%\xd31]\xc6\xfc2\xc9\xda\xc6v\x82P\xcc\xa9\xea\xb9\xff\xf6\xc8\x17iD\xcf\xf3\xeeI\x04\xe9\xa1\x19\xbb\x01\x92\xf5nn4K\xf8\xbb\xc6\x17e>\xa7 \xbbv"""
            assert data_packed == valid, "%s<br>!=<br>%s" % (repr(data_packed), repr(valid))

        with benchmark("unpack 5K x 10 000", 1.2):
            for i in range(10):
                for y in range(1000):
                    data_unpacked = msgpack.unpackb(data_packed)
                yield "."
            assert data == data_unpacked, "%s != %s" % (data_unpacked, data)

        with benchmark("streaming unpack 5K x 10 000", 1.4):
            for i in range(10):
                unpacker = msgpack.Unpacker()
                for y in range(1000):
                    unpacker.feed(data_packed)
                    for data_unpacked in unpacker:
                        pass
                yield "."
            assert data == data_unpacked, "%s != %s" % (data_unpacked, data)

        # Db
        yield "<br>Db:<br>"
        from Db import Db

        schema = {
            "db_name": "TestDb",
            "db_file": "%s/benchmark.db" % config.data_dir,
            "maps": {
                ".*": {
                    "to_table": {
                        "test": "test"
                    }
                }
            },
            "tables": {
                "test": {
                    "cols": [
                        ["test_id", "INTEGER"],
                        ["title", "TEXT"],
                        ["json_id", "INTEGER REFERENCES json (json_id)"]
                    ],
                    "indexes": ["CREATE UNIQUE INDEX test_key ON test(test_id, json_id)"],
                    "schema_changed": 1426195822
                }
            }
        }

        if os.path.isfile("%s/benchmark.db" % config.data_dir):
            os.unlink("%s/benchmark.db" % config.data_dir)

        with benchmark("Open x 10", 0.13):
            for i in range(10):
                db = Db(schema, "%s/benchmark.db" % config.data_dir)
                db.checkTables()
                db.close()
                yield "."

        db = Db(schema, "%s/benchmark.db" % config.data_dir)
        db.checkTables()
        import json

        with benchmark("Insert x 10 x 1000", 1.0):
            for u in range(10):  # 10 user
                data = {"test": []}
                for i in range(1000):  # 1000 line of data
                    data["test"].append({"test_id": i, "title": "Testdata for %s message %s" % (u, i)})
                json.dump(data, open("%s/test_%s.json" % (config.data_dir, u), "w"))
                db.loadJson("%s/test_%s.json" % (config.data_dir, u))
                os.unlink("%s/test_%s.json" % (config.data_dir, u))
                yield "."

        with benchmark("Buffered insert x 100 x 100", 1.3):
            cur = db.getCursor()
            cur.execute("BEGIN")
            cur.logging = False
            for u in range(100, 200):  # 100 user
                data = {"test": []}
                for i in range(100):  # 1000 line of data
                    data["test"].append({"test_id": i, "title": "Testdata for %s message %s" % (u, i)})
                json.dump(data, open("%s/test_%s.json" % (config.data_dir, u), "w"))
                db.loadJson("%s/test_%s.json" % (config.data_dir, u), cur=cur)
                os.unlink("%s/test_%s.json" % (config.data_dir, u))
                if u % 10 == 0:
                    yield "."
            cur.execute("COMMIT")

        yield " - Total rows in db: %s<br>" % db.execute("SELECT COUNT(*) AS num FROM test").fetchone()[0]

        with benchmark("Indexed query x 1000", 0.25):
            found = 0
            cur = db.getCursor()
            cur.logging = False
            for i in range(1000):  # 1000x by test_id
                res = cur.execute("SELECT * FROM test WHERE test_id = %s" % i)
                for row in res:
                    found += 1
                if i % 100 == 0:
                    yield "."

            assert found == 20000, "Found: %s != 20000" % found

        with benchmark("Not indexed query x 100", 0.6):
            found = 0
            cur = db.getCursor()
            cur.logging = False
            for i in range(100):  # 100x by json_id
                res = cur.execute("SELECT * FROM test WHERE json_id = %s" % i)
                for row in res:
                    found += 1
                if i % 10 == 0:
                    yield "."

            assert found == 18900, "Found: %s != 18900" % found

        with benchmark("Like query x 100", 1.8):
            found = 0
            cur = db.getCursor()
            cur.logging = False
            for i in range(100):  # 100x by title LIKE
                res = cur.execute("SELECT * FROM test WHERE title LIKE '%%message %s%%'" % i)
                for row in res:
                    found += 1
                if i % 10 == 0:
                    yield "."

            assert found == 38900, "Found: %s != 38900" % found

        db.close()
        if os.path.isfile("%s/benchmark.db" % config.data_dir):
            os.unlink("%s/benchmark.db" % config.data_dir)

        gc.collect()  # Explicit garbage collection

        yield "<br>Done. Total: %.2fs" % (time.time() - t)

Example 86

Project: pants Source File: wsgi.py
    def __call__(self, request, *args):
        """
        Handle the given request.
        """
        # Make sure this plays nice with Web.
        request.auto_finish = False

        request._headers = None
        request._head_status = None
        request._chunk_it = False

        def write(data):
            if not request._started:
                # Before the first output, send the headers.
                # But before that, figure out if we've got a set length.
                for k,v in request._headers:
                    if k.lower() == 'content-length' or k.lower() == 'transfer-encoding':
                        break
                else:
                    request._headers.append(('Transfer-Encoding', 'chunked'))
                    request._chunk_it = True

                request.send_status(request._head_status)
                request.send_headers(request._headers)

            if request._chunk_it:
                request.write("%x\r\n%s\r\n" % (len(data), data))
            else:
                request.write(data)

        def start_response(status, head, exc_info=None):
            if exc_info:
                try:
                    if request._started:
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    exc_info = None

            elif request._head_status is not None:
                raise RuntimeError("Headers already set.")

            if not isinstance(status, (int, str)):
                raise ValueError("status must be a string or int")
            if not isinstance(head, list):
                if isinstance(head, dict):
                    head = [(k,v) for k,v in head.iteritems()]
                else:
                    try:
                        head = list(head)
                    except ValueError:
                        raise ValueError("headers must be a list")

            request._head_status = status
            request._headers = head
            return write

        # Check for extra arguments that would mean we're being used
        # within Application.
        if hasattr(request, '_converted_match'):
            path = request._converted_match[-1]
            routing_args = request._converted_match[:-1]
        else:
            path = request.path
            if hasattr(request, 'match'):
                routing_args = request.match.groups()
            else:
                routing_args = None

        # Build an environment for the WSGI application.
        environ = {
            'REQUEST_METHOD'    : request.method,
            'SCRIPT_NAME'       : '',
            'PATH_INFO'         : path,
            'QUERY_STRING'      : request.query,
            'SERVER_NAME'       : request.headers.get('Host','127.0.0.1'),
            'SERVER_PROTOCOL'   : request.protocol,
            'SERVER_SOFTWARE'   : SERVER,
            'REMOTE_ADDR'       : request.remote_ip,
            'GATEWAY_INTERFACE' : 'WSGI/1.0',
            'wsgi.version'      : (1,0),
            'wsgi.url_scheme'   : request.scheme,
            'wsgi.input'        : cStringIO.StringIO(request.body),
            'wsgi.errors'       : sys.stderr,
            'wsgi.multithread'  : False,
            'wsgi.multiprocess' : False,
            'wsgi.run_once'     : False
        }

        if isinstance(request.connection.server.local_address, tuple):
            environ['SERVER_PORT'] = request.connection.server.local_address[1]

        if routing_args:
            environ['wsgiorg.routing_args'] = (routing_args, {})

        if 'Content-Type' in request.headers:
            environ['CONTENT_TYPE'] = request.headers['Content-Type']
        if 'Content-Length' in request.headers:
            environ['CONTENT_LENGTH'] = request.headers['Content-Length']

        for k,v in request.headers._data.iteritems():
            environ['HTTP_%s' % k.replace('-','_').upper()] = v

        # Run the WSGI Application.
        try:
            result = self.app(environ, start_response)

            if result:
                try:
                    if isinstance(result, str):
                        write(result)
                    else:
                        for data in result:
                            if data:
                                write(data)
                finally:
                    try:
                        if hasattr(result, 'close'):
                            result.close()
                    except Exception:
                        log.warning("Exception running result.close() for: "
                                    "%s %s", request.method, request.path,
                            exc_info=True)
                    result = None

        except Exception:
            log.exception('Exception running WSGI application for: %s %s',
                request.method, request.path)

            # If we've started, bad stuff.
            if request._started:
                # We can't recover, so close the connection.
                if request._chunk_it:
                    request.write("0\r\n\r\n\r\n")
                request.connection.close(True)
                return

            # Use the default behavior if we're not debugging.
            if not self.debug:
                raise

            resp = u''.join([
                u"<h2>Traceback</h2>\n",
                u"<pre>%s</pre>\n" % traceback.format_exc(),
                u"<h2>HTTP Request</h2>\n",
                request.__html__(),
                ])
            body, status, headers = error(resp, 500, request=request,
                debug=True)

            request.send_status(500)

            if not 'Content-Length' in headers:
                headers['Content-Length'] = len(body)

            request.send_headers(headers)
            request.write(body)
            request.finish()
            return

        # Finish up here.
        if not request._started:
            write('')
        if request._chunk_it:
            request.write("0\r\n\r\n\r\n")

        request.finish()
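
Note: the cStringIO.StringIO(request.body) call above is what turns the already-buffered request body back into the readable, file-like stream that WSGI requires in environ['wsgi.input']. A minimal, self-contained sketch of that idea (demo_app, the environ values and start_response below are illustrative, not part of the project above):

import cStringIO
import sys

def demo_app(environ, start_response):
    # A trivial WSGI app that echoes the request body back.
    body = environ['wsgi.input'].read()
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [body]

request_body = 'hello=world'
environ = {
    'REQUEST_METHOD'    : 'POST',
    'PATH_INFO'         : '/',
    'QUERY_STRING'      : '',
    'SERVER_NAME'       : 'localhost',
    'SERVER_PROTOCOL'   : 'HTTP/1.1',
    'CONTENT_LENGTH'    : str(len(request_body)),
    'wsgi.version'      : (1, 0),
    'wsgi.url_scheme'   : 'http',
    'wsgi.input'        : cStringIO.StringIO(request_body),  # body as a stream
    'wsgi.errors'       : sys.stderr,
    'wsgi.multithread'  : False,
    'wsgi.multiprocess' : False,
    'wsgi.run_once'     : True,
}

def start_response(status, headers, exc_info=None):
    print(status)

for chunk in demo_app(environ, start_response):
    print(chunk)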

Example 87

Project: sonospy Source File: widget.py
def console():
    """ Defines the behavior of the console web2py execution """

    usage = "python web2py.py"

    description = """\
    web2py Web Framework startup script.
    ATTENTION: unless a password is specified (-a 'passwd') web2py will
    attempt to run a GUI. In this case command line options are ignored."""

    description = dedent(description)

    parser = OptionParser(usage, None, Option, ProgramVersion)

    parser.description = description

    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address of the server (127.0.0.1)')

    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')

    msg = 'password to be used for administration'
    msg += ' (use -a "<recycle>" to reuse the last password)'
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)

    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')

    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')

    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')

    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')

    parser.add_option('-n',
                      '--numthreads',
                      default='10',
                      type='int',
                      dest='numthreads',
                      help='number of threads')

    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')

    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)

    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')

    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')
    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')

    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')

    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')

    msg = 'set debug output level (0-100, 0 means all, 100 means none;'
    msg += ' default is 30)'
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)

    msg = 'run web2py in interactive shell or IPython (if installed) with'
    msg += ' specified appname'
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)

    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)

    msg = 'auto import model files; default is False; should be used'
    msg += ' with --shell option'
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)

    msg = 'run PYTHON_FILE in web2py environment;'
    msg += ' should be used with --shell option'
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)

    msg = 'run doctests in web2py environment; ' +\
        'TEST_PATH like a/c/f (c,f optional)'
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)

    parser.add_option('-W',
                      '--winservice',
                      dest='winservice',
                      default='',
                      help='-W install|start|stop as Windows service')

    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)

    parser.add_option('-N',
                      '--no-cron',
                      action='store_true',
                      dest='nocron',
                      default=False,
                      help='do not start cron automatically')

    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')

    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_filename',
                      default=None,
                      help='profiler filename')

    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')

    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')

    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default='',
                      help='should be followed by a list of arguments to be passed to script, to be used with -S, -A must be the last option')

    if '-A' in sys.argv: k = sys.argv.index('-A')
    elif '--args' in sys.argv: k = sys.argv.index('--args')
    else: k=len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k+1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args

    if options.quiet:
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logging.getLogger().setLevel(logging.CRITICAL + 1)
    else:
        logging.getLogger().setLevel(options.debuglevel)

    if options.config[-3:] == '.py':
        options.config = options.config[:-3]

    if not os.path.exists('applications'):
        os.mkdir('applications')

    if not os.path.exists('deposit'):
        os.mkdir('deposit')

    if not os.path.exists('site-packages'):
        os.mkdir('site-packages')

    sys.path.append(os.path.join(os.getcwd(),'site-packages'))

    # If we have the applications package or if we should upgrade
    if not os.path.exists('applications/__init__.py'):
        fp = open('applications/__init__.py', 'w')
        fp.write('')
        fp.close()

    if not os.path.exists('welcome.w2p') or os.path.exists('NEWINSTALL'):
        w2p_pack('welcome.w2p','applications/welcome')
        os.unlink('NEWINSTALL')

    return (options, args)
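
Note: the --quiet branch above silences web2py by pointing sys.stdout at an in-memory cStringIO buffer, so anything printed is captured instead of reaching the terminal. A minimal sketch of that pattern (restoring stdout afterwards is an extra precaution, not something the snippet above does):

import sys
import cStringIO

capture = cStringIO.StringIO()
old_stdout = sys.stdout
sys.stdout = capture            # everything printed now lands in the buffer
try:
    print('this line is captured')
finally:
    sys.stdout = old_stdout     # always restore the real stdout

print('captured: %r' % capture.getvalue())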

Example 88

Project: django-nonrel Source File: trans_real.py
Function: templatize
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
            TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
    out = StringIO()
    intrans = False
    inplural = False
    singular = []
    plural = []
    incomment = False
    comment = []
    for t in Lexer(src, origin).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = u''.join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(u' # %s' % line)
                    else:
                        out.write(u' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        out.write(' gettext(%r) ' % ''.join(singular))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
            elif t.token_type == TOKEN_VAR:
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    if g[0] == '"': g = g.strip('"')
                    elif g[0] == "'": g = g.strip("'")
                    out.write(' gettext(%r) ' % g)
                elif bmatch:
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    intrans = True
                    inplural = False
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':',1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                out.write(' # %s' % t.contents)
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
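
Note: templatize() builds its xgettext-friendly output by writing fragments to an in-memory StringIO object and calling getvalue() at the end. The same pattern works with cStringIO, with one caveat: cStringIO only accepts byte strings (and ASCII-only unicode), so code that writes arbitrary unicode, as the snippet above does, tends to use the pure-Python StringIO module instead. A small sketch of both:

import StringIO
import cStringIO

out = cStringIO.StringIO()
out.write(' gettext(%r) ' % 'Hello')
out.write('# plain byte strings are fine\n')
print(repr(out.getvalue()))

uout = StringIO.StringIO()
uout.write(u'# non-ASCII unicode (\xfc) would raise in cStringIO\n')
print(repr(uout.getvalue()))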

Example 89

Project: hortonworks-sandbox Source File: tests.py
  def test_create_table_import(self):
    """Test create table wizard"""
    RAW_FIELDS = [
      ['ta\tb', 'nada', 'sp ace'],
      ['f\too', 'bar', 'fred'],
      ['a\ta', 'bb', 'cc'] ]

    def write_file(filename, raw_fields, delim, do_gzip=False):
      lines = [ delim.join(row) for row in raw_fields ]
      data = '\n'.join(lines)
      if do_gzip:
        sio = cStringIO.StringIO()
        gzdat = gzip.GzipFile(fileobj=sio, mode='wb')
        gzdat.write(data)
        gzdat.close()
        data = sio.getvalue()
      f = self.cluster.fs.open(filename, "w")
      f.write(data)
      f.close()

    write_file('/tmp/spacé.dat'.decode('utf-8'), RAW_FIELDS, ' ')
    write_file('/tmp/tab.dat', RAW_FIELDS, '\t')
    write_file('/tmp/comma.dat', RAW_FIELDS, ',')
    write_file('/tmp/pipes.dat', RAW_FIELDS, '|')
    write_file('/tmp/comma.dat.gz', RAW_FIELDS, ',', do_gzip=True)

    # Test auto delim selection
    resp = self.client.post('/beeswax/create/import_wizard', {
      'submit_file': 'on',
      'path': '/tmp/comma.dat',
      'name': 'test_create_import',
    })
    assert_equal(resp.context['fields_list'], RAW_FIELDS)

    # Test same with gzip
    resp = self.client.post('/beeswax/create/import_wizard', {
      'submit_file': 'on',
      'path': '/tmp/comma.dat.gz',
      'name': 'test_create_import',
    })
    assert_equal(resp.context['fields_list'], RAW_FIELDS)

    # Make sure space works
    resp = self.client.post('/beeswax/create/import_wizard', {
      'submit_preview': 'on',
      'path': '/tmp/spacé.dat',
      'name': 'test_create_import',
      'delimiter_0': ' ',
      'delimiter_1': '',
      'file_type': 'text',
    })
    assert_equal(len(resp.context['fields_list'][0]), 4)

    # Make sure custom delimiters work
    resp = self.client.post('/beeswax/create/import_wizard', {
      'submit_preview': 'on',
      'path': '/tmp/pipes.dat',
      'name': 'test_create_import',
      'delimiter_0': '__other__',
      'delimiter_1': '|',
      'file_type': 'text',
    })
    assert_equal(len(resp.context['fields_list'][0]), 3)

    # Test column definition
    resp = self.client.post('/beeswax/create/import_wizard', {
      'submit_delim': 'on',
      'path': '/tmp/comma.dat.gz',
      'name': 'test_create_import',
      'delimiter_0': ',',
      'delimiter_1': '',
      'file_type': 'gzip',
    })
    # Should have 3 columns available
    assert_equal(len(resp.context['column_formset'].forms), 3)

    # Test table creation and data loading
    resp = self.client.post('/beeswax/create/import_wizard', {
      'submit_create': 'on',
      'path': '/tmp/comma.dat.gz',
      'name': 'test_create_import',
      'delimiter_0': ',',
      'delimiter_1': '',
      'file_type': 'gzip',
      'do_import': 'True',
      'cols-0-_exists': 'True',
      'cols-0-column_name': 'col_a',
      'cols-0-column_type': 'string',
      'cols-1-_exists': 'True',
      'cols-1-column_name': 'col_b',
      'cols-1-column_type': 'string',
      'cols-2-_exists': 'True',
      'cols-2-column_name': 'col_c',
      'cols-2-column_type': 'string',
      'cols-next_form_id': '3',
    }, follow=True)

    resp = wait_for_query_to_finish(self.client, resp, max=180.0)

    # Check data is in the table (by describing it)
    resp = self.client.get('/beeswax/table/test_create_import')
    sd = resp.context['table'].sd
    assert_equal(len(sd.cols), 3)
    assert_equal([ col.name for col in sd.cols ], [ 'col_a', 'col_b', 'col_c' ])
    assert_true("<td>nada</td>" in resp.content)
    assert_true("<td>sp ace</td>" in resp.content)

Example 90

Project: floof Source File: art.py
Function: handle_post
    @view_config(
        request_method='POST')
    def handle_post(self):
        request = self.request
        form = self.form

        if not form.validate():
            return self.respond_form_error()

        # Grab the file
        storage = request.storage
        uploaded_file = request.POST.get('file')

        # TODO can this be part of the form validation?
        try:
            fileobj = uploaded_file.file
        except AttributeError:
            self.request.session.flash(
                u'Please select a file to upload!',
                level=u'error')
            return self.respond_general_error()

        # Figure out mimetype (and if we even support it)
        mimetype = magic.Magic(mime=True).from_buffer(fileobj.read(1024)) \
            .decode('ascii')
        if mimetype not in (u'image/png', u'image/gif', u'image/jpeg'):
            # XXX this seems suboptimal, but...
            form.file.errors.append("Only PNG, GIF, and JPEG are supported at the moment.")
            return self.respond_form_invalid()

        # Hash the thing
        hasher = hashlib.sha256()
        file_size = 0
        fileobj.seek(0)
        while True:
            buffer = fileobj.read(HASH_BUFFER_SIZE)
            if not buffer:
                break

            file_size += len(buffer)
            hasher.update(buffer)
        hash = hasher.hexdigest().decode('ascii')

        # Assert that the thing is unique
        existing_artwork = model.session.query(model.Artwork) \
            .filter_by(hash = hash) \
            .limit(1) \
            .all()
        if existing_artwork:
            self.request.session.flash(
                u'This artwork has already been uploaded.',
                level=u'warning',
                icon=u'image-import')
            return self.respond_redirect(
                request.route_url('art.view', artwork=existing_artwork[0]))

        ### By now, all error-checking should be done.

        # OK, store the file.  Reset the file object first!
        fileobj.seek(0)
        storage.put(u'artwork', hash, fileobj)

        # Open the image, determine its size, and generate a thumbnail
        fileobj.seek(0)
        image = Image.open(fileobj)
        width, height = image.size

        # Thumbnailin'
        # NOTE: this logic is replicated in the upload JS; please keep in sync
        thumbnail_size = int(request.registry.settings['thumbnail_size'])
        # To avoid super-skinny thumbnails, don't let the aspect ratio go
        # beyond 2
        height = min(height, width * MAX_ASPECT_RATIO)
        width = min(width, height * MAX_ASPECT_RATIO)
        # crop() takes left, top, right, bottom
        cropped_image = image.crop((0, 0, width, height))
        # And resize...  if necessary
        if width > thumbnail_size or height > thumbnail_size:
            if width > height:
                new_size = (thumbnail_size, height * thumbnail_size // width)
            else:
                new_size = (width * thumbnail_size // height, thumbnail_size)

            thumbnail_image = cropped_image.resize(
                new_size, Image.ANTIALIAS)

        else:
            thumbnail_image = cropped_image

        # Dump the thumbnail in a buffer and save it, too
        buf = StringIO()
        if mimetype == u'image/png':
            thumbnail_format = 'PNG'
        elif mimetype == u'image/gif':
            thumbnail_format = 'GIF'
        elif mimetype == u'image/jpeg':
            thumbnail_format = 'JPEG'
        thumbnail_image.save(buf, thumbnail_format)
        buf.seek(0)
        storage.put(u'thumbnail', hash, buf)

        # Deal with user-supplied metadata
        # nb: it's perfectly valid to have no title or remark
        title = form.title.data.strip()
        remark = form.remark.data.strip()

        # Stuff it all in the db
        resource = model.Resource(type=u'artwork')
        discussion = model.Discussion(resource=resource)
        general_data = dict(
            title = title,
            hash = hash,
            uploader = request.user,
            original_filename = uploaded_file.filename,
            mime_type = mimetype,
            file_size = file_size,
            resource = resource,
            remark = remark,
        )
        artwork = model.MediaImage(
            height = height,
            width = width,
            number_of_colors = get_number_of_colors(image),
            **general_data
        )

        # Associate the uploader as an artist
        artwork.user_artwork.append(
            model.UserArtwork(
                user_id = request.user.id,
            )
        )

        # Attach tags and albums
        for tag in form.tags.data:
            artwork.tags.append(tag)

        for album in form.albums.data:
            artwork.albums.append(album)


        model.session.add_all([artwork, discussion, resource])
        model.session.flush()  # for primary keys

        self.request.session.flash(
            u'Uploaded!',
            level=u'success',
            icon=u'image--plus')

        # Success
        return self.respond_redirect(
            request.route_url('art.view', artwork=artwork))
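
Note: the thumbnail above is never written to disk; PIL's Image.save() accepts any file-like object, so the code dumps the encoded image into a StringIO buffer, rewinds it with seek(0), and hands the buffer straight to the storage backend. A minimal sketch of that step (assumes PIL/Pillow is installed; the red 64x64 image is just a stand-in for the real thumbnail):

from cStringIO import StringIO
from PIL import Image

image = Image.new('RGB', (64, 64), (255, 0, 0))   # stand-in thumbnail
buf = StringIO()
image.save(buf, 'PNG')      # encode into the in-memory buffer
buf.seek(0)                 # rewind before passing the buffer on
print('thumbnail is %d bytes' % len(buf.read()))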

Example 91

Project: report-ng Source File: gui.py
        def __init__(self, application=None, parent=None, *args,
                     **kwargs):  #style=wx.DEFAULT_FRAME_STYLE^wx.RESIZE_BORDER

            self.children = []
            #self.report = None
            self.report = Report()
            self.application = application
            self.scan = None
            self.save_into_directory = ''
            wx.Frame.__init__(self, parent, title=self.application.title + ' ' + self.application.version, *args,
                              **kwargs)  #style=style
            self.Bind(wx.EVT_CLOSE, lambda x: self.Destroy())

            myStream = cStringIO.StringIO(base64.b64decode(icon))
            myImage = wx.ImageFromStream(myStream)
            myBitmap = wx.BitmapFromImage(myImage)
            self.icon = wx.EmptyIcon()
            self.icon.CopyFromBitmap(myBitmap)
            self.SetIcon(self.icon)

            # Menu arrangement
            menu = wx.MenuBar()
            class Index(object):
                def __init__(self, current):
                    self.__current = current - 1
                @property
                def current(self):
                    return self.__current
                @current.setter
                def current(self, x):
                    self.__current = x
                def next(self):
                    self.__current += 1
                    return self.__current
            index = Index(100)
            menu_file = wx.Menu()
            menu_file.Append(index.next(), 'Open Report &Template...')
            self.Bind(wx.EVT_MENU, self.Open_Template, id=index.current)
            self.menu_file_open_c = menu_file.Append(index.next(), 'Open &Content...')
            self.menu_file_open_c.Enable(False)
            self.Bind(wx.EVT_MENU, self.Open_Content, id=index.current)
            menu_file.Append(index.next(), 'Open &Scan...')
            self.Bind(wx.EVT_MENU, self.Open_Scan, id=index.current)
            self.menu_file_open_k = menu_file.Append(index.next(), 'Open &Knowledge Base...')
            self.menu_file_open_k.Enable(False)
            self.Bind(wx.EVT_MENU, self.Open_Knowledge_Base, id=index.current)
            #menu_file.AppendSeparator()
            #self.menu_file_generate_c = menu_file.Append(index.next(), '&Generate Content')
            #self.menu_file_generate_c.Enable(False)
            #self.Bind(wx.EVT_MENU, self.Generate_Content, id=index.current)
            #self.menu_file_generate_k = menu_file.Append(index.next(), 'G&enerate Knowledge Base')
            #self.menu_file_generate_k.Enable(False)
            #self.Bind(wx.EVT_MENU, self.Generate_Knowledge_Base, id=index.current)
            #self.menu_file_generate_r = menu_file.Append(index.next(), 'Ge&nerate Report')
            #self.menu_file_generate_r.Enable(False)
            #self.Bind(wx.EVT_MENU, self.Generate_Report, id=index.current)
            menu_file.AppendSeparator()
            self.menu_file_save_t = menu_file.Append(index.next(), '&Save Template As...')
            self.menu_file_save_t.Enable(False)
            self.Bind(wx.EVT_MENU, self.Save_Template_As, id=index.current)
            self.menu_file_save_c = menu_file.Append(index.next(), 'Sav&e Content As...')
            self.menu_file_save_c.Enable(False)
            self.Bind(wx.EVT_MENU, self.Save_Content_As, id=index.current)
            #self.menu_file_save_k = menu_file.Append(index.next(), 'S&ave Knowledge Base As...')
            #self.menu_file_save_k.Enable(False)
            #self.Bind(wx.EVT_MENU, self.Save_Knowledge_Base_As, id=index.current)
            self.menu_file_save_s = menu_file.Append(index.next(), 'Sa&ve Scan As...')
            self.menu_file_save_s.Enable(False)
            self.Bind(wx.EVT_MENU, self.Save_Scan_As, id=index.current)
            self.menu_file_save_r = menu_file.Append(index.next(), 'Save &Report As...')
            self.menu_file_save_r.Enable(False)
            self.Bind(wx.EVT_MENU, self.Save_Report_As, id=index.current)
            menu_file.AppendSeparator()
            menu_file.Append(wx.ID_EXIT, 'E&xit\tCtrl+Q', 'Exit application')
            self.Bind(wx.EVT_MENU, self.Exit, id=wx.ID_EXIT)
            menu.Append(menu_file, '&File')
            menu_view = wx.Menu()
            self.menu_view_c = menu_view.Append(index.next(), 'C&lean template', kind=wx.ITEM_CHECK)
            self.Bind(wx.EVT_MENU, self.Clean_template, id=index.current)
            self.menu_view_c.Check(True)
            menu_view.AppendSeparator()
            self.menu_view_y = menu_view.Append(index.next(), '&yaml', kind=wx.ITEM_RADIO)
            self.Bind(wx.EVT_MENU, self.Use_yaml, id=index.current)
            self.menu_view_j = menu_view.Append(index.next(), '&json', kind=wx.ITEM_RADIO)
            self.Bind(wx.EVT_MENU, self.Use_json, id=index.current)
            self.menu_view_y.Check(True)
            menu.Append(menu_view, '&View')
            #menu_view.AppendSeparator()
            #self.menu_view_s = menu_view.Append(index.next(), '&Status Preview', kind=wx.ITEM_CHECK)
            #self.Bind(wx.EVT_MENU, self.Status_Preview, id=index.current)
            #self.menu_view_s.Check(False)
            menu_view.AppendSeparator()
            self.menu_view_v = menu_view.Append(index.next(), '&VulnParam highlighting', kind=wx.ITEM_CHECK)
            self.Bind(wx.EVT_MENU, self.VulnParam_highlighting, id=index.current)
            self.menu_view_v.Check(True)
            self.menu_view_i = menu_view.Append(index.next(), 'V&iewState truncation', 'Warning! Application performance will noticeably decrease!', kind=wx.ITEM_CHECK)
            self.Bind(wx.EVT_MENU, self.Viewstate_truncation, id=index.current)
            self.menu_view_i.Check(True)
            self.menu_view_r = menu_view.Append(index.next(), 'Include &requests and responses', 'Warning! Have a small scan or be very patient!', kind=wx.ITEM_CHECK)
            self.menu_view_r.Check(False)
            menu_view.AppendSeparator()
            self.menu_view_t = menu_view.Append(index.next(), 'Always on &top', kind=wx.ITEM_CHECK)
            self.Bind(wx.EVT_MENU, self.Always_on_top, id=index.current)
            self.menu_view_t.Check(True)
            menu_tools = wx.Menu()
            self.menu_tools_template_structure_preview = menu_tools.Append(index.next(), 'Te&mplate structure preview')
            self.menu_tools_template_structure_preview.Enable(False)
            self.Bind(wx.EVT_MENU, self.Template_Structure_Preview, id=index.current)
            self.menu_tools_merge_scan_into_content = menu_tools.Append(index.next(), 'Mer&ge Scan into Content')
            self.menu_tools_merge_scan_into_content.Enable(False)
            self.Bind(wx.EVT_MENU, self.Merge_Scan_Into_Content, id=index.current)
            self.menu_tools_generate_few_passwords = menu_tools.Append(index.next(), 'Generate &few passwords')
            self.Bind(wx.EVT_MENU, self.Generate_few_passwords, id=index.current)
            menu.Append(menu_tools, '&Tools')
            menu_help = wx.Menu()
            menu_help.Append(index.next(), '&Usage')
            self.Bind(wx.EVT_MENU, self.Usage, id=index.current)
            menu_help.Append(index.next(), '&Changelog')
            self.Bind(wx.EVT_MENU, self.Changelog, id=index.current)
            menu_help.AppendSeparator()
            menu_help.Append(wx.ID_ABOUT, '&About')
            self.Bind(wx.EVT_MENU, self.About, id=wx.ID_ABOUT)
            menu.Append(menu_help, '&Help')
            self.SetMenuBar(menu)

            # Frame layout arrangement
            class FileDropTarget(wx.FileDropTarget):
                def __init__(self, target, handler):
                    wx.FileDropTarget.__init__(self)
                    self.target = target
                    self.handler = handler
                def OnDropFiles(self, x, y, filenames):
                    self.handler(filenames)
            panel = wx.Panel(self)
            vbox = wx.BoxSizer(wx.VERTICAL)
            fgs = wx.FlexGridSizer(5, 2, 9, 25)

            # Template
            self.ctrl_st_t = wx.StaticText(panel, label='Template:')
            self.ctrl_st_t.Enable(False)
            self.ctrl_tc_t = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.TE_READONLY, size=(200, 3 * 17,))
            def ctrl_tc_t_OnFocus(e):
                self.ctrl_tc_t.ShowNativeCaret(False)
                # for unknown reason this refuse to work in wxpython 3.0
                e.Skip()
            def ctrl_tc_t_OnDoubleclick(e):
                if self.ctrl_st_t.IsEnabled():
                    self.application.TextWindow(self, title='Template Preview', content=self.ctrl_tc_t.GetValue())
                e.Skip()
            self.ctrl_tc_t.Bind(wx.EVT_SET_FOCUS, ctrl_tc_t_OnFocus)
            self.ctrl_tc_t.Bind(wx.EVT_LEFT_DCLICK, ctrl_tc_t_OnDoubleclick)
            def ctrl_tc_t_OnMouseOver(e):
                self.status('You might use drag & drop', hint=True)
                e.Skip()
            #def ctrl_tc_t_OnMouseLeave(e):
            #    self.status('')
            #    e.Skip()
            self.ctrl_tc_t.Bind(wx.EVT_ENTER_WINDOW, ctrl_tc_t_OnMouseOver)
            #self.ctrl_tc_t.Bind(wx.EVT_LEAVE_WINDOW, ctrl_tc_t_OnMouseLeave)
            def ctrl_tc_t_OnDropFiles(filenames):
                if len(filenames) != 1:
                    wx.MessageBox('Single file is expected!', 'Error', wx.OK | wx.ICON_ERROR)
                    return
                self._open_template(filenames[0])
            ctrl_tc_t_dt = FileDropTarget(self.ctrl_tc_t, ctrl_tc_t_OnDropFiles)
            self.ctrl_tc_t.SetDropTarget(ctrl_tc_t_dt)
            fgs.AddMany([(self.ctrl_st_t, 1, wx.EXPAND), (self.ctrl_tc_t, 1, wx.EXPAND)])

            # Content + Edit
            self.ctrl_st_c = wx.StaticText(panel, label='Content:')
            self.ctrl_st_c.Enable(False)
            self.ctrl_tc_c = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.TE_READONLY, size=(200, 3 * 17,))
            self.color_tc_bg_e = self.ctrl_tc_c.GetBackgroundColour()
            self.ctrl_tc_c.Enable(False)
            self.color_tc_bg_d = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE)
            self.ctrl_tc_c.SetBackgroundColour(self.color_tc_bg_d)
            def ctrl_tc_c_OnFocus(e):
                self.ctrl_tc_c.ShowNativeCaret(False)
                e.Skip()
            def ctrl_tc_c_OnDoubleclick(e):
                if self.ctrl_st_c.IsEnabled():
                    self.application.TextWindow(self, title='Content Preview', content=self.ctrl_tc_c.GetValue())
                e.Skip()
            self.ctrl_tc_c.Bind(wx.EVT_SET_FOCUS, ctrl_tc_c_OnFocus)
            self.ctrl_tc_c.Bind(wx.EVT_LEFT_DCLICK, ctrl_tc_c_OnDoubleclick)
            def ctrl_tc_c_b_onClick(e):
                self.application.YamledWindowWrapper(self, title='Content', content=self.report._content)
                pass
            def ctrl_tc_c_onResize(e):
                size = self.ctrl_tc_c.GetSize()
                self.ctrl_tc_c_b.SetPosition((size[0]-36-1, -1))
            self.ctrl_tc_c_b = wx.Button(self.ctrl_tc_c, index.next(), 'E', size=(16, 16))
            self.ctrl_tc_c_b.Bind(wx.EVT_BUTTON, ctrl_tc_c_b_onClick)
            self.ctrl_tc_c.Bind(wx.EVT_SIZE, ctrl_tc_c_onResize)
            self.ctrl_tc_c_b.Hide()
            def ctrl_tc_c_b_OnMouseOver(e):
                self.status('Send Content to Yaml Editor', hint=True)
                e.Skip()
            #def ctrl_tc_c_b_OnMouseLeave(e):
            #    self.status('')
            #    e.Skip()
            self.ctrl_tc_c_b.Bind(wx.EVT_ENTER_WINDOW, ctrl_tc_c_b_OnMouseOver)
            #self.ctrl_tc_c_b.Bind(wx.EVT_LEAVE_WINDOW, ctrl_tc_c_b_OnMouseLeave)
            def ctrl_tc_c_OnMouseOver(e):
                if self.ctrl_st_c.IsEnabled(): # Yamled
                    self.ctrl_tc_c_b.Show()
                self.status('You might use drag & drop', hint=True)
                e.Skip()
            #def ctrl_tc_c_OnMouseLeave(e):
            #    self.status('')
            #    e.Skip()
            self.ctrl_tc_c.Bind(wx.EVT_ENTER_WINDOW, ctrl_tc_c_OnMouseOver)
            #self.ctrl_tc_c.Bind(wx.EVT_LEAVE_WINDOW, ctrl_tc_c_OnMouseLeave)
            def ctrl_tc_c_OnDropFiles(filenames):
                if len(filenames) != 1:
                    wx.MessageBox('Single file is expected!', 'Error', wx.OK | wx.ICON_ERROR)
                    return
                self._open_content(filenames[0])
                if self.ctrl_st_c.IsEnabled(): # Yamled
                    self.ctrl_tc_c_b.Show()
            ctrl_tc_c_dt = FileDropTarget(self.ctrl_tc_c, ctrl_tc_c_OnDropFiles)
            self.ctrl_tc_c.SetDropTarget(ctrl_tc_c_dt)
            fgs.AddMany([(self.ctrl_st_c, 1, wx.EXPAND), (self.ctrl_tc_c, 1, wx.EXPAND)])

            # Scan + Edit button
            self.ctrl_st_s = wx.StaticText(panel, label='Scan:')
            self.ctrl_st_s.Enable(False)
            self.ctrl_tc_s = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.TE_READONLY, size=(200, 3 * 17,))
            def ctrl_tc_s_OnFocus(e):
                self.ctrl_tc_s.ShowNativeCaret(False)
                e.Skip()
            def ctrl_tc_s_OnDoubleclick(e):
                if self.ctrl_st_s.IsEnabled():
                    self.application.TextWindow(self, title='Scan Preview', content=self.ctrl_tc_s.GetValue())
                e.Skip()
            self.ctrl_tc_s.Bind(wx.EVT_SET_FOCUS, ctrl_tc_s_OnFocus)
            self.ctrl_tc_s.Bind(wx.EVT_LEFT_DCLICK, ctrl_tc_s_OnDoubleclick)
            def ctrl_tc_s_b_onClick(e):
                self.application.YamledWindowWrapper(self, title='Scan', content=self.scan._scan)
                pass
            def ctrl_tc_s_onResize(e):
                size = self.ctrl_tc_s.GetSize()
                self.ctrl_tc_s_b.SetPosition((size[0]-36-1, -1))
            self.ctrl_tc_s_b = wx.Button(self.ctrl_tc_s, index.next(), 'E', size=(16, 16))
            self.ctrl_tc_s_b.Bind(wx.EVT_BUTTON, ctrl_tc_s_b_onClick)
            self.ctrl_tc_s.Bind(wx.EVT_SIZE, ctrl_tc_s_onResize)
            self.ctrl_tc_s_b.Hide()
            def ctrl_tc_s_b_OnMouseOver(e):
                self.status('Send Scan to Yaml Editor', hint=True)
                e.Skip()
            #def ctrl_tc_s_b_OnMouseLeave(e):
            #    self.status('')
            #    e.Skip()
            self.ctrl_tc_s_b.Bind(wx.EVT_ENTER_WINDOW, ctrl_tc_s_b_OnMouseOver)
            #self.ctrl_tc_s_b.Bind(wx.EVT_LEAVE_WINDOW, ctrl_tc_s_b_OnMouseLeave)
            def ctrl_tc_s_OnMouseOver(e):
                if self.ctrl_st_s.IsEnabled(): # Yamled
                    self.ctrl_tc_s_b.Show()
                self.status('You might use drag & drop', hint=True)
                e.Skip()
            #def ctrl_tc_s_OnMouseLeave(e):
            #    #self.ctrl_tc_s_b.Hide()
            #    self.status('')
            #    e.Skip()
            self.ctrl_tc_s.Bind(wx.EVT_ENTER_WINDOW, ctrl_tc_s_OnMouseOver)
            #self.ctrl_tc_s.Bind(wx.EVT_LEAVE_WINDOW, ctrl_tc_s_OnMouseLeave)
            def ctrl_tc_s_OnDropFiles(filenames):
                if len(filenames) != 1:
                    wx.MessageBox('Single file is expected!', 'Error', wx.OK | wx.ICON_ERROR)
                    return
                self._open_scan(filenames[0])
                if self.ctrl_st_s.IsEnabled(): # Yamled
                    self.ctrl_tc_s_b.Show()
            ctrl_tc_s_dt = FileDropTarget(self.ctrl_tc_s, ctrl_tc_s_OnDropFiles)
            self.ctrl_tc_s.SetDropTarget(ctrl_tc_s_dt)
            fgs.AddMany([(self.ctrl_st_s, 1, wx.EXPAND), (self.ctrl_tc_s, 1, wx.EXPAND)])

            # Knowledge Base
            self.ctrl_st_k = wx.StaticText(panel, label='Knowledge Base:')
            self.ctrl_st_k.Enable(False)
            self.ctrl_tc_k = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.TE_READONLY, size=(200, 3 * 17,))
            self.ctrl_tc_k.Enable(False)
            self.ctrl_tc_k.SetBackgroundColour(self.color_tc_bg_d)
            def ctrl_tc_k_OnFocus(e):
                self.ctrl_tc_k.ShowNativeCaret(False)
                e.Skip()
            def ctrl_tc_k_OnDoubleclick(e):
                if self.ctrl_st_k.IsEnabled():
                    self.application.TextWindow(self, title='KB Preview', content=self.ctrl_tc_k.GetValue())
                e.Skip()
            self.ctrl_tc_k.Bind(wx.EVT_SET_FOCUS, ctrl_tc_k_OnFocus)
            self.ctrl_tc_k.Bind(wx.EVT_LEFT_DCLICK, ctrl_tc_k_OnDoubleclick)
            def ctrl_tc_k_OnMouseOver(e):
                self.status('You might use drag & drop', hint=True)
                e.Skip()
            def ctrl_tc_k_OnMouseLeave(e):
                self.status('')
                e.Skip()
            self.ctrl_tc_k.Bind(wx.EVT_ENTER_WINDOW, ctrl_tc_k_OnMouseOver)
            self.ctrl_tc_k.Bind(wx.EVT_LEAVE_WINDOW, ctrl_tc_k_OnMouseLeave)
            def ctrl_tc_k_OnDropFiles(filenames):
                if len(filenames) != 1:
                    wx.MessageBox('Single file is expected!', 'Error', wx.OK | wx.ICON_ERROR)
                    return
                self._open_kb(filenames[0])
            ctrl_tc_k_dt = FileDropTarget(self.ctrl_tc_k, ctrl_tc_k_OnDropFiles)
            self.ctrl_tc_k.SetDropTarget(ctrl_tc_k_dt)
            fgs.AddMany([(self.ctrl_st_k, 1, wx.EXPAND), (self.ctrl_tc_k, 1, wx.EXPAND)])
            def panel_OnMouseOver(e):
                self.status('')
                self.ctrl_tc_c_b.Hide()
                self.ctrl_tc_s_b.Hide()
                e.Skip()
            panel.Bind(wx.EVT_ENTER_WINDOW, panel_OnMouseOver)

            # Report
            #self.ctrl_st_r = wx.StaticText(panel, label='Report:')
            #self.ctrl_st_r.Enable (False)
            #self.ctrl_tc_r = wx.TextCtrl(panel, style=wx.TE_MULTILINE|wx.TE_READONLY, size=(200, 3*17,))
            #self.ctrl_tc_r.Enable(False)
            #self.ctrl_tc_r.SetBackgroundColour (self.color_tc_bg_d)
            #def ctrl_tc_r_OnFocus (e):
            #    self.ctrl_tc_r.ShowNativeCaret (False)
            #    e.Skip()
            #self.ctrl_tc_r.Bind (wx.EVT_SET_FOCUS, ctrl_tc_r_OnFocus)
            #fgs.AddMany ([(self.ctrl_st_r, 1, wx.EXPAND), (self.ctrl_tc_r, 1, wx.EXPAND)])
            fgs.AddGrowableRow(0, 1)
            fgs.AddGrowableRow(1, 1)
            fgs.AddGrowableRow(2, 1)
            fgs.AddGrowableRow(3, 1)
            #fgs.AddGrowableRow (4, 1)
            fgs.AddGrowableCol(1, 1)
            vbox.Add(fgs, proportion=1, flag=wx.ALL | wx.EXPAND, border=10)
            #data = wx.TextCtrl(panel)
            #hbox1 = wx.BoxSizer (wx.HORIZONTAL)
            #hbox1.Add(data, proportion=1, flag=wx.LEFT|wx.RIGHT|wx.BOTTOM|wx.EXPAND, border=10)
            #vbox.Add (hbox1, 0, wx.ALL|wx.EXPAND, 0)
            panel.SetSizer(vbox)
            vbox.Fit(self)
            #self.SetMinSize(self.GetSize())
            self.statusbar = self.CreateStatusBar()
            self._statusbar_h = self.statusbar.GetSize()[1]
            #self.statusbar.Hide()
            self.status('Started')
            self.SetMinSize((self.GetSize()[0],self.GetSize()[1]+self._statusbar_h,))
            #panel = wx.Panel (self)
            #vbox = wx.BoxSizer (wx.VERTICAL)
            #hbox1 = wx.BoxSizer (wx.HORIZONTAL)
            ##st1 = wx.StaticText (panel, wx.ID_ANY, label='Not yet ready')
            #st1 = wx.StaticText (panel, wx.ID_ANY, label='Template:', size=(100, -1,))
            #hbox1.Add (st1, 0, wx.ALL, 5)
            #tc1 = wx.TextCtrl (panel, wx.ID_ANY, size=(300, -1,))
            #hbox1.Add (tc1, 1, wx.ALL|wx.EXPAND, 0)
            #vbox.Add (hbox1, 0, wx.ALL|wx.EXPAND, 0)
            #hbox2 = wx.BoxSizer (wx.HORIZONTAL)
            #st2 = wx.StaticText (panel, wx.ID_ANY, label='Scan:', size=(100, -1,))
            #hbox2.Add (st2, 0, wx.ALL, 5)
            #tc2 = wx.TextCtrl (panel, wx.ID_ANY, size=(300, -1,))
            #hbox2.Add (tc2, 1, wx.ALL|wx.EXPAND, 0)
            #vbox.Add (hbox2, 0, wx.ALL|wx.EXPAND, 0)
            ##vbox.Add (hbox1, 0, wx.CENTER, 5)
            #panel.SetSizer (vbox)
            #vbox.Fit (self)
            #self.Center()
            self.alignVMiddleRight()
            self.Show()
            self.Always_on_top(None)
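
Note: the window icon above is stored as a base64 string inside the source file; cStringIO.StringIO(base64.b64decode(icon)) turns the decoded bytes into a stream that wx.ImageFromStream can read as if it were an open file. A stripped-down sketch of the wrapping step (the payload here is arbitrary bytes, not a real icon):

import base64
import cStringIO

encoded = base64.b64encode('\x89PNG\r\n\x1a\n...fake image bytes...')

stream = cStringIO.StringIO(base64.b64decode(encoded))
# `stream` now behaves like an open binary file and can be handed to any API
# that expects one (wx.ImageFromStream in the example above).
print(repr(stream.read(8)))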

Example 92

Project: pol Source File: psafe3.py
def load(f, password):
    l.debug('Reading header ...')
    tag = f.read(4)
    if tag != TAG:
        raise PSafe3FormatError("Tag is wrong.  Is this a PSafe3 file?")
    salt = f.read(32)
    niter = struct.unpack("<I", f.read(4))[0]

    l.debug('Stretching password ...')
    P2 = stretch_key(password, salt, niter)
    HP2 = hashlib.sha256(P2).digest()
    if HP2 != f.read(32):
        raise BadPasswordError

    l.debug('Reading header ...')
    m = twofish.Twofish(P2)
    K = m.decrypt(f.read(16)) + m.decrypt(f.read(16))
    L = m.decrypt(f.read(16)) + m.decrypt(f.read(16))
    IV = f.read(16)

    m = twofish.Twofish(K)
    prev_ct = IV

    l.debug('Decrypting ...')
    plaintext = ''
    hmac_data = ''
    while True:
        ct = f.read(16)
        if ct == EOF:
            break
        plaintext += sxor(m.decrypt(ct), prev_ct)
        prev_ct = ct

    l.debug('Reading decrypted header ...')
    g = StringIO.StringIO(plaintext)
    in_header = True
    header = {}
    record = {}
    records = []
    had = set()
    while True:
        field = g.read(5)
        if not field:
            break
        length, t = struct.unpack("<IB", field)
        d = g.read(length)
        hmac_data += d
        if t in had:
            l.warn("Field type %s occurs twice", t)
        had.add(t)
        if in_header:
            if t == 0:
                header['version']  = struct.unpack("<H", d)[0]
            elif t == 1:
                header['uuid'] = uuid.UUID(bytes=d)
            elif t == 2:
                header['non-default-preferences'] = d
            elif t == 3:
                header['tree-display-status'] = d
            elif t == 4:
                header['last-save'] = unpack_ts(d)
            elif t == 5:
                header['last-save-who'] = d
            elif t == 6:
                header['last-save-what'] = d
            elif t == 7:
                header['last-save-by-user'] = d
            elif t == 8:
                header['last-save-on-host'] = d
            elif t == 9:
                header['database-name'] = d
            elif t == 10:
                header['database-description'] = d
            elif t == 11:
                header['database-filters'] = d
            elif t == 15:
                header['recently-used-filters'] = d
            elif t == 16:
                header['named-password-policies'] = d
            elif t == 17:
                header['empty-groups'] = d
            elif t == 255:
                in_header = False
                had = set()
            else:
                l.warn("Unknown header field: type %s; data %s",
                            t, repr(d))
        else:
            if t == 1:
                record['uuid'] = uuid.UUID(bytes=d)
            elif t == 2:
                record['group'] = d
            elif t == 3:
                record['title'] = d
            elif t == 4:
                record['username'] = d
            elif t == 5:
                record['notes'] = d
            elif t == 6:
                record['password'] = d
            elif t == 7:
                record['creation-time'] = unpack_ts(d)
            elif t == 8:
                record['password-modification-time'] = unpack_ts(d)
            elif t == 9:
                record['last-access-time'] = unpack_ts(d)
            elif t == 10:
                record['password-expiry-time'] = unpack_ts(d)
            elif t == 12:
                record['last-modification-time'] = unpack_ts(d)
            elif t == 13:
                record['url'] = d
            elif t == 14:
                record['autotype'] = d
            elif t == 15:
                record['password-history'] = d
            elif t == 16:
                record['password-policy'] = d
            elif t == 17:
                record['password-expiry-interval'] = d
            elif t == 18:
                record['run-command'] = d
            elif t == 19:
                record['double-click-action'] = d
            elif t == 20:
                record['email-address'] = d
            elif t == 21:
                record['protected-entry'] = (d != chr(0))
            elif t == 22:
                record['own-symbols-for-password'] = d
            elif t == 23:
                record['shift-double-click-action'] = d
            elif t == 24:
                record['password-policy-name'] = d
            elif t == 255:
                records.append(record)
                record = {}
                had = set()
            else:
                l.warn("Unknown record field: type %s; data %s",
                            t, repr(d))
        tl = length + 5
        if tl % 16 != 0:
            g.read(16 - (tl % 16))
    l.debug('Checking HMAC ...')
    if hmac.new(L, hmac_data, hashlib.sha256).digest() != f.read(32):
        raise IntegrityError
    return (header, records)
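
Note: after decrypting the database, load() wraps the resulting byte string in StringIO.StringIO so it can be consumed like a file: read 5 header bytes, struct.unpack the length and type, then read that many bytes of payload. The same pattern with cStringIO (the single field built here is illustrative, not real PSafe3 data):

import struct
import cStringIO

# One "field": 4-byte little-endian length + 1-byte type code + payload.
payload = 'secret'
blob = struct.pack('<IB', len(payload), 3) + payload

g = cStringIO.StringIO(blob)              # treat the byte string as a file
length, field_type = struct.unpack('<IB', g.read(5))
value = g.read(length)
print('type %d, value %r' % (field_type, value))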

Example 93

Project: LibrERP Source File: export_partner_product.py
    def export_partner_product(self, cr, uid, ids, context={}):
        report = self.browse(cr, uid, ids[0], context)

        name = context.get('name', 'Partner-Product')
        file_name = 'report_{0}_{1}.xls'.format(name, datetime.datetime.now().strftime('%Y-%m-%d'))

        book = Workbook(encoding='utf-8')
        #ws = book.add_sheet(name, cell_overwrite_ok=True)
        ws = book.add_sheet(name)

        for index, column in self.table_layout.iteritems():
            ws.col(index).width = column['width']

        ws, row_number = self.write_header(ws, report.year)

        # partner_ids = self.pool['res.partner'].search(cr, uid, [])
        # partners = self.pool['res.partner'].browse(cr, uid, partner_ids, context)
        #
        # partners = {partner.id: partner for partner in partners}

        row_number += 1

        query = """
            SELECT partner.name, product.name_template, s_order.partner_id, line.product_id, product.default_code, SUM(product_uom_qty)
            FROM sale_order_line AS line
            LEFT JOIN sale_order AS s_order ON line.order_id=s_order.id
            LEFT JOIN res_partner AS partner ON s_order.partner_id=partner.id
            LEFT JOIN product_product AS product ON line.product_id=product.id
            WHERE s_order.state IN ('confirmed', 'progress', 'done')
            AND s_order.date_order >= '{year}-01-01'
            AND s_order.date_order <= '{year}-12-31'
            AND s_order.active = 'true'
            GROUP BY s_order.partner_id, line.product_id, partner.name, product.name_template, product.default_code
            ORDER BY partner.name
        """.format(year=report.year)

        cr.execute(query)
        results = cr.dictfetchall()

        report_table = collections.OrderedDict({})

        for row in results:
            if not row['partner_id'] in report_table:
                report_table[row['partner_id']] = collections.OrderedDict({})

            report_table[row['partner_id']][row['product_id']] = row

        for month in range(1, 13):
            month_query = self.get_month_query(month, int(report.year))

            cr.execute(month_query)
            month_results = cr.dictfetchall()

            for row in month_results:
                report_table[row['partner_id']][row['product_id']][month] = row

        for partner_id, partner_data in report_table.items():
            partner_address = self.get_address(cr, uid, partner_id, 'default', context)
            partner_delivery_address = self.get_address(cr, uid, partner_id, 'delivery', context)
            for product_id, product_row in partner_data.items():
                product = self.pool['product.product'].browse(cr, uid, product_id, context)

                xls_row = {
                    0: row_number + 1,
                    1: '',
                    2: '',
                    3: '',
                    4: '',
                    5: product_row['name'],  # Partner
                    6: partner_address.country_id and partner_address.country_id.name or '',
                    7: partner_delivery_address.country_id and partner_delivery_address.country_id.name or '',
                    8: product.product_tmpl_id.categ_id and product.product_tmpl_id.categ_id.name or '',  # Product
                    # 9: '',  # subcategory
                    10: product_row['default_code'],
                }

                for month, column in enumerate(range(11, 83, 6), 1):
                    if product_row.get(month) and product_row[month]['qty']:
                        date_start, date_end = self.get_month_start_end(month, int(report.year))
                        sale_order_line_ids = self.pool['sale.order.line'].search(cr, uid, [
                            ('order_id.partner_id', '=', partner_id),
                            ('product_id', '=', product_id),
                            ('order_id.date_order', '>=', date_start),
                            ('order_id.date_order', '<=', date_end)
                        ])

                        if sale_order_line_ids:
                            sale_order_line = self.pool['sale.order.line'].browse(cr, uid, sale_order_line_ids[0], context)

                            # This is not a complete check. We should also control if there is active pricelist in selected period
                            if sale_order_line.order_id.pricelist_id.version_id:
                                list_price = sale_order_line.order_id.pricelist_id.price_get(
                                    prod_id=product_id,
                                    qty=1,
                                    partner=partner_id,
                                    context={
                                        'uom': sale_order_line.product_uom.id,
                                        'date': date_start
                                    }
                                )[sale_order_line.order_id.pricelist_id.id]
                            else:
                                # print sale_order_line.order_id.name, product_row['name'], sale_order_line.order_id.pricelist_id.name
                                list_price = product.product_tmpl_id.list_price

                            user = self.pool['res.users'].browse(cr, uid, uid, context)

                            if sale_order_line.order_id.pricelist_id.currency_id.id == user.company_id.currency_id.id:
                                sale_price = sale_order_line.price_unit
                                purchase_price = sale_order_line.purchase_price
                            else:
                                sale_price = self.pool['res.currency'].compute(
                                    cr, uid,
                                    from_currency_id=sale_order_line.order_id.pricelist_id.currency_id.id,
                                    to_currency_id=user.company_id.currency_id.id,
                                    from_amount=sale_order_line.price_unit
                                )
                                purchase_price = self.pool['res.currency'].compute(
                                    cr, uid,
                                    from_currency_id=sale_order_line.order_id.pricelist_id.currency_id.id,
                                    to_currency_id=user.company_id.currency_id.id,
                                    from_amount=sale_order_line.purchase_price
                                )
                                list_price = self.pool['res.currency'].compute(
                                    cr, uid,
                                    from_currency_id=sale_order_line.order_id.pricelist_id.currency_id.id,
                                    to_currency_id=user.company_id.currency_id.id,
                                    from_amount=list_price
                                )

                            xls_row.update({
                                column: product_row.get(month) and product_row[month]['qty'] or '',
                                column + 1: list_price or '',
                                column + 2: sale_price or '',
                                column + 3: purchase_price or '',  # purchase_price
                                column + 4: '',  # Provvigioni
                                column + 5: sale_order_line.order_id.payment_term and sale_order_line.order_id.payment_term.name or ''  # payment
                            })
                        else:
                            xls_row.update({
                                column: 'Error'
                            })

                self.write_row(ws, row_number, xls_row, 'row')

                row_number += 1

        """PARSING DATA AS STRING """
        file_data = StringIO()
        book.save(file_data)
        """STRING ENCODE OF DATA IN WKSHEET"""
        out = file_data.getvalue()
        out = out.encode("base64")
        return self.write(cr, uid, ids, {'state': 'get', 'data': out, 'name': file_name}, context=context)
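
Note: instead of saving the spreadsheet to a temporary file, the wizard above passes a StringIO buffer to Workbook.save(), then base64-encodes getvalue() so the result can be stored on the wizard record. A minimal sketch of that flow (assumes the xlwt package, which appears to provide the Workbook used above, is installed):

import cStringIO
import xlwt

book = xlwt.Workbook(encoding='utf-8')
sheet = book.add_sheet('Partner-Product')
sheet.write(0, 0, 'partner')
sheet.write(0, 1, 'qty')

buf = cStringIO.StringIO()
book.save(buf)                       # xlwt writes to any file-like object
xls_data = buf.getvalue()
print('%d bytes, base64 payload of %d bytes'
      % (len(xls_data), len(xls_data.encode('base64'))))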

Example 94

Project: anima Source File: h2a.py
def curves2ass(node, hair_name, min_pixel_width=0.5, mode='ribbon',
               export_motion=False):
    """exports the node content to ass file
    """
    sample_count = 2 if export_motion else 1
    template_vars = dict()
    geo = node.geometry()

    base_template = """
curves
{
 name %(name)s
 num_points %(curve_count)i %(sample_count)s UINT
  %(number_of_points_per_curve)s
 points %(point_count)s %(sample_count)s b85POINT
 %(point_positions)s

 radius %(radius_count)s 1 b85FLOAT
 %(radius)s
 basis "catmull-rom"
 mode "%(mode)s"
 min_pixel_width %(min_pixel_width)s
 visibility 65535
 receive_shadows on
 self_shadows on
 matrix 1 %(sample_count)s MATRIX
  %(matrix)s
 opaque on
 declare uparamcoord uniform FLOAT
 uparamcoord %(curve_count)i %(sample_count)s b85FLOAT
 %(uparamcoord)s
 declare vparamcoord uniform FLOAT
 vparamcoord %(curve_count)i %(sample_count)s b85FLOAT
 %(vparamcoord)s
 declare curve_id uniform UINT
 curve_id %(curve_count)i %(sample_count)s UINT
  %(curve_ids)s
}
"""

    number_of_curves = geo.intrinsicValue('primitivecount')
    real_point_count = geo.intrinsicValue('pointcount')

    # The root and tip points are going to be used twice for the start and end tangents
    # so there will be 2 extra points per curve
    point_count = real_point_count + number_of_curves * 2

    # write down the radius for the tip twice
    radius_count = real_point_count

    real_number_of_points_in_one_curve = real_point_count / number_of_curves
    number_of_points_in_one_curve = real_number_of_points_in_one_curve + 2
    number_of_points_per_curve = [`number_of_points_in_one_curve`] * number_of_curves

    curve_ids = ' '.join(`id_` for id_ in xrange(number_of_curves))

    radius = None

    pack = struct.pack

    # try to find the width as a point attribute to speed things up
    getting_radius_start = time.time()
    radius_attribute = geo.findPointAttrib('width')
    if radius_attribute:
        # this one works about 100 times faster than iterating over each vertex
        radius = geo.pointFloatAttribValuesAsString('width')
    else:
        # no radius in points, so iterate over each vertex
        radius_i = 0
        radius_str_buffer = []
        radius_file_str = StringIO()
        radius_file_str_write = radius_file_str.write
        radius_str_buffer_append = radius_str_buffer.append
        for prim in geo.prims():
            prim_vertices = prim.vertices()

            # radius
            radius_i += real_number_of_points_in_one_curve
            if radius_i >= 1000:
                radius_file_str_write(''.join(radius_str_buffer))
                radius_str_buffer = []
                radius_str_buffer_append = radius_str_buffer.append
                radius_i = 0

            for vertex in prim_vertices:
                radius_str_buffer_append(pack('f', vertex.attribValue('width')))

        # do flushes again before getting the values
        radius_file_str_write(''.join(radius_str_buffer))
        radius = radius_file_str.getvalue()
    getting_radius_end = time.time()
    print('Getting Radius Info          : %3.3f' %
          (getting_radius_end - getting_radius_start))

    # point positions
    encode_start = time.time()

    # for motion blur use pprime
    getting_point_positions_start = time.time()
    point_positions = geo.pointFloatAttribValuesAsString('P')

    if export_motion:
        point_prime_positions = geo.pointFloatAttribValuesAsString('pprime')
        point_positions = '%s%s' % (point_positions, point_prime_positions)

    getting_point_positions_end = time.time()
    print('Getting Point Position       : %3.3f' %
          (getting_point_positions_end - getting_point_positions_start))

    # repeat every first and last point coordinates
    # (3 value each 3 * 4 = 12 characters) of every curve
    zip_start = time.time()
    point_positions = ''.join(
        map(
            lambda x: '%s%s%s' % (x[:12], x, x[-12:]),
            map(
                ''.join,
                zip(*[iter(point_positions)] * (real_number_of_points_in_one_curve*4*3)))
        )
    )
    zip_end = time.time()
    print('Zipping Point Position       : %3.3f' % (zip_end - zip_start))

    # encoded_point_positions = base85.arnold_b85_encode_multithreaded(point_positions)
    encoded_point_positions = base85.arnold_b85_encode(point_positions)
    encode_end = time.time()
    print('Encoding Point Position      : %3.3f' % (encode_end - encode_start))

    split_start = time.time()
    splitted_point_positions = split_data(encoded_point_positions, 500)
    split_end = time.time()
    print('Splitting Point Positions    : %3.3f' % (split_end - split_start))

    # radius
    encode_start = time.time()
    encoded_radius = base85.arnold_b85_encode(radius)
    encode_end = time.time()
    print('Radius encode                : %3.3f' % (encode_end - encode_start))

    split_start = time.time()
    splitted_radius = split_data(encoded_radius, 500)
    # extend for motion blur
    # if export_motion:
    #     splitted_radius = '%(data)s%(data)s' % {'data': splitted_radius}
    split_end = time.time()
    print('Splitting Radius             : %3.3f' % (split_end - split_start))

    # uv
    getting_uv_start = time.time()
    u = geo.primFloatAttribValuesAsString('uv_u')
    v = geo.primFloatAttribValuesAsString('uv_v')
    getting_uv_end = time.time()
    print('Getting uv                   : %3.3f' %
          (getting_uv_end - getting_uv_start))

    encode_start = time.time()
    encoded_u = base85.arnold_b85_encode(u)
    encode_end = time.time()
    print('Encoding UParamcoord         : %3.3f' % (encode_end - encode_start))

    split_start = time.time()
    splitted_u = split_data(encoded_u, 500)
    if export_motion:
        splitted_u = '%(data)s%(data)s' % {'data': splitted_u}
    split_end = time.time()
    print('Splitting UParamCoord        : %3.3f' % (split_end - split_start))

    encode_start = time.time()
    encoded_v = base85.arnold_b85_encode(v)
    encode_end = time.time()
    print('Encoding VParamcoord         : %3.3f' % (encode_end - encode_start))

    split_start = time.time()
    splitted_v = split_data(encoded_v, 500)
    if export_motion:
        splitted_v = '%(data)s%(data)s' % {'data': splitted_v}
    split_end = time.time()
    print('Splitting VParamCoord        : %3.3f' % (split_end - split_start))

    print('len(encoded_point_positions) : %s' % len(encoded_point_positions))
    print('(p + 2 * c) * 5 * 3          : %s' % (point_count * 5 * 3))
    print('len(encoded_radius)          : %s' % len(encoded_radius))
    print('len(uv)                      : %s' % len(u))
    print('len(encoded_u)               : %s' % len(encoded_u))
    print('len(encoded_v)               : %s' % len(encoded_v))

    # extend for motion blur
    matrix = """1 0 0 0
  0 1 0 0
  0 0 1 0
  0 0 0 1
"""
    if export_motion:
        number_of_points_per_curve.extend(number_of_points_per_curve)
        matrix += matrix

    template_vars.update({
        'name': node.path().replace('/', '_'),
        'curve_count': number_of_curves,
        'real_point_count': real_point_count,
        'number_of_points_per_curve': ' '.join(number_of_points_per_curve),
        'point_count': point_count,
        'point_positions': splitted_point_positions,
        'radius': splitted_radius,
        'radius_count': radius_count,
        'curve_ids': curve_ids,
        'uparamcoord': splitted_u,
        'vparamcoord': splitted_v,
        'min_pixel_width': min_pixel_width,
        'mode': mode,
        'sample_count': sample_count,
        'matrix': matrix
    })

    rendered_curve_data = base_template % template_vars

    del geo

    return rendered_curve_data


def split_data(data, chunk_size):
    """Splits the given data in to evenly sized chunks

    :param str data: A string of data
    :param int chunk_size: The maximum length of each chunk
    :return: The chunks joined with newline characters
    """
    list_splitted_data = []
    for i in range(0, len(data), chunk_size):
        list_splitted_data.append(data[i:i + chunk_size])
    return '\n'.join(list_splitted_data)
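
Two cStringIO-related ideas appear in this exporter: the radius loop batches many small struct.pack() results in a Python list and only flushes them into the StringIO buffer every ~1000 values, and split_data() then breaks the encoded strings into newline-separated chunks for the .ass template. A minimal sketch of the batched-flush pattern, with made-up float values standing in for Houdini vertex widths, could look like this:

from cStringIO import StringIO
import struct

values = [0.1 * i for i in xrange(5000)]   # stand-ins for vertex widths
buf = StringIO()
pending = []
for v in values:
    pending.append(struct.pack('f', v))
    if len(pending) >= 1000:               # flush threshold, mirroring the example
        buf.write(''.join(pending))
        pending = []
buf.write(''.join(pending))                # final flush of whatever is left
packed = buf.getvalue()
print len(packed)                          # 4 bytes per float -> 20000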

Example 95

Project: Sick-Beard Source File: pyopenssl.py
    def readline(self, size=-1):
        buf = self._rbuf
        buf.seek(0, 2)  # seek end
        if buf.tell() > 0:
            # check if we already have it in our buffer
            buf.seek(0)
            bline = buf.readline(size)
            if bline.endswith('\n') or len(bline) == size:
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return bline
            del bline
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                buf.seek(0)
                buffers = [buf.read()]
                self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
                data = None
                recv = self._sock.recv
                while True:
                    try:
                        while data != "\n":
                            data = recv(1)
                            if not data:
                                break
                            buffers.append(data)
                    except OpenSSL.SSL.WantReadError:
                        continue
                    break
                return "".join(buffers)

            buf.seek(0, 2)  # seek end
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except OpenSSL.SSL.WantReadError:
                    continue
                if not data:
                    break
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    buf.write(data[:nl])
                    self._rbuf.write(data[nl:])
                    del data
                    break
                buf.write(data)
            return buf.getvalue()
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            buf.seek(0, 2)  # seek end
            buf_len = buf.tell()
            if buf_len >= size:
                buf.seek(0)
                rv = buf.read(size)
                self._rbuf = StringIO()
                self._rbuf.write(buf.read())
                return rv
            self._rbuf = StringIO()  # reset _rbuf.  we consume it via buf.
            while True:
                try:
                    data = self._sock.recv(self._rbufsize)
                except OpenSSL.SSL.WantReadError:
                    continue
                if not data:
                    break
                left = size - buf_len
                # did we just receive a newline?
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    # save the excess data to _rbuf
                    self._rbuf.write(data[nl:])
                    if buf_len:
                        buf.write(data[:nl])
                        break
                    else:
                        # Shortcut.  Avoid data copy through buf when returning
                        # a substring of our first recv().
                        return data[:nl]
                n = len(data)
                if n == size and not buf_len:
                    # Shortcut.  Avoid data copy through buf when
                    # returning exactly all of our first recv().
                    return data
                if n >= left:
                    buf.write(data[:left])
                    self._rbuf.write(data[left:])
                    break
                buf.write(data)
                buf_len += n
                #assert buf_len == buf.tell()
            return buf.getvalue()
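
The readline() above keeps a cStringIO object in self._rbuf so that any bytes received beyond the newline survive until the next call. A stripped-down sketch of that leftover-buffer idea is below; FakeSock is a stand-in for the OpenSSL connection and is not part of the original class:

from cStringIO import StringIO

class FakeSock(object):
    def __init__(self, data):
        self._data = StringIO(data)
    def recv(self, n):
        return self._data.read(n)

sock = FakeSock('first line\nsecond line\n')
rbuf = StringIO()
chunk = sock.recv(16)            # may contain more than one line
nl = chunk.find('\n') + 1
line, rest = chunk[:nl], chunk[nl:]
rbuf.write(rest)                 # park the excess, as _rbuf does
print repr(line)                 # 'first line\n'
print repr(rbuf.getvalue())      # 'secon' -- served on the next readline()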

Example 96

Project: pol Source File: keepass.py
def load(f, password, keyfile=None):
    if keyfile:
        l.debug('Reading keyfile ...')
        keyfile_bit = keyfile.read()
        if len(keyfile_bit) == 32:
            pass
        elif len(keyfile_bit) == 64:
            keyfile_bit = binascii.unhexlify(keyfile_bit)
        else:
            keyfile_bit = hashlib.sha256(keyfile_bit).digest()
    else:
        keyfile_bit = None

    l.debug('Reading header ...')
    signature = f.read(8)
    if signature != SIGNATURE:
        raise KeePassFormatError('Invalid signature.  Is this a KeePass file?')
    flags, version = struct.unpack('<II', f.read(8))
    master_seed = f.read(16)
    encryption_iv = f.read(16)
    ngroups, nentries  = struct.unpack('<II', f.read(8))
    contents_hash = f.read(32)
    master_seed2 = f.read(32)
    key_enc_rounds = struct.unpack('<I', f.read(4))[0]

    if flags != FLAG_SHA2 | FLAG_RIJNDAEL:
        raise NotImplementedError

    l.debug('Deriving finalkey ...')
    if keyfile_bit:
        compositekey = hashlib.sha256(password).digest() + keyfile_bit
    else:
        compositekey = password
    finalkey = masterkey_to_finalkey(compositekey, master_seed, master_seed2,
                        key_enc_rounds)

    l.debug('Reading remaining ciphertext ...')
    ciphertext = f.read()

    l.debug('Decrypting ...')
    cipher = Crypto.Cipher.AES.new(finalkey, Crypto.Cipher.AES.MODE_CBC,
                                            encryption_iv)
    padded_plaintext = cipher.decrypt(ciphertext)
    plaintext = padded_plaintext[:-ord(padded_plaintext[-1])]

    l.debug('Verifying hash ...')
    if hashlib.sha256(plaintext).digest() != contents_hash:
        raise BadPasswordError

    l.debug('Parsing groups ...')
    groups_found = 0
    g = StringIO.StringIO(plaintext)
    groups = {}
    current_group = {}
    had = set()
    while groups_found < ngroups:
        field_type, field_size = struct.unpack('<HI', g.read(6))
        if field_type in had:
            raise KeePassFormatError("Same field type occurs twice")
        had.add(field_type)
        data = g.read(field_size)
        if field_type == 0:
            l.debug(' comment %s %s', field_type, repr(data))
        elif field_type == 1:
            if len(data) != 4:
                raise KeePassFormatError("Group ID data must be 4 bytes")
            value = struct.unpack('<I', data)[0]
            current_group['id'] = value
            l.debug(' id %s', value)
        elif field_type == 2:
            value = data[:-1].decode('utf-8')
            current_group['name'] = value
            l.debug(' name %s', value)
        elif field_type == 3:
            value = unpack_datetime(data)
            current_group['creation-time'] = value
            l.debug(' creation-time %s', value)
        elif field_type == 4:
            value = unpack_datetime(data)
            current_group['last-modification-time'] = value
            l.debug(' last-modification-time %s', value)
        elif field_type == 5:
            value = unpack_datetime(data)
            current_group['last-access-time'] = value
            l.debug(' last-access-time %s', value)
        elif field_type == 6:
            value = unpack_datetime(data)
            current_group['expiration-time'] = value
            l.debug(' expiration-time %s', value)
        elif field_type == 7:
            if len(data) != 4:
                raise KeePassFormatError("Image ID data must be 4 bytes")
            value = struct.unpack('<I', data)[0]
            current_group['image-id'] = value
            l.debug(' image-id %s', value)
        elif field_type == 8:
            if len(data) != 2:
                raise KeePassFormatError("Level data must be 2 bytes")
            value = struct.unpack('<H', data)[0]
            current_group['level'] = value
            l.debug(' level %s', value)
        elif field_type == 9:
            if len(data) != 4:
                raise KeePassFormatError("Flags data must be 2 bytes")
            value = struct.unpack('<I', data)[0]
            current_group['flags'] = value
            l.debug(' flags %s', bin(value))
        elif field_type == 0xffff:
            l.debug(' end-of-group')
            groups_found += 1
            groups[current_group['id']] = current_group
            had = set()
            current_group = {}
        else:
            l.warn(' unknown field %s %s', field_type, repr(data))

    l.debug('Parsing entries ...')
    entries_found = 0
    entries = []
    current_entry = {}
    had = set()
    while entries_found < nentries:
        field_type, field_size = struct.unpack('<HI', g.read(6))
        if field_type in had:
            raise KeePassFormatError("Same field type occurs twice")
        had.add(field_type)
        data = g.read(field_size)
        if field_type == 0:
            l.debug(' comment %s %s', field_type, repr(data))
        elif field_type == 1:
            if len(data) != 16:
                raise KeePassFormatError("UUID data must be 16 bytes")
            value = uuid.UUID(bytes=data)
            current_entry['uuid'] = value
            l.debug(' uuid %s', value)
        elif field_type == 2:
            if len(data) != 4:
                raise KeePassFormatError("Group ID data must be 16 bytes")
            value = struct.unpack("<I", data)[0]
            current_entry['group'] = value
            l.debug(' group %s', value)
        elif field_type == 3:
            if len(data) != 4:
                raise KeePassFormatError("Image ID data must be 16 bytes")
            value = struct.unpack("<I", data)[0]
            current_entry['image-id'] = value
            l.debug(' image-id %s', value)
        elif field_type == 4:
            value = data[:-1].decode('utf-8')
            current_entry['title'] = value
            l.debug(' title %s', value)
        elif field_type == 5:
            value = data[:-1].decode('utf-8')
            current_entry['url'] = value
            l.debug(' url %s', value)
        elif field_type == 6:
            value = data[:-1].decode('utf-8')
            current_entry['username'] = value
            l.debug(' username %s', value)
        elif field_type == 7:
            value = data[:-1].decode('utf-8')
            current_entry['password'] = value
            l.debug(' password %s', value)
        elif field_type == 8:
            value = data[:-1].decode('utf-8')
            current_entry['notes'] = value
            l.debug(' notes %s', value)
        elif field_type == 9:
            value = unpack_datetime(data)
            current_entry['creation-time'] = value
            l.debug(' creation-time %s', value)
        elif field_type == 10:
            value = unpack_datetime(data)
            current_entry['last-modification-time'] = value
            l.debug(' last-modification-time %s', value)
        elif field_type == 11:
            value = unpack_datetime(data)
            current_entry['last-access-time'] = value
            l.debug(' last-access-time %s', value)
        elif field_type == 12:
            value = unpack_datetime(data)
            current_entry['expiration-time'] = value
            l.debug(' expiration-time %s', value)
        elif field_type == 13:
            value = data[:-1].decode('utf-8')
            current_entry['binary-description'] = value
            l.debug(' binary-description %s', value)
        elif field_type == 14:
            value = data
            current_entry['binary-data'] = value
            l.debug(' binary-data %s', repr(value))
        elif field_type == 0xffff:
            l.debug(' end-of-entry')
            entries_found += 1
            entries.append(current_entry)
            had = set()
            current_entry = {}
        else:
            l.warn(' unknown field %s %s', field_type, repr(data))
    return (groups, entries)
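
The parsing above wraps the decrypted plaintext in a StringIO and repeatedly reads a 6-byte (type, size) header followed by size bytes of payload until the 0xffff end marker. A minimal sketch of that field-stream loop over a fabricated two-field body (not a real KeePass payload) could be:

import struct
from cStringIO import StringIO

body = (struct.pack('<HI', 1, 4) + struct.pack('<I', 42) +    # one 4-byte field
        struct.pack('<HI', 0xffff, 0))                        # end-of-record marker
g = StringIO(body)
while True:
    field_type, field_size = struct.unpack('<HI', g.read(6))
    data = g.read(field_size)
    if field_type == 0xffff:
        break
    print field_type, struct.unpack('<I', data)[0]            # -> 1 42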

Example 97

Project: pol Source File: test_import_keepass.py
    def test_keepass_load1(self):
        self.assertEqual(pol.importers.keepass.load(
                                StringIO.StringIO(TEST_DB2),
                                'test', StringIO.StringIO(TEST_KEYFILE2)),
            ({489459835: {'creation-time': datetime(2999, 12, 28, 23, 59, 59),
                          'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
                          'flags': 0,
                          'id': 489459835,
                          'image-id': 0,
                          'last-access-time': datetime(2999, 12, 28, 23, 59, 59),
                          'last-modification-time': datetime(2999, 12, 28, 23, 59, 59),
                          'level': 0,
                          'name': u'Group 2'},
              2437480029: {'creation-time': datetime(2999, 12, 28, 23, 59, 59),
                           'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
                           'flags': 0,
                           'id': 2437480029,
                           'image-id': 0,
                           'last-access-time': datetime(2999, 12, 28, 23, 59, 59),
                           'last-modification-time': datetime(2999, 12, 28, 23, 59, 59),
                           'level': 0,
                           'name': u'Group 1'},
              2922083484: {'creation-time': datetime(2999, 12, 28, 23, 59, 59),
                           'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
                           'flags': 0,
                           'id': 2922083484,
                           'image-id': 0,
                           'last-access-time': datetime(2999, 12, 28, 23, 59, 59),
                           'last-modification-time': datetime(2999, 12, 28, 23, 59, 59),
                           'level': 1,
                           'name': u'Group 1.1'}},
             [{'binary-data': '',
               'binary-description': u'',
               'creation-time': datetime(2013, 4, 19, 20, 39, 18),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 489459835,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 39, 34),
               'last-modification-time': datetime(2013, 4, 19, 20, 39, 34),
               'notes': u'comment 5',
               'password': u'j:4O_nuR;Q-drfx\\9cddd(N;h=NpCVO<',
               'title': u'passphrase 5',
               'url': u'url 5',
               'username': u'username 5',
               'uuid': UUID('568f7151-3c51-498f-2417-c8bb802f97a1')},
              {'binary-data': '',
               'binary-description': u'',
               'creation-time': datetime(2013, 4, 19, 20, 39, 37),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 489459835,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 39, 50),
               'last-modification-time': datetime(2013, 4, 19, 20, 39, 50),
               'notes': u'comment 6',
               'password': u'#4XOkEEcH7-C%ON.YzI<8`9V_8"]Py:N',
               'title': u'passphrase 6',
               'url': u'url 6',
               'username': u'username 6',
               'uuid': UUID('698f7151-cdc7-3271-7da8-025a3f253fd4')},
              {'binary-data': '',
               'binary-description': u'',
               'creation-time': datetime(2013, 4, 19, 20, 38, 55),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 2922083484,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 39, 12),
               'last-modification-time': datetime(2013, 4, 19, 20, 39, 12),
               'notes': u'comment 4',
               'password': u'"fw6,Ll!TcCH3N&+_H>har5--Ja(f17!',
               'title': u'passphrase 4',
               'url': u'url 4',
               'username': u'username 4',
               'uuid': UUID('3f8f7151-ea1e-3f4b-5814-4ad9f6b533e7')},
              {'binary-data': '',
               'binary-description': u'',
               'creation-time': datetime(2013, 4, 19, 20, 38, 22),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 2437480029,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 38, 38),
               'last-modification-time': datetime(2013, 4, 19, 20, 38, 38),
               'notes': u'comment 2',
               'password': u"{wt_Xv'inhmSRlCpi-t}%)s}bt=8x:?^",
               'title': u'passphrase 2',
               'url': u'url 2',
               'username': u'username 2',
               'uuid': UUID('1e8f7151-cf05-ea7b-6bcd-be2bb591d496')},
              {'binary-data': '',
               'binary-description': u'',
               'creation-time': datetime(2013, 4, 19, 20, 37, 39),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 2437480029,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 38, 14),
               'last-modification-time': datetime(2013, 4, 19, 20, 38, 14),
               'notes': u'comment 1',
               'password': u"3d,,~{66JWKw'-3_yx'-cE>'h70hO%bO",
               'title': u'passphrase 1',
               'url': u'url 1',
               'username': u'username 1',
               'uuid': UUID('f38e7151-a29d-81ae-415e-e8f3e39a9592')},
              {'binary-data': '',
               'binary-description': u'',
               'creation-time': datetime(2013, 4, 19, 20, 38, 41),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 2922083484,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 38, 53),
               'last-modification-time': datetime(2013, 4, 19, 20, 38, 53),
               'notes': u'comment 3',
               'password': u"Mx\\_L]}>,B_:$2u3}(XqQ'IT^P-n8~%Q",
               'title': u'passphrase 3',
               'url': u'url 3',
               'username': u'username 3',
               'uuid': UUID('318f7151-42be-0221-dde5-343e7ed49742')},
              {'binary-data': '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
               'binary-description': u'bin-stream',
               'creation-time': datetime(2013, 4, 19, 20, 40, 5),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 2437480029,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 40, 5),
               'last-modification-time': datetime(2013, 4, 19, 20, 40, 5),
               'notes': u'KPX_CUSTOM_ICONS_4',
               'password': u'',
               'title': u'Meta-Info',
               'url': u'$',
               'username': u'SYSTEM',
               'uuid': UUID('00000000-0000-0000-0000-000000000000')},
              {'binary-data': '\x03\x00\x00\x00]\xfeH\x91\x01{\x90,\x1d\x00\x9ct+\xae\x00',
               'binary-description': u'bin-stream',
               'creation-time': datetime(2013, 4, 19, 20, 40, 5),
               'expiration-time': datetime(2999, 12, 28, 23, 59, 59),
               'group': 2437480029,
               'image-id': 0,
               'last-access-time': datetime(2013, 4, 19, 20, 40, 5),
               'last-modification-time': datetime(2013, 4, 19, 20, 40, 5),
               'notes': u'KPX_GROUP_TREE_STATE',
               'password': u'',
               'title': u'Meta-Info',
               'url': u'$',
               'username': u'SYSTEM',
               'uuid': UUID('00000000-0000-0000-0000-000000000000')}]))

Example 98

Project: teuthology Source File: kernel.py
def install_and_reboot(ctx, config):
    """
    Install and reboot the kernel.  This mostly performs remote
    installation operations.  The code checks for ARM images and skips
    grub operations if the kernel is ARM.  Otherwise, it extracts kernel
    titles from submenu entries and makes the appropriate grub calls.
    The assumptions here are somewhat simplified: kernel entries are
    expected to appear under submenu entries.

    :param ctx: Context
    :param config: Configuration
    """
    procs = {}
    kernel_title = ''
    for role, src in config.iteritems():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            install_kernel(role_remote, version=src)
            continue

        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            proc = role_remote.run(
                args=[
                    'sudo',
                    'rpm',
                    '-ivh',
                    '--oldpackage',
                    '--replacefiles',
                    '--replacepkgs',
                    remote_pkg_path(role_remote),
                ])
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue

        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
                ],
            )

        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))

        if kernel_title.endswith("-highbank"):
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                    ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue

        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles.  Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.  If it gets more
        # complex this will totally break.

        cmdout = StringIO()
        proc = role_remote.run(
            args=[
                'egrep',
                '(submenu|menuentry.*' + kernel_title + ').*{',
                '/boot/grub/grub.cfg'
               ],
            stdout = cmdout,
            )
        submenu_title = ''
        default_title = ''
        for l in cmdout.getvalue().split('\n'):
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        cmdout.close()
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))

        proc = role_remote.run(
            args=[
                # use the title(s) to construct the content of
                # the grub menu entry, so we can default to it.
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                    default_title + r'"\nEOF\n',
                # make it look like an emacs backup file so
                # unfortunately timed update-grub runs don't pick it
                # up yet; use sudo tee so we are able to write to /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                ],
            wait=False,
            )
        procs[role_remote.name] = proc

    for name, proc in procs.iteritems():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
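
Here the StringIO acts as a capture target: teuthology's remote.run() writes the remote egrep output into cmdout, and the captured text is then split into lines and tokenized with shlex. Outside teuthology the same capture-then-parse shape, using a throwaway echo command instead of grepping grub.cfg, might look like this:

import shlex
import subprocess
from cStringIO import StringIO

cmdout = StringIO()
cmdout.write(subprocess.check_output(
    ['echo', 'menuentry "Ubuntu, with Linux 3.13.0-generic" {']))
for line in cmdout.getvalue().split('\n'):
    fields = shlex.split(line)
    if len(fields) >= 2:
        print fields[0], '->', fields[1]   # menuentry -> Ubuntu, with Linux 3.13.0-generic
cmdout.close()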

Example 99

Project: Petrel Source File: package.py
Function: build_jar
def build_jar(source_jar_path, dest_jar_path, config, venv=None, definition=None, logdir=None):
    """Build a StormTopology .jar which encapsulates the topology defined in
    topology_dir. Optionally override the module and function names. This
    feature supports the definition of multiple topologies in a single
    directory."""

    if definition is None:
        definition = 'create.create'

    # Prepare data we'll use later for configuring parallelism.
    config_yaml = read_yaml(config)
    parallelism = dict((k.split('.')[-1], v) for k, v in config_yaml.iteritems()
        if k.startswith('petrel.parallelism'))

    pip_options = config_yaml.get('petrel.pip_options', '')

    module_name, dummy, function_name = definition.rpartition('.')
    
    topology_dir = os.getcwd()

    # Make a copy of the input "jvmpetrel" jar. This jar acts as a generic
    # starting point for all Petrel topologies.
    source_jar_path = os.path.abspath(source_jar_path)
    dest_jar_path = os.path.abspath(dest_jar_path)
    if source_jar_path == dest_jar_path:
        raise ValueError("Error: Destination and source path are the same.")
    shutil.copy(source_jar_path, dest_jar_path)
    jar = zipfile.ZipFile(dest_jar_path, 'a', compression=zipfile.ZIP_DEFLATED)
    
    added_path_entry = False
    try:
        # Add the files listed in manifest.txt to the jar.
        with open(os.path.join(topology_dir, MANIFEST), 'r') as f:
            for fn in f.readlines():
                # Ignore blank and comment lines.
                fn = fn.strip()
                if len(fn) and not fn.startswith('#'):

                    add_item_to_jar(jar, os.path.expandvars(fn.strip()))

        # Add user and machine information to the jar.
        add_to_jar(jar, '__submitter__.yaml', '''
petrel.user: %s
petrel.host: %s
''' % (getpass.getuser(),socket.gethostname()))
        
        # Also add the topology configuration to the jar.
        with open(config, 'r') as f:
            config_text = f.read()
        add_to_jar(jar, '__topology__.yaml', config_text)
    
        # Call module_name/function_name to populate a Thrift topology object.
        builder = TopologyBuilder()
        module_dir = os.path.abspath(topology_dir)
        if module_dir not in sys.path:
            sys.path[:0] = [ module_dir ]
            added_path_entry = True
        module = __import__(module_name)
        getattr(module, function_name)(builder)

        # Add the spout and bolt Python scripts to the jar. Create a
        # setup_<script>.sh for each Python script.

        # Add Python scripts and any other per-script resources.
        for k, v in chain(builder._spouts.iteritems(), builder._bolts.iteritems()):
            add_file_to_jar(jar, topology_dir, v.script)

            # Create a bootstrap script.
            if venv is not None:
                # Allow overriding the execution command from the "petrel"
                # command line. This is handy if the server already has a
                # virtualenv set up with the necessary libraries.
                v.execution_command = os.path.join(venv, 'bin/python')

            # If a parallelism value was specified in the configuration YAML,
            # override any setting provided in the topology definition script.
            if k in parallelism:
                builder._commons[k].parallelism_hint = int(parallelism.pop(k))

            v.execution_command, v.script = \
                intercept(venv, v.execution_command, os.path.splitext(v.script)[0],
                          jar, pip_options, logdir)

        if len(parallelism):
            raise ValueError(
                'Parallelism settings error: There are no components named: %s' %
                ','.join(parallelism.keys()))

        # Build the Thrift topology object and serialize it to the .jar. Must do
        # this *after* the intercept step above since that step may modify the
        # topology definition.
        io = StringIO()
        topology = builder.write(io)
        add_to_jar(jar, 'topology.ser', io.getvalue())
    finally:
        jar.close()
        if added_path_entry:
            # Undo our sys.path change.
            sys.path[:] = sys.path[1:]
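
The StringIO near the end is used as an in-memory sink for the serialized Thrift topology before it is copied into the jar; add_to_jar() presumably hands io.getvalue() to ZipFile.writestr(). A minimal sketch of that serialize-into-memory-then-archive step, with a placeholder payload and jar name, could be:

import zipfile
from cStringIO import StringIO

io = StringIO()
io.write('serialized-topology-bytes')           # stands in for builder.write(io)

jar = zipfile.ZipFile('demo.jar', 'w', compression=zipfile.ZIP_DEFLATED)
jar.writestr('topology.ser', io.getvalue())     # copy the in-memory blob into the archive
jar.close()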

Example 100

Project: ganeti Source File: process.py
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, noclose_fds,
                input_fd, postfork_fn=None,
                _linger_timeout=constants.CHILD_LINGER_TIMEOUT):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @type timeout: int
  @param timeout: Timeout after which the program gets terminated
  @type noclose_fds: list
  @param noclose_fds: list of additional (fd >=3) file descriptors to leave
                      open for the child process
  @type input_fd: C{file}-like object containing an actual file descriptor
                  or numeric file descriptor
  @param input_fd: File descriptor for process' standard input
  @type postfork_fn: Callable receiving PID as parameter
  @param postfork_fn: Function run after fork but before timeout
  @rtype: tuple
  @return: (out, err, status, timeout_action)

  """
  poller = select.poll()

  if interactive:
    stderr = None
    stdout = None
  else:
    stderr = subprocess.PIPE
    stdout = subprocess.PIPE

  if input_fd:
    stdin = input_fd
  elif interactive:
    stdin = None
  else:
    stdin = subprocess.PIPE

  if noclose_fds:
    preexec_fn = lambda: CloseFDs(noclose_fds)
    close_fds = False
  else:
    preexec_fn = None
    close_fds = True

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=close_fds, env=env,
                           cwd=cwd,
                           preexec_fn=preexec_fn)

  if postfork_fn:
    postfork_fn(child.pid)

  out = StringIO()
  err = StringIO()

  linger_timeout = None

  if timeout is None:
    poll_timeout = None
  else:
    poll_timeout = utils_algo.RunningTimeout(timeout, True).Remaining

  msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
                 (cmd, child.pid))
  msg_linger = ("Command %s (%d) run into linger timeout, killing" %
                (cmd, child.pid))

  timeout_action = _TIMEOUT_NONE

  # subprocess: "If the stdin argument is PIPE, this attribute is a file object
  # that provides input to the child process. Otherwise, it is None."
  assert (stdin == subprocess.PIPE) ^ (child.stdin is None), \
    "subprocess' stdin did not behave as docuemented"

  if not interactive:
    if child.stdin is not None:
      child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      utils_wrapper.SetNonblockFlag(fd, True)

    while fdmap:
      if poll_timeout:
        pt = poll_timeout() * 1000
        if pt < 0:
          if linger_timeout is None:
            logging.warning(msg_timeout)
            if child.poll() is None:
              timeout_action = _TIMEOUT_TERM
              utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid,
                                                  signal.SIGTERM)
            linger_timeout = \
              utils_algo.RunningTimeout(_linger_timeout, True).Remaining
          pt = linger_timeout() * 1000
          if pt < 0:
            break
      else:
        pt = None

      pollresult = utils_wrapper.RetryOnSignal(poller.poll, pt)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  if timeout is not None:
    assert callable(poll_timeout)

    # We have no I/O left but it might still run
    if child.poll() is None:
      _WaitForProcess(child, poll_timeout())

    # Terminate if still alive after timeout
    if child.poll() is None:
      if linger_timeout is None:
        logging.warning(msg_timeout)
        timeout_action = _TIMEOUT_TERM
        utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
        lt = _linger_timeout
      else:
        lt = linger_timeout()
      _WaitForProcess(child, lt)

    # Okay, still alive after timeout and linger timeout? Kill it!
    if child.poll() is None:
      timeout_action = _TIMEOUT_KILL
      logging.warning(msg_linger)
      utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status, timeout_action
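
In this runner each pipe gets its own StringIO (out and err), the poll loop appends every chunk read from the non-blocking descriptors, and getvalue() produces the final strings. The sketch below keeps only that accumulate-then-getvalue shape and collapses the poll loop into communicate() for brevity; the shell command is a throwaway:

import subprocess
from cStringIO import StringIO

child = subprocess.Popen(['sh', '-c', 'echo out; echo err 1>&2'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = StringIO(), StringIO()
stdout_data, stderr_data = child.communicate()
out.write(stdout_data)                  # in the real code this happens chunk by chunk
err.write(stderr_data)
print repr(out.getvalue()), repr(err.getvalue()), child.returncode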