StringIO.StringIO

Here are examples of the Python API StringIO.StringIO taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

143 Examples
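
For orientation, here is a minimal sketch of the StringIO.StringIO interface itself (Python 2); the values are illustrative, not taken from any of the projects below.

from StringIO import StringIO

buf = StringIO()                     # empty in-memory text buffer, file-like
buf.write('hello, ')
buf.write('world')
print buf.getvalue()                 # -> hello, world

reader = StringIO('line1\nline2\n')  # wrap an existing string for reading
for line in reader:
    print line.rstrip()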

Example 51

Project: python-ilorest-library Source File: v1.py
    def _rest_request(self, path, method='GET', args=None, body=None, \
                    headers=None, optionalpassword=None, providerheader=None):
        """Rest request main function

        :param path: path within tree
        :type path: str
        :param method: method to be implemented
        :type method: str
        :param args: the arguments for method
        :type args: dict
        :param body: body payload for the rest call
        :type body: dict
        :param headers: provide additional headers
        :type headers: dict
        :param optionalpassword: provide password for authentication
        :type optionalpassword: str
        :param providerheader: provider id for the header
        :type providerheader: str
        :returns: returns a RestResponse object

        """
        headers = self._get_req_headers(headers, providerheader, \
                                                            optionalpassword)
        reqpath = path.replace('//', '/')

        if body is not None:
            if isinstance(body, dict) or isinstance(body, list):
                headers['Content-Type'] = u'application/json'
                body = json.dumps(body)
            else:
                headers['Content-Type'] = u'application/x-www-form-urlencoded'
                body = urllib.urlencode(body)

            if method == 'PUT':
                resp = self._rest_request(path=path)

                try:
                    if resp.getheader('content-encoding') == 'gzip':
                        buf = StringIO()
                        gfile = gzip.GzipFile(mode='wb', fileobj=buf)

                        try:
                            gfile.write(str(body))
                        finally:
                            gfile.close()

                        compresseddata = buf.getvalue()
                        if compresseddata:
                            data = bytearray()
                            data.extend(buffer(compresseddata))
                            body = data
                except BaseException as excp:
                    LOGGER.error('Error occur while compressing body: %s', excp)
                    raise

            headers['Content-Length'] = len(body)

        if args:
            if method == 'GET':
                reqpath += '?' + urllib.urlencode(args)
            elif method == 'PUT' or method == 'POST' or method == 'PATCH':
                headers['Content-Type'] = u'application/x-www-form-urlencoded'
                body = urllib.urlencode(args)

        restreq = RestRequest(reqpath, method=method, body=body)

        attempts = 0
        while attempts < self.MAX_RETRY:
            if logging.getLogger().isEnabledFor(logging.DEBUG):
                try:
                    LOGGER.debug('HTTP REQUEST: %s\n\tPATH: %s\n\tBODY: %s'% \
                                (restreq.method, restreq.path, restreq.body))
                except:
                    LOGGER.debug('HTTP REQUEST: %s\n\tPATH: %s\n\tBODY: %s'% \
                                (restreq.method, restreq.path, 'binary body'))
            attempts = attempts + 1
            LOGGER.info('Attempt %s of %s', attempts, path)

            try:
                while True:
                    if self._conn is None:
                        self.__init_connection()

                    self._conn.request(method.upper(), reqpath, body=body, \
                                                                headers=headers)
                    self._conn_count += 1

                    inittime = time.clock()
                    resp = self._conn.getresponse()
                    endtime = time.clock()
                    LOGGER.info('Response Time to %s: %s seconds.'% \
                                        (restreq.path, str(endtime-inittime)))

                    if resp.getheader('Connection') == 'close':
                        self.__destroy_connection()
                    if resp.status not in range(300, 399) or \
                                                            resp.status == 304:
                        break

                    newloc = resp.getheader('location')
                    newurl = urlparse2.urlparse(newloc)

                    reqpath = newurl.path
                    self.__init_connection(newurl)

                restresp = RestResponse(restreq, resp)

                try:
                    if restresp.getheader('content-encoding') == "gzip":
                        compressedfile = StringIO(restresp.text)
                        decompressedfile = gzip.GzipFile(fileobj=compressedfile)
                        restresp.text = decompressedfile.read()
                except Exception as excp:
                    LOGGER.error('Error occur while decompressing body: %s', \
                                                                        excp)
                    raise DecompressResponseError()
            except Exception as excp:
                if isinstance(excp, DecompressResponseError):
                    raise

                LOGGER.info('Retrying %s [%s]'% (path, excp))
                time.sleep(1)

                self.__init_connection()
                continue
            else:
                break

        self.__destroy_connection()
        if attempts < self.MAX_RETRY:
            if logging.getLogger().isEnabledFor(logging.DEBUG):
                headerstr = ''

                for header in restresp._http_response.msg.headers:
                    headerstr += '\t' + header.rstrip() + '\n'

                try:
                    LOGGER.debug('HTTP RESPONSE for %s:\nCode: %s\nHeaders:\n' \
                             '%s\nBody Response of %s: %s'%\
                             (restresp.request.path,\
                            str(restresp._http_response.status)+ ' ' + \
                            restresp._http_response.reason, \
                            headerstr, restresp.request.path, restresp.read))
                except:
                    LOGGER.debug('HTTP RESPONSE:\nCode:%s', (restresp))

            return restresp
        else:
            raise RetriesExhaustedError()
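
The StringIO usage here is two-fold: the request body is gzip-compressed into an in-memory buffer, and a gzip-encoded response is decompressed from one. Stripped of the surrounding REST machinery, the pattern is roughly the following sketch (function names are illustrative):

import gzip
from StringIO import StringIO

def gzip_compress(text):
    # Write gzip output into an in-memory buffer instead of a file on disk.
    buf = StringIO()
    gfile = gzip.GzipFile(mode='wb', fileobj=buf)
    try:
        gfile.write(text)
    finally:
        gfile.close()
    return buf.getvalue()

def gzip_decompress(data):
    # Wrap the compressed bytes so GzipFile can read them like a file.
    return gzip.GzipFile(fileobj=StringIO(data)).read()

assert gzip_decompress(gzip_compress('payload')) == 'payload'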

Example 52

Project: word_cloud Source File: gen_rst.py
Function: generate_file_rst
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.
    """
    base_image_name = os.path.splitext(fname)[0]
    image_fname = '%s_%%s.png' % base_image_name

    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)

    # The following is a list containing all the figure names
    figure_list = []

    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                               'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery:
        # generate the plot as png image if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())

        if (not os.path.exists(first_image_file) or
                os.stat(first_image_file).st_mtime <=
                                    os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt, '__file__': src_file}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()

                # get variables so we can later add links to the documentation
                example_code_obj = {}
                for var_name, var in my_globals.iteritems():
                    if not hasattr(var, '__module__'):
                        continue
                    if not isinstance(var.__module__, basestring):
                        continue
                    if var.__module__.split('.')[0] not in DOCMODULES:
                        continue

                    # get the type as a string with other things stripped
                    tstr = str(type(var))
                    tstr = (tstr[tstr.find('\'')
                            + 1:tstr.rfind('\'')].split('.')[-1])
                    # get shortened module name
                    module_short = get_short_module_name(var.__module__,
                                                         tstr)
                    cobj = {'name': tstr, 'module': var.__module__,
                            'module_short': module_short,
                            'obj_type': 'object'}
                    example_code_obj[var_name] = cobj

                # find functions so we can later add links to the documentation
                funregex = re.compile('[\w.]+\(')
                with open(src_file, 'rt') as fid:
                    for line in fid.readlines():
                        if line.startswith('#'):
                            continue
                        for match in funregex.findall(line):
                            fun_name = match[:-1]
                            try:
                                exec('this_fun = %s' % fun_name, my_globals)
                            except Exception:
                                #print 'extracting function failed'
                                #print err
                                continue
                            this_fun = my_globals['this_fun']
                            if not callable(this_fun):
                                continue
                            if not hasattr(this_fun, '__module__'):
                                continue
                            if not isinstance(this_fun.__module__, basestring):
                                continue
                            if (this_fun.__module__.split('.')[0]
                                    not in DOCMODULES):
                                continue

                            # get shortened module name
                            fun_name_short = fun_name.split('.')[-1]
                            module_short = get_short_module_name(
                                this_fun.__module__, fun_name_short)
                            cobj = {'name': fun_name_short,
                                    'module': this_fun.__module__,
                                    'module_short': module_short,
                                    'obj_type': 'function'}
                            example_code_obj[fun_name] = cobj
                fid.close()

                if len(example_code_obj) > 0:
                    # save the dictionary, so we can later add hyperlinks
                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
                    with open(codeobj_fname, 'wb') as fid:
                        cPickle.dump(example_code_obj, fid,
                                     cPickle.HIGHEST_PROTOCOL)
                    fid.close()

                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                                            my_globals['__doc__'],
                                            '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n  %s\n\n' % (
                        '\n  '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)

                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    plt.savefig(image_path % fig_num)
                    figure_list.append(image_fname % fig_num)
            except:
                print 80 * '_'
                print '%s is not compiling:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout

            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            figure_list = [f[len(image_dir):]
                            for f in glob.glob(image_path % '[1-9]')]
                            #for f in glob.glob(image_path % '*')]

        # generate thumb file
        this_template = plot_rst_template
        if os.path.exists(first_image_file):
            make_thumbnail(first_image_file, thumb_file, 200, 140)

    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)

    docstring, short_desc, end_row = extract_docstring(example_file)

    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')

    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()
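
Here StringIO backs a Tee object so the example script's stdout is both echoed to the terminal and captured for the generated rst file. A reduced version of that idea, with a simplified stand-in for the project's Tee class, might look like:

import sys
from StringIO import StringIO

class Tee(object):
    """Write to several file-like objects at once (simplified stand-in)."""
    def __init__(self, *streams):
        self.streams = streams
    def write(self, data):
        for stream in self.streams:
            stream.write(data)
    def flush(self):
        for stream in self.streams:
            stream.flush()

capture = StringIO()
orig_stdout = sys.stdout
sys.stdout = Tee(orig_stdout, capture)
try:
    print "echoed to the terminal and captured"
finally:
    sys.stdout = orig_stdout
captured_output = capture.getvalue()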

Example 53

Project: nudge Source File: publisher.py
    def __call__(self, environ, start_response):
        '''
            This is called by each request to the server.
            This MUST return a valid HTTP response under all circumstances.

            We expect environ to be a valid WSGI Python dictionary.
        '''
        req = WSGIRequest(environ)

#        if isinstance(environ, types.DictType):
#            req = WSGIRequest(environ)
#        else:
#            req = environ

        # main exception handler to ensure client gets valid response.
        # defer any mutation of the request object (incl. writes to the client)
        # until you're sure all exception prone activities have been performed
        # successfully (aka: "basic exception guarantee")
        code = None
        final_content = ""
        endpoint = None
        try:
            # allow '_method' query arg to override method
            method = req.method
            if '_method' in req.arguments:
                method = req.arguments['_method'][0].upper()
                del req.arguments['_method']

            # find appropriate endpoint
            reqline = method + urllib.unquote(req.path)
            match = None
            for endpoint in self._endpoints:
                match = endpoint.match(reqline)
                if match:
                    break

            if not match:
                if self._fallbackapp:
                    _log.debug("Using fallback app for request: (%s) (%s)" % \
                               (method, req.uri))
                    # Since recreating a stringio from the request body can be
                    # expensive, we only want to do this if we fallback.
                    environ['wsgi.input'] = StringIO.StringIO(req.body)
                    return self._fallbackapp(environ, start_response)
                else:
                    raise HTTPException(404)

            # convert all values in req.arguments from lists to scalars,
            # then combine with path args.
            arguments = dict((k, v[0]) for k, v in req.arguments.iteritems()\
                if isinstance(v, list) and len(v) > 0)
            inargs = dict(match.groupdict(), **arguments)

            # compile positional arguments
            args = []
            for arg in endpoint.sequential:
                args.append(arg.argspec(req, inargs))

            # compile keyword arguments
            kwargs = {}
            for argname, arg in endpoint.named.iteritems():
                r = arg.argspec(req, inargs)
                if r != None:
                    kwargs[argname] = r

            # invoke the service endpoint
            result = endpoint(*args, **kwargs)

            # TODO make sure this works with unicode
            # This is real, super annoying, lets only use if in debug mode
            if self._debug:
                _log.debug(_gen_trace_str(
                    endpoint.function, args, kwargs, result))

            if isinstance(endpoint.renderer, RequestAwareRenderer):
                r = endpoint.renderer(req, result)
            else:
                r = endpoint.renderer(result)
            content, content_type, code, extra_headers = \
                r.content, r.content_type, r.http_status, r.headers

        except (Exception), e:
            error_response = None
            logged_trace = False
            #
            # Try to use this endpoint's exception handler(s)
            # If the raised exception is not mapped in this endpoint, or
            # this endpoint raises an exception when trying to handle,
            # we will then try to the default handler, and ultimately
            # fallback to the self._options.default_error_response, which
            # is guaranteed to be valid at app initialization.
            #
            if endpoint and endpoint.exceptions:
                try:
                    error_response = handle_exception(
                        e, endpoint.exceptions,
                        default_handler=self._options.default_error_handler
                    )
                    if not error_response:
                        raise
                except (Exception), e:
                    _log.exception(
                        "Endpoint (%s) failed to handle exception" % endpoint.name
                    )
                    logged_trace = True

            if not error_response:
                try:
                    # Try one more time to handle a base exception
                    handler = self._options.default_error_handler()
                    error_response = handler(e, req)
                except (Exception), e:
                    _log.error(
                        "Default error handler failed to handle exception")
                    if logged_trace is False:
                        _log.exception(e)

            code, content_type, content, extra_headers = \
                error_response or self._options.default_error_response

        final_content = _finish_request(
            req,
            start_response,
            code,
            content_type,
            content,
            extra_headers
        )

        return [final_content + "\r\n"]
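
The StringIO line in this example re-wraps the already-read request body so the fallback WSGI app can read wsgi.input again. A minimal sketch of that step on its own (the helper name is hypothetical):

import StringIO

def rewind_wsgi_input(environ):
    # Read the body once, then replace wsgi.input with an in-memory copy
    # so a downstream WSGI application can read it again.
    try:
        length = int(environ.get('CONTENT_LENGTH') or 0)
    except ValueError:
        length = 0
    body = environ['wsgi.input'].read(length) if length else ''
    environ['wsgi.input'] = StringIO.StringIO(body)
    return body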

Example 54

Project: python-compat-runtime Source File: urlfetch_stub.py
  @staticmethod
  def _RetrieveURL(url, payload, method, headers, request, response,
                   follow_redirects=True, deadline=_API_CALL_DEADLINE,
                   validate_certificate=_API_CALL_VALIDATE_CERTIFICATE_DEFAULT):
    """Retrieves a URL over network.

    Args:
      url: String containing the URL to access.
      payload: Request payload to send, if any; None if no payload.
        If the payload is unicode, we assume it is utf-8.
      method: HTTP method to use (e.g., 'GET')
      headers: List of additional header objects to use for the request.
      request: A urlfetch_service_pb.URLFetchRequest proto object from
          original request.
      response: A urlfetch_service_pb.URLFetchResponse proto object to
          populate with the response data.
      follow_redirects: optional setting (defaulting to True) for whether or not
        we should transparently follow redirects (up to MAX_REDIRECTS)
      deadline: Number of seconds to wait for the urlfetch to finish.
      validate_certificate: If true, do not send request to server unless the
        certificate is valid, signed by a trusted CA and the hostname matches
        the certificate.

    Raises:
      Raises an apiproxy_errors.ApplicationError exception with
      INVALID_URL_ERROR in cases where:
        - The protocol of the redirected URL is bad or missing.
        - The port is not in the allowable range of ports.
      Raises an apiproxy_errors.ApplicationError exception with
      TOO_MANY_REDIRECTS in cases when MAX_REDIRECTS is exceeded
    """
    last_protocol = ''
    last_host = ''
    if isinstance(payload, unicode):
      payload = payload.encode('utf-8')

    for redirect_number in xrange(MAX_REDIRECTS + 1):
      parsed = urlparse.urlsplit(url)
      protocol, host, path, query, fragment = parsed







      port = urllib.splitport(urllib.splituser(host)[1])[1]

      if not _IsAllowedPort(port):
        logging.error(
          'urlfetch received %s ; port %s is not allowed in production!' %
          (url, port))





        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.INVALID_URL)

      if protocol and not host:

        logging.error('Missing host on redirect; target url is %s' % url)
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.INVALID_URL)




      if not host and not protocol:
        host = last_host
        protocol = last_protocol






      adjusted_headers = {
          'User-Agent':
          ('AppEngine-Google; (+http://code.google.com/appengine; appid: %s)'
           % os.getenv('APPLICATION_ID')),
          'Host': host,
          'Accept-Encoding': 'gzip',
      }
      if payload is not None:


        adjusted_headers['Content-Length'] = str(len(payload))


      if method == 'POST' and payload:
        adjusted_headers['Content-Type'] = 'application/x-www-form-urlencoded'

      passthrough_content_encoding = False
      for header in headers:
        if header.key().title().lower() == 'user-agent':
          adjusted_headers['User-Agent'] = (
              '%s %s' %
              (header.value(), adjusted_headers['User-Agent']))
        else:
          if header.key().lower() == 'accept-encoding':
            passthrough_content_encoding = True
          adjusted_headers[header.key().title()] = header.value()

      if payload is not None:
        escaped_payload = payload.encode('string_escape')
      else:
        escaped_payload = ''
      logging.debug('Making HTTP request: host = %r, '
                    'url = %r, payload = %.1000r, headers = %r',
                    host, url, escaped_payload, adjusted_headers)
      try:
        proxy_host = None

        if protocol == 'http':
          connection_class = httplib.HTTPConnection
          default_port = 80

          if os.environ.get('HTTP_PROXY') and not _IsLocalhost(host):
            _, proxy_host, _, _, _ = (
                urlparse.urlsplit(os.environ.get('HTTP_PROXY')))
        elif protocol == 'https':
          if (validate_certificate and _CanValidateCerts() and
              CERT_PATH):

            connection_class = fancy_urllib.create_fancy_connection(
                ca_certs=CERT_PATH)
          else:
            connection_class = httplib.HTTPSConnection

          default_port = 443

          if (_CONNECTION_SUPPORTS_SSL_TUNNEL and
              os.environ.get('HTTPS_PROXY') and not _IsLocalhost(host)):
            _, proxy_host, _, _, _ = (
                urlparse.urlsplit(os.environ.get('HTTPS_PROXY')))
        else:

          error_msg = 'Redirect specified invalid protocol: "%s"' % protocol
          logging.error(error_msg)
          raise apiproxy_errors.ApplicationError(
              urlfetch_service_pb.URLFetchServiceError.INVALID_URL, error_msg)






        connection_kwargs = (
            {'timeout': deadline} if _CONNECTION_SUPPORTS_TIMEOUT else {})

        if proxy_host:
          proxy_address, _, proxy_port = proxy_host.partition(':')
          connection = connection_class(
              proxy_address, proxy_port if proxy_port else default_port,
              **connection_kwargs)
          full_path = urlparse.urlunsplit((protocol, host, path, query, ''))

          if protocol == 'https':
            connection.set_tunnel(host)
        else:
          connection = connection_class(host, **connection_kwargs)
          full_path = urlparse.urlunsplit(('', '', path, query, ''))



        last_protocol = protocol
        last_host = host

        if not _CONNECTION_SUPPORTS_TIMEOUT:
          orig_timeout = socket.getdefaulttimeout()
        try:
          if not _CONNECTION_SUPPORTS_TIMEOUT:


            socket.setdefaulttimeout(deadline)
          connection.request(method, full_path, payload, adjusted_headers)
          http_response = connection.getresponse()
          if method == 'HEAD':
            http_response_data = ''
          else:
            http_response_data = http_response.read()
        finally:
          if not _CONNECTION_SUPPORTS_TIMEOUT:
            socket.setdefaulttimeout(orig_timeout)
          connection.close()
      except _fancy_urllib_InvalidCertException, e:
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.SSL_CERTIFICATE_ERROR,
          str(e))
      except _fancy_urllib_SSLError, e:





        app_error = (
            urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED
            if 'timed out' in e.message else
            urlfetch_service_pb.URLFetchServiceError.SSL_CERTIFICATE_ERROR)
        raise apiproxy_errors.ApplicationError(app_error, str(e))
      except socket.timeout, e:
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED, str(e))
      except (httplib.error, socket.error, IOError), e:
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, str(e))




      if http_response.status in REDIRECT_STATUSES and follow_redirects:

        url = http_response.getheader('Location', None)
        if url is None:
          error_msg = 'Redirecting response was missing "Location" header'
          logging.error(error_msg)
          raise apiproxy_errors.ApplicationError(
              urlfetch_service_pb.URLFetchServiceError.MALFORMED_REPLY,
              error_msg)



        if (http_response.status != httplib.TEMPORARY_REDIRECT and
            method not in PRESERVE_ON_REDIRECT):
          logging.warn('Received a %s to a %s. Redirecting with a GET',
                       http_response.status, method)
          method = 'GET'
          payload = None
      else:
        response.set_statuscode(http_response.status)
        if (http_response.getheader('content-encoding') == 'gzip' and
            not passthrough_content_encoding):
          gzip_stream = StringIO.StringIO(http_response_data)
          gzip_file = gzip.GzipFile(fileobj=gzip_stream)
          http_response_data = gzip_file.read()
        response.set_content(http_response_data[:MAX_RESPONSE_SIZE])


        for header_key in http_response.msg.keys():
          for header_value in http_response.msg.getheaders(header_key):
            if (header_key.lower() == 'content-encoding' and
                header_value == 'gzip' and
                not passthrough_content_encoding):
              continue
            if header_key.lower() == 'content-length' and method != 'HEAD':
              header_value = str(len(response.content()))
            header_proto = response.add_header()
            header_proto.set_key(header_key)
            header_proto.set_value(header_value)

        if len(http_response_data) > MAX_RESPONSE_SIZE:
          response.set_contentwastruncated(True)



        if request.url() != url:
          response.set_finalurl(url)


        break
    else:
      error_msg = 'Too many repeated redirects'
      logging.error(error_msg)
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.TOO_MANY_REDIRECTS,
          error_msg)
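
As in Example 51, StringIO appears here to gunzip an HTTP body in memory when the response declares Content-Encoding: gzip. The core of that branch, as a standalone sketch:

import gzip
import StringIO

def maybe_gunzip(http_response, body):
    # httplib header lookup is case-insensitive, so 'content-encoding' works.
    if http_response.getheader('content-encoding') == 'gzip':
        return gzip.GzipFile(fileobj=StringIO.StringIO(body)).read()
    return body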

Example 55

Project: wolnelektury Source File: models.py
    @classmethod
    def from_xml_file(cls, xml_file, image_file=None, image_store=None, overwrite=False):
        """
        Import xml and its accompanying image file.
        If image file is missing, it will be fetched by librarian.picture.ImageStore
        which looks for an image file in the same directory the xml is, with extension matching
        its mime type.
        """
        from sortify import sortify
        from django.core.files import File
        from librarian.picture import WLPicture, ImageStore
        close_xml_file = False
        close_image_file = False

        if image_file is not None and not isinstance(image_file, File):
            image_file = File(open(image_file))
            close_image_file = True

        if not isinstance(xml_file, File):
            xml_file = File(open(xml_file))
            close_xml_file = True

        with transaction.atomic():
            # use librarian to parse meta-data
            if image_store is None:
                image_store = ImageStore(picture_storage.path('images'))
            picture_xml = WLPicture.from_file(xml_file, image_store=image_store)

            picture, created = Picture.objects.get_or_create(slug=picture_xml.slug[:120])
            if not created and not overwrite:
                raise Picture.AlreadyExists('Picture %s already exists' % picture_xml.slug)

            picture.areas.all().delete()
            picture.title = unicode(picture_xml.picture_info.title)
            picture.extra_info = picture_xml.picture_info.to_dict()

            picture_tags = set(catalogue.models.Tag.tags_from_info(picture_xml.picture_info))
            motif_tags = set()
            thing_tags = set()

            area_data = {'themes': {}, 'things': {}}

            # Treat all names in picture XML as in default language.
            lang = settings.LANGUAGE_CODE

            for part in picture_xml.partiter():
                if picture_xml.frame:
                    c = picture_xml.frame[0]
                    part['coords'] = [[p[0] - c[0], p[1] - c[1]] for p in part['coords']]
                if part.get('object', None) is not None:
                    _tags = set()
                    for objname in part['object'].split(','):
                        objname = objname.strip().capitalize()
                        tag, created = catalogue.models.Tag.objects.get_or_create(
                            slug=slughifi(objname), category='thing')
                        if created:
                            tag.name = objname
                            setattr(tag, 'name_%s' % lang, tag.name)
                            tag.sort_key = sortify(tag.name)
                            tag.save()
                        # thing_tags.add(tag)
                        area_data['things'][tag.slug] = {
                            'object': objname,
                            'coords': part['coords'],
                            }

                        _tags.add(tag)
                    area = PictureArea.rectangle(picture, 'thing', part['coords'])
                    area.save()
                    area.tags = _tags
                else:
                    _tags = set()
                    for motifs in part['themes']:
                        for motif in motifs.split(','):
                            tag, created = catalogue.models.Tag.objects.get_or_create(
                                slug=slughifi(motif), category='theme')
                            if created:
                                tag.name = motif
                                tag.sort_key = sortify(tag.name)
                                tag.save()
                            # motif_tags.add(tag)
                            _tags.add(tag)
                            area_data['themes'][tag.slug] = {
                                'theme': motif,
                                'coords': part['coords']
                                }

                    logging.debug("coords for theme: %s" % part['coords'])
                    area = PictureArea.rectangle(picture, 'theme', part['coords'])
                    area.save()
                    area.tags = _tags.union(picture_tags)

            picture.tags = picture_tags.union(motif_tags).union(thing_tags)
            picture.areas_json = area_data

            if image_file is not None:
                img = image_file
            else:
                img = picture_xml.image_file()

            modified = cls.crop_to_frame(picture_xml, img)
            modified = cls.add_source_note(picture_xml, modified)

            picture.width, picture.height = modified.size

            modified_file = StringIO()
            modified.save(modified_file, format='JPEG', quality=95)
            # FIXME: hardcoded extension - detect from DC format or original filename
            picture.image_file.save(path.basename(picture_xml.image_path), File(modified_file))

            picture.xml_file.save("%s.xml" % picture.slug, File(xml_file))
            picture.save()
            tasks.generate_picture_html(picture.id)

        if close_xml_file:
            xml_file.close()
        if close_image_file:
            image_file.close()

        return picture
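
Here StringIO serves as the in-memory target for saving the modified PIL image, which is then handed to Django's File wrapper. A minimal sketch of that step, assuming a PIL image and django.core.files.File (the helper name is illustrative):

from StringIO import StringIO
from django.core.files import File

def image_to_django_file(image, quality=95):
    # `image` is assumed to be a PIL image, as `modified` is above.
    buf = StringIO()
    image.save(buf, format='JPEG', quality=quality)
    buf.seek(0)
    return File(buf)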

Example 56

Project: DIRAC Source File: Job.py
  def _toJDL( self, xmlFile = '', jobDescriptionObject = None ):  # messy but need to account for xml file being in /tmp/guid dir
    """Creates a JDL representation of itself as a Job.
    """
    #Check if we have to do old bootstrap...
    classadJob = ClassAd( '[]' )

    paramsDict = {}
    params = self.workflow.parameters # ParameterCollection object

    paramList = params
    for param in paramList:
      paramsDict[param.getName()] = {'type':param.getType(), 'value':param.getValue()}

    arguments = []
    scriptName = 'jobDescription.xml'

    if jobDescriptionObject is None:
      # if we are here it's because there's a real file, on disk, that is named 'jobDescription.xml'
      if self.script:
        if os.path.exists( self.script ):
          scriptName = os.path.abspath( self.script )
          self.log.verbose( 'Found script name %s' % scriptName )
        else:
          self.log.error( "File not found", self.script )
      else:
        if xmlFile:
          self.log.verbose( 'Found XML File %s' % xmlFile )
          scriptName = xmlFile
      self.addToInputSandbox.append( scriptName )

    elif isinstance( jobDescriptionObject, StringIO.StringIO ):
      self.log.verbose( "jobDescription is passed in as a StringIO object" )

    else:
      self.log.error( "Where's the job description?" )

    arguments.append( os.path.basename( scriptName ) )
    if paramsDict.has_key( 'LogLevel' ):
      if paramsDict['LogLevel']['value']:
        arguments.append( '-o LogLevel=%s' % ( paramsDict['LogLevel']['value'] ) )
      else:
        self.log.warn( 'Job LogLevel defined with null value' )
    if paramsDict.has_key( 'DIRACSetup' ):
      if paramsDict['DIRACSetup']['value']:
        arguments.append( '-o DIRAC/Setup=%s' % ( paramsDict['DIRACSetup']['value'] ) )
      else:
        self.log.warn( 'Job DIRACSetup defined with null value' )
    if paramsDict.has_key( 'JobMode' ):
      if paramsDict['JobMode']['value']:
        arguments.append( '-o JobMode=%s' % ( paramsDict['JobMode']['value'] ) )
      else:
        self.log.warn( 'Job Mode defined with null value' )
    if paramsDict.has_key( 'JobConfigArgs' ):
      if paramsDict['JobConfigArgs']['value']:
        arguments.append( '%s' % ( paramsDict['JobConfigArgs']['value'] ) )
      else:
        self.log.warn( 'JobConfigArgs defined with null value' )
    if self.parametricWFArguments:
      for name, value in self.parametricWFArguments.items():
        arguments.append( "-p %s='%s'" % ( name, value ) )

    classadJob.insertAttributeString( 'Executable', self.executable )
    self.addToOutputSandbox.append( self.stderr )
    self.addToOutputSandbox.append( self.stdout )

    #Extract i/o sandbox parameters from steps and any input data parameters
    #to do when introducing step-level api...

    #To add any additional files to input and output sandboxes
    if self.addToInputSandbox:
      extraFiles = ';'.join( self.addToInputSandbox )
      if paramsDict.has_key( 'InputSandbox' ):
        currentFiles = paramsDict['InputSandbox']['value']
        finalInputSandbox = currentFiles + ';' + extraFiles
        uniqueInputSandbox = uniqueElements( finalInputSandbox.split( ';' ) )
        paramsDict['InputSandbox']['value'] = ';'.join( uniqueInputSandbox )
        self.log.verbose( 'Final unique Input Sandbox %s' % ( ';'.join( uniqueInputSandbox ) ) )
      else:
        paramsDict['InputSandbox'] = {}
        paramsDict['InputSandbox']['value'] = extraFiles
        paramsDict['InputSandbox']['type'] = 'JDL'

    if self.addToOutputSandbox:
      extraFiles = ';'.join( self.addToOutputSandbox )
      if paramsDict.has_key( 'OutputSandbox' ):
        currentFiles = paramsDict['OutputSandbox']['value']
        finalOutputSandbox = currentFiles + ';' + extraFiles
        uniqueOutputSandbox = uniqueElements( finalOutputSandbox.split( ';' ) )
        paramsDict['OutputSandbox']['value'] = ';'.join( uniqueOutputSandbox )
        self.log.verbose( 'Final unique Output Sandbox %s' % ( ';'.join( uniqueOutputSandbox ) ) )
      else:
        paramsDict['OutputSandbox'] = {}
        paramsDict['OutputSandbox']['value'] = extraFiles
        paramsDict['OutputSandbox']['type'] = 'JDL'

    if self.addToInputData:
      extraFiles = ';'.join( self.addToInputData )
      if paramsDict.has_key( 'InputData' ):
        currentFiles = paramsDict['InputData']['value']
        finalInputData = extraFiles
        if currentFiles:
          finalInputData = currentFiles + ';' + extraFiles
        uniqueInputData = uniqueElements( finalInputData.split( ';' ) )
        paramsDict['InputData']['value'] = ';'.join( uniqueInputData )
        self.log.verbose( 'Final unique Input Data %s' % ( ';'.join( uniqueInputData ) ) )
      else:
        paramsDict['InputData'] = {}
        paramsDict['InputData']['value'] = extraFiles
        paramsDict['InputData']['type'] = 'JDL'

    # Handle parameter sequences
    if self.numberOfParameters > 0:
      paramsDict, arguments = self._handleParameterSequences( paramsDict, arguments )

    classadJob.insertAttributeString( 'Arguments', ' '.join( arguments ) )

    #Add any JDL parameters to classad obeying lists with ';' rule
    for name, props in paramsDict.iteritems():
      ptype = props['type']
      value = props['value']
      if isinstance( value, basestring) and re.search( ';', value ):
        value = value.split( ';' )
      if name.lower() == 'requirements' and ptype == 'JDL':
        self.log.verbose( 'Found existing requirements: %s' % ( value ) )

      if re.search( '^JDL', ptype ):
        if isinstance( value, list ):
          if isinstance( value[0], list ):
            classadJob.insertAttributeVectorStringList( name, value )
          else:
            classadJob.insertAttributeVectorInt( name, value )
        elif isinstance( value, basestring ) and value:
          classadJob.insertAttributeInt( name, value )
        elif isinstance( value, ( int, long, float ) ):
          classadJob.insertAttributeInt( name, value )

    if self.numberOfParameters > 0:
      classadJob.insertAttributeInt( 'Parameters', self.numberOfParameters )

    for fToBeRemoved in [scriptName, self.stdout, self.stderr]:
      try:
        self.addToInputSandbox.remove( fToBeRemoved )
      except ValueError:
        pass

    jdl = classadJob.asJDL()
    start = jdl.find( '[' )
    end = jdl.rfind( ']' )
    return jdl[( start + 1 ):( end - 1 )]
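
In this example StringIO.StringIO only appears in an isinstance check: the job description may be a file on disk or an in-memory buffer. A hypothetical caller could build such a buffer like this (the XML content is a placeholder, not DIRAC's actual format):

import StringIO

xml_text = '<Workflow></Workflow>'            # placeholder job description
job_description = StringIO.StringIO(xml_text)
assert isinstance(job_description, StringIO.StringIO)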

Example 57

Project: kay Source File: media_compiler.py
def compile_js_(tag_name, js_config, force):
  if IS_APPSERVER:
    return

  def needs_update(media_info):
    if js_config['tool'] != 'goog_calcdeps':
      # update if target file does not exist
      target_path = make_output_path_(js_config, js_config['subdir'],
                                      js_config['output_filename'])
      if not os.path.exists(target_path):
        return True

    # update if it lacks required info in _media.yaml
    last_info = media_info.get(js_config['subdir'], tag_name)
    if not last_info:
      return True
    last_config = last_info.get('config')
    if not last_config:
      return True

    # update if any configuration setting is changed
    if not equal_object_(last_config, js_config):
      return True

    if 'related_files' not in last_info:
      return True
    for path, mtime in last_info['related_files']:
      if mtime != os.path.getmtime(path):
        return True
      
  def jsminify(js_path):
    from StringIO import StringIO
    from kay.ext.media_compressor.jsmin import JavascriptMinify
    ifile = open(js_path)
    outs = StringIO()
    JavascriptMinify().minify(ifile, outs)
    ret = outs.getvalue()
    if len(ret) > 0 and ret[0] == '\n':
      ret = ret[1:]
    return ret

  def concat(js_path):
    print_status(" concat %s" % js_path)
    ifile = open(js_path)
    js = ifile.read()
    ifile.close()
    return js

  def goog_calcdeps():
    deps_config = copy.deepcopy(js_config['goog_common'])
    deps_config.update(js_config['goog_calcdeps'])

    if deps_config.get('method') not in \
          ['separate', 'concat', 'concat_refs', 'compile']:
      print_status("COMPILE_MEDIA_JS['goog_calcdeps']['method'] setting is"
                   " invalid; unknown method `%s'" % deps_config.get('method'))
      sys.exit(1)

    output_urls = []
    if deps_config['method'] == 'separate':
      source_files, output_urls = goog_calcdeps_separate(deps_config)
    elif deps_config['method'] == 'concat':
      source_files, output_urls = goog_calcdeps_concat(deps_config)
    elif deps_config['method'] == 'concat_refs':
      source_files, output_urls = goog_calcdeps_concat_refs(deps_config)
    elif deps_config['method'] == 'compile':
      source_files, output_urls = goog_calcdeps_compile(deps_config)
      source_files = [file[0] for file in source_files]

    related_files = union_list(source_files, 
                               [make_input_path_(path)
                                  for path in js_config['source_files']])
    related_file_info = [(path, os.path.getmtime(path))
                           for path in related_files]
    
    # create yaml info
    last_info = {'config': copy.deepcopy(js_config),
                 'related_files': related_file_info,
                 'result_urls': output_urls}
    media_info.set(js_config['subdir'], tag_name, last_info)
    media_info.save()

  def goog_calcdeps_separate(deps_config):
    source_files = goog_calcdeps_list(deps_config)
    (output_urls, extern_urls) = goog_calcdeps_copy_files(deps_config,
                                                          source_files)
    return (source_files, extern_urls + output_urls)

  def goog_calcdeps_concat(deps_config):
    source_files = goog_calcdeps_list(deps_config)
    (output_urls, extern_urls) = goog_calcdeps_concat_files(deps_config,
                                                            source_files)
    return (source_files, extern_urls + output_urls)

  def goog_calcdeps_concat_refs(deps_config):
    source_files = goog_calcdeps_list(deps_config)
    original_files = [make_input_path_(path)
                      for path in js_config['source_files']]
    ref_files = [path for path in source_files if path not in original_files]
    (output_urls, extern_urls) = goog_calcdeps_concat_files(deps_config,
                                                            ref_files)
    original_urls = [path[len(kay.PROJECT_DIR):] for path in original_files]
    return (source_files, extern_urls + output_urls + original_urls)

  def goog_calcdeps_compile(deps_config):
    comp_config = copy.deepcopy(js_config['goog_common'])
    comp_config.update(js_config['goog_compiler'])

    source_files = []
    extern_urls = []

    command = '%s -o compiled -c "%s" ' % (deps_config['path'],
                                                 comp_config['path'])
    for path in deps_config.get('search_paths', []):
      command += '-p %s ' % make_input_path_(path)
    for path in js_config['source_files']:
      path = make_input_path_(path)
      command += '-i %s ' % path
      source_files.append((path, os.path.getmtime(path)))

    if comp_config['level'] == 'minify':
      level = 'WHITESPACE_ONLY'
    elif comp_config['level'] == 'advanced':
      level = 'ADVANCED_OPTIMIZATIONS'
    else:
      level = 'SIMPLE_OPTIMIZATIONS'
    flags = '--compilation_level=%s' % level
#    for path in comp_config.get('externs', []):
#      flags += '--externs=%s ' % make_input_path_(path)
#    if comp_config.get('externs'):
#      flags += ' --externs=%s ' % " ".join(comp_config['externs'])
    command += '-f "%s" ' % flags
    print_status(command)
    command_output = os.popen(command).read()

    output_path = make_output_path_(js_config, js_config['subdir'],
                                    js_config['output_filename'])
    ofile = create_file_(output_path)
    try:
      for path in comp_config.get('externs', []):
        if re.match(r'^https?://', path):
          extern_urls.append(path)
          continue
        path = make_input_path_(path)
        ifile = open(path)
        try:
          ofile.write(ifile.read())
        finally:
          ifile.close()
        source_files.append((path, os.path.getmtime(path)))
      ofile.write(command_output)
    finally:
      ofile.close()
    return (source_files, extern_urls + [output_path[len(kay.PROJECT_DIR):]])

  def goog_calcdeps_list(deps_config):
    source_files = []

    command = '%s -o list ' % deps_config['path']
    for path in deps_config['search_paths']:
      command += '-p %s ' % make_input_path_(path)
    for path in js_config['source_files']:
      command += '-i %s ' % make_input_path_(path)
    print_status(command)
    command_output = os.popen(command).read()
    for path in command_output.split("\n"):
      if path == '': continue
      source_files.append(path)
    return source_files

  def goog_calcdeps_copy_files(deps_config, source_files):
    extern_urls = []
    output_urls = []

    output_dir_base = make_output_path_(js_config, 'separated_js')

    if not os.path.exists(output_dir_base):
      os.makedirs(output_dir_base)
    if not deps_config.get('use_dependency_file', True):
      output_path = os.path.join(output_dir_base, '__goog_nodeps.js')
      ofile = open(output_path, "w")
      output_urls.append(output_path[len(kay.PROJECT_DIR):])
      try:
        ofile.write('CLOSURE_NO_DEPS = true;')
      finally:
        ofile.close()

    output_dirs = {}
    search_paths = [make_input_path_(path)
                    for path in deps_config['search_paths']]
    for path in search_paths:
      output_dirs[path] = os.path.join(output_dir_base,
                                       md5.new(path).hexdigest())

    all_paths = [make_input_path_(path)
                 for path in deps_config.get('externs', [])]
    all_paths.extend(source_files)
    for path in all_paths:
      if re.match(r'^https?://', path):
        extern_urls.append(path)
        continue

      path = make_input_path_(path)
      output_path = os.path.join(output_dir_base, re.sub('^/', '', path))
      for dir in search_paths:
        if path[0:len(dir)] == dir:
          output_path = os.path.join(output_dirs[dir],
                                     re.sub('^/', '', path[len(dir):]))
          break
      output_dir = os.path.dirname(output_path)

      if not os.path.exists(output_dir):
        os.makedirs(output_dir)
      shutil.copy2(path, output_path)
      output_urls.append(output_path[len(kay.PROJECT_DIR):])
    return (output_urls, extern_urls)
    
  def goog_calcdeps_concat_files(deps_config, source_files):
    extern_urls = []

    output_path = make_output_path_(js_config, js_config['subdir'],
                                    js_config['output_filename'])
    ofile = create_file_(output_path)
    try:
      if not deps_config.get('use_dependency_file', True):
        ofile.write('CLOSURE_NO_DEPS = true;')
      all_paths = [make_input_path_(path)
                   for path in deps_config.get('externs', [])]
      all_paths.extend(source_files)
      for path in all_paths:
        if re.match(r'^https?://', path):
          extern_urls.append(path)
          continue
        ifile = open(make_input_path_(path))
        ofile.write(ifile.read())
        ifile.close()
    finally:
      ofile.close()

    return ([output_path[len(kay.PROJECT_DIR):]], extern_urls)

  selected_tool = js_config['tool']

  if selected_tool not in \
        (None, 'jsminify', 'concat', 'goog_calcdeps', 'goog_compiler'):
    print_status("COMPILE_MEDIA_JS['tool'] setting is invalid;"
                 " unknown tool `%s'" % selected_tool)
    sys.exit(1)

  global media_info
  if media_info is None:
    media_info = MediaInfo.load()

  if not force and not needs_update(media_info):
    print_status(' up to date.')
    return

  if selected_tool == 'goog_calcdeps':
    return goog_calcdeps()

  if selected_tool is None:
    last_info = {'config': copy.deepcopy(js_config),
                 'result_urls': ['/'+f for f in js_config['source_files']]}
    media_info.set(js_config['subdir'], tag_name, last_info)
    media_info.save()
    return

  dest_path = make_output_path_(js_config, js_config['subdir'],
                                js_config['output_filename'])
  ofile = create_file_(dest_path)
  try:
    if selected_tool == 'jsminify':
      for path in js_config['source_files']:
        src_path = make_input_path_(path)
        ofile.write(jsminify(src_path))
    elif selected_tool == 'concat':
      for path in js_config['source_files']:
        src_path = make_input_path_(path)
        ofile.write(concat(src_path))
  finally:
    ofile.close()
  
  if selected_tool == 'goog_compiler':
    comp_config = copy.deepcopy(js_config['goog_common'])
    comp_config.update(js_config['goog_compiler'])
    if comp_config['level'] == 'minify':
      level = 'WHITESPACE_ONLY'
    elif comp_config['level'] == 'advanced':
      level = 'ADVANCED_OPTIMIZATIONS'
    else:
      level = 'SIMPLE_OPTIMIZATIONS'
    command_args = '--compilation_level=%s' % level
    for path in js_config['source_files']:
      command_args += ' --js %s' % make_input_path_(path)
    command_args += ' --js_output_file %s' % dest_path
    command = 'java -jar %s %s' % (comp_config['path'], command_args)
    command_output = os.popen(command).read()

  info = copy.deepcopy(js_config)
  info['output_filename'] = make_output_path_(js_config, js_config['subdir'],
                                              js_config['output_filename'],
                                              relative=True)
  info['result_urls'] = ['/'+info['output_filename']]
  media_info.set(js_config['subdir'], tag_name, info)
  media_info.save()
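
The jsminify helper above uses StringIO as the output stream for a minifier that expects file objects. The same shape works for any stream-to-stream filter; a generic sketch (the filter argument stands in for something like JavascriptMinify().minify):

from StringIO import StringIO

def filter_through(stream_filter, text):
    # Run a filter that reads from one file-like object and writes to
    # another, entirely in memory, and return the filtered text.
    ins = StringIO(text)
    outs = StringIO()
    stream_filter(ins, outs)
    return outs.getvalue()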

Example 58

Project: openface Source File: websocket-server.py
Function: process_frame
    def processFrame(self, dataURL, identity):
        head = "data:image/jpeg;base64,"
        assert(dataURL.startswith(head))
        imgdata = base64.b64decode(dataURL[len(head):])
        imgF = StringIO.StringIO()
        imgF.write(imgdata)
        imgF.seek(0)
        img = Image.open(imgF)

        buf = np.fliplr(np.asarray(img))
        rgbFrame = np.zeros((300, 400, 3), dtype=np.uint8)
        rgbFrame[:, :, 0] = buf[:, :, 2]
        rgbFrame[:, :, 1] = buf[:, :, 1]
        rgbFrame[:, :, 2] = buf[:, :, 0]

        if not self.training:
            annotatedFrame = np.copy(buf)

        # cv2.imshow('frame', rgbFrame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     return

        identities = []
        # bbs = align.getAllFaceBoundingBoxes(rgbFrame)
        bb = align.getLargestFaceBoundingBox(rgbFrame)
        bbs = [bb] if bb is not None else []
        for bb in bbs:
            # print(len(bbs))
            landmarks = align.findLandmarks(rgbFrame, bb)
            alignedFace = align.align(args.imgDim, rgbFrame, bb,
                                      landmarks=landmarks,
                                      landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
            if alignedFace is None:
                continue

            phash = str(imagehash.phash(Image.fromarray(alignedFace)))
            if phash in self.images:
                identity = self.images[phash].identity
            else:
                rep = net.forward(alignedFace)
                # print(rep)
                if self.training:
                    self.images[phash] = Face(rep, identity)
                    # TODO: Transferring as a string is suboptimal.
                    # content = [str(x) for x in cv2.resize(alignedFace, (0,0),
                    # fx=0.5, fy=0.5).flatten()]
                    content = [str(x) for x in alignedFace.flatten()]
                    msg = {
                        "type": "NEW_IMAGE",
                        "hash": phash,
                        "content": content,
                        "identity": identity,
                        "representation": rep.tolist()
                    }
                    self.sendMessage(json.dumps(msg))
                else:
                    if len(self.people) == 0:
                        identity = -1
                    elif len(self.people) == 1:
                        identity = 0
                    elif self.svm:
                        identity = self.svm.predict(rep)[0]
                    else:
                        print("hhh")
                        identity = -1
                    if identity not in identities:
                        identities.append(identity)

            if not self.training:
                bl = (bb.left(), bb.bottom())
                tr = (bb.right(), bb.top())
                cv2.rectangle(annotatedFrame, bl, tr, color=(153, 255, 204),
                              thickness=3)
                for p in openface.AlignDlib.OUTER_EYES_AND_NOSE:
                    cv2.circle(annotatedFrame, center=landmarks[p], radius=3,
                               color=(102, 204, 255), thickness=-1)
                if identity == -1:
                    if len(self.people) == 1:
                        name = self.people[0]
                    else:
                        name = "Unknown"
                else:
                    name = self.people[identity]
                cv2.putText(annotatedFrame, name, (bb.left(), bb.top() - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75,
                            color=(152, 255, 204), thickness=2)

        if not self.training:
            msg = {
                "type": "IDENTITIES",
                "identities": identities
            }
            self.sendMessage(json.dumps(msg))

            plt.figure()
            plt.imshow(annotatedFrame)
            plt.xticks([])
            plt.yticks([])

            imgdata = StringIO.StringIO()
            plt.savefig(imgdata, format='png')
            imgdata.seek(0)
            content = 'data:image/png;base64,' + \
                urllib.quote(base64.b64encode(imgdata.buf))
            msg = {
                "type": "ANNOTATED",
                "content": content
            }
            plt.close()
            self.sendMessage(json.dumps(msg))
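
A minimal sketch of the two in-memory image patterns used above: loading a base64 data URL into PIL via StringIO, and rendering a matplotlib figure into a StringIO buffer to produce a base64 PNG. PIL and matplotlib are assumed to be installed and the helper names are illustrative; note that getvalue() is the public way to read the buffer back, whereas the imgdata.buf attribute above works only because the preceding seek() flushes StringIO's internal write list.

import base64
import urllib
import StringIO
from PIL import Image
import matplotlib
matplotlib.use('Agg')          # headless backend, so no display is needed
import matplotlib.pyplot as plt

def decode_data_url(dataURL, head="data:image/jpeg;base64,"):
    # Strip the data-URL prefix and load the JPEG bytes through StringIO.
    imgF = StringIO.StringIO(base64.b64decode(dataURL[len(head):]))
    return Image.open(imgF)

def figure_to_data_url(img):
    # Render a matplotlib figure into an in-memory PNG and re-encode it.
    plt.figure()
    plt.imshow(img)
    buf = StringIO.StringIO()
    plt.savefig(buf, format='png')
    plt.close()
    return 'data:image/png;base64,' + \
        urllib.quote(base64.b64encode(buf.getvalue()))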

Example 59

Project: aclhound Source File: deploy_ios.py
def deploy(hostname=None, acls=None, transport='ssh', save_config=False,
           timeout=60):
    """
    Deploy code in a safe way to a Cisco IOS device.
    """
    try:
        username, enable_pass, password = \
            netrc.netrc().authenticators(hostname)
        account = Account(name=username, password=password,
                          password2=enable_pass)
    except:
        print("ERROR: could not find device in ~/.netrc file")
        print("HINT: either update .netrc or enter username + pass now.")
        try:
            account = read_login()
        except EOFError:
            print("ERROR: could not find proper username + pass")
            print("HINT: set username & pass in ~/.netrc for device %s"
                  % hostname)
            import sys
            sys.exit(2)

    def s(conn, line):
        print("   %s" % line)
        conn.execute(line)

    def collect_interfaces(conn):
        template = """# textfsm
Value Required Interface ([^ ]+)
Value Inbound (.*)
Value Outbound (.*)

Start
  ^${Interface} is up
  ^  Outgoing access list is ${Outbound}
  ^  Inbound  access list is ${Inbound} -> Record Start

"""
        template_file = StringIO(template)
        table = textfsm.TextFSM(template_file)
        s(conn, 'show ip int | inc ine pro|list is')
        interface_acl_v4 = table.ParseText(conn.response)

        template = """# textfsm
Value Required Interface ([^ ]+)
Value Inbound (.*)
Value Outbound (.*)

Start
  ^${Interface} is up
  ^  Inbound access list ${Inbound}
  ^  Outgoing access list ${Outbound} -> Record Start

"""
        template_file = StringIO(template)
        table = textfsm.TextFSM(template_file)
        s(conn, 'show ipv6 int  | i ine pro|access list')
        interface_acl_v6 = table.ParseText(conn.response)
        template = """# textfsm
Value Required Vty (\d+\s\d+)
Value Inbound4 ([^ ]+)
Value Outbound4 ([^ ]+)
Value Inbound6 ([^ ]+)
Value Outbound6 ([^ ]+)

Start
  ^line vty ${Vty}
  ^ access-class ${Inbound4} in
  ^ access-class ${Outbound4} out
  ^ ipv6 access-class ${Inbound6} in
  ^ ipv6 access-class ${Outbound6} out -> Record Start

"""
        template_file = StringIO(template)
        table = textfsm.TextFSM(template_file)
        s(conn, 'show run | begin ^line vty')
        interface_acl_vty = table.ParseText(conn.response)

        results = {4: interface_acl_v4, 6: interface_acl_v6}
        # add vty lines
        for vty in interface_acl_vty:
            # v4 inbound
            v4_inbound = vty[1] if vty[1] else "not set"
            v4_outbound = vty[2] if vty[2] else "not set"
            v6_inbound = vty[3] if vty[3] else "not set"
            v6_outbound = vty[4] if vty[4] else "not set"
            results[4].append(["vty %s" % vty[0], v4_inbound, v4_outbound])
            results[6].append(["vty %s" % vty[0], v6_inbound, v6_outbound])
        return results

    # main flow of the program starts here
    if transport == 'ssh':
        conn = SSH2(verify_fingerprint=False, debug=0, timeout=timeout)
    elif transport == 'telnet':
        conn = Telnet(debug=0)
    else:
        print("ERROR: Unknown transport mechanism: %s"
              % transport)
        sys.exit(2)
    conn.set_driver('ios')
    conn.connect(hostname)
    conn.login(account)
    conn.execute('terminal length 0')
    conn.auto_app_authorize(account)
    capabilities = {}
    s(conn, "show ipv6 cef")
    capabilities['ipv6'] = False if "%IPv6 CEF not running" in conn.response else True
    if capabilities['ipv6']:
        print("INFO: IPv6 support detected")
    else:
        print("INFO: NO IPv6 support detected, skipping IPv6 ACLs")
    # map into structure:
    # policyname { (int, afi, direction) }
    map_pol_int = {}
    interfaces_overview = collect_interfaces(conn)
    for afi in interfaces_overview:
        for interface, inbound, outbound in interfaces_overview[afi]:
            # add inbound rules to map
            if inbound not in map_pol_int.keys():
                map_pol_int[inbound] = [{"int": interface,
                                        "afi": afi,
                                        "dir": "in"}]
            else:
                map_pol_int[inbound].append({"int": interface,
                                             "afi": afi,
                                             "dir": "in"})
            # add outbound
            if outbound not in map_pol_int.keys():
                map_pol_int[outbound] = [{"int": interface,
                                          "afi": afi,
                                          "dir": "out"}]
            else:
                map_pol_int[outbound].append({"int": interface,
                                             "afi": afi,
                                             "dir": "out"})
    print("INFO: interface / policy mapping:")
    pprint(map_pol_int)

    def lock_step(lock, pol, capabilities):
        name = acls[pol]['name']
        afi = acls[pol]['afi']
        if afi == 6 and not capabilities['ipv6']:
            return
        policy = acls[pol]['policy']
        print("INFO: uploading name: %s, afi: %s" % (name, afi))
        s(conn, 'configure terminal')
        if afi == 4:
            try:
                s(conn, "no ip access-list extended %s%s" % (lock, name))
            except:
                pass
            s(conn, "ip access-list extended %s%s" % (lock, name))
            for line in policy.split('\n'):
                s(conn, line)
        if afi == 6:
            try:
                s(conn, "no ipv6 access-list %s%s" % (lock, name))
            except:
                pass
            s(conn, "ipv6 access-list %s%s" % (lock, name))
            for line in policy.split('\n'):
                s(conn, line)
        s(conn, "end")

        # then replace ACL on all interfaces / VTYs
        if name in map_pol_int:
            for entry in map_pol_int[name]:
                if not entry['afi'] == afi:
                    continue
                print("INFO: lockstepping policy %s afi %s" % (name, afi))
                s(conn, "configure terminal")
                if entry['int'].startswith('vty '):
                    s(conn, "line %s" % entry['int'])
                    if afi == 4:
                        s(conn, "access-class %s%s %s"
                          % (lock, name, entry['dir']))
                    if afi == 6:
                        s(conn, "ipv6 access-class %s%s %s"
                          % (lock, name, entry['dir']))
                else:
                    s(conn, "interface %s" % entry['int'])
                    if afi == 4:
                        s(conn, "ip access-group %s%s %s"
                          % (lock, name, entry['dir']))
                    if afi == 6:
                        s(conn, "ipv6 traffic-filter %s%s %s"
                          % (lock, name, entry['dir']))
                s(conn, "end")

    for policy in acls:
        for lock in ["LOCKSTEP-", ""]:
            lock_step(lock, policy, capabilities)
        # cleanup
        s(conn, "configure terminal")
        if acls[policy]['afi'] == 4:
            s(conn, "no ip access-list extended LOCKSTEP-%s"
              % acls[policy]['name'])
        if acls[policy]['afi'] == 6 and capabilities['ipv6']:
            s(conn, "no ipv6 access-list LOCKSTEP-%s"
              % acls[policy]['name'])
        s(conn, "end")

    if save_config == True:
        s(conn, "write")

Example 60

Project: pymtl Source File: cpp.py
def CLogicTransl( model, o=sys.stdout ):

    c_functions = StringIO.StringIO()
    c_variables = StringIO.StringIO()

    signals    = sim_utils.collect_signals( model )
    nets, _    = sim_utils.signals_to_nets( signals )
    seq_blocks = sim_utils.register_seq_blocks( model )
    # TODO: update translation so that this is unneeded?
    sim_utils.insert_signal_values( None, nets )

    # Visit tick functions, save register information
    ast_next  = []
    localvars = {}
    for func in seq_blocks:
      r, l = translate_func( func, c_functions )
      ast_next.extend( r )
      localvars.update( l )

    # Print signal declarations, use reg information
    top_ports, all_ports, shadows = declare_signals(nets, ast_next, c_variables)

    inport_names   = ['top_'+mangle_name(x.name) for x in model.get_inports() ]
    outport_names  = ['top_'+mangle_name(x.name) for x in model.get_outports()]
    top_inports    = []
    top_outports   = []

    for x in model.get_ports():
      x.cpp_name = 'top_'+mangle_name( x.name )

    # Separate input and output ports
    for port in top_ports:
      name = port[0]
      if   name in inport_names:
        top_inports.append( port )
      elif name in outport_names:
        top_outports.append( port )
      #else:
      #    raise Exception("Unknown port detected!")

    # print locals
    print >> c_variables
    print >> c_variables, '/* LOCALS ' + '-'*60 + '*/'
    for var, obj in localvars.items():
      rvar = var.replace('.','_')
      if rvar not in all_ports:
        if   isinstance( obj, int ):
          var_type = get_type( obj, o )
          print >> c_variables, "{} {} = {};".format( var_type, rvar, obj )
        # TODO: super hacky handling of lists
        elif isinstance( obj, list ):
          var_type = get_type( obj[0], o )
          split = var.split('.')
          pfx = '_'.join( split[0:2] )
          sfx = '_'+'_'.join( split[2:]  ) if split[2:] else ''
          vx = [ '&{}_IDX{:03}{}'.format(pfx,i,sfx) for i in range(len(obj)) ]
          # Declare the variables if they don't exist yet
          for x in vx:
            if x[1:] not in all_ports:
              print >> c_variables, "{} {};".format( var_type, x[1:])
          print >> c_variables, "{} * {}[] = {{ {} }};".format(
              var_type, rvar, ', '.join(vx ) )
        else:
          var_type = get_type( obj, o )
          print >> c_variables, "{} {};".format( var_type, rvar )


    # Declare parameters
    params = []
    for name, net, type_ in top_inports[:2]:
      params.append( '    {}   _{}'.format( type_, name ) )
    params  .append( '    iface_t * top') 
    params = ',\n'.join( params )

    # Create the C header
    print >> o, '#include <stdio.h>'
    print >> o, '#include <assert.h>'
    print >> o, '#include <queue>'
    print >> o, '#define  True  true'
    print >> o, '#define  False false'
    print >> o

    print >> o, gen_cheader( params, top_ports[2:] )
    print >> o, c_variables.getvalue()
    print >> o, c_functions.getvalue()

    print   >> o, 'unsigned int ncycles;\n\n'

    # Create the cycle function
    print   >> o, '/* cycle */'
    print   >> o, 'void cycle({}) {{'.format( '\n'+params+'\n' )

    # Set input ports from params
    print   >> o
    print   >> o, '  /* Set inports */'
    print   >> o, '  top_clk   = _top_clk;'
    print   >> o, '  top_reset = _top_reset;'
    for name, _, _ in top_inports[2:]:
      print >> o, '  {} = top->{};'.format( name, name[4:] )

    # Execute all ticks
    print   >> o
    print   >> o, '  /* Execute all ticks */'
    for x in seq_blocks:
      print >> o, '  {}_{}();'.format( mangle_idxs(x._model.name), x.func_name)
    print   >> o, '  ncycles++;'

    # Update all registers
    print   >> o
    print   >> o, '  /* Update all registers */'
    for s in shadows:
      print >> o, '  {0} = {0}_next;'.format( s )

    # Update params from output ports
    print   >> o
    print   >> o, '  /* Assign all outputs */'
    for name, _, _ in top_outports:
      print >> o, '  top->{} = {};'.format( name[4:], name )

    print   >> o, '}'
    print   >> o

    # Create the cdef and Python wrapper
    cdef        = gen_cdef( params, top_ports[2:] )
    CSimWrapper = gen_pywrapper( top_inports, top_outports )

    return cdef, CSimWrapper
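
The translator builds the generated C source in StringIO buffers so that "print >>" can target them like files, then splices the text into the final output with getvalue(). A minimal sketch of that pattern, with made-up declarations:

import StringIO
import sys

c_functions = StringIO.StringIO()
c_variables = StringIO.StringIO()

# "print >>" treats the StringIO buffers exactly like writable files.
print >> c_variables, 'unsigned int counter = 0;'
print >> c_functions, 'void tick() { counter++; }'

# Splice the accumulated text into the final output stream.
out = sys.stdout
print >> out, '/* generated */'
print >> out, c_variables.getvalue()
print >> out, c_functions.getvalue()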

Example 61

Project: golismero Source File: spiderfoot.py
    def run(self, info):

        # Get the base URL to the SpiderFoot API.
        base_url = Config.plugin_args["url"]

        # Find out if we should delete the scan when we're done.
        must_delete = Config.audit_config.boolean(
                            Config.plugin_args.get("delete", "y"))

        # We need to catch SystemExit in order to stop and delete the scan.
        scan_id = None
        try:

            # Create a new scan.
            resp = post(urljoin(base_url, "startscan"), {
                "scanname": Config.audit_name,
                "scantarget": info.hostname,
                "modulelist": self.get_list("modulelist", "module_"),
                "typelist": self.get_list("typelist", "type_"),
                "usecase": Config.plugin_args.get("usecase", "all")
            })
            if resp.status_code != 200:
                r = resp.content
                p = r.find("<div class=\"alert alert-error\">")
                if p >= 0:
                    p = r.find("<h4>", p) + 4
                    q = r.find("</h4>", p)
                    m = r[p:q].strip()
                    raise RuntimeError("Could not start scan, reason: " + m)

            # Wait until the scan is finished.
            try:
                interval = float(Config.plugin_args.get("interval", "5.0"))
            except Exception:
                interval = 5.0
            url_scanlist = urljoin(base_url, "scanlist")
            last_msg = ""
            is_created = False
            scan_id = None
            create_checks = Config.plugin_args.get("create_checks",60)
            checks = 0
            while True:
                resp = get(url_scanlist)
                checks += 1
                if resp.status_code != 200:
                    status = "ERROR-FAILED"
                    break
                scanlist = resp.json()
                found = False
                for scan in scanlist:
                    scan_id, scan_name = scan[:2]
                    status, count = scan[-2:]
                    if scan_name == Config.audit_name:
                        found = True
                        break
                if found:
                    is_created = True
                    is_finished = status in ("FINISHED", "ABORTED", "ERROR-FAILED")
                    msg = "Status: %s (%s elements%s)" % (
                        status, count,
                        " so far" if not is_finished else ""
                    )
                    if msg != last_msg:
                        last_msg = msg
                        Logger.log_verbose(msg)
                    if is_finished:
                        break
                else:
                    if not is_created:
                        Logger.log_verbose("Status: CREATING")
                        if checks == create_checks:
                            Logger.log_error(
                                "Scan not found within %s checks, \
                                aborting!" % create_checks
                            )
                            return
                    else:
                        Logger.log_verbose("Status: DELETED")
                        Logger.log_error(
                            "Scan deleted from the SpiderFoot UI, aborting!")
                        return
                sleep(interval)

            # Tell the user if the scan didn't finish correctly.
            results = None
            try:
                has_partial = is_created and int(count) > 0
            except Exception:
                has_partial = is_created

            try:

                # Get the scan results.
                if has_partial:
                    Logger.log_error("Scan didn't finish correctly!")
                    Logger.log("Attempting to load partial results...")
                    parser = SpiderFootParser()
                    url = parse_url("scaneventresultexport", base_url)
                    url.query_params = {"id": scan_id, "type": "ALL"}
                    resp = get(url.url)
                    if resp.status_code != 200:
                        Logger.log_error(
                            "Could not get scan results, error code: %s"
                            % resp.status_code)
                    else:
                        results = parser.parse(StringIO(resp.content))
                        if results:
                            if len(results) == 1:
                                Logger.log("Loaded 1 result.")
                            else:
                                Logger.log("Loaded %d results." % len(results))
                        else:
                            Logger.log("No results loaded.")
                else:
                    Logger.log_error("Scan didn't finish correctly, aborting!")

            finally:

                # Delete the scan.
                try:
                    if is_created and must_delete:
                        url = parse_url("scandelete", base_url)
                        url.query_params = {"id": scan_id, "confirm": "1"}
                        get(url.url)
                        ##if resp.status_code != 200:
                        ##    Logger.log_error_more_verbose(
                        ##        "Could not delete scan, error code: %s"
                        ##        % resp.status_code)
                except Exception, e:
                    tb = format_exc()
                    Logger.log_error_verbose(str(e))
                    Logger.log_error_more_verbose(tb)

            # Return the results.
            return results

        # If we caught SystemExit, that means GoLismero is shutting down.
        # Just stop and delete the scan in SpiderFoot without logging
        # anything nor calling the GoLismero API (it won't work anymore).
        except SystemExit:
            if scan_id is not None:
                try:
                    url = parse_url("stopscan", base_url)
                    url.query_params = {"id": scan_id}
                    get(url.url)
                finally:
                    if must_delete:
                        url = parse_url("scandelete", base_url)
                        url.query_params = {"id": scan_id, "confirm": "1"}
                        get(url.url)
            raise

Example 62

Project: ganga Source File: AthenaLCGRTHandler.py
    def master_prepare( self, app, appconfig):
        """Prepare the master job"""

        job = app._getParent() # Returns job or subjob object
        logger.debug('AthenaLCGRTHandler master_prepare called: %s', job.id )


        if job._getRoot().subjobs:
            jobid = "%d" % (job._getRoot().id)
        else:
            jobid = "%d" % job.id

        # Generate output dataset name
        if job.outputdata:
            if job.outputdata._name=='DQ2OutputDataset':
                dq2_datasetname = job.outputdata.datasetname
                dq2_isGroupDS = job.outputdata.isGroupDS
                dq2_groupname = job.outputdata.groupname
            else:
                dq2_datasetname = ''
                dq2_isGroupDS = False
                dq2_groupname = ''
            self.output_datasetname, self.output_lfn = dq2outputdatasetname(dq2_datasetname, jobid, dq2_isGroupDS, dq2_groupname)

        # Check if all sites are in the same cloud
        if job.backend.requirements.sites:
            firstCloud = whichCloud(job.backend.requirements.sites[0])
            for site in job.backend.requirements.sites:
                cloud = whichCloud(site)
                if cloud != firstCloud:
                    printout = 'Job submission failed ! Site specified with j.backend.requirements.sites=%s are not in the same cloud !' %(job.backend.requirements.sites)
                    raise ApplicationConfigurationError(None,printout )


        #this next for loop instructs ganga to use option_files that live in the appropriate shared directory (the job
        #will already have been prepared
        #(if is_prepared is True, then we've most likely submitted a job via GangaRobot. We know what we're doing.
        #if app.is_prepared is not True:
        #    for position in xrange(len(app.option_file)):
        #        app.option_file[position]=File(os.path.join(app.is_prepared.name,os.path.basename(app.option_file[position].name)))
        # Expand Athena jobOptions
        if not app.atlas_exetype in ['EXE']:
            athena_options = ' '.join([os.path.basename(opt_file.name) for opt_file in app.option_file])
            #if app.options: athena_options = ' -c ' + app.options + ' ' + athena_options
            if app.options:
                athena_options = app.options + ' ' + athena_options
                
            inputbox = [ File(opt_file.name) for opt_file in app.option_file ]
        else:
            athena_options = ' '.join([os.path.basename(opt_file.name) for opt_file in app.option_file])
            inputbox = []
            
        athena_usersetupfile = os.path.basename(app.user_setupfile.name)

#       prepare input sandbox

        
        inputbox.append( File(os.path.join(__directory__,'athena-utility.sh')) )

        if job.inputdata and job.inputdata._name == "AMIDataset" and job.inputdata.goodRunListXML.name != '':
            inputbox.append( File( job.inputdata.goodRunListXML.name ) )
    
        if job.inputdata and job.inputdata._name == 'ATLASDataset':
            if job.inputdata.lfc:
                _append_files(inputbox,'ganga-stagein-lfc.py')
            else:
                _append_files(inputbox,'ganga-stagein.py')
            
        if app.user_area.name: 
            #we will now use the user_area that's stored in the users shared directory
            if app.is_prepared is not True:
                tmp_user_name = os.path.join(os.path.join(shared_path,app.is_prepared.name),os.path.basename(app.user_area.name))
                inputbox.append(File(tmp_user_name))
            else:
                inputbox.append(File(app.user_area.name))

        #if app.group_area.name: inputbox += [ File(app.group_area.name) ]
        if app.group_area.name and str(app.group_area.name).find('http')<0:
            #we will now use the group_area that's stored in the users shared directory
            if app.is_prepared is not True:
                tmp_group_name = os.path.join(os.path.join(shared_path,app.is_prepared.name),os.path.basename(app.group_area.name))
                inputbox.append(File(tmp_group_name))
            else:
                inputbox.append(File(app.group_area.name))
    
        if app.user_setupfile.name: inputbox.append(File(app.user_setupfile.name))

        # CN: added TNTJobSplitter clause  

        if job.inputdata and (job.inputdata._name in [ 'DQ2Dataset', 'AMIDataset', 'EventPicking' ] ) or (job._getRoot().splitter and job._getRoot().splitter._name == 'TNTJobSplitter'):
            _append_files(inputbox,'ganga-stage-in-out-dq2.py','dq2_get','dq2info.tar.gz')
            if job.inputdata and job.inputdata.type == 'LFC' and not (job._getRoot().splitter and job._getRoot().splitter._name == 'TNTJobSplitter'):
                _append_files(inputbox,'dq2_get_old')

        if job.inputdata and job.inputdata._name ==  'ATLASTier3Dataset':
            _append_files(inputbox,'ganga-stage-in-out-dq2.py','dq2info.tar.gz')

        ## insert more scripts to inputsandbox for FileStager
        if job.inputdata and (job.inputdata._name in [ 'DQ2Dataset', 'AMIDataset', 'EventPicking']) and job.inputdata.type in ['FILE_STAGER']:
            _append_files(inputbox,'make_filestager_joption.py','dm_util.py','fs-copy.py')
            #_append_files(inputbox,'make_filestager_joption.py','dm_util.py')

        if job.outputdata and job.outputdata._name == 'DQ2OutputDataset':
            #if not job.outputdata.location:
            #    raise ApplicationConfigurationError(None,'j.outputdata.location is empty - Please specify a DQ2 output location - job not submitted !')
            if not 'ganga-stage-in-out-dq2.py' in [ os.path.basename(file.name) for file in inputbox ]:
                _append_files(inputbox,'ganga-stage-in-out-dq2.py')
            _append_files(inputbox,'ganga-joboption-parse.py')
            if not 'dq2info.tar.gz' in [os.path.basename(file.name) for file in inputbox ]:
                _append_files(inputbox,'dq2info.tar.gz') 

        #       add libDCache.so and libRFIO.so to fix broken access in athena 12.0.x
        if not 'ganga-stage-in-out-dq2.py' in [ os.path.basename(file.name) for file in inputbox ]:
            _append_files(inputbox, 'ganga-stage-in-out-dq2.py')
        if not 'dq2tracerreport.py' in [ os.path.basename(file.name) for file in inputbox ]:
            _append_files(inputbox, 'dq2tracerreport.py')
        if not 'db_dq2localid.py' in [ os.path.basename(file.name) for file in inputbox ]:
            _append_files(inputbox, 'db_dq2localid.py')
        if not 'getstats.py' in [ os.path.basename(file.name) for file in inputbox ]:
            _append_files(inputbox, 'getstats.py')


        if str(app.atlas_release).find('12.')>=0:
            _append_files(inputbox, 'libDCache.so','libRFIO.so','libdcap.so')
        elif str(app.atlas_release).find('13.')>=0:
            _append_files(inputbox,'libdcap.so')
        else:
            _append_files(inputbox,'libdcap.so')

        if job.inputsandbox: inputbox += job.inputsandbox
            
#       prepare environment

        if not app.atlas_release: 
            raise ApplicationConfigurationError(None,'j.application.atlas_release is empty - No ATLAS release version found. Run prepare() or specify a version explicitly.')

        environment={ 
            'ATLAS_RELEASE'  : app.atlas_release,
            'ATHENA_OPTIONS' : athena_options,
            'ATHENA_USERSETUPFILE' : athena_usersetupfile,
            'ATLAS_PROJECT' : app.atlas_project,
            'ATLAS_EXETYPE' : app.atlas_exetype,
            'GANGA_VERSION' : configSystem['GANGA_VERSION']
        }

        environment['DCACHE_RA_BUFFER'] = config['DCACHE_RA_BUFFER']

        if app.atlas_environment:
            for var in app.atlas_environment:
                try:
                    vars = re.match("^(\w+)=(.*)",var).group(1)
                    value = re.match("^(\w+)=(.*)",var).group(2)
                    environment[vars]=value
                except:
                    logger.warning('Athena.atlas_environment variable not correctly configured: %s', var)
                    pass

        if app.atlas_production and app.atlas_release.find('12.')>=0 and app.atlas_project != 'AtlasPoint1':
            temp_atlas_production = re.sub('\.','_',app.atlas_production)
            prod_url = config['PRODUCTION_ARCHIVE_BASEURL']+'/AtlasProduction_'+ temp_atlas_production +'_noarch.tar.gz'
            logger.info('Using Production cache from: %s', prod_url)
            environment['ATLAS_PRODUCTION_ARCHIVE'] = prod_url

        if app.atlas_production and (app.atlas_project == 'AtlasPoint1' or app.atlas_release.find('12.')<=0):
            environment['ATLAS_PRODUCTION'] = app.atlas_production
        
        if app.user_area.name: environment['USER_AREA'] = os.path.basename(app.user_area.name)
        #if app.group_area.name: environment['GROUP_AREA']=os.path.basename(app.group_area.name)
        if app.group_area.name:
            if str(app.group_area.name).find('http')>=0:
                environment['GROUP_AREA_REMOTE'] = str(app.group_area.name)
            else:
                environment['GROUP_AREA'] = os.path.basename(app.group_area.name)

        if app.max_events:
            if (app.max_events != -999) and (app.max_events > -2):
                environment['ATHENA_MAX_EVENTS'] = str(app.max_events)
        
        if job.backend.requirements._name == 'AtlasLCGRequirements':
            requirements = AtlasLCGRequirements()
        elif job.backend.requirements._name == 'AtlasCREAMRequirements':
            requirements = AtlasCREAMRequirements()
        else:
            requirements = AtlasLCGRequirements()
        
        if job.inputdata and job.inputdata._name == 'ATLASDataset':
            if job.inputdata.lfc:
                environment['GANGA_LFC_HOST'] = job.inputdata.lfc

        if 'ganga-stage-in-out-dq2.py' in [ os.path.basename(file.name) for file in inputbox ]:
            environment['DQ2_URL_SERVER'] = configDQ2['DQ2_URL_SERVER']
            environment['DQ2_URL_SERVER_SSL'] = configDQ2['DQ2_URL_SERVER_SSL']
        
        if job.inputdata and (job.inputdata._name in [ 'DQ2Dataset', 'AMIDataset', 'EventPicking']):
            if job.inputdata.dataset:
                datasetname = job.inputdata.dataset
                environment['DATASETNAME'] = ':'.join(datasetname)
                environment['DATASETLOCATION'] = ':'.join(job.inputdata.get_locations())
                environment['DQ2_URL_SERVER'] = configDQ2['DQ2_URL_SERVER']
                environment['DQ2_URL_SERVER_SSL'] = configDQ2['DQ2_URL_SERVER_SSL']
                environment['DATASETTYPE'] = job.inputdata.type
                if job.inputdata.failover:
                    environment['DATASETFAILOVER'] = 1
                environment['DATASETDATATYPE'] = job.inputdata.datatype
                if job.inputdata.accessprotocol:
                    environment['DQ2_LOCAL_PROTOCOL'] = job.inputdata.accessprotocol
                if job.inputdata.check_md5sum:
                    environment['GANGA_CHECKMD5SUM'] = 1
                    
            else:
                raise ApplicationConfigurationError(None,'j.inputdata.dataset is empty - DQ2 dataset name needs to be specified.')

            # Raise submission exception
            if (not job.backend.CE and 
                not (job.backend.requirements._name in [ 'AtlasLCGRequirements', 'AtlasCREAMRequirements' ] and job.backend.requirements.sites) and
                not (job.splitter and job.splitter._name == 'DQ2JobSplitter') and
                not (job.splitter and job.splitter._name == 'TNTJobSplitter') and
                not (job.splitter and job.splitter._name == 'AnaTaskSplitterJob') and
                not (job.splitter and job.splitter._name == 'ATLASTier3Splitter')):

                raise ApplicationConfigurationError(None,'Job submission failed ! Please use DQ2JobSplitter or specify j.backend.requirements.sites or j.backend.requirements.CE !')

            if job.inputdata.match_ce_all or job.inputdata.min_num_files>0:
                raise ApplicationConfigurationError(None,'Job submission failed ! Usage of j.inputdata.match_ce_all or min_num_files is obsolete ! Please use DQ2JobSplitter or specify j.backend.requirements.sites or j.backend.requirements.CE !')
            #if job.inputdata.number_of_files and (job.splitter and job.splitter._name == 'DQ2JobSplitter'):
            #    allLoc = job.inputdata.get_locations(complete=0)
            #    completeLoc = job.inputdata.get_locations(complete=1)
            #    incompleteLoc = []
            #    for loc in allLoc:
            #        if loc not in completeLoc:
            #            incompleteLoc.append(loc)
            #    if incompleteLoc:
            #        raise ApplicationConfigurationError(None,'Job submission failed ! Dataset is incomplete ! Usage of j.inputdata.number_of_files and DQ2JobSplitter is not allowed for incomplete datasets !')

            # Add TAG datasetname
            if job.inputdata.tagdataset:
                environment['TAGDATASETNAME'] = ':'.join(job.inputdata.tagdataset)

#       prepare job requirements
        requirementsSoftware = getLCGReleaseTag( app )

        releaseBlacklist = job.backend.requirements.list_release_blacklist()     
        if requirementsSoftware and  requirementsSoftware[0] in releaseBlacklist:
            logger.error('The athena release %s you are using is not recommended for distributed analysis !', requirementsSoftware[0])
            logger.error('For details, please have a look at https://twiki.cern.ch/twiki/bin/view/Atlas/DAGangaFAQ#Athena_Versions_Issues or ask for help and advice on the distributed analysis help list !')
            requirements.software = requirementsSoftware
        else:
            requirements.software = requirementsSoftware

        # Set athena architecture: 32 or 64 bit    
        environment['ATLAS_ARCH'] = '32'
        if requirementsSoftware and requirementsSoftware[0].find('x86_64')>=0:
            environment['ATLAS_ARCH'] = '64'
            
        #       add software requirement of dq2clients
        if job.inputdata and job.inputdata._name in [ 'DQ2Dataset', 'AMIDataset', 'EventPicking' ]  and job.inputdata.type in [ 'TNT_DOWNLOAD', 'DQ2_COPY', 'FILE_STAGER'] or app.atlas_dbrelease or configDQ2['USE_ACCESS_INFO']:
            try:
                # override the default one if the dq2client_version is presented 
                # in the job backend's requirements object
                dq2client_version = job.backend.requirements.dq2client_version
            except AttributeError:
                pass
            if dq2client_version:
                #requirements.software += ['VO-atlas-dq2clients-%s' % dq2client_version]
                environment['DQ2_CLIENT_VERSION'] = dq2client_version

        if app.atlas_dbrelease:
            if not app._name == "AthenaTask" and not (job.splitter and (job.splitter._name == 'DQ2JobSplitter' or job.splitter._name == 'ATLASTier3Splitter')):
                raise ApplicationConfigurationError(None,'Job submission failed ! Please use DQ2JobSplitter if you are using j.application.atlas_dbrelease !')
            try:
                environment['ATLAS_DBRELEASE'] = app.atlas_dbrelease.split(':')[0]
                environment['ATLAS_DBFILE'] = app.atlas_dbrelease.split(':')[1]
            except:
                logger.warning('Problems with the atlas_dbrelease configuration')


        # Fill AtlasLCGRequirements access mode 
        if configDQ2['USE_ACCESS_INFO']:
            logger.warning("config['DQ2']['USE_ACCESS_INFO']=True - You are using the improved worker node input access method - make sure you are using at least athena version 15.0.0 or the latest FileStager tag !" )
            import pickle, StringIO
            #if job.backend.requirements.sites:
            info = job.backend.requirements.list_access_info()
            fileHandle = StringIO.StringIO()
            pickle.dump(info,fileHandle)
            fileHandle.seek(-1)
            lines = fileHandle.read()
            inputbox.append(FileBuffer( 'access_info.pickle', lines))
            _append_files(inputbox, 'access_info.py')
            if not 'make_filestager_joption.py' in [ os.path.basename(file.name) for file in inputbox ]:
                _append_files(inputbox,'make_filestager_joption.py','dm_util.py','fs-copy.py')

#       jobscript

        exe = os.path.join(__directory__,'run-athena-lcg.sh')

#       output sandbox
        outputbox = [
            'output_guids',
            'output_location',
            'output_data',
            'stats.pickle'
        ]

        ## retrieve the FileStager log
        if configDQ2['USE_ACCESS_INFO'] or (job.inputdata and (job.inputdata._name in [ 'DQ2Dataset', 'AMIDataset', 'EventPicking']) and job.inputdata.type in ['FILE_STAGER']):
            outputbox += ['FileStager.out', 'FileStager.err']
            
        if job.outputsandbox: outputbox += job.outputsandbox

        # Switch for DEBUG print-out in logfiles
        if app.useNoDebugLogs:
            environment['GANGA_LOG_DEBUG'] = '0'
        else:
            environment['GANGA_LOG_DEBUG'] = '1'
            
        return LCGJobConfig(File(exe),inputbox,[],outputbox,environment,[],requirements) 
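
The access-info block above pickles a Python object straight into a StringIO buffer and ships the raw bytes as a FileBuffer. Note that seek(-1) only behaves like seek(0) because StringIO clamps negative offsets to zero; getvalue() returns the whole buffer without any seek. A minimal sketch of the pickle-to-memory step, with a made-up payload and without the Ganga FileBuffer wrapper:

import pickle
import StringIO

info = {'site': 'EXAMPLE-SITE', 'protocols': ['dcap', 'root']}   # made-up payload

fileHandle = StringIO.StringIO()
pickle.dump(info, fileHandle)

# getvalue() returns the complete buffer regardless of the current file
# position, so no seek() is needed before shipping the pickled bytes.
lines = fileHandle.getvalue()
assert pickle.loads(lines) == info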

Example 63

Project: databus Source File: cgi.py
Function: init
    def __init__(self, fp=None, headers=None, outerboundary="",
                 environ=os.environ, keep_blank_values=0, strict_parsing=0):
        """Constructor.  Read multipart/* until last part.

        Arguments, all optional:

        fp              : file pointer; default: sys.stdin
            (not used when the request method is GET)

        headers         : header dictionary-like object; default:
            taken from environ as per CGI spec

        outerboundary   : terminating multipart boundary
            (for internal use only)

        environ         : environment dictionary; default: os.environ

        keep_blank_values: flag indicating whether blank values in
            URL encoded forms should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.

        """
        method = 'GET'
        self.keep_blank_values = keep_blank_values
        self.strict_parsing = strict_parsing
        if 'REQUEST_METHOD' in environ:
            method = environ['REQUEST_METHOD'].upper()
        self.qs_on_post = None
        if method == 'GET' or method == 'HEAD':
            if 'QUERY_STRING' in environ:
                qs = environ['QUERY_STRING']
            elif sys.argv[1:]:
                qs = sys.argv[1]
            else:
                qs = ""
            fp = StringIO(qs)
            if headers is None:
                headers = {'content-type':
                           "application/x-www-form-urlencoded"}
        if headers is None:
            headers = {}
            if method == 'POST':
                # Set default content-type for POST to what's traditional
                headers['content-type'] = "application/x-www-form-urlencoded"
            if 'CONTENT_TYPE' in environ:
                headers['content-type'] = environ['CONTENT_TYPE']
            if 'QUERY_STRING' in environ:
                self.qs_on_post = environ['QUERY_STRING']
            if 'CONTENT_LENGTH' in environ:
                headers['content-length'] = environ['CONTENT_LENGTH']
        self.fp = fp or sys.stdin
        self.headers = headers
        self.outerboundary = outerboundary

        # Process content-disposition header
        cdisp, pdict = "", {}
        if 'content-disposition' in self.headers:
            cdisp, pdict = parse_header(self.headers['content-disposition'])
        self.disposition = cdisp
        self.disposition_options = pdict
        self.name = None
        if 'name' in pdict:
            self.name = pdict['name']
        self.filename = None
        if 'filename' in pdict:
            self.filename = pdict['filename']

        # Process content-type header
        #
        # Honor any existing content-type header.  But if there is no
        # content-type header, use some sensible defaults.  Assume
        # outerboundary is "" at the outer level, but something non-false
        # inside a multi-part.  The default for an inner part is text/plain,
        # but for an outer part it should be urlencoded.  This should catch
        # bogus clients which erroneously forget to include a content-type
        # header.
        #
        # See below for what we do if there does exist a content-type header,
        # but it happens to be something we don't understand.
        if 'content-type' in self.headers:
            ctype, pdict = parse_header(self.headers['content-type'])
        elif self.outerboundary or method != 'POST':
            ctype, pdict = "text/plain", {}
        else:
            ctype, pdict = 'application/x-www-form-urlencoded', {}
        self.type = ctype
        self.type_options = pdict
        self.innerboundary = ""
        if 'boundary' in pdict:
            self.innerboundary = pdict['boundary']
        clen = -1
        if 'content-length' in self.headers:
            try:
                clen = int(self.headers['content-length'])
            except ValueError:
                pass
            if maxlen and clen > maxlen:
                raise ValueError, 'Maximum content length exceeded'
        self.length = clen

        self.list = self.file = None
        self.done = 0
        if ctype == 'application/x-www-form-urlencoded':
            self.read_urlencoded()
        elif ctype[:10] == 'multipart/':
            self.read_multi(environ, keep_blank_values, strict_parsing)
        else:
            self.read_single()
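
For GET and HEAD requests, FieldStorage wraps the query string in StringIO so the rest of the class can read it through the same fp interface a POST body would use. A minimal sketch of that substitution, with an illustrative query string:

from StringIO import StringIO

qs = "name=ada&lang=py"   # illustrative query string
fp = StringIO(qs)

# Downstream code can read() the "body" without caring whether it came
# from sys.stdin (POST) or from the environment (GET).
body = fp.read()
assert body == qs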

Example 64

Project: sonospy Source File: soap.py
    def call(self, addr, data, namespace, soapaction=None, encoding=None):
        """ Builds and performs an HTTP request. Returns the response payload.

        @param addr: address to receive the request in the form
        schema://hostname:port
        @param data: data to be sent
        @param soapaction: soap action to be called
        @param encoding: encoding for the message

        @type addr: string
        @type data: string
        @type soapaction: string
        @type encoding: string

        @return: response payload
        @rtype: string
        """

        log.debug('#### HTTPTransport call - addr : %s' % str(addr))
        log.debug('#### HTTPTransport call - data : %s' % str(data))
        log.debug('#### HTTPTransport call - namespace : %s' % str(namespace))
        log.debug('#### HTTPTransport call - soapaction : %s' % str(soapaction))
        log.debug('#### HTTPTransport call - encoding : %s' % str(encoding))

        # Build a request
        
        '''
        addr : http://legato.radiotime.com:80
        data : <?xml version="1.0" encoding="utf-8"?><s:Envelope s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"><s:Header><credentials xmlns="http://www.sonos.com/Services/1.1"><deviceProvider>Sonos</deviceProvider></credentials></s:Header><s:Body><ns0:getMetadata xmlns:ns0="http://www.sonos.com/Services/1.1"><count>100</count><index>0</index><recursive>false</recursive><id>root</id></ns0:getMetadata></s:Body></s:Envelope>
        namespace : ('u', 'http://www.sonos.com/Services/1.1')
        soapaction : http://www.sonos.com/Services/1.1#getMetadata
        encoding : utf-8
        real_addr : legato.radiotime.com:80
        real_path : 
        addr.scheme : http
        addr.hostname : legato.radiotime.com

        POST /Radio.asmx HTTP/1.1
        CONNECTION: close
        ACCEPT-ENCODING: gzip
        HOST: legato.radiotime.com
        USER-AGENT: Linux UPnP/1.0 Sonos/11.7-19141a
        CONTENT-LENGTH: 337
        CONTENT-TYPE: text/xml; charset="utf-8"
        ACCEPT-LANGUAGE: en-US
        SOAPACTION: "http://www.sonos.com/Services/1.1#getMetadata"
        '''
        # TODO: tidy up parameters, use saved params from musicservices call, change to gzip
        addr = parse_url(addr)
        real_addr = '%s:%d' % (addr.hostname, addr.port)
        real_path = addr.path

        if addr.scheme == 'https':
            r = httplib.HTTPSConnection(real_addr)
        else:
            r = httplib.HTTPConnection(real_addr)

        log.debug('#### HTTPTransport call - real_addr : %s' % real_addr)
        log.debug('#### HTTPTransport call - real_path : %s' % real_path)
        log.debug('#### HTTPTransport call - addr.scheme : %s' % addr.scheme)
        log.debug('#### HTTPTransport call - addr.hostname : %s' % addr.hostname)

        r.putrequest("POST", real_path, skip_host=1, skip_accept_encoding=1)
        
        r.putheader("ACCEPT-ENCODING", 'gzip')
        r.putheader("CONNECTION", 'close')

        r.putheader("HOST", addr.hostname)
        r.putheader("USER-AGENT", 'Linux UPnP/1.0 Sonos/11.7-19141a')
        t = 'text/xml'
        if encoding:
            t += '; charset="%s"' % encoding
            
            
        r.putheader("CONTENT-TYPE", t)
#        r.putheader("ACCEPT-CHARSET", 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')
        r.putheader("ACCEPT-LANGUAGE", 'en-US')
        r.putheader("CONTENT-LENGTH", str(len(data)))


        # if user is not a user:passwd format
        if addr.username != None:
            val = base64.encodestring(addr.user)
            r.putheader('Authorization', 'Basic ' + val.replace('\012', ''))

        # This fixes sending either "" or "None"
        if soapaction:
            r.putheader("SOAPACTION", '"%s"' % soapaction)
        else:
            r.putheader("SOAPACTION", "")

        r.endheaders()

        log.debug('#### HTTP BEFORE r.send ################################')

        r.send(data)

        log.debug('#### HTTP AFTER r.send ################################')

        #read response line
#        code, msg, headers = r.getreply()
        response = r.getresponse()
        code = response.status
        msg = response.reason
        headers = response.msg

        log.debug('#### HTTP AFTER START #################################')
        log.debug('#### HTTP code        : %s' % str(code))
        log.debug('#### HTTP msg         : %s' % str(msg))
        log.debug('#### HTTP headers     : %s' % str(headers))
        log.debug('#### HTTP AFTER END ###################################')

        content_type = headers.get("content-type", "text/xml")
        content_length = headers.get("Content-length")
        if content_length == None:
#            data = r.getfile().read()
            data = response.read()
            message_len = len(data)
        else:
            message_len = int(content_length)
#            data = r.getfile().read(message_len)
            data = response.read(message_len)

        def startswith(string, val):
            return string[0:len(val)] == val


        if code == 500 and not \
               (startswith(content_type, "text/xml") and message_len > 0):
            raise HTTPError(code, msg)

        if code not in (200, 500):
            raise HTTPError(code, msg)

        import StringIO
        stream = StringIO.StringIO(data)
        import gzip
        gzipper = gzip.GzipFile(fileobj=stream)
        data = gzipper.read()

        # TODO: use the content-type charset to convert the data returned

        #return response payload
        # NAS is sending some non utf-8 data - TODO: fix NAS rather than decoding for all types which is redundant
        try:
            d = data.decode('utf-8', 'replace')
        except UnicodeDecodeError:
            print "UnicodeDecodeError"
            return data
            
        log.debug('#### HTTP data        : %s' % d)
            
        return d
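
gzip.GzipFile decompresses from file-like objects only, so the transport wraps the compressed response bytes in StringIO before reading them back out. A minimal sketch that builds and then unpacks a small gzip payload entirely in memory (the payload is a stand-in for a real SOAP response):

import gzip
import StringIO

# Build a small gzip payload in memory (stand-in for a response body).
buf = StringIO.StringIO()
gz = gzip.GzipFile(fileobj=buf, mode='wb')
gz.write('<Envelope>example</Envelope>')
gz.close()
compressed = buf.getvalue()

# Decompress it the same way: wrap the bytes and hand them to GzipFile.
stream = StringIO.StringIO(compressed)
data = gzip.GzipFile(fileobj=stream).read()
assert data == '<Envelope>example</Envelope>'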

Example 65

Project: spark-cluster-deployment Source File: test_key.py
Function: test_file_callback
    def test_file_callback(self):
        def callback(wrote, total):
            self.my_cb_cnt += 1
            self.assertNotEqual(wrote, self.my_cb_last, "called twice with same value")
            self.my_cb_last = wrote

        # Zero bytes written => 1 call
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        sfp = StringIO.StringIO("")
        k.set_contents_from_file(sfp, cb=callback, num_cb=10)
        self.assertEqual(self.my_cb_cnt, 1)
        self.assertEqual(self.my_cb_last, 0)
        sfp.close()

        # Read back zero bytes => 1 call
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback)
        self.assertEqual(self.my_cb_cnt, 1)
        self.assertEqual(self.my_cb_last, 0)

        content="01234567890123456789"
        sfp = StringIO.StringIO(content)

        # expect 2 calls due start/finish
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.set_contents_from_file(sfp, cb=callback, num_cb=10)
        self.assertEqual(self.my_cb_cnt, 2)
        self.assertEqual(self.my_cb_last, 20)

        # Read back all bytes => 2 calls
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback)
        self.assertEqual(self.my_cb_cnt, 2)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # rewind sfp and try upload again. -1 should call
        # for every read/write so that should make 11 when bs=2
        sfp.seek(0)
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=-1)
        self.assertEqual(self.my_cb_cnt, 11)
        self.assertEqual(self.my_cb_last, 20)

        # Read back all bytes => 11 calls
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=-1)
        self.assertEqual(self.my_cb_cnt, 11)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 1 times => 2 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=1)
        self.assertTrue(self.my_cb_cnt <= 2)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 1 times => 2 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=1)
        self.assertTrue(self.my_cb_cnt <= 2)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 2 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=2)
        self.assertTrue(self.my_cb_cnt <= 2)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 2 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=2)
        self.assertTrue(self.my_cb_cnt <= 2)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 3 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=3)
        self.assertTrue(self.my_cb_cnt <= 3)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 3 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=3)
        self.assertTrue(self.my_cb_cnt <= 3)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 4 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=4)
        self.assertTrue(self.my_cb_cnt <= 4)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 4 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=4)
        self.assertTrue(self.my_cb_cnt <= 4)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 6 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=6)
        self.assertTrue(self.my_cb_cnt <= 6)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 6 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=6)
        self.assertTrue(self.my_cb_cnt <= 6)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 10 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0 
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=10)
        self.assertTrue(self.my_cb_cnt <= 10)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 10 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=10)
        self.assertTrue(self.my_cb_cnt <= 10)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)

        # no more than 1000 times
        # last time always 20 bytes
        sfp.seek(0)
        self.my_cb_cnt = 0 
        self.my_cb_last = None
        k = self.bucket.new_key("k")
        k.BufferSize = 2
        k.set_contents_from_file(sfp, cb=callback, num_cb=1000)
        self.assertTrue(self.my_cb_cnt <= 1000)
        self.assertEqual(self.my_cb_last, 20)

        # no more than 1000 times
        self.my_cb_cnt = 0
        self.my_cb_last = None
        s = k.get_contents_as_string(cb=callback, num_cb=1000)
        self.assertTrue(self.my_cb_cnt <= 1000)
        self.assertEqual(self.my_cb_last, 20)
        self.assertEqual(s, content)
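
The test passes StringIO objects to boto's set_contents_from_file because a StringIO satisfies the read()/seek()/tell() protocol an open file would provide. A minimal sketch of that idea without S3 credentials; the chunked reader below is a hypothetical stand-in for the library's internals, not boto code.

import StringIO

def upload_from_file(fp, chunk_size=2, cb=None):
    # Read the "file" in chunk_size pieces, reporting progress the way
    # boto's num_cb callbacks do in the test above.
    fp.seek(0, 2)                # seek to the end to learn the total size
    total = fp.tell()
    fp.seek(0)
    written = 0
    if cb:
        cb(written, total)
    while True:
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        written += len(chunk)
        if cb:
            cb(written, total)
    return written

sfp = StringIO.StringIO("01234567890123456789")
sent = upload_from_file(sfp, cb=lambda wrote, total: None)
assert sent == 20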

Example 66

Project: hydroshare Source File: receivers.py
@receiver(pre_create_resource, sender=NetcdfResource)
def netcdf_pre_create_resource(sender, **kwargs):

    files = kwargs['files']
    metadata = kwargs['metadata']
    validate_files_dict = kwargs['validate_files']
    res_title = kwargs['title']
    fed_res_fnames = kwargs['fed_res_file_names']

    file_selected = False
    in_file_name = ''

    if files:
        file_selected = True
        in_file_name = files[0].file.name
    elif fed_res_fnames:
        ref_tmpfiles = utils.get_fed_zone_files(fed_res_fnames)
        if ref_tmpfiles:
            in_file_name = ref_tmpfiles[0]
            file_selected = True

    if file_selected and in_file_name:
        # file validation and metadata extraction
        nc_dataset = nc_utils.get_nc_dataset(in_file_name)

        if isinstance(nc_dataset, netCDF4.Dataset):
            # Extract the metadata from netcdf file
            try:
                res_md_dict = nc_meta.get_nc_meta_dict(in_file_name)
                res_dublin_core_meta = res_md_dict['dublin_core_meta']
                res_type_specific_meta = res_md_dict['type_specific_meta']
            except:
                res_dublin_core_meta = {}
                res_type_specific_meta = {}

            # add creator:
            if res_dublin_core_meta.get('creator_name'):
                name = res_dublin_core_meta['creator_name']
                email = res_dublin_core_meta.get('creator_email', '')
                url = res_dublin_core_meta.get('creator_url', '')
                creator = {'creator': {'name': name, 'email': email, 'homepage': url}}
                metadata.append(creator)

            # add contributor:
            if res_dublin_core_meta.get('contributor_name'):
                name_list = res_dublin_core_meta['contributor_name'].split(',')
                for name in name_list:
                    contributor = {'contributor': {'name': name}}
                    metadata.append(contributor)

            # add title
            if res_dublin_core_meta.get('title'):
                res_title = {'title': {'value': res_dublin_core_meta['title']}}
                metadata.append(res_title)

            # add description
            if res_dublin_core_meta.get('description'):
                description = {'description': {'abstract': res_dublin_core_meta['description']}}
                metadata.append(description)

            # add keywords
            if res_dublin_core_meta.get('subject'):
                keywords = res_dublin_core_meta['subject'].split(',')
                for keyword in keywords:
                    metadata.append({'subject': {'value': keyword}})

            # add source
            if res_dublin_core_meta.get('source'):
                source = {'source': {'derived_from': res_dublin_core_meta['source']}}
                metadata.append(source)

            # add relation
            if res_dublin_core_meta.get('references'):
                relation = {'relation': {'type': 'cites', 'value': res_dublin_core_meta['references']}}
                metadata.append(relation)

            # add coverage - period
            if res_dublin_core_meta.get('period'):
                period = {'coverage': {'type': 'period', 'value': res_dublin_core_meta['period']}}
                metadata.append(period)

            # add coverage - box
            if res_dublin_core_meta.get('box'):
                box = {'coverage': {'type': 'box', 'value': res_dublin_core_meta['box']}}
                metadata.append(box)

            # add rights
            if res_dublin_core_meta.get('rights'):
                raw_info = res_dublin_core_meta.get('rights')
                b = re.search("(?P<url>https?://[^\s]+)", raw_info)
                url = b.group('url') if b else ''
                statement = raw_info.replace(url, '') if url else raw_info
                rights = {'rights': {'statement': statement, 'url': url}}
                metadata.append(rights)

            # Save extended meta to metadata variable
            for var_name, var_meta in res_type_specific_meta.items():
                meta_info = {}
                for element, value in var_meta.items():
                    if value != '':
                        meta_info[element] = value
                metadata.append({'variable': meta_info})

            # Save extended meta to original spatial coverage
            if res_dublin_core_meta.get('original-box'):
                if res_dublin_core_meta.get('projection-info'):
                    ori_cov = {'originalcoverage': {'value': res_dublin_core_meta['original-box'],
                                                    'projection_string_type': res_dublin_core_meta['projection-info']['type'],
                                                    'projection_string_text': res_dublin_core_meta['projection-info']['text']}}
                else:
                    ori_cov = {'originalcoverage': {'value': res_dublin_core_meta['original-box']}}

                metadata.append(ori_cov)

            # create the ncdump text file
            if nc_dump.get_nc_dump_string_by_ncdump(in_file_name):
                dump_str = nc_dump.get_nc_dump_string_by_ncdump(in_file_name)
            else:
                dump_str = nc_dump.get_nc_dump_string(in_file_name)

            if dump_str:
                # refine dump_str first line
                nc_file_name = os.path.splitext(files[0].name)[0]
                first_line = list('netcdf {0} '.format(nc_file_name))
                first_line_index = dump_str.index('{')
                dump_str_list = first_line + list(dump_str)[first_line_index:]
                dump_str = "".join(dump_str_list)

                # write dump_str to temporary file
                io = StringIO.StringIO()
                io.write(dump_str)
                dump_file_name = nc_file_name + '_header_info.txt'
                dump_file = InMemoryUploadedFile(io, None, dump_file_name, 'text', io.len, None)
                files.append(dump_file)

        else:
            validate_files_dict['are_files_valid'] = False
            validate_files_dict['message'] = 'Please check if the uploaded file is in valid NetCDF format.'

        if fed_res_fnames and in_file_name:
            shutil.rmtree(os.path.dirname(in_file_name))
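
In this receiver StringIO supplies the file object behind Django's InMemoryUploadedFile, so the generated ncdump header text is attached to the resource without touching disk. A minimal sketch of that pattern, assuming Django is importable; the dump string and file name below are illustrative:

import StringIO
from django.core.files.uploadedfile import InMemoryUploadedFile

dump_str = "netcdf sample { ... }"        # illustrative header text

io = StringIO.StringIO()
io.write(dump_str)
# StringIO.StringIO exposes .len, so the upload size is known without copying
dump_file = InMemoryUploadedFile(io, None, 'sample_header_info.txt',
                                 'text', io.len, None)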

Example 67

Project: edx-platform Source File: git_import.py
def add_repo(repo, rdir_in, branch=None):
    """
    This will add a git repo into the mongo modulestore.
    If branch is left as None, it will fetch the most recent
    version of the current branch.
    """
    # pylint: disable=too-many-statements

    git_repo_dir = getattr(settings, 'GIT_REPO_DIR', DEFAULT_GIT_REPO_DIR)
    git_import_static = getattr(settings, 'GIT_IMPORT_STATIC', True)

    # Set defaults even if it isn't defined in settings
    mongo_db = {
        'host': 'localhost',
        'port': 27017,
        'user': '',
        'password': '',
        'db': 'xlog',
    }

    # Allow overrides
    if hasattr(settings, 'MONGODB_LOG'):
        for config_item in ['host', 'user', 'password', 'db', 'port']:
            mongo_db[config_item] = settings.MONGODB_LOG.get(
                config_item, mongo_db[config_item])

    if not os.path.isdir(git_repo_dir):
        raise GitImportErrorNoDir(git_repo_dir)
    # pull from git
    if not (repo.endswith('.git') or
            repo.startswith(('http:', 'https:', 'git:', 'file:'))):
        raise GitImportErrorUrlBad()

    if rdir_in:
        rdir = os.path.basename(rdir_in)
    else:
        rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]
    log.debug('rdir = %s', rdir)

    rdirp = '{0}/{1}'.format(git_repo_dir, rdir)
    if os.path.exists(rdirp):
        log.info('directory already exists, doing a git pull instead '
                 'of git clone')
        cmd = ['git', 'pull', ]
        cwd = rdirp
    else:
        cmd = ['git', 'clone', repo, ]
        cwd = git_repo_dir

    cwd = os.path.abspath(cwd)
    try:
        ret_git = cmd_log(cmd, cwd=cwd)
    except subprocess.CalledProcessError as ex:
        log.exception('Error running git pull: %r', ex.output)
        raise GitImportErrorCannotPull()

    if branch:
        switch_branch(branch, rdirp)

    # get commit id
    cmd = ['git', 'log', '-1', '--format=%H', ]
    try:
        commit_id = cmd_log(cmd, cwd=rdirp)
    except subprocess.CalledProcessError as ex:
        log.exception('Unable to get git log: %r', ex.output)
        raise GitImportErrorBadRepo()

    ret_git += '\nCommit ID: {0}'.format(commit_id)

    # get branch
    cmd = ['git', 'symbolic-ref', '--short', 'HEAD', ]
    try:
        branch = cmd_log(cmd, cwd=rdirp)
    except subprocess.CalledProcessError as ex:
        # I can't discover a way to exercise this, but git is complex,
        # so we still log and raise here just in case.
        log.exception('Unable to determine branch: %r', ex.output)
        raise GitImportErrorBadRepo()

    ret_git += '{0}Branch: {1}'.format('   \n', branch)

    # Get XML logging logger and capture debug to parse results
    output = StringIO.StringIO()
    import_log_handler = logging.StreamHandler(output)
    import_log_handler.setLevel(logging.DEBUG)

    logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course',
                    'xmodule.modulestore.xml', 'xmodule.seq_module', ]
    loggers = []

    for logger_name in logger_names:
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(import_log_handler)
        loggers.append(logger)

    try:
        management.call_command('import', git_repo_dir, rdir,
                                nostatic=not git_import_static)
    except CommandError:
        raise GitImportErrorXmlImportFailed()
    except NotImplementedError:
        raise GitImportErrorUnsupportedStore()

    ret_import = output.getvalue()

    # Remove handler hijacks
    for logger in loggers:
        logger.setLevel(logging.NOTSET)
        logger.removeHandler(import_log_handler)

    course_key = None
    location = 'unknown'

    # extract course ID from output of import-command-run and make symlink
    # this is needed in order for custom course scripts to work
    match = re.search(r'(?ms)===> IMPORTING courselike (\S+)', ret_import)
    if match:
        course_id = match.group(1)
        try:
            course_key = CourseKey.from_string(course_id)
        except InvalidKeyError:
            course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
        cdir = '{0}/{1}'.format(git_repo_dir, course_key.course)
        log.debug('Studio course dir = %s', cdir)

        if os.path.exists(cdir) and not os.path.islink(cdir):
            log.debug('   -> exists, but is not symlink')
            log.debug(subprocess.check_output(['ls', '-l', ],
                                              cwd=os.path.abspath(cdir)))
            try:
                os.rmdir(os.path.abspath(cdir))
            except OSError:
                log.exception('Failed to remove course directory')

        if not os.path.exists(cdir):
            log.debug('   -> creating symlink between %s and %s', rdirp, cdir)
            try:
                os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir))
            except OSError:
                log.exception('Unable to create course symlink')
            log.debug(subprocess.check_output(['ls', '-l', ],
                                              cwd=os.path.abspath(cdir)))

    # store import-command-run output in mongo
    mongouri = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(**mongo_db)

    try:
        if mongo_db['user'] and mongo_db['password']:
            mdb = mongoengine.connect(mongo_db['db'], host=mongouri)
        else:
            mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'], port=mongo_db['port'])
    except mongoengine.connection.ConnectionError:
        log.exception('Unable to connect to mongodb to save log, please '
                      'check MONGODB_LOG settings')
    cil = CourseImportLog(
        course_id=course_key,
        location=location,
        repo_dir=rdir,
        created=timezone.now(),
        import_log=ret_import,
        git_log=ret_git,
    )
    cil.save()

    log.debug('saved CourseImportLog for %s', cil.course_id)
    mdb.disconnect()
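
add_repo() captures the import log by pointing a logging.StreamHandler at a StringIO buffer instead of a real stream, then reads everything back with getvalue(). A minimal, self-contained sketch of that capture pattern (logger name and message are illustrative):

import logging
import StringIO

output = StringIO.StringIO()
handler = logging.StreamHandler(output)     # the handler writes into the buffer
handler.setLevel(logging.DEBUG)

log = logging.getLogger('demo')
log.setLevel(logging.DEBUG)
log.addHandler(handler)

log.debug('importing course ...')           # goes to the buffer, not stderr

captured = output.getvalue()                # everything the handler emitted
log.removeHandler(handler)                  # undo the handler hijack, as above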

Example 68

Project: bolinas Source File: grammar.py
    @classmethod
    def load_from_file(cls, in_file, rule_class = VoRule, reverse = False, nodelabels = False, logprob = False):
        """
        Loads a SHRG grammar from the given file. 
        See documentation for format details.
        
        rule_class specifies the type of rule to use. VoRule is a subclass using an arbitrary graph
        visit order (also used for strings). TdRule computes a tree decomposition on the first RHS
        when initialized.
        """

        output = Grammar(nodelabels = nodelabels, logprob = logprob)

        rule_count = 1
        line_count = 0
        is_synchronous = False

        rhs1_type = None
        rhs2_type = None

        buf = StringIO.StringIO() 

        for line in in_file: 
            line_count += 1
            l = line.strip()
            if l:
                if "#" in l: 
                    content, comment = l.split("#",1)
                else: 
                    content = l
                buf.write(content.strip())
                if ";" in content:
                    rulestring = buf.getvalue()
                    try:
                        content, weights = rulestring.split(";",1)            
                        weight = 0.0 if not weights else (float(weights) if logprob else math.log(float(weights)))
                    except:
                        raise GrammarError, \
            "Line %i, Rule %i: Error near end of line." % (line_count, rule_count)
                   
                    try:  
                        lhs, rhsstring = content.split("->")
                    except:
                        raise GrammarError, \
            "Line %i, Rule %i: Invalid rule format." % (line_count, rule_count)
                    lhs = lhs.strip()
                    if rule_count == 1:
                        output.start_symbol = lhs
                    if "|" in rhsstring:
                        if not is_synchronous and rule_count > 1:
                            raise GrammarError,\
           "Line %i, Rule %i: All or none of the rules need to have two RHSs." % (line_count, rule_count)
                        is_synchronous = True
                        try:
                            rhs1,rhs2 = rhsstring.split("|")
                        except:
                            raise GrammarError,"Only up to two RHSs are allowed in grammar file."
                    else: 
                        if is_synchronous and rule_count > 0:
                            raise ParserError,\
            "Line %i, Rule %i: All or none of the rules need to have two RHSs." % (line_count, rule_count)
                        is_synchronous = False
                        rhs1 = rhsstring
                        rhs2 = None                               
                    
                    try:    # If the first graph in the file cannot be parsed, assume it's a string
                        r1  = Hgraph.from_string(rhs1)
                        r1_nts = set([(ntlabel.label, ntlabel.index) for h, ntlabel, t in r1.nonterminal_edges()])
                        if not rhs1_type:
                            rhs1_type = GRAPH_FORMAT
                    except (ParserError, IndexError), e: 
                        if rhs1_type == GRAPH_FORMAT:
                           raise ParserError,\
            "Line %i, Rule %i: Could not parse graph description: %s" % (line_count, rule_count, e.message)
                        else:
                           r1 = parse_string(rhs1) 
                           nts = [t for t in r1 if isinstance(t, NonterminalLabel)]
                           r1_nts = set([(ntlabel.label, ntlabel.index) for ntlabel in nts])
                           rhs1_type = STRING_FORMAT
  
                    if is_synchronous:
                        try:    # If the first graph in the file cannot be parsed, assume it's a string
                            if rhs2_type: 
                                assert rhs2_type == GRAPH_FORMAT
                            r2  = Hgraph.from_string(rhs2)
                            r2_nts = set([(ntlabel.label, ntlabel.index) for h, ntlabel, t in r2.nonterminal_edges()])
                            if not rhs2_type:
                                rhs2_type = GRAPH_FORMAT
                        except (ParserError, IndexError, AssertionError), e: 
                            if rhs2_type == GRAPH_FORMAT:
                               raise ParserError,\
                "Line %i, Rule %i: Could not parse graph description: %s" % (line_count, rule_count, e.message)
                            else:
                               r2 = parse_string(rhs2) 
                               nts = [t for t in r2 if isinstance(t, NonterminalLabel)]
                               r2_nts = set([(ntlabel.label, ntlabel.index) for ntlabel in nts])
                               rhs2_type = STRING_FORMAT

                        # Verify that nonterminals match up
                        if not r1_nts == r2_nts:
                            raise GrammarError, \
            "Line %i, Rule %i: Nonterminals do not match between RHSs: %s %s" % (line_count, rule_count, str(r1_nts), str(r2_nts))
                    else: 
                        r2 = None
                    try:    
                        if is_synchronous and reverse: 
                            output[rule_count] = rule_class(rule_count, lhs, weight, r2, r1, nodelabels = nodelabels, logprob = logprob)                                     
                        else: 
                            output[rule_count] = rule_class(rule_count, lhs, weight, r1, r2, nodelabels = nodelabels, logprob = logprob) 
                    except Exception, e:         
                        raise GrammarError, \
            "Line %i, Rule %i: Could not initialize rule. %s" % (line_count, rule_count, e.message)
                    buf = StringIO.StringIO() 
                    rule_count += 1

        output.is_synchronous = is_synchronous
        if is_synchronous and reverse:
            output.rhs1_type, output.rhs2_type = rhs2_type, rhs1_type
        else: 
            output.rhs1_type, output.rhs2_type = rhs1_type, rhs2_type

        output._compute_reachability_table_lookup()
        return output 
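
load_from_file() uses StringIO as an accumulation buffer: rule text is appended line by line until a terminating ';' appears, the complete rule is pulled out with getvalue(), and a fresh buffer is created for the next rule. A minimal sketch of that accumulation pattern with illustrative input lines:

import StringIO

lines = ["S -> a b", "     c ;", "A -> d ;"]   # illustrative multi-line rules
rules = []
buf = StringIO.StringIO()
for line in lines:
    buf.write(line.strip())
    if ";" in line:                   # the current rule is complete
        rules.append(buf.getvalue())
        buf = StringIO.StringIO()     # start a fresh buffer for the next rule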

Example 69

Project: cgstudiomap Source File: report_businessopp.py
    def create(self, cr, uid, ids, datas, context=None):

        """ @param cr: the current row, from the database cursor,
            @param uid: the current user’s ID for security checks,
            @param ids: List of IDs
            @param context: A standard dictionary for contextual values """

        assert len(ids), 'You should provide some ids!'
        responsible_data = {}
        responsible_names = {}
        data = []
        minbenef = 999999999999999999999
        maxbenef = 0

        cr.execute('select probability, planned_revenue, planned_cost, user_id,\
                 res_users.name as name from crm_case left join res_users on \
                 (crm_case.user_id=res_users.id) where crm_case.id IN %s order by user_id',(tuple(ids),))

        res = cr.dictfetchall()
        for row in res:
            proba = row['probability'] or 0 / 100.0
            cost = row['planned_cost'] or 0
            revenue = row['planned_revenue'] or 0
            userid = row['user_id'] or 0

            benefit = revenue - cost
            if benefit > maxbenef:
                maxbenef = benefit
            if benefit < minbenef:
                minbenef = benefit

            tuple_benefit = (proba * 100,  benefit)
            responsible_data.setdefault(userid, [])
            responsible_data[userid].append(tuple_benefit)

            tuple_benefit = (proba * 100, cost, benefit)
            data.append(tuple_benefit)

            responsible_names[userid] = (row['name'] or '/').replace('/','//')

        minbenef -= maxbenef * 0.05
        maxbenef *= 1.2

        ratio = 0.5
        minmaxdiff2 = (maxbenef - minbenef)/2

        for l in responsible_data.itervalues():
            for i in range(len(l)):
                percent, benef = l[i]
                proba = percent/100

                current_ratio = 1 + (ratio-1) * proba

                newbenef = minmaxdiff2 + ((benef - minbenef - minmaxdiff2) * current_ratio)

                l[i] = (percent, newbenef)

#TODO:
#-group by "categorie de probabilites ds graphe du haut"
#-echelle variable

        pdf_string = StringIO.StringIO()
        can = canvas.init(fname = pdf_string, format = 'pdf')

        chart_object.set_defaults(line_plot.T, line_style=None)

        xaxis = axis.X(label=None, format="%d%%", tic_interval=20)
        yaxis = axis.Y()

        x_range_a, x_range_b = (0, 100)
        y_range_a, y_range_b = (minbenef, maxbenef)
        if y_range_a == 0.0:
            y_range_a += 0.0001

        ar = area.T(
            size = (300,200),
            y_grid_interval = 10000,
            y_grid_style = None,
            x_range = (x_range_a, x_range_b),
            y_range = (y_range_a, y_range_b),
            x_axis = xaxis,
            y_axis = None,
            legend = legend.T()
        )

        #import pydb; pydb.debugger()
        for k, d in responsible_data.iteritems():
            fill = fill_style.Plain(bgcolor=color.T(r=random.random(), g=random.random(), b=random.random()))
            tick = tick_mark.Square(size=6, fill_style=fill)
            ar.add_plot(line_plot.T(label=responsible_names[k], data=d, tick_mark=tick))

        ar.draw(can)

        # second graph (top right)
        ar = area.T(legend = legend.T(),
                    size = (200,100),
                    loc = (100,250),
                    x_grid_interval = lambda min, max: [40,60,80,100],
                    x_grid_style = line_style.gray70_dash1,
                    x_range = (33, 100),
                    x_axis = axis.X(label=None, minor_tic_interval = lambda min,max: [50, 70, 90],\
                                     format=lambda x: ""),
                    y_axis = axis.Y(label="Planned amounts"))

        bar_plot.fill_styles.reset();
        plot1 = bar_plot.T(label="Cost", data=data, fill_style=fill_style.red)
        plot2 = bar_plot.T(label="Revenue", data=data, hcol=2, stack_on = plot1, fill_style=fill_style.blue)

        ar.add_plot(plot1, plot2)

        ar.draw(can)

        # diagonal "pipeline" lines
        can.line(line_style.black, 0, 200, 300, 150)
        can.line(line_style.black, 0, 0, 300, 50)

        # vertical lines
        ls = line_style.T(width=0.4, color=color.gray70, dash=(2, 2))
        for x in range(120, 300, 60):
            can.line(ls, x, 0, x, 250)

        # draw arrows to the right
        a = arrow.fat1
        for y in range(60, 150, 10):
            a.draw([(285, y), (315, y)], can=can)

        # close canvas so that the file is written to "disk"
        can.close()

        self.obj = external_pdf(pdf_string.getvalue())
        self.obj.render()

        pdf_string.close()
        return (self.obj.pdf, 'pdf')
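
Here StringIO stands in for the output file: PyChart's canvas.init() is handed the buffer as fname, the chart is rendered into it, and getvalue() passes the finished PDF to external_pdf as a string. A minimal sketch of using StringIO as the "file" for any library that writes to a file-like object; the PDF content below is a stand-in, not real PyChart output:

import StringIO

pdf_string = StringIO.StringIO()
# a charting or report library would write here instead, e.g.
#   can = canvas.init(fname=pdf_string, format='pdf')
pdf_string.write('%PDF-1.4 ...')          # stand-in for the rendered output
pdf_bytes = pdf_string.getvalue()         # the complete in-memory document
pdf_string.close()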

Example 70

Project: aclhound Source File: deploy_arista.py
def deploy(hostname=None, acls=None, transport='ssh', save_config=False,
           timeout=60):
    """
    Deploy code in a safe way to an Arista EOS device.
    """
    try:
        username, enable_pass, password = \
            netrc.netrc().authenticators(hostname)
        account = Account(name=username, password=password,
                          password2=enable_pass)
    except:
        print("ERROR: could not find device in ~/.netrc file")
        print("HINT: either update .netrc or enter username + pass now.")
        try:
            account = read_login()
        except EOFError:
            print("ERROR: could not find proper username + pass")
            print("HINT: set username & pass in ~/.netrc for device %s"
                  % hostname)
            import sys
            sys.exit(2)

    def s(conn, line):
        print("   %s" % line)
        conn.execute(line)

    def collect_interfaces(conn):
        template = """# textfsm
Value Required Interface ([^ ]+)
Value Inbound (.*)
Value Outbound (.*)

Start
  ^${Interface} is up
  ^  Outgoing access list is ${Outbound}
  ^  Inbound  access list is ${Inbound} -> Record Start

"""
        template_file = StringIO(template)
        table = textfsm.TextFSM(template_file)
        s(conn, 'show ip int | inc ine pro|list is')
        interface_acl_v4 = table.ParseText(conn.response)

        template = """# textfsm
Value Required Interface ([^ ]+)
Value Inbound (.*)
Value Outbound (.*)

Start
  ^${Interface} is up
  ^  Inbound access list ${Inbound}
  ^  Outgoing access list ${Outbound} -> Record Start

"""
        template_file = StringIO(template)
        table = textfsm.TextFSM(template_file)
        s(conn, 'show ipv6 int  | i ine pro|access list')
        interface_acl_v6 = table.ParseText(conn.response)
        template = """# textfsm
Value Required Vty (\d+\s\d+)
Value Inbound4 ([^ ]+)
Value Outbound4 ([^ ]+)
Value Inbound6 ([^ ]+)
Value Outbound6 ([^ ]+)

Start
  ^line vty ${Vty}
  ^ access-class ${Inbound4} in
  ^ access-class ${Outbound4} out
  ^ ipv6 access-class ${Inbound6} in
  ^ ipv6 access-class ${Outbound6} out -> Record Start

"""
        template_file = StringIO(template)
        table = textfsm.TextFSM(template_file)
        s(conn, 'show run | begin ^line vty')
        interface_acl_vty = table.ParseText(conn.response)

        results = {4: interface_acl_v4, 6: interface_acl_v6}
        # add vty lines
        for vty in interface_acl_vty:
            # v4 inbound
            v4_inbound = vty[1] if vty[1] else "not set"
            v4_outbound = vty[2] if vty[2] else "not set"
            v6_inbound = vty[3] if vty[3] else "not set"
            v6_outbound = vty[4] if vty[4] else "not set"
            results[4].append(["vty %s" % vty[0], v4_inbound, v4_outbound])
            results[6].append(["vty %s" % vty[0], v6_inbound, v6_outbound])
        return results

    # main flow of the program starts here
    if transport == 'ssh':
        conn = SSH2(verify_fingerprint=False, debug=0, timeout=timeout)
    elif transport == 'telnet':
        conn = Telnet(debug=0)
    else:
        print("ERROR: Unknown transport mechanism: %s"
              % transport)
        sys.exit(2)
    conn.set_driver('arista_eos')
    conn.connect(hostname)
    conn.login(account)
    conn.execute('terminal length 0')
    conn.auto_app_authorize(account)
    capabilities = {}

    map_pol_int = {}
    interfaces_overview = collect_interfaces(conn)
    for afi in interfaces_overview:
        for interface, inbound, outbound in interfaces_overview[afi]:
            # add inbound rules to map
            if inbound not in map_pol_int.keys():
                map_pol_int[inbound] = [{"int": interface,
                                        "afi": afi,
                                        "dir": "in"}]
            else:
                map_pol_int[inbound].append({"int": interface,
                                             "afi": afi,
                                             "dir": "in"})
            # add outbound
            if outbound not in map_pol_int.keys():
                map_pol_int[outbound] = [{"int": interface,
                                          "afi": afi,
                                          "dir": "in"}]
            else:
                map_pol_int[outbound].append({"int": interface,
                                             "afi": afi,
                                             "dir": "out"})
    print("INFO: interface / policy mapping:")
    pprint(map_pol_int)

    def lock_step(lock, pol, capabilities):
        name = acls[pol]['name']
        afi = acls[pol]['afi']
        if afi == 6 and not capabilities['ipv6']:
            return
        policy = acls[pol]['policy']
        print("INFO: uploading name: %s, afi: %s" % (name, afi))
        s(conn, 'configure terminal')
        if afi == 4:
            try:
                s(conn, "no ip access-list extended %s%s" % (lock, name))
            except:
                pass
            s(conn, "ip access-list extended %s%s" % (lock, name))
            for line in policy.split('\n'):
                s(conn, line)
        if afi == 6:
            try:
                s(conn, "no ipv6 access-list %s%s" % (lock, name))
            except:
                pass
            s(conn, "ipv6 access-list %s%s" % (lock, name))
            for line in policy.split('\n'):
                s(conn, line)
        s(conn, "end")

        # then replace ACL on all interfaces / VTYs
        if name in map_pol_int:
            for entry in map_pol_int[name]:
                if not entry['afi'] == afi:
                    continue
                print("INFO: lockstepping policy %s afi %s" % (name, afi))
                s(conn, "configure terminal")
                if entry['int'].startswith('vty '):
                    s(conn, "line %s" % entry['int'])
                    if afi == 4:
                        s(conn, "access-class %s%s %s"
                          % (lock, name, entry['dir']))
                    if afi == 6:
                        s(conn, "ipv6 access-class %s%s %s"
                          % (lock, name, entry['dir']))
                else:
                    s(conn, "interface %s" % entry['int'])
                    if afi == 4:
                        s(conn, "ip access-group %s%s %s"
                          % (lock, name, entry['dir']))
                    if afi == 6:
                        s(conn, "ipv6 traffic-filter %s%s %s"
                          % (lock, name, entry['dir']))
                s(conn, "end")

    for policy in acls:
        for lock in ["LOCKSTEP-", ""]:
            lock_step(lock, policy, capabilities)
        # cleanup
        s(conn, "configure terminal")
        if acls[policy]['afi'] == 4:
            s(conn, "no ip access-list extended LOCKSTEP-%s"
              % acls[policy]['name'])
        if acls[policy]['afi'] == 6 and capabilities['ipv6']:
            s(conn, "no ipv6 access-list LOCKSTEP-%s"
              % acls[policy]['name'])
        s(conn, "end")

    if save_config == True:
        s(conn, "write")

Example 71

Project: ssf Source File: survey.py
def series_export_spreadsheet(matrix, matrixAnswers, logo):
    ######################################################################
    #
    # Now take the matrix data type and generate a spreadsheet from it
    #
    ######################################################################
    import math
    try:
        import xlwt
    except ImportError:
        output = s3_rest_controller(module,
                                resourcename,
                                rheader=response.s3.survey_series_rheader)
        return output

    def wrapText(sheet, cell, style):
        row = cell.row
        col = cell.col
        try:
            text = unicode(cell.text)
        except:
            text = cell.text
        width = 16
        # Wrap text and calculate the row width and height
        characters_in_cell = float(width-2)
        twips_per_row = 255 #default row height for 10 point font
        if cell.merged():
            try:
                sheet.write_merge(cell.row,
                                  cell.row + cell.mergeV,
                                  cell.col,
                                  cell.col + cell.mergeH,
                                  text,
                                  style
                                 )
            except Exception as msg:
                print >> sys.stderr, msg
                print >> sys.stderr, "row: %s + vert: %s, col: %s + horiz %s" % (cell.row, cell.mergeV, cell.col, cell.mergeH)
                posn = "%s,%s"%(cell.row, cell.col)
                if matrix.matrix[posn]:
                    print >> sys.stderr, matrix.matrix[posn]
            rows = math.ceil((len(text) / characters_in_cell) / (1 + cell.mergeH))
        else:
            sheet.write(cell.row,
                        cell.col,
                        text,
                        style
                       )
            rows = math.ceil(len(text) / characters_in_cell)
        new_row_height = int(rows * twips_per_row)
        new_col_width = width * COL_WIDTH_MULTIPLIER
        if sheet.row(row).height < new_row_height:
            sheet.row(row).height = new_row_height
        if sheet.col(col).width < new_col_width:
            sheet.col(col).width = new_col_width

    def mergeStyles(listTemplate, styleList):
        """
            Take a list of styles and return a single style object with
            all the differences from a newly created object added to the
            resultant style.
        """
        if len(styleList) == 0:
            finalStyle = xlwt.XFStyle()
        elif len(styleList) == 1:
            finalStyle = listTemplate[styleList[0]]
        else:
            zeroStyle = xlwt.XFStyle()
            finalStyle = xlwt.XFStyle()
            for i in range(0,len(styleList)):
                finalStyle = mergeObjectDiff(finalStyle,
                                             listTemplate[styleList[i]],
                                             zeroStyle)
        return finalStyle

    def mergeObjectDiff(baseObj, newObj, zeroObj):
        """
            function to copy all the elements in newObj that are different from
            the zeroObj and place them in the baseObj
        """
        elementList = newObj.__dict__
        for (element, value) in elementList.items():
            try:
                baseObj.__dict__[element] = mergeObjectDiff(baseObj.__dict__[element],
                                                            value,
                                                            zeroObj.__dict__[element])
            except:
                if zeroObj.__dict__[element] != value:
                    baseObj.__dict__[element] = value
        return baseObj

    COL_WIDTH_MULTIPLIER = 240
    book = xlwt.Workbook(encoding="utf-8")
    output = StringIO()

    protection = xlwt.Protection()
    protection.cell_locked = 1
    noProtection = xlwt.Protection()
    noProtection.cell_locked = 0

    borders = xlwt.Borders()
    borders.left = xlwt.Borders.DOTTED
    borders.right = xlwt.Borders.DOTTED
    borders.top = xlwt.Borders.DOTTED
    borders.bottom = xlwt.Borders.DOTTED

    borderT1 = xlwt.Borders()
    borderT1.top = xlwt.Borders.THIN
    borderT2 = xlwt.Borders()
    borderT2.top = xlwt.Borders.MEDIUM

    borderL1 = xlwt.Borders()
    borderL1.left = xlwt.Borders.THIN
    borderL2 = xlwt.Borders()
    borderL2.left = xlwt.Borders.MEDIUM

    borderR1 = xlwt.Borders()
    borderR1.right = xlwt.Borders.THIN
    borderR2 = xlwt.Borders()
    borderR2.right = xlwt.Borders.MEDIUM

    borderB1 = xlwt.Borders()
    borderB1.bottom = xlwt.Borders.THIN
    borderB2 = xlwt.Borders()
    borderB2.bottom = xlwt.Borders.MEDIUM

    alignBase = xlwt.Alignment()
    alignBase.horz = xlwt.Alignment.HORZ_LEFT
    alignBase.vert = xlwt.Alignment.VERT_TOP

    alignWrap = xlwt.Alignment()
    alignWrap.horz = xlwt.Alignment.HORZ_LEFT
    alignWrap.vert = xlwt.Alignment.VERT_TOP
    alignWrap.wrap = xlwt.Alignment.WRAP_AT_RIGHT

    shadedFill = xlwt.Pattern()
    shadedFill.pattern = xlwt.Pattern.SOLID_PATTERN
    shadedFill.pattern_fore_colour = 0x16 # 25% Grey
    shadedFill.pattern_back_colour = 0x08 # Black

    headingFill = xlwt.Pattern()
    headingFill.pattern = xlwt.Pattern.SOLID_PATTERN
    headingFill.pattern_fore_colour = 0x1F # ice_blue
    headingFill.pattern_back_colour = 0x08 # Black

    styleTitle =  xlwt.XFStyle()
    styleTitle.font.height = 0x0140 # 320 twips, 16 points
    styleTitle.font.bold = True
    styleTitle.alignment = alignBase
    styleHeader = xlwt.XFStyle()
    styleHeader.font.height = 0x00F0 # 240 twips, 12 points
    styleHeader.font.bold = True
    styleHeader.alignment = alignBase
    styleSubHeader = xlwt.XFStyle()
    styleSubHeader.font.bold = True
    styleSubHeader.alignment = alignWrap
    styleSectionHeading = xlwt.XFStyle()
    styleSectionHeading.font.bold = True
    styleSectionHeading.alignment = alignWrap
    styleSectionHeading.pattern = headingFill
    styleHint = xlwt.XFStyle()
    styleHint.protection = protection
    styleHint.font.height = 160 # 160 twips, 8 points
    styleHint.font.italic = True
    styleHint.alignment = alignWrap
    styleText = xlwt.XFStyle()
    styleText.protection = protection
    styleText.alignment = alignWrap
    styleInstructions = xlwt.XFStyle()
    styleInstructions.font.height = 0x00B4 # 180 twips, 9 points
    styleInstructions.font.italic = True
    styleInstructions.protection = protection
    styleInstructions.alignment = alignWrap
    styleBox = xlwt.XFStyle()
    styleBox.borders = borders
    styleBox.protection = noProtection
    styleInput = xlwt.XFStyle()
    styleInput.borders = borders
    styleInput.protection = noProtection
    styleInput.pattern = shadedFill
    boxL1 = xlwt.XFStyle()
    boxL1.borders = borderL1
    boxL2 = xlwt.XFStyle()
    boxL2.borders = borderL2
    boxT1 = xlwt.XFStyle()
    boxT1.borders = borderT1
    boxT2 = xlwt.XFStyle()
    boxT2.borders = borderT2
    boxR1 = xlwt.XFStyle()
    boxR1.borders = borderR1
    boxR2 = xlwt.XFStyle()
    boxR2.borders = borderR2
    boxB1 = xlwt.XFStyle()
    boxB1.borders = borderB1
    boxB2 = xlwt.XFStyle()
    boxB2.borders = borderB2
    styleList = {}
    styleList["styleTitle"] = styleTitle
    styleList["styleHeader"] = styleHeader
    styleList["styleSubHeader"] = styleSubHeader
    styleList["styleSectionHeading"] = styleSectionHeading
    styleList["styleHint"] = styleHint
    styleList["styleText"] = styleText
    styleList["styleInstructions"] = styleInstructions
    styleList["styleInput"] = styleInput
    styleList["boxL1"] = boxL1
    styleList["boxL2"] = boxL2
    styleList["boxT1"] = boxT1
    styleList["boxT2"] = boxT2
    styleList["boxR1"] = boxR1
    styleList["boxR2"] = boxR2
    styleList["boxB1"] = boxB1
    styleList["boxB2"] = boxB2

    sheet1 = book.add_sheet(T("Assessment"))
    sheetA = book.add_sheet(T("Metadata"))
    maxCol = 0
    for cell in matrix.matrix.values():
        if cell.col + cell.mergeH > 255:
            print  >> sys.stderr, "Cell (%s,%s) - (%s,%s) ignored" % (cell.col, cell.row, cell.col + cell.mergeH, cell.row + cell.mergeV)
            continue
        if cell.col + cell.mergeH > maxCol:
            maxCol = cell.col + cell.mergeH
        if cell.joined():
            continue
        style = mergeStyles(styleList, cell.styleList)
        if (style.alignment.wrap == style.alignment.WRAP_AT_RIGHT):
            # get all the styles from the joined cells
            # and merge these styles in.
            joinedStyles = matrix.joinedElementStyles(cell)
            joinedStyle =  mergeStyles(styleList, joinedStyles)
            try:
                wrapText(sheet1, cell, joinedStyle)
            except:
                pass
        else:
            if cell.merged():
                # get all the styles from the joined cells
                # and merge these styles in.
                joinedStyles = matrix.joinedElementStyles(cell)
                joinedStyle =  mergeStyles(styleList, joinedStyles)
                try:
                    sheet1.write_merge(cell.row,
                                       cell.row + cell.mergeV,
                                       cell.col,
                                       cell.col + cell.mergeH,
                                       unicode(cell.text),
                                       joinedStyle
                                       )
                except Exception as msg:
                    print >> sys.stderr, msg
                    print >> sys.stderr, "row: %s + vert: %s, col: %s + horiz %s" % (cell.row, cell.mergeV, cell.col, cell.mergeH)
                    posn = "%s,%s"%(cell.row, cell.col)
                    if matrix.matrix[posn]:
                        print >> sys.stderr, matrix.matrix[posn]
            else:
                sheet1.write(cell.row,
                             cell.col,
                             unicode(cell.text),
                             style
                             )
    cellWidth = 480 # approximately 2 characters
    if maxCol > 255:
        maxCol = 255
    for col in range(maxCol+1):
        sheet1.col(col).width = cellWidth

    sheetA.write(0, 0, "Question Code")
    sheetA.write(0, 1, "Response Count")
    sheetA.write(0, 2, "Values")
    sheetA.write(0, 3, "Cell Address")
    for cell in matrixAnswers.matrix.values():
        style = mergeStyles(styleList, cell.styleList)
        sheetA.write(cell.row,
                     cell.col,
                     unicode(cell.text),
                     style
                    )

    if logo != None:
        sheet1.insert_bitmap(logo, 0, 0)

    sheet1.protect = True
    sheetA.protect = True
    for i in range(26):
        sheetA.col(i).width = 0
    sheetA.write(0,
                 26,
                 unicode(T("Please do not remove this sheet")),
                 styleHeader
                )
    sheetA.col(26).width = 12000
    book.save(output)
    return output
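
The workbook is written straight into a StringIO buffer with book.save(output), so the generated .xls never touches disk and can be returned to the caller. A minimal sketch of that pattern, assuming xlwt is installed; sheet name and cell value are illustrative:

import StringIO
import xlwt

book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Assessment")
sheet.write(0, 0, "hello")

output = StringIO.StringIO()
book.save(output)                 # xlwt writes the .xls stream into the buffer
xls_bytes = output.getvalue()     # ready to return or store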

Example 72

Project: ssf Source File: xls.py
    def encode(self, data_source, **attr):
        """
            Export data as a Microsoft Excel spreadsheet

            @param data_source: the source of the data that is to be encoded
                               as a spreadsheet. This may be:
                               resource: the resource
                               item:     a list of pre-fetched values
                                         the headings are in the first row
                                         the data types are in the second row
            @param attr: dictionary of parameters:
                 * title:          The main title of the report
                 * list_fields:    Fields to include in list views
                 * report_groupby: Used to create a grouping of the result:
                                   either a Field object of the resource
                                   or a string which matches a value in the heading
                 * use_colour:     True to add colour to the cells. default True
        """
        import datetime
        try:
            import xlwt
        except ImportError:
            current.session.error = self.ERROR.XLWT_ERROR
            redirect(URL(extension=""))
        try:
            from xlrd.xldate import xldate_from_date_tuple, \
                                    xldate_from_time_tuple, \
                                    xldate_from_datetime_tuple
        except ImportError:
            current.session.error = self.ERROR.XLRD_ERROR
            redirect(URL(extension=""))

        # Get the attributes
        title = attr.get("title")
        list_fields = attr.get("list_fields")
        report_groupby = attr.get("report_groupby")
        use_colour = attr.get("use_colour", True)
        # Extract the data from the data_source
        if isinstance(data_source, (list, tuple)):
            headers = data_source[0]
            types = data_source[1]
            items = data_source[2:]
        else:
            (title, types, headers, items) = self.extractResource(data_source,
                                                                  list_fields,
                                                                  report_groupby)

        if report_groupby != None:
            if isinstance(report_groupby, Field):
                groupby_label = report_groupby.label
            else:
                groupby_label = report_groupby

        # Environment
        request = current.request
        response = current.response

        # Date/Time formats from L10N deployment settings
        settings = current.deployment_settings
        date_format = S3XLS.dt_format_translate(settings.get_L10n_date_format())
        time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
        datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())

        # Initialize output
        output = StringIO()

        # Create the workbook and a sheet in it
        book = xlwt.Workbook(encoding="utf-8")
        # Length of the title needs to be fixed...
        sheet1 = book.add_sheet(str(title[0:10]))

        # Styles
        styleLargeHeader = xlwt.XFStyle()
        styleLargeHeader.font.bold = True
        styleLargeHeader.font.height = 400
        if use_colour:
            styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
            styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
            styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR

        styleNotes = xlwt.XFStyle()
        styleNotes.font.italic = True
        styleNotes.font.height = 160 # 160 Twips = 8point
        styleNotes.num_format_str = datetime_format

        styleHeader = xlwt.XFStyle()
        styleHeader.font.bold = True
        styleHeader.num_format_str = datetime_format
        if use_colour:
            styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
            styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR

        styleSubHeader = xlwt.XFStyle()
        styleSubHeader.font.bold = True
        if use_colour:
            styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
            styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR

        styleOdd = xlwt.XFStyle()
        if use_colour:
            styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
            styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]

        styleEven = xlwt.XFStyle()
        if use_colour:
            styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
            styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]

        # Initialize counters
        rowCnt = 0
        colCnt = 0

        # Title row
        totalCols = len(headers)-1
        if report_groupby != None:
            totalCols -= 1

        if totalCols > 0:
            sheet1.write_merge(rowCnt, rowCnt, 0, totalCols, str(title),
                               styleLargeHeader)
        currentRow = sheet1.row(rowCnt)
        currentRow.height = 440
        rowCnt += 1
        currentRow = sheet1.row(rowCnt)
        currentRow.write(totalCols, request.now, styleNotes)
        rowCnt += 1
        currentRow = sheet1.row(rowCnt)

        # Header row
        fieldWidth=[]
        for label in headers:
            if report_groupby != None:
                if label == groupby_label:
                    continue
            currentRow.write(colCnt, str(label), styleHeader)
            width = len(label) * S3XLS.COL_WIDTH_MULTIPLIER
            fieldWidth.append(width)
            sheet1.col(colCnt).width = width
            colCnt += 1

        # fix the size of the last column to display the date
        if 16 * S3XLS.COL_WIDTH_MULTIPLIER > width:
            sheet1.col(totalCols).width = 16 * S3XLS.COL_WIDTH_MULTIPLIER

        subheading = None
        for item in items:
            # Item details
            rowCnt += 1
            currentRow = sheet1.row(rowCnt)
            colCnt = 0
            if rowCnt % 2 == 0:
                style = styleEven
            else:
                style = styleOdd
            for label in headers:
                represent = item[colCnt]
                if type(represent) is not str:
                    represent = unicode(represent)
                # Strip away markup from representation
                try:
                    markup = etree.XML(str(represent))
                    text = markup.xpath(".//text()")
                    if text:
                        text = " ".join(text)
                    else:
                        text = ""
                    represent = text
                except:
                    pass
                if report_groupby != None:
                    if label == groupby_label:
                        if subheading != represent:
                            subheading = represent
                            sheet1.write_merge(rowCnt, rowCnt, 0, totalCols,
                                               represent, styleSubHeader)
                            rowCnt += 1
                            currentRow = sheet1.row(rowCnt)
                            if rowCnt % 2 == 0:
                                style = styleEven
                            else:
                                style = styleOdd
                        continue
                coltype=types[colCnt]
                value = represent
                if coltype == "date":
                    try:
                        format = str(settings.get_L10n_date_format())
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   format)
                        date_tuple = (cell_datetime.year,
                                      cell_datetime.month,
                                      cell_datetime.day
                                     )
                        value = xldate_from_date_tuple(date_tuple, 0)
                        style.num_format_str = date_format
                    except:
                        pass
                elif coltype == "datetime":
                    try:
                        format = str(settings.get_L10n_date_format())
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   format)
                        date_tuple = (cell_datetime.year,
                                      cell_datetime.month,
                                      cell_datetime.day,
                                      cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second,
                                     )
                        value = xldate_from_datetime_tuple(date_tuple, 0)
                        style.num_format_str = datetime_format
                    except:
                        pass
                elif coltype == "time":
                    try:
                        format = str(settings.get_L10n_date_format())
                        cell_datetime = datetime.datetime.strptime(value,
                                                                   format)
                        date_tuple = (cell_datetime.hour,
                                      cell_datetime.minute,
                                      cell_datetime.second,
                                     )
                        value = xldate_from_time_tuple(date_tuple)
                        style.num_format_str = time_format
                    except:
                        pass
                elif coltype == "integer":
                    try:
                        value = int(value)
                    except:
                        pass
                elif coltype == "double":
                    try:
                        value = float(value)
                    except:
                        pass
                currentRow.write(colCnt, value, style)
                width = len(represent) * S3XLS.COL_WIDTH_MULTIPLIER
                if width > fieldWidth[colCnt]:
                    fieldWidth[colCnt] = width
                    sheet1.col(colCnt).width = width
                colCnt += 1
        sheet1.panes_frozen = True
        sheet1.horz_split_pos = 3
        book.save(output)

        # Response headers
        filename = "%s_%s.xls" % (request.env.server_name, str(title))
        disposition = "attachment; filename=\"%s\"" % filename
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition

        output.seek(0)
        return output.read()
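
Like the previous example this saves the workbook into StringIO, but here the buffer is rewound and read back to produce the HTTP response body. getvalue() and seek(0) followed by read() yield the same bytes; the rewind matters because save()/write() leave the file position at the end. A minimal sketch of that detail (the written string stands in for book.save(output)):

import StringIO

output = StringIO.StringIO()
output.write("spreadsheet bytes ...")   # stand-in for book.save(output)

whole = output.getvalue()               # independent of the file position
output.seek(0)                          # rewind before reading the stream back
same = output.read()
assert whole == same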

Example 73

Project: cgstudiomap Source File: print_xml.py
    def parse_node(self, node, parent, browser, datas=None):
            attrs = self.node_attrs_get(node)
            if 'type' in attrs:
                if attrs['type']=='field':
                    value = self.get_value(browser, attrs['name'])
#TODO: test this
                    if value == '' and 'default' in attrs:
                        value = attrs['default']
                    el = etree.SubElement(parent, node.tag)
                    el.text = tounicode(value)
#TODO: test this
                    for key, value in attrs.iteritems():
                        if key not in ('type', 'name', 'default'):
                            el.set(key, value)

                elif attrs['type']=='attachment':
                    model = browser._name
                    value = self.get_value(browser, attrs['name'])

                    ids = self.pool['ir.attachment'].search(self.cr, self.uid, [('res_model','=',model),('res_id','=',int(value))])
                    datas = self.pool['ir.attachment'].read(self.cr, self.uid, ids)

                    if len(datas):
                        # if there are several, pick first
                        datas = datas[0]
                        fname = str(datas['datas_fname'])
                        ext = fname.split('.')[-1].lower()
                        if ext in ('jpg','jpeg', 'png'):
                            import base64
                            from StringIO import StringIO
                            dt = base64.decodestring(datas['datas'])
                            fp = StringIO()
                            fp.write(dt)
                            i = str(len(self.bin_datas))
                            self.bin_datas[i] = fp
                            el = etree.SubElement(parent, node.tag)
                            el.text = i

                elif attrs['type']=='data':
#TODO: test this
                    txt = self.datas.get('form', {}).get(attrs['name'], '')
                    el = etree.SubElement(parent, node.tag)
                    el.text = txt

                elif attrs['type']=='function':
                    if attrs['name'] in self.func:
                        txt = self.func[attrs['name']](node)
                    else:
                        txt = print_fnc.print_fnc(attrs['name'], node)
                    el = etree.SubElement(parent, node.tag)
                    el.text = txt

                elif attrs['type']=='eval':
                    value = self.eval(browser, attrs['expr'])
                    el = etree.SubElement(parent, node.tag)
                    el.text = str(value)

                elif attrs['type']=='fields':
                    fields = attrs['name'].split(',')
                    vals = {}
                    for b in browser:
                        value = tuple([self.get_value2(b, f) for f in fields])
                        if not value in vals:
                            vals[value]=[]
                        vals[value].append(b)
                    keys = vals.keys()
                    keys.sort()

                    if 'order' in attrs and attrs['order']=='desc':
                        keys.reverse()

                    v_list = [vals[k] for k in keys]
                    for v in v_list:
                        el = etree.SubElement(parent, node.tag)
                        for el_cld in node:
                            self.parse_node(el_cld, el, v)

                elif attrs['type']=='call':
                    if len(attrs['args']):
#TODO: test this
                        # fetch the values of the variables whose names were passed in the args attribute
                        args = [self.eval(browser, arg) for arg in attrs['args'].split(',')]
                    else:
                        args = []
                    # get the object
                    if 'model' in attrs:
                        obj = self.pool[attrs['model']]
                    else:
                        obj = browser       # the record(set) is an instance of the model

                    # get the ids
                    if 'ids' in attrs:
                        ids = self.eval(browser, attrs['ids'])
                    else:
                        ids = browser.ids

                    # call the method itself
                    newdatas = getattr(obj, attrs['name'])(self.cr, self.uid, ids, *args)

                    def parse_result_tree(node, parent, datas):
                        if not node.tag == etree.Comment:
                            el = etree.SubElement(parent, node.tag)
                            atr = self.node_attrs_get(node)
                            if 'value' in atr:
                                if not isinstance(datas[atr['value']], (str, unicode)):
                                    txt = str(datas[atr['value']])
                                else:
                                    txt = datas[atr['value']]
                                el.text = txt
                            else:
                                for el_cld in node:
                                    parse_result_tree(el_cld, el, datas)
                    if not isinstance(newdatas, (BaseModel, list)):
                        newdatas = [newdatas]
                    for newdata in newdatas:
                        parse_result_tree(node, parent, newdata)

                elif attrs['type']=='zoom':
                    value = self.get_value(browser, attrs['name'])
                    if value:
                        if not isinstance(value, (BaseModel, list)):
                            v_list = [value]
                        else:
                            v_list = value
                        for v in v_list:
                            el = etree.SubElement(parent, node.tag)
                            for el_cld in node:
                                self.parse_node(el_cld, el, v)
            else:
                # if there is no "type" attribute in the node, copy it to the xml data and parse its children
                if not node.tag == etree.Comment:
                    if node.tag == parent.tag:
                        el = parent
                    else:
                        el = etree.SubElement(parent, node.tag)
                    for el_cld in node:
                        self.parse_node(el_cld,el, browser)
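
For image attachments, parse_node() decodes the base64 payload and writes it into a StringIO object, giving the later rendering stage a binary file handle without creating a temporary file. A minimal sketch with an illustrative payload in place of the ir.attachment data:

import base64
import StringIO

encoded = base64.encodestring('\x89PNG illustrative bytes')   # stand-in for datas['datas']

dt = base64.decodestring(encoded)     # back to the raw image bytes
fp = StringIO.StringIO()
fp.write(dt)
fp.seek(0)                            # rewind so a consumer reads from the start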

Example 74

Project: kaa-metadata Source File: mp4.py
    def _readatom(self, file):
        s = file.read(8)
        if len(s) < 8:
            return 0

        atomsize,atomtype = struct.unpack('>I4s', s)
        if not str(atomtype).decode('latin1').isalnum():
            # stop at nonsense data
            return 0

        log.debug('%s [%X]' % (atomtype,atomsize))

        if atomtype == 'udta':
            # Userdata (Metadata)
            pos = 0
            tabl = {}
            i18ntabl = {}
            atomdata = file.read(atomsize-8)
            while pos < atomsize-12:
                (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos+8])
                if ord(datatype[0]) == 169:
                    # i18n Metadata...
                    mypos = 8+pos
                    while mypos + 4 < datasize+pos:
                        # first 4 Bytes are i18n header
                        (tlen, lang) = struct.unpack('>HH', atomdata[mypos:mypos+4])
                        i18ntabl[lang] = i18ntabl.get(lang, {})
                        l = atomdata[mypos+4:mypos+tlen+4]
                        i18ntabl[lang][datatype[1:]] = l
                        mypos += tlen+4
                elif datatype == 'WLOC':
                    # Drop Window Location
                    pass
                else:
                    if ord(atomdata[pos+8:pos+datasize][0]) > 1:
                        tabl[datatype] = atomdata[pos+8:pos+datasize]
                pos += datasize
            if len(i18ntabl.keys()) > 0:
                for k in i18ntabl.keys():
                    if QTLANGUAGES.has_key(k) and QTLANGUAGES[k] == 'en':
                        self._appendtable('QTUDTA', i18ntabl[k])
                        self._appendtable('QTUDTA', tabl)
            else:
                log.debug('NO i18')
                self._appendtable('QTUDTA', tabl)

        elif atomtype == 'trak':
            atomdata = file.read(atomsize-8)
            pos = 0
            trackinfo = {}
            tracktype = None
            while pos < atomsize-8:
                (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos+8])

                if datatype == 'tkhd':
                    tkhd = struct.unpack('>6I8x4H36xII', atomdata[pos+8:pos+datasize])
                    trackinfo['width'] = tkhd[10] >> 16
                    trackinfo['height'] = tkhd[11] >> 16
                    trackinfo['id'] = tkhd[3]

                    try:
                        # XXX Timestamp of Seconds is since January 1st 1904!
                        # XXX 2082844800 is the difference between Unix and
                        # XXX Apple time. FIXME to work on Apple, too
                        self.timestamp = int(tkhd[1]) - 2082844800
                    except Exception, e:
                        log.exception('There was trouble extracting timestamp')

                elif datatype == 'mdia':
                    pos      += 8
                    datasize -= 8
                    log.debug('--> mdia information')

                    while datasize:
                        mdia = struct.unpack('>I4s', atomdata[pos:pos+8])
                        if mdia[1] == 'mdhd':
                            # Parse based on version of mdhd header.  See
                            # http://wiki.multimedia.cx/index.php?title=QuickTime_container#mdhd
                            ver = ord(atomdata[pos + 8])
                            if ver == 0:
                                mdhd = struct.unpack('>IIIIIhh', atomdata[pos+8:pos+8+24])
                            elif ver == 1:
                                mdhd = struct.unpack('>IQQIQhh', atomdata[pos+8:pos+8+36])
                            else:
                                mdhd = None

                            if mdhd:
                                # duration / time scale
                                trackinfo['length'] = mdhd[4] / mdhd[3]
                                if mdhd[5] in QTLANGUAGES:
                                    trackinfo['language'] = QTLANGUAGES[mdhd[5]]
                                # mdhd[6] == quality
                                self.length = max(self.length, mdhd[4] / mdhd[3])
                        elif mdia[1] == 'minf':
                            # minf has only atoms inside
                            pos -=      (mdia[0] - 8)
                            datasize += (mdia[0] - 8)
                        elif mdia[1] == 'stbl':
                            # stbl has only atoms inside
                            pos -=      (mdia[0] - 8)
                            datasize += (mdia[0] - 8)
                        elif mdia[1] == 'hdlr':
                            hdlr = struct.unpack('>I4s4s', atomdata[pos+8:pos+8+12])
                            if hdlr[1] == 'mhlr':
                                if hdlr[2] == 'vide':
                                    tracktype = 'video'
                                if hdlr[2] == 'soun':
                                    tracktype = 'audio'
                        elif mdia[1] == 'stsd':
                            stsd = struct.unpack('>2I', atomdata[pos+8:pos+8+8])
                            if stsd[1] > 0:
                                codec = atomdata[pos+16:pos+16+8]
                                codec = struct.unpack('>I4s', codec)
                                trackinfo['codec'] = codec[1]
                                if codec[1] == 'jpeg':
                                    tracktype = 'image'
                        elif mdia[1] == 'dinf':
                            dref = struct.unpack('>I4s', atomdata[pos+8:pos+8+8])
                            log.debug('  --> %s, %s (useless)' % mdia)
                            if dref[1] == 'dref':
                                num = struct.unpack('>I', atomdata[pos+20:pos+20+4])[0]
                                rpos = pos+20+4
                                for ref in range(num):
                                    # FIXME: do something if this references
                                    ref = struct.unpack('>I3s', atomdata[rpos:rpos+7])
                                    data = atomdata[rpos+7:rpos+ref[0]]
                                    rpos += ref[0]
                        else:
                            if mdia[1].startswith('st'):
                                log.debug('  --> %s, %s (sample)' % mdia)
                            elif mdia[1] in ('vmhd',) and not tracktype:
                                # indicates that this track is video
                                tracktype = 'video'
                            elif mdia[1] in ('smhd',) and not tracktype:
                                # indicates that this track is audio
                                tracktype = 'audio'
                            else:
                                log.debug('  --> %s, %s (unknown)' % mdia)

                        pos      += mdia[0]
                        datasize -= mdia[0]

                elif datatype == 'udta':
                    log.debug(struct.unpack('>I4s', atomdata[:8]))
                else:
                    if datatype == 'edts':
                        log.debug('--> %s [%d] (edit list)' % \
                                  (datatype, datasize))
                    else:
                        log.debug('--> %s [%d] (unknown)' % \
                                  (datatype, datasize))
                pos += datasize

            info = None
            if tracktype == 'video':
                info = core.VideoStream()
                self.video.append(info)
            if tracktype == 'audio':
                info = core.AudioStream()
                self.audio.append(info)
            if info:
                for key, value in trackinfo.items():
                    setattr(info, key, value)

        elif atomtype == 'mvhd':
            # movie header
            mvhd = struct.unpack('>6I2h', file.read(28))
            self.length = max(self.length, mvhd[4] / mvhd[3])
            self.volume = mvhd[6]
            file.seek(atomsize-8-28,1)


        elif atomtype == 'cmov':
            # compressed movie
            datasize, atomtype = struct.unpack('>I4s', file.read(8))
            if not atomtype == 'dcom':
                return atomsize

            method = struct.unpack('>4s', file.read(datasize-8))[0]

            datasize, atomtype = struct.unpack('>I4s', file.read(8))
            if not atomtype == 'cmvd':
                return atomsize

            if method == 'zlib':
                data = file.read(datasize-8)
                try:
                    decompressed = zlib.decompress(data)
                except Exception, e:
                    try:
                        decompressed = zlib.decompress(data[4:])
                    except Exception, e:
                        log.exception('There was a problem decompressing atom')
                        return atomsize

                decompressedIO = StringIO.StringIO(decompressed)
                while self._readatom(decompressedIO):
                    pass

            else:
                log.info('unknown compression %s' % method)
                # unknown compression method
                file.seek(datasize-8,1)

        elif atomtype == 'moov':
            # decompressed movie info
            while self._readatom(file):
                pass

        elif atomtype == 'mdat':
            pos = file.tell() + atomsize - 8
            # maybe there is data inside the mdat
            log.info('parsing mdat')
            while self._readatom(file):
                pass
            log.info('end of mdat')
            file.seek(pos, 0)


        elif atomtype == 'rmra':
            # reference list
            while self._readatom(file):
                pass

        elif atomtype == 'rmda':
            # reference
            atomdata = file.read(atomsize-8)
            pos   = 0
            url = ''
            quality = 0
            datarate = 0
            while pos < atomsize-8:
                (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos+8])
                if datatype == 'rdrf':
                    rflags, rtype, rlen = struct.unpack('>I4sI', atomdata[pos+8:pos+20])
                    if rtype == 'url ':
                        url = atomdata[pos+20:pos+20+rlen]
                        if url.find('\0') > 0:
                            url = url[:url.find('\0')]
                elif datatype == 'rmqu':
                    quality = struct.unpack('>I', atomdata[pos+8:pos+12])[0]

                elif datatype == 'rmdr':
                    datarate = struct.unpack('>I', atomdata[pos+12:pos+16])[0]

                pos += datasize
            if url:
                self._references.append((url, quality, datarate))

        else:
            if not atomtype in ('wide', 'free'):
                log.info('unhandled base atom %s' % atomtype)

            # Skip unknown atoms
            try:
                file.seek(atomsize-8,1)
            except IOError:
                return 0

        return atomsize
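
The StringIO use here is in the 'cmov' branch: the zlib-compressed movie header is inflated into a byte string and wrapped in StringIO.StringIO so the very same _readatom() loop can walk it as if it were a file. A minimal, standalone sketch of that trick (with a toy atom stream, not real MP4 data):

import StringIO
import struct
import zlib

def read_atoms(fileobj):
    # walk "atoms": a 4-byte big-endian size followed by a 4-byte type
    while True:
        header = fileobj.read(8)
        if len(header) < 8:
            break
        size, kind = struct.unpack('>I4s', header)
        print kind, size
        fileobj.seek(size - 8, 1)   # skip the atom body

compressed = zlib.compress('\x00\x00\x00\x08free' * 3)
decompressed = zlib.decompress(compressed)
read_atoms(StringIO.StringIO(decompressed))   # re-parse from memory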

Example 75

Project: CouchPotatoServer Source File: mp4.py
    def _readatom(self, file):
        s = file.read(8)
        if len(s) < 8:
            return 0

        atomsize, atomtype = struct.unpack('>I4s', s)
        if not str(atomtype).decode('latin1').isalnum():
            # stop at nonsense data
            return 0

        log.debug(u'%r [%X]' % (atomtype, atomsize))

        if atomtype == 'udta':
            # Userdata (Metadata)
            pos = 0
            tabl = {}
            i18ntabl = {}
            atomdata = file.read(atomsize - 8)
            while pos < atomsize - 12:
                (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
                if ord(datatype[0]) == 169:
                    # i18n Metadata...
                    mypos = 8 + pos
                    while mypos + 4 < datasize + pos:
                        # first 4 Bytes are i18n header
                        (tlen, lang) = struct.unpack('>HH', atomdata[mypos:mypos + 4])
                        i18ntabl[lang] = i18ntabl.get(lang, {})
                        l = atomdata[mypos + 4:mypos + tlen + 4]
                        i18ntabl[lang][datatype[1:]] = l
                        mypos += tlen + 4
                elif datatype == 'WLOC':
                    # Drop Window Location
                    pass
                else:
                    if ord(atomdata[pos + 8:pos + datasize][0]) > 1:
                        tabl[datatype] = atomdata[pos + 8:pos + datasize]
                pos += datasize
            if len(i18ntabl.keys()) > 0:
                for k in i18ntabl.keys():
                    if QTLANGUAGES.has_key(k) and QTLANGUAGES[k] == 'en':
                        self._appendtable('QTUDTA', i18ntabl[k])
                        self._appendtable('QTUDTA', tabl)
            else:
                log.debug(u'NO i18')
                self._appendtable('QTUDTA', tabl)

        elif atomtype == 'trak':
            atomdata = file.read(atomsize - 8)
            pos = 0
            trackinfo = {}
            tracktype = None
            while pos < atomsize - 8:
                (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])

                if datatype == 'tkhd':
                    tkhd = struct.unpack('>6I8x4H36xII', atomdata[pos + 8:pos + datasize])
                    trackinfo['width'] = tkhd[10] >> 16
                    trackinfo['height'] = tkhd[11] >> 16
                    trackinfo['id'] = tkhd[3]

                    try:
                        # XXX Timestamp of Seconds is since January 1st 1904!
                        # XXX 2082844800 is the difference between Unix and
                        # XXX Apple time. FIXME to work on Apple, too
                        self.timestamp = int(tkhd[1]) - 2082844800
                    except Exception, e:
                        log.exception(u'There was trouble extracting timestamp')

                elif datatype == 'mdia':
                    pos += 8
                    datasize -= 8
                    log.debug(u'--> mdia information')

                    while datasize:
                        mdia = struct.unpack('>I4s', atomdata[pos:pos + 8])

                        if mdia[0] == 0:
                            break

                        if mdia[1] == 'mdhd':
                            # Parse based on version of mdhd header.  See
                            # http://wiki.multimedia.cx/index.php?title=QuickTime_container#mdhd
                            ver = ord(atomdata[pos + 8])
                            if ver == 0:
                                mdhd = struct.unpack('>IIIIIhh', atomdata[pos + 8:pos + 8 + 24])
                            elif ver == 1:
                                mdhd = struct.unpack('>IQQIQhh', atomdata[pos + 8:pos + 8 + 36])
                            else:
                                mdhd = None

                            if mdhd:
                                # duration / time scale
                                trackinfo['length'] = mdhd[4] / mdhd[3]
                                if mdhd[5] in QTLANGUAGES:
                                    trackinfo['language'] = QTLANGUAGES[mdhd[5]]
                                # mdhd[6] == quality
                                self.length = max(self.length, mdhd[4] / mdhd[3])
                        elif mdia[1] == 'minf':
                            # minf has only atoms inside
                            pos -= (mdia[0] - 8)
                            datasize += (mdia[0] - 8)
                        elif mdia[1] == 'stbl':
                            # stbl has only atoms inside
                            pos -= (mdia[0] - 8)
                            datasize += (mdia[0] - 8)
                        elif mdia[1] == 'hdlr':
                            hdlr = struct.unpack('>I4s4s', atomdata[pos + 8:pos + 8 + 12])
                            if hdlr[1] == 'mhlr':
                                if hdlr[2] == 'vide':
                                    tracktype = 'video'
                                if hdlr[2] == 'soun':
                                    tracktype = 'audio'
                        elif mdia[1] == 'stsd':
                            stsd = struct.unpack('>2I', atomdata[pos + 8:pos + 8 + 8])
                            if stsd[1] > 0:
                                codec = atomdata[pos + 16:pos + 16 + 8]
                                codec = struct.unpack('>I4s', codec)
                                trackinfo['codec'] = codec[1]
                                if codec[1] == 'jpeg':
                                    tracktype = 'image'
                        elif mdia[1] == 'dinf':
                            dref = struct.unpack('>I4s', atomdata[pos + 8:pos + 8 + 8])
                            log.debug(u'  --> %r, %r (useless)' % mdia)
                            if dref[1] == 'dref':
                                num = struct.unpack('>I', atomdata[pos + 20:pos + 20 + 4])[0]
                                rpos = pos + 20 + 4
                                for ref in range(num):
                                    # FIXME: do something if this references
                                    ref = struct.unpack('>I3s', atomdata[rpos:rpos + 7])
                                    data = atomdata[rpos + 7:rpos + ref[0]]
                                    rpos += ref[0]
                        else:
                            if mdia[1].startswith('st'):
                                log.debug(u'  --> %r, %r (sample)' % mdia)
                            elif mdia[1] == 'vmhd' and not tracktype:
                                # indicates that this track is video
                                tracktype = 'video'
                            elif mdia[1] == 'smhd' and not tracktype:
                                # indicates that this track is audio
                                tracktype = 'audio'
                            else:
                                log.debug(u'  --> %r, %r (unknown)' % mdia)

                        pos += mdia[0]
                        datasize -= mdia[0]

                elif datatype == 'udta':
                    log.debug(u'udta: %r' % struct.unpack('>I4s', atomdata[:8]))
                else:
                    if datatype == 'edts':
                        log.debug(u'--> %r [%d] (edit list)' % \
                                  (datatype, datasize))
                    else:
                        log.debug(u'--> %r [%d] (unknown)' % \
                                  (datatype, datasize))
                pos += datasize

            info = None
            if tracktype == 'video':
                info = core.VideoStream()
                self.video.append(info)
            if tracktype == 'audio':
                info = core.AudioStream()
                self.audio.append(info)
            if info:
                for key, value in trackinfo.items():
                    setattr(info, key, value)

        elif atomtype == 'mvhd':
            # movie header
            mvhd = struct.unpack('>6I2h', file.read(28))
            self.length = max(self.length, mvhd[4] / mvhd[3])
            self.volume = mvhd[6]
            file.seek(atomsize - 8 - 28, 1)


        elif atomtype == 'cmov':
            # compressed movie
            datasize, atomtype = struct.unpack('>I4s', file.read(8))
            if not atomtype == 'dcom':
                return atomsize

            method = struct.unpack('>4s', file.read(datasize - 8))[0]

            datasize, atomtype = struct.unpack('>I4s', file.read(8))
            if not atomtype == 'cmvd':
                return atomsize

            if method == 'zlib':
                data = file.read(datasize - 8)
                try:
                    decompressed = zlib.decompress(data)
                except Exception, e:
                    try:
                        decompressed = zlib.decompress(data[4:])
                    except Exception, e:
                        log.exception(u'There was a problem decompressing atom')
                        return atomsize

                decompressedIO = StringIO.StringIO(decompressed)
                while self._readatom(decompressedIO):
                    pass

            else:
                log.info(u'unknown compression %r' % method)
                # unknown compression method
                file.seek(datasize - 8, 1)

        elif atomtype == 'moov':
            # decompressed movie info
            while self._readatom(file):
                pass

        elif atomtype == 'mdat':
            pos = file.tell() + atomsize - 8
            # maybe there is data inside the mdat
            log.info(u'parsing mdat')
            while self._readatom(file):
                pass
            log.info(u'end of mdat')
            file.seek(pos, 0)


        elif atomtype == 'rmra':
            # reference list
            while self._readatom(file):
                pass

        elif atomtype == 'rmda':
            # reference
            atomdata = file.read(atomsize - 8)
            pos = 0
            url = ''
            quality = 0
            datarate = 0
            while pos < atomsize - 8:
                (datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
                if datatype == 'rdrf':
                    rflags, rtype, rlen = struct.unpack('>I4sI', atomdata[pos + 8:pos + 20])
                    if rtype == 'url ':
                        url = atomdata[pos + 20:pos + 20 + rlen]
                        if url.find('\0') > 0:
                            url = url[:url.find('\0')]
                elif datatype == 'rmqu':
                    quality = struct.unpack('>I', atomdata[pos + 8:pos + 12])[0]

                elif datatype == 'rmdr':
                    datarate = struct.unpack('>I', atomdata[pos + 12:pos + 16])[0]

                pos += datasize
            if url:
                self._references.append((url, quality, datarate))

        else:
            if not atomtype in ['wide', 'free']:
                log.info(u'unhandled base atom %r' % atomtype)

            # Skip unknown atoms
            try:
                file.seek(atomsize - 8, 1)
            except IOError:
                return 0

        return atomsize

Example 76

Project: golismero Source File: http.py
    def make_raw_request(self, raw_request, host, port = 80, proto = "http",
                 callback = None, timeout = 10.0):
        """
        Send a raw HTTP request to the server and get the response back.

        .. note:: This method does not support the use of the cache or a proxy.

        .. warning::
           This method only returns the HTTP response headers, **NOT THE CONTENT**.

        :param raw_request: Raw HTTP request to send.
        :type raw_request: HTTP_Raw_Request

        :param host: Hostname or IP address to connect to.
        :type host: str

        :param port: TCP port to connect to.
        :type port: int

        :param proto: Network protocol (that is, the URL scheme).
        :type proto: str

        :param callback: Callback function.
        :type callback: callable

        :param timeout: Timeout in seconds.
            The minimum value is 0.5 and the maximum is 100.0. Any other values
            will be silently converted to either one of them.
        :type timeout: int | float

        :returns: HTTP response, or None if the request was cancelled.
        :rtype: HTTP_Response | None

        :raises NetworkOutOfScope: The resource is out of the audit scope.
        :raises NetworkException: A network error occurred.
        """

        # Abort if a proxy is configured, because we don't support this yet.
        if Config.audit_config.proxy_addr:
            raise NotImplementedError("Proxy not yet supported")

        # Check the arguments.
        if type(raw_request) is str:
            raw_request = HTTP_Raw_Request(raw_request)
            LocalDataCache.on_autogeneration(raw_request)
        elif not isinstance(raw_request, HTTP_Raw_Request):
            raise TypeError("Expected HTTP_Raw_Request, got %r instead" % type(raw_request))
        if type(host) == unicode:
            raise NotImplementedError("Unicode hostnames not yet supported")
        if type(host) != str:
            raise TypeError("Expected str, got %r instead" % type(host))
        if proto not in ("http", "https"):
            raise ValueError("Protocol must be 'http' or 'https', not %r" % proto)
        if port is None:
            if proto == "http":
                port = 80
            elif proto == "https":
                port = 443
            else:
                assert False, "internal error!"
        elif type(port) not in (int, long):
            raise TypeError("Expected int, got %r instead" % type(port))
        if port < 1 or port > 32767:
            raise ValueError("Invalid port number: %d" % port)
        if callback is not None and not callable(callback):
            raise TypeError(
                "Expected callable (function, class, instance with __call__),"
                " got %r instead" % type(callback)
            )

        # Check the request scope.
        if host not in Config.audit_scope:
            raise NetworkOutOfScope("Host out of scope: %s" % host)

        # Sanitize the timeout value.
        if timeout:
            timeout = float(timeout)
            if timeout > 100.0:
                timeout = 100.0
            elif timeout < 0.5:
                timeout = 0.5
        else:
            timeout = 0.5

        # Resolve the hostname.
        # FIXME: we're only using the first item, but we could use more
        #        than one, for example iterate through them if they fail.
        # use a separate name so the URL scheme held in 'proto' is not clobbered
        family, socktype, sockproto, canonname, sockaddr = \
            getaddrinfo(host, port, 0, SOCK_STREAM)[0]

        # Get a connection slot.
        with ConnectionSlot(host):

            # Start the timer.
            t1 = time()

            # Connect to the server.
            try:
                s = socket(family, socktype, sockproto)
                try:
                    s.settimeout(timeout)
                    s.connect(sockaddr)
                    try:
                        if proto == "https":
                            s = wrap_socket(s)

                        # Send the HTTP request.
                        s.sendall(raw_request.raw_request)

                        # Get the HTTP response headers.
                        raw_response = StringIO()
                        while True:
                            data = s.recv(1)
                            if not data:
                                raise NetworkException(
                                    "Server has closed the connection")
                            raw_response.write(data)
                            if raw_response.getvalue().endswith("\r\n\r\n"):
                                break   # full HTTP headers received
                            if len(raw_response.getvalue()) > 65536:
                                raise NetworkException(
                                    "Response headers too long")

                        # Stop the timer.
                        t2 = time()

                        # Call the user-defined callback,
                        # and cancel if requested.
                        if callback is not None:
                            temp_request  = HTTP_Raw_Request(
                                raw_request.raw_request)
                            temp_response = HTTP_Response(
                                temp_request,
                                raw_response = raw_response.getvalue()
                            )
                            discard_data(temp_request)
                            discard_data(temp_response)
                            cont = callback(temp_request, temp_response)
                            if not cont:
                                return
                            del temp_request
                            del temp_response

                        # Start the timer.
                        t3 = time()

                        # Download the contents.
                        #
                        #
                        #
                        # XXX TODO
                        #
                        #
                        #

                        # Stop the timer.
                        t4 = time()

                        # Return the HTTP_Response object.
                        return HTTP_Response(
                            request      = raw_request,
                            raw_response = raw_response.getvalue(),
                            elapsed      = (t2 - t1) + (t4 - t3),
                        )

                    # Close the connection and clean up the socket.
                    finally:
                        try:
                            s.shutdown(2)
                        except Exception:
                            pass
                finally:
                    try:
                        s.close()
                    except Exception:
                        pass

            # On socket errors, send an exception.
            except error, e:
                raise NetworkException(str(e))
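
Here StringIO acts as a growable in-memory buffer: bytes read one at a time from the socket are appended with write(), and getvalue() is checked for the blank line that ends the HTTP headers. The same loop, with the socket replaced by a canned byte string purely for illustration:

import StringIO

canned = StringIO.StringIO("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\nbody")
buf = StringIO.StringIO()
while True:
    byte = canned.read(1)
    if not byte:
        break                                   # "connection" closed
    buf.write(byte)
    if buf.getvalue().endswith("\r\n\r\n"):
        break                                   # full header block received
print repr(buf.getvalue())

Calling getvalue() after every byte is quadratic, but it mirrors what the plugin does and is harmless for a few kilobytes of headers.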

Example 77

Project: jhbuild Source File: goalreport.py
    def run(self, config, options, args, help=None):
        if options.output:
            output = StringIO()
            global curses
            if curses and config.progress_bar:
                try:
                    curses.setupterm()
                except:
                    curses = None
        else:
            output = sys.stdout

        if not self.checks:
            self.load_checks_from_options(options.checks)

        self.load_bugs(options.bugfile)
        self.load_false_positives(options.falsepositivesfile)

        config.devhelp_dirname = options.devhelp_dirname
        config.partial_build = False

        module_set = jhbuild.moduleset.load(config)
        if options.list_all_modules:
            self.module_list = module_set.modules.values()
        else:
            self.module_list = module_set.get_module_list(args or config.modules, config.skip)

        results = {}
        try:
            cachedir = os.path.join(os.environ['XDG_CACHE_HOME'], 'jhbuild')
        except KeyError:
            cachedir = os.path.join(os.environ['HOME'], '.cache','jhbuild')
        if options.cache:
            try:
                results = cPickle.load(file(os.path.join(cachedir, options.cache)))
            except:
                pass

        self.repeat_row_header = 0
        if len(self.checks) > 4:
            self.repeat_row_header = 1

        for module_num, mod in enumerate(self.module_list):
            if mod.type in ('meta', 'tarball'):
                continue
            if not mod.branch or not mod.branch.repository.__class__.__name__ in (
                    'SubversionRepository', 'GitRepository'):
                if not mod.moduleset_name.startswith('gnome-external-deps'):
                    continue

            if not os.path.exists(mod.branch.srcdir):
                continue

            tree_id = mod.branch.tree_id()
            valid_cache = (tree_id and results.get(mod.name, {}).get('tree-id') == tree_id)

            if not mod.name in results:
                results[mod.name] = {
                    'results': {}
                }
            results[mod.name]['tree-id'] = tree_id
            r = results[mod.name]['results']
            for check in self.checks:
                if valid_cache and check.__name__ in r:
                    continue
                try:
                    c = check(config, mod)
                except ExcludedModuleException:
                    continue

                if output != sys.stdout and config.progress_bar:
                    progress_percent = 1.0 * (module_num-1) / len(self.module_list)
                    msg = '%s: %s' % (mod.name, check.__name__)
                    self.display_status_line(progress_percent, module_num, msg)

                try:
                    c.run()
                except CouldNotPerformCheckException:
                    continue
                except ExcludedModuleException:
                    continue

                try:
                    c.fix_false_positive(self.false_positives.get((mod.name, check.__name__)))
                except ExcludedModuleException:
                    continue

                r[check.__name__] = [c.status, c.complexity, c.result_comment]

        if not os.path.exists(cachedir):
            os.makedirs(cachedir)
        if options.cache:
            cPickle.dump(results, file(os.path.join(cachedir, options.cache), 'w'))

        print >> output, HTML_AT_TOP % {'title': self.title}
        if self.page_intro:
            print >> output, self.page_intro
        print >> output, '<table>'
        print >> output, '<thead>'
        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'
        if [x for x in self.checks if x.header_note]:
            print >> output, '<tr><td></td>'
            for check in self.checks:
                print >> output, '<td>%s</td>' % (check.header_note or '')
            print >> output, '</tr>'
        print >> output, '</thead>'
        print >> output, '<tbody>'

        processed_modules = {'gnome-common': True}

        suites = []
        for module_key, module in module_set.modules.items():
            if not isinstance(module_set.get_module(module_key), MetaModule):
                continue
            if module_key.endswith('upcoming-deprecations'):
                # mark deprecated modules as processed, so they don't show in "Others"
                try:
                    metamodule = module_set.get_module(module_key)
                except KeyError:
                    continue
                for module_name in metamodule.dependencies:
                    processed_modules[module_name] = True
            else:
                suites.append([module_key, module_key.replace('meta-', '')])

        not_other_module_names = []
        for suite_key, suite_label in suites:
            metamodule = module_set.get_module(suite_key)
            module_names = [x for x in metamodule.dependencies if x in results]
            if not module_names:
                continue
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, suite_label)
            for module_name in module_names:
                if module_name in not_other_module_names:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
                processed_modules[module_name] = True
            not_other_module_names.extend(module_names)

        external_deps = [x for x in results.keys() if \
                         x in [y.name for y in self.module_list] and \
                         not x in processed_modules and \
                         module_set.get_module(x).moduleset_name.startswith('gnome-external-deps')]
        if external_deps:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, 'External Dependencies')
            for module_name in sorted(external_deps):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                try:
                    version = module_set.get_module(module_name).branch.version
                except:
                    version = None
                print >> output, self.get_mod_line(module_name, r, version_number=version)

        other_module_names = [x for x in results.keys() if \
                              not x in processed_modules and not x in external_deps]
        if other_module_names:
            print >> output, '<tr><td class="heading" colspan="%d">%s</td></tr>' % (
                    1+len(self.checks)+self.repeat_row_header, 'Others')
            for module_name in sorted(other_module_names):
                if not module_name in results:
                    continue
                r = results[module_name].get('results')
                print >> output, self.get_mod_line(module_name, r)
        print >> output, '</tbody>'
        print >> output, '<tfoot>'

        print >> output, '<tr><td></td>'
        for check in self.checks:
            print >> output, '<th>%s</th>' % check.__name__
        print >> output, '<td></td></tr>'

        print >> output, self.get_stat_line(results, not_other_module_names)
        print >> output, '</tfoot>'
        print >> output, '</table>'

        if (options.bugfile and options.bugfile.startswith('http://')) or \
                (options.falsepositivesfile and options.falsepositivesfile.startswith('http://')):
            print >> output, '<div id="data">'
            print >> output, '<p>The following data sources are used:</p>'
            print >> output, '<ul>'
            if options.bugfile.startswith('http://'):
                print >> output, '  <li><a href="%s">Bugs</a></li>' % options.bugfile
            if options.falsepositivesfile.startswith('http://'):
                print >> output, '  <li><a href="%s">False positives</a></li>' % options.falsepositivesfile
            print >> output, '</ul>'
            print >> output, '</div>'

        print >> output, '<div id="footer">'
        print >> output, 'Generated:', time.strftime('%Y-%m-%d %H:%M:%S %z')
        print >> output, 'on ', socket.getfqdn()
        print >> output, '</div>'

        print >> output, '</body>'
        print >> output, '</html>'

        if output != sys.stdout:
            file(options.output, 'w').write(output.getvalue())

        if output != sys.stdout and config.progress_bar:
            sys.stdout.write('\n')
            sys.stdout.flush()
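
The StringIO here is a drop-in stand-in for sys.stdout: the whole report is printed to a file-like object, and only at the end is the buffer written to the file named by --output. A minimal sketch of that pattern (the report content and file name are made up):

import sys
from StringIO import StringIO

def write_report(output):
    print >> output, '<html><body>'
    print >> output, '<p>42 modules checked</p>'
    print >> output, '</body></html>'

output = StringIO()                      # or: output = sys.stdout
write_report(output)
if output is not sys.stdout:
    open('report.html', 'w').write(output.getvalue())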

Example 78

Project: cstar_perf Source File: fab_common.py
@fab.parallel
def bootstrap(git_fetch=True, revision_override=None, replace_existing_dse_install=True):
    """Install and configure the specified product on each host

    Returns the git id for the version checked out.
    """
    partitioner = config['partitioner']

    fab.run('mkdir -p fab')

    if config['product'] in (None, ''):
        logger.warn("revision product was not set -- defaulting to cassandra")
        config['product'] = 'cassandra'
    elif config['product'] not in ('cassandra', 'dse'):
        raise ValueError("Invalid product. Should be cassandra or dse")

    product = dse if config['product'] == 'dse' else cstar

    if product.name == 'dse':
        rev_id = dse.bootstrap(config, replace_existing_dse_install=replace_existing_dse_install)
    else:
        rev_id = cstar.bootstrap(config, git_fetch=git_fetch, revision_override=revision_override)

    cassandra_path = product.get_cassandra_path()

    # Get host config:
    try:
        cfg = config['hosts'][fab.env.host]
    except KeyError:
        # If host has no config, don't configure it. This is used for
        # compiling the source on the controlling node which is
        # usually not a part of the cluster.
        return rev_id

    #Ensure JNA is available:
    if config['use_jna']:
        # Check if JNA already exists:
        jna_jars = os.path.join(cassandra_path, 'lib/jna*.jar')
        jna_jar = os.path.join(cassandra_path, 'lib/jna.jar')
        jna_exists = fab.run('ls {}'.format(jna_jars), quiet=True)
        if jna_exists.return_code != 0:
            # Symlink system JNA to cassandra lib dir:
            jna_candidates = ['/usr/share/java/jna/jna.jar', '/usr/share/java/jna.jar']
            for jar in jna_candidates:
                if fab.run('ls {jar}'.format(jar=jar), quiet=True).return_code == 0:
                    fab.run('ln -s {jar} {jna}'.format(jar=jar, jna=jna_jar))
                    break
            else:
                if not os.path.exists('fab/jna.jar'):
                    request = download_file(JNA_LIB_URL, 'fab/jna.jar')
                    if request.status_code != requests.codes.ok:
                        raise AssertionError('Could not force JNA loading, no JNA jar found.')

                fab.put('fab/jna.jar', jna_jar)
    else:
        fab.run('rm -f {}'.format(os.path.join(cassandra_path, 'lib/jna*')))

    # Configure cassandra.yaml:
    conf_file = StringIO()
    fab.get(os.path.join(cassandra_path.replace('$HOME', '~'), 'conf', 'cassandra.yaml'), conf_file)
    conf_file.seek(0)
    cass_yaml = yaml.load(conf_file.read())

    # Get the canonical list of options from the c* source code:
    cstar_config_opts = product.get_cassandra_config_options(config)
    # CASSANDRA-11217 brought in a 'log' method and locals() contains 'log' which taints our cassandra.yaml. Delete it.
    try:
        cstar_config_opts.remove('log')
    except ValueError:
        pass

    if product.name == 'dse':
        dse_config_options = product.get_dse_config_options(config)
        dse_conf_file = StringIO()
        dse_yaml_path = os.path.join(product.get_dse_conf_path(), 'dse.yaml')
        fab.get(dse_yaml_path.replace('$HOME', '~'), dse_conf_file)
        dse_conf_file.seek(0)
        dse_yaml = yaml.load(dse_conf_file.read())

        configured_dse_yaml_settings = config.get('dse_yaml', {})
        if configured_dse_yaml_settings:
            for option, value in configured_dse_yaml_settings.items():
                if option not in dse_config_options:
                    raise ValueError('Unknown dse.yaml option: {}'.format(option))
                dse_yaml[option] = value

            # write values to dse.yaml
            dse_conf_file = StringIO()
            dse_conf_file.write(yaml.safe_dump(dse_yaml, encoding='utf-8', allow_unicode=True))
            dse_conf_file.seek(0)
            fab.put(dse_conf_file, dse_yaml_path.replace('$HOME', '~'))

    # Cassandra YAML values can come from two places:
    # 1) Set as options at the top level of the config. This is how
    # legacy cstar_perf did it. These are unvalidated:
    for option, value in config.items():
        if option in cstar_config_opts:
            cass_yaml[option] = value
    # 2) Set in the second level 'yaml' dictionary. This is how the
    # frontend and bootstrap.py does it. These take precedence over
    # the #1 style and are always validated for typos / invalid options.
    for option, value in config.get('yaml', {}).items():
        if option in DENIED_CSTAR_CONFIG:
            raise ValueError(
                'C* yaml option "{}" can only be set in the cluster config.'.format(option)
            )
        elif option not in cstar_config_opts:
            raise ValueError('Unknown C* yaml option: {}'.format(option))
        cass_yaml[option] = value

    if 'num_tokens' not in config.get('yaml', {}):
        if config.get('use_vnodes', True):
            cass_yaml['num_tokens'] = config['num_tokens']
        else:
            cass_yaml['initial_token'] = cfg['initial_token']
            cass_yaml['num_tokens'] = 1
    cass_yaml['listen_address'] = cfg['internal_ip']
    cass_yaml['broadcast_address'] = cfg.get('external_ip', cfg['internal_ip'])
    cass_yaml['seed_provider'][0]['parameters'][0]['seeds'] =  ",".join(config['seeds'])
    if partitioner == 'random':
        cass_yaml['partitioner'] = 'org.apache.cassandra.dht.RandomPartitioner'
    elif partitioner == 'murmur3':
        cass_yaml['partitioner'] = 'org.apache.cassandra.dht.Murmur3Partitioner'
    cass_yaml['rpc_address'] = cfg['internal_ip']

    #Configure Topology:
    if not config.has_key('endpoint_snitch'):
        for node in config['hosts'].values():
            if node.get('datacenter',False):
                config['endpoint_snitch'] = "GossipingPropertyFileSnitch"
                cass_yaml['auto_bootstrap'] = False
                break
        else:
            config['endpoint_snitch'] = "SimpleSnitch"

    conf_dir = os.path.join(cassandra_path, 'conf/').replace('$HOME', '~')
    if config['endpoint_snitch'] == 'PropertyFileSnitch':
        cass_yaml['endpoint_snitch'] = 'PropertyFileSnitch'
        fab.run("echo 'default=dc1:r1' > {}".format(conf_dir+'cassandra-topology.properties'))
        for node in config['hosts'].values():
            line = '%s=%s:%s' % (node['external_ip'], node.get('datacenter', 'dc1'), node.get('rack', 'r1'))
            fab.run("echo '{}' >> {}".format(line, conf_dir+'cassandra-topology.properties'))
    if config['endpoint_snitch'] == "GossipingPropertyFileSnitch":
        cass_yaml['endpoint_snitch'] = 'GossipingPropertyFileSnitch'
        fab.run("echo 'dc={dc}\nrack={rack}' > {out}".format(
            dc=cfg.get('datacenter','dc1'), rack=cfg.get('rack','r1'),
            out=conf_dir+'cassandra-rackdc.properties'))

    # Save config:
    conf_file = StringIO()
    conf_file.write(yaml.safe_dump(cass_yaml, encoding='utf-8', allow_unicode=True))
    conf_file.seek(0)
    fab.put(conf_file, conf_dir+'cassandra.yaml')

    # Configure logback:
    logback_template_config = logback_debug_template if config.get('debug_logging', False) else logback_template

    logback_conf = StringIO()
    # Get absolute path to log dir:
    log_dir = fab.run("readlink -m {log_dir}".format(log_dir=config['log_dir']))

    logback_conf.write(logback_template_config.replace("${cassandra.logdir}", log_dir))
    logback_conf.seek(0)
    fab.put(logback_conf, conf_dir + 'logback.xml')

    # Configure log4j:
    log4j_conf = StringIO()
    log4j_conf.write(log4j_template.replace("${cassandra.logdir}",log_dir))
    log4j_conf.seek(0)
    fab.put(log4j_conf, conf_dir+'log4j-server.properties')

    # Copy fincore utility:
    fincore_script = os.path.join(os.path.dirname(os.path.realpath(__file__)),'fincore_capture.py')
    fab.put(fincore_script, '~/fab/fincore_capture.py')
    return rev_id
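
cassandra.yaml, logback.xml and log4j-server.properties are all round-tripped through StringIO so the remote files never touch the local disk. Assuming Fabric 1.x, whose get()/put() accept file-like objects, the YAML round trip looks roughly like this (path and overrides are hypothetical):

import yaml
from StringIO import StringIO
from fabric.api import get, put

def patch_remote_yaml(remote_path, overrides):
    buf = StringIO()
    get(remote_path, buf)                # download straight into memory
    buf.seek(0)
    data = yaml.load(buf.read()) or {}
    data.update(overrides)               # e.g. {'num_tokens': 256}
    out = StringIO()
    out.write(yaml.safe_dump(data, encoding='utf-8', allow_unicode=True))
    out.seek(0)
    put(out, remote_path)                # upload straight from memory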

Example 79

Project: simian Source File: urlfetch_stub.py
  @staticmethod
  def _RetrieveURL(url, payload, method, headers, request, response,
                   follow_redirects=True, deadline=_API_CALL_DEADLINE,
                   validate_certificate=_API_CALL_VALIDATE_CERTIFICATE_DEFAULT):
    """Retrieves a URL over network.

    Args:
      url: String containing the URL to access.
      payload: Request payload to send, if any; None if no payload.
        If the payload is unicode, we assume it is utf-8.
      method: HTTP method to use (e.g., 'GET')
      headers: List of additional header objects to use for the request.
      request: A urlfetch_service_pb.URLFetchRequest proto object from
          original request.
      response: A urlfetch_service_pb.URLFetchResponse proto object to
          populate with the response data.
      follow_redirects: optional setting (defaulting to True) for whether or not
        we should transparently follow redirects (up to MAX_REDIRECTS)
      deadline: Number of seconds to wait for the urlfetch to finish.
      validate_certificate: If true, do not send request to server unless the
        certificate is valid, signed by a trusted CA and the hostname matches
        the certificate.

    Raises:
      Raises an apiproxy_errors.ApplicationError exception with
      INVALID_URL_ERROR in cases where:
        - The protocol of the redirected URL is bad or missing.
        - The port is not in the allowable range of ports.
      Raises an apiproxy_errors.ApplicationError exception with
      TOO_MANY_REDIRECTS in cases when MAX_REDIRECTS is exceeded
    """
    last_protocol = ''
    last_host = ''
    if isinstance(payload, unicode):
      payload = payload.encode('utf-8')

    for redirect_number in xrange(MAX_REDIRECTS + 1):
      parsed = urlparse.urlsplit(url)
      protocol, host, path, query, fragment = parsed

      port = urllib.splitport(urllib.splituser(host)[1])[1]

      if not _IsAllowedPort(port):
        logging.error(
          'urlfetch received %s ; port %s is not allowed in production!' %
          (url, port))

        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.INVALID_URL)

      if protocol and not host:

        logging.error('Missing host on redirect; target url is %s' % url)
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.INVALID_URL)

      if not host and not protocol:
        host = last_host
        protocol = last_protocol

      adjusted_headers = {
          'User-Agent':
          'AppEngine-Google; (+http://code.google.com/appengine)',
          'Host': host,
          'Accept-Encoding': 'gzip',
      }
      if payload is not None:
        adjusted_headers['Content-Length'] = str(len(payload))

      if method == 'POST' and payload:
        adjusted_headers['Content-Type'] = 'application/x-www-form-urlencoded'

      passthrough_content_encoding = False
      for header in headers:
        if header.key().title().lower() == 'user-agent':
          adjusted_headers['User-Agent'] = (
              '%s %s' %
              (header.value(), adjusted_headers['User-Agent']))
        else:
          if header.key().lower() == 'accept-encoding':
            passthrough_content_encoding = True
          adjusted_headers[header.key().title()] = header.value()

      if payload is not None:
        escaped_payload = payload.encode('string_escape')
      else:
        escaped_payload = ''
      logging.debug('Making HTTP request: host = %r, '
                    'url = %r, payload = %.1000r, headers = %r',
                    host, url, escaped_payload, adjusted_headers)
      try:
        if protocol == 'http':
          connection_class = httplib.HTTPConnection
        elif protocol == 'https':
          if (validate_certificate and _CanValidateCerts() and
              CERT_PATH):

            connection_class = fancy_urllib.create_fancy_connection(
                ca_certs=CERT_PATH)
          else:
            connection_class = httplib.HTTPSConnection
        else:

          error_msg = 'Redirect specified invalid protocol: "%s"' % protocol
          logging.error(error_msg)
          raise apiproxy_errors.ApplicationError(
              urlfetch_service_pb.URLFetchServiceError.INVALID_URL, error_msg)

        if _CONNECTION_SUPPORTS_TIMEOUT:
          connection = connection_class(host, timeout=deadline)
        else:
          connection = connection_class(host)



        last_protocol = protocol
        last_host = host

        if query != '':
          full_path = path + '?' + query
        else:
          full_path = path

        if not _CONNECTION_SUPPORTS_TIMEOUT:
          orig_timeout = socket.getdefaulttimeout()
        try:
          if not _CONNECTION_SUPPORTS_TIMEOUT:


            socket.setdefaulttimeout(deadline)
          connection.request(method, full_path, payload, adjusted_headers)
          http_response = connection.getresponse()
          if method == 'HEAD':
            http_response_data = ''
          else:
            http_response_data = http_response.read()
        finally:
          if not _CONNECTION_SUPPORTS_TIMEOUT:
            socket.setdefaulttimeout(orig_timeout)
          connection.close()
      except _fancy_urllib_InvalidCertException, e:
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.SSL_CERTIFICATE_ERROR,
          str(e))
      except _fancy_urllib_SSLError, e:
        app_error = (
            urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED
            if 'timed out' in e.message else
            urlfetch_service_pb.URLFetchServiceError.SSL_CERTIFICATE_ERROR)
        raise apiproxy_errors.ApplicationError(app_error, str(e))
      except socket.timeout, e:
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED, str(e))
      except (httplib.error, socket.error, IOError), e:
        raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, str(e))

      if http_response.status in REDIRECT_STATUSES and follow_redirects:

        url = http_response.getheader('Location', None)
        if url is None:
          error_msg = 'Redirecting response was missing "Location" header'
          logging.error(error_msg)
          raise apiproxy_errors.ApplicationError(
              urlfetch_service_pb.URLFetchServiceError.MALFORMED_REPLY,
              error_msg)



        if (http_response.status != httplib.TEMPORARY_REDIRECT and
            method not in PRESERVE_ON_REDIRECT):
          logging.warn('Received a %s to a %s. Redirecting with a GET',
                       http_response.status, method)
          method = 'GET'
          payload = None
      else:
        response.set_statuscode(http_response.status)
        if (http_response.getheader('content-encoding') == 'gzip' and
            not passthrough_content_encoding):
          gzip_stream = StringIO.StringIO(http_response_data)
          gzip_file = gzip.GzipFile(fileobj=gzip_stream)
          http_response_data = gzip_file.read()
        response.set_content(http_response_data[:MAX_RESPONSE_SIZE])


        for header_key in http_response.msg.keys():
          for header_value in http_response.msg.getheaders(header_key):
            if (header_key.lower() == 'content-encoding' and
                header_value == 'gzip' and
                not passthrough_content_encoding):
              continue
            if header_key.lower() == 'content-length' and method != 'HEAD':
              header_value = str(len(response.content()))
            header_proto = response.add_header()
            header_proto.set_key(header_key)
            header_proto.set_value(header_value)

        if len(http_response_data) > MAX_RESPONSE_SIZE:
          response.set_contentwastruncated(True)



        if request.url() != url:
          response.set_finalurl(url)


        break
    else:
      error_msg = 'Too many repeated redirects'
      logging.error(error_msg)
      raise apiproxy_errors.ApplicationError(
          urlfetch_service_pb.URLFetchServiceError.TOO_MANY_REDIRECTS,
          error_msg)
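
The gzip handling near the end is the classic StringIO idiom: the response body is just a byte string, and gzip.GzipFile needs a seekable file-like object, so the string is wrapped in StringIO.StringIO first. A self-contained sketch of both directions (the payload is made up):

import gzip
import StringIO

# compress a payload into an in-memory buffer
packed = StringIO.StringIO()
gz = gzip.GzipFile(fileobj=packed, mode='wb')
gz.write('hello urlfetch')
gz.close()                                   # flush the gzip trailer
body = packed.getvalue()

# what the stub does for a 'Content-Encoding: gzip' response
stream = StringIO.StringIO(body)
print gzip.GzipFile(fileobj=stream).read()   # -> hello urlfetch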

Example 80

Project: django-roa Source File: models.py
    def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Does the heavy-lifting involved in saving. Subclasses shouldn't need to
        override this method. It's separate from save() in order to hide the
        need for overrides of save() to pass around internal-only parameters
        ('raw', 'cls', and 'origin').
        """

        assert not (force_insert and force_update)

        record_exists = False

        if cls is None:
            cls = self.__class__
            meta = cls._meta
            if not meta.proxy:
                origin = cls
        else:
            meta = cls._meta

        if origin and not getattr(meta, "auto_created", False):
            signals.pre_save.send(sender=origin, instance=self, raw=raw)

        model_name = str(meta)

        # If we are in a raw save, save the object exactly as presented.
        # That means that we don't try to be smart about saving attributes
        # that might have come from the parent class - we just save the
        # attributes we have been given to the class we have been given.
        # We also go through this process to defer the save of proxy objects
        # to their actual underlying model.
        if not raw or meta.proxy:
            if meta.proxy:
                org = cls
            else:
                org = None
            for parent, field in meta.parents.items():
                # At this point, parent's primary key field may be unknown
                # (for example, from administration form which doesn't fill
                # this field). If so, fill it.
                if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                    setattr(self, parent._meta.pk.attname, getattr(self, field.attname))

                self.save_base(cls=parent, origin=org, using=using)

                if field:
                    setattr(self, field.attname, self._get_pk_val(parent._meta))
            if meta.proxy:
                return

        if not meta.proxy:
            pk_val = self._get_pk_val(meta)
            pk_is_set = pk_val is not None

            get_args = {}
            get_args[ROA_ARGS_NAMES_MAPPING.get('FORMAT', 'format')] = ROA_FORMAT
            get_args.update(ROA_CUSTOM_ARGS)

            # Construct Json payload
            serializer = self.get_serializer(self)
            payload = self.get_renderer().render(serializer.data)

            # Add serializer content_type
            headers = get_roa_headers()
            headers.update(self.get_serializer_content_type())

            # check if resource use custom primary key
            if not meta.pk.attname in ['pk', 'id']:
                # consider it might be inserting so check it first
                # @todo: try to improve this block to check if custom primary key is not None first
                resource = Resource(self.get_resource_url_detail(),
                                    filters=ROA_FILTERS, **ROA_SSL_ARGS)
                try:
                    response = resource.get(payload=None, headers=headers, **get_args)
                except ResourceNotFound:
                    # since such resource does not exist, it's actually creating
                    pk_is_set = False
                except RequestFailed:
                    pk_is_set = False

            if force_update or pk_is_set and not self.pk is None:
                record_exists = True
                resource = Resource(self.get_resource_url_detail(),
                                    filters=ROA_FILTERS, **ROA_SSL_ARGS)
                try:
                    logger.debug(u"""Modifying : "%s" through %s with payload "%s" and GET args "%s" """ % (
                                  force_unicode(self),
                                  force_unicode(resource.uri),
                                  force_unicode(payload),
                                  force_unicode(get_args)))
                    response = resource.put(payload=payload, headers=headers, **get_args)
                except RequestFailed as e:
                    raise ROAException(e)
            else:
                record_exists = False
                resource = Resource(self.get_resource_url_list(),
                                    filters=ROA_FILTERS, **ROA_SSL_ARGS)
                try:
                    logger.debug(u"""Creating  : "%s" through %s with payload "%s" and GET args "%s" """ % (
                                  force_unicode(self),
                                  force_unicode(resource.uri),
                                  force_unicode(payload),
                                  force_unicode(get_args)))
                    response = resource.post(payload=payload, headers=headers, **get_args)
                except RequestFailed as e:
                    raise ROAException(e)

            response = force_unicode(response.body_string()).encode(DEFAULT_CHARSET)

            data = self.get_parser().parse(StringIO(response))
            serializer = self.get_serializer(data=data)
            if not serializer.is_valid():
                raise ROAException(u'Invalid deserialization for %s model: %s' % (self, serializer.errors))
            try:
                self.pk = int(serializer.object.pk)
            except ValueError:
                self.pk = serializer.object.pk
            self = serializer.object

        if origin:
            signals.post_save.send(sender=origin, instance=self,
                created=(not record_exists), raw=raw)
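
The tail of this example shows the StringIO idiom this page is about: the HTTP response body is encoded to a byte string, wrapped in StringIO, and handed to a parser that only needs a file-like object. A minimal sketch of that round trip, using the stdlib json module as a stand-in for the project's pluggable parser (the body shown is illustrative):

from StringIO import StringIO
import json

response_body = '{"id": 42, "name": "example"}'   # stand-in for response.body_string()
data = json.load(StringIO(response_body))          # the parser only needs read()
print(data['id'])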

Example 81

Project: 2015.hackatbrown.org Source File: __init__.py
def getEncodingInfo(response=None, text=u'', log=None, url=None):
    """Find all encoding related information in given `text`.

    Information in headers of supplied HTTPResponse, possible XML
    declaration and X/HTML ``<meta>`` elements are used.

    :param response:
        HTTP response object, e.g. via ``urllib.urlopen('url')``
    :param text:
        a byte string to guess encoding for. XML prolog with
        encoding pseudo attribute or HTML meta element will be used to detect
        the encoding
    :param url:
        When given, fetches the document at `url` and all needed information.
        No `response` or `text` parameters are needed in this case.
    :param log:
        an optional logging logger to which messages may go; if
        no log is given, all log messages are available from the resulting
        ``EncodingInfo``

    :returns:
        instance of :class:`EncodingInfo`.

    How the resulting encoding is retrieved:

    XML
        RFC 3023 states if media type given in the Content-Type HTTP header is
        application/xml, application/xml-dtd,
        application/xml-external-parsed-entity, or any one of the subtypes of
        application/xml such as application/atom+xml or application/rss+xml
        etc then the character encoding is determined in this order:

        1. the encoding given in the charset parameter of the Content-Type HTTP
        header, or
        2. the encoding given in the encoding attribute of the XML declaration
        within the document, or
        3. utf-8.

        Mismatch possibilities:
            - HTTP + XMLdecla
            - HTTP + HTMLmeta

            application/xhtml+xml ?
                XMLdecla + HTMLmeta


        If the media type given in the Content-Type HTTP header is text/xml,
        text/xml-external-parsed-entity, or a subtype like text/Anything+xml,
        the encoding attribute of the XML declaration is ignored completely
        and the character encoding is determined in the order:
        1. the encoding given in the charset parameter of the Content-Type HTTP
        header, or
        2. ascii.

        No mismatch possible.


        If no media type is given, the XML encoding pseudo attribute is used
        if present.

        No mismatch possible.

    HTML
        For HTML served as text/html:
            http://www.w3.org/TR/REC-html40/charset.html#h-5.2.2

        1. An HTTP "charset" parameter in a "Content-Type" field.
           (maybe defaults to ISO-8859-1, but should not assume this)
        2. A META declaration with "http-equiv" set to "Content-Type" and a
           value set for "charset".
        3. The charset attribute set on an element that designates an external
           resource. (NOT IMPLEMENTED HERE YET)

        Mismatch possibilities:
            - HTTP + HTMLmeta

    TEXT
        For most text/* types the encoding will be reported as iso-8859-1.
        Exceptions are XML formats sent as a text/* mime type (see above) and
        text/css which has a default encoding of UTF-8.
    """
    if url:
        # may cause IOError which is raised
        response = urllib.urlopen(url)

    if text is None:
        # read text from response only if not explicitly given
        try:
            text = response.read()
        except IOError, e:
            pass

    if text is None:
        # text must be a string (not None)
        text = ''

    encinfo = EncodingInfo()

    logstream = StringIO.StringIO()
    if not log:
        log = buildlog(stream=logstream, format='%(message)s')

    # HTTP
    if response:
        encinfo.http_media_type, encinfo.http_encoding = getHTTPInfo(
            response, log)
        texttype = _getTextTypeByMediaType(encinfo.http_media_type, log)
    else:
        # check if maybe XML or (TODO:) HTML
        texttype = _getTextType(text, log)

    # XML only served as application/xml ! #(also XHTML served as text/html)
    if texttype == _XML_APPLICATION_TYPE:# or texttype == _XML_TEXT_TYPE:
        try:
            encinfo.xml_encoding = detectXMLEncoding(text, log)
        except (AttributeError, ValueError), e:
            encinfo.xml_encoding = None

    # XML (also XHTML served as text/html)
    if texttype == _HTML_TEXT_TYPE:
        try:
            encinfo.xml_encoding = detectXMLEncoding(text, log, includeDefault=False)
        except (AttributeError, ValueError), e:
            encinfo.xml_encoding = None

    # HTML
    if texttype == _HTML_TEXT_TYPE or texttype == _TEXT_TYPE:
        encinfo.meta_media_type, encinfo.meta_encoding = getMetaInfo(
            text, log)

    # guess
    # 1. HTTP charset?
    encinfo.encoding = encinfo.http_encoding
    encinfo.mismatch = False

    # 2. media_type?
    #   XML application/...
    if texttype == _XML_APPLICATION_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encinfo.xml_encoding
            # xml_encoding has default of utf-8

    #   text/html
    elif texttype == _HTML_TEXT_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encinfo.meta_encoding
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)
        if not encinfo.encoding:
            encinfo.encoding = tryEncodings(text)

    #   text/... + xml or text/*
    elif texttype == _XML_TEXT_TYPE or texttype == _TEXT_TYPE:
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)

    elif texttype == _TEXT_UTF8:
        if not encinfo.encoding:
            encinfo.encoding = encodingByMediaType(encinfo.http_media_type)

    # possible mismatches, checks if present at all and then if equal
    # HTTP + XML
    if encinfo.http_encoding and encinfo.xml_encoding and\
       encinfo.http_encoding != encinfo.xml_encoding:
        encinfo.mismatch = True
        log.warn(u'"%s" (HTTP) != "%s" (XML) encoding mismatch' %
                 (encinfo.http_encoding, encinfo.xml_encoding))
    # HTTP + Meta
    if encinfo.http_encoding and encinfo.meta_encoding and\
         encinfo.http_encoding != encinfo.meta_encoding:
        encinfo.mismatch = True
        log.warning(u'"%s" (HTTP) != "%s" (HTML <meta>) encoding mismatch' %
                 (encinfo.http_encoding, encinfo.meta_encoding))
    # XML + Meta
    if encinfo.xml_encoding and encinfo.meta_encoding and\
         encinfo.xml_encoding != encinfo.meta_encoding:
        encinfo.mismatch = True
        log.warning(u'"%s" (XML) != "%s" (HTML <meta>) encoding mismatch' %
                 (encinfo.xml_encoding, encinfo.meta_encoding))

    log.info(u'Encoding (probably): %s (Mismatch: %s)',
             encinfo.encoding, encinfo.mismatch)

    encinfo.logtext = logstream.getvalue()
    return encinfo
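
The logstream trick used above - pointing a logger at a StringIO so its output can be attached to the returned EncodingInfo - can be reproduced with the stdlib logging module alone (buildlog is the project's own helper; this sketch wires up the handler directly, and the logger name is illustrative):

import logging
import StringIO

logstream = StringIO.StringIO()
log = logging.getLogger('encodinginfo-demo')
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(logstream))

log.warning(u'"%s" (HTTP) != "%s" (XML) encoding mismatch' % ('iso-8859-1', 'utf-8'))
captured = logstream.getvalue()   # ready to store on a result object, e.g. encinfo.logtext
print(captured)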

Example 82

Project: eyefiserver2 Source File: eyefiserver.py
Function: do_post
    def do_POST(self):
        try:
            eyeFiLogger.debug(self.command + " " + self.path + " " + self.request_version)

            SOAPAction = ""
            contentLength = ""

            # Loop through all the request headers and pick out ones that are relevant

            eyeFiLogger.debug("Headers received in POST request:")
            for headerName in self.headers.keys():
                for headerValue in self.headers.getheaders(headerName):

                    if( headerName == "soapaction"):
                        SOAPAction = headerValue

                    if( headerName == "content-length"):
                        contentLength = int(headerValue)

                    eyeFiLogger.debug(headerName + ": " + headerValue)


            # Read contentLength bytes worth of data
            eyeFiLogger.debug("Attempting to read " + str(contentLength) + " bytes of data")
            # postData = self.rfile.read(contentLength)
            try:
                from StringIO import StringIO
                import tempfile
            except ImportError:
                eyeFiLogger.debug("No StringIO module")
            chunksize = 1048576 # 1MB
            mem = StringIO()
            while 1:
                remain = contentLength - mem.tell()
                if remain <= 0: break
                chunk = self.rfile.read(min(chunksize, remain))
                if not chunk: break
                mem.write(chunk)
                print remain
            print "Finished"
            postData = mem.getvalue()
            mem.close()

            eyeFiLogger.debug("Finished reading " + str(contentLength) + " bytes of data")

            # Perform action based on path and SOAPAction
            # A SOAPAction of StartSession indicates the beginning of an EyeFi
            # authentication request
            if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:StartSession\"")):
                eyeFiLogger.debug("Got StartSession request")
                response = self.startSession(postData)
                contentLength = len(response)

                eyeFiLogger.debug("StartSession response: " + response)

                self.send_response(200)
                self.send_header('Date', self.date_time_string())
                self.send_header('Pragma','no-cache')
                self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
                self.send_header('Content-Type','text/xml; charset="utf-8"')
                self.send_header('Content-Length', contentLength)
                self.end_headers()

                self.wfile.write(response)
                self.wfile.flush()
                self.handle_one_request()

            # GetPhotoStatus allows the card to query if a photo has been uploaded
            # to the server yet
            if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:GetPhotoStatus\"")):
                eyeFiLogger.debug("Got GetPhotoStatus request")

                response = self.getPhotoStatus(postData)
                contentLength = len(response)

                eyeFiLogger.debug("GetPhotoStatus response: " + response)

                self.send_response(200)
                self.send_header('Date', self.date_time_string())
                self.send_header('Pragma','no-cache')
                self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
                self.send_header('Content-Type','text/xml; charset="utf-8"')
                self.send_header('Content-Length', contentLength)
                self.end_headers()

                self.wfile.write(response)
                self.wfile.flush()


            # If the URL is upload and there is no SOAPAction the card is ready to send a picture to me
            if((self.path == "/api/soap/eyefilm/v1/upload") and (SOAPAction == "")):
                eyeFiLogger.debug("Got upload request")
                response = self.uploadPhoto(postData)
                contentLength = len(response)

                eyeFiLogger.debug("Upload response: " + response)

                self.send_response(200)
                self.send_header('Date', self.date_time_string())
                self.send_header('Pragma','no-cache')
                self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
                self.send_header('Content-Type','text/xml; charset="utf-8"')
                self.send_header('Content-Length', contentLength)
                self.end_headers()

                self.wfile.write(response)
                self.wfile.flush()

            # If the URL is upload and SOAPAction is MarkLastPhotoInRoll
            if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:MarkLastPhotoInRoll\"")):
                eyeFiLogger.debug("Got MarkLastPhotoInRoll request")
                response = self.markLastPhotoInRoll(postData)
                contentLength = len(response)

                eyeFiLogger.debug("MarkLastPhotoInRoll response: " + response)
                self.send_response(200)
                self.send_header('Date', self.date_time_string())
                self.send_header('Pragma','no-cache')
                self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
                self.send_header('Content-Type','text/xml; charset="utf-8"')
                self.send_header('Content-Length', contentLength)
                self.send_header('Connection', 'Close')
                self.end_headers()

                self.wfile.write(response)
                self.wfile.flush()

                eyeFiLogger.debug("Connection closed.")
        except:
            eyeFiLogger.error("Got an an exception:")
            eyeFiLogger.error(traceback.format_exc())
            raise
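
The chunked read in do_POST - filling a StringIO buffer until Content-Length bytes have arrived - works against any file-like source. A standalone version of the same loop (read_exactly and the demo source below are illustrative, not part of eyefiserver):

from StringIO import StringIO

def read_exactly(rfile, content_length, chunksize=1048576):
    """Buffer content_length bytes from rfile in memory, chunk by chunk."""
    mem = StringIO()
    while True:
        remain = content_length - mem.tell()
        if remain <= 0:
            break
        chunk = rfile.read(min(chunksize, remain))
        if not chunk:
            break
        mem.write(chunk)
    return mem.getvalue()

print(read_exactly(StringIO('x' * 10), 5))   # prints 'xxxxx'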

Example 83

Project: sonospy Source File: soap_HTTP1.0.py
    def call(self, addr, data, namespace, soapaction=None, encoding=None):
        """ Builds and performs an HTTP request. Returns the response payload.

        @param addr: address to receive the request in the form
        schema://hostname:port
        @param data: data to be sent
        @param soapaction: soap action to be called
        @param encoding: encoding for the message

        @type addr: string
        @type data: string
        @type soapaction: string
        @type encoding: string

        @return: response payload
        @rtype: string
        """

        log.debug('#### HTTPTransport call - addr : %s' % str(addr))
        log.debug('#### HTTPTransport call - data : %s' % str(data))
        log.debug('#### HTTPTransport call - namespace : %s' % str(namespace))
        log.debug('#### HTTPTransport call - soapaction : %s' % str(soapaction))
        log.debug('#### HTTPTransport call - encoding : %s' % str(encoding))

        # Build a request
        
        '''
        addr : http://legato.radiotime.com:80
        data : <?xml version="1.0" encoding="utf-8"?><s:Envelope s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/" xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"><s:Header><credentials xmlns="http://www.sonos.com/Services/1.1"><deviceProvider>Sonos</deviceProvider></credentials></s:Header><s:Body><ns0:getMetadata xmlns:ns0="http://www.sonos.com/Services/1.1"><count>100</count><index>0</index><recursive>false</recursive><id>root</id></ns0:getMetadata></s:Body></s:Envelope>
        namespace : ('u', 'http://www.sonos.com/Services/1.1')
        soapaction : http://www.sonos.com/Services/1.1#getMetadata
        encoding : utf-8
        real_addr : legato.radiotime.com:80
        real_path : 
        addr.scheme : http
        addr.hostname : legato.radiotime.com

        POST /Radio.asmx HTTP/1.1
        CONNECTION: close
        ACCEPT-ENCODING: gzip
        HOST: legato.radiotime.com
        USER-AGENT: Linux UPnP/1.0 Sonos/11.7-19141a
        CONTENT-LENGTH: 337
        CONTENT-TYPE: text/xml; charset="utf-8"
        ACCEPT-LANGUAGE: en-US
        SOAPACTION: "http://www.sonos.com/Services/1.1#getMetadata"
        '''
        # TODO: tidy up parameters, use saved params from musicservices call, change to gzip
        addr = parse_url(addr)
        real_addr = '%s:%d' % (addr.hostname, addr.port)
        real_path = addr.path

        if addr.scheme == 'https':
            r = httplib.HTTPSConnection(real_addr)
        else:
            r = httplib.HTTPConnection(real_addr)

        log.debug('#### HTTPTransport call - real_addr : %s' % real_addr)
        log.debug('#### HTTPTransport call - real_path : %s' % real_path)
        log.debug('#### HTTPTransport call - addr.scheme : %s' % addr.scheme)
        log.debug('#### HTTPTransport call - addr.hostname : %s' % addr.hostname)

        r.putrequest("POST", real_path, skip_host=1, skip_accept_encoding=1)
        
        r.putheader("ACCEPT-ENCODING", 'gzip')
        r.putheader("CONNECTION", 'close')

        r.putheader("HOST", addr.hostname)
        r.putheader("USER-AGENT", 'Linux UPnP/1.0 Sonos/11.7-19141a')
        t = 'text/xml'
#        if encoding:
#            t += '; charset="%s"' % encoding
            
            
        r.putheader("CONTENT-TYPE", t)
#        r.putheader("ACCEPT-CHARSET", 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')
        r.putheader("ACCEPT-LANGUAGE", 'en-US')
        r.putheader("CONTENT-LENGTH", str(len(data)))


        # if a username was given, add HTTP Basic auth credentials
        if addr.username != None:
            val = base64.encodestring(addr.user)
            r.putheader('Authorization', 'Basic ' + val.replace('\012', ''))

        # This fixes sending either "" or "None"
        if soapaction:
            r.putheader("SOAPACTION", '"%s"' % soapaction)
        else:
            r.putheader("SOAPACTION", "")

        r.endheaders()

        log.debug('#### HTTP BEFORE r.send ################################')

        r.send(data)

        log.debug('#### HTTP AFTER r.send ################################')

        #read response line
#        code, msg, headers = r.getreply()
        response = r.getresponse()
        code = response.status
        msg = response.reason
        headers = response.msg

        log.debug('#### HTTP AFTER START #################################')
        log.debug('#### HTTP code        : %s' % str(code))
        log.debug('#### HTTP msg         : %s' % str(msg))
        log.debug('#### HTTP headers     : %s' % str(headers))
        log.debug('#### HTTP AFTER END ###################################')

        content_type = headers.get("content-type", "text/xml")
        content_length = headers.get("Content-length")
        if content_length == None:
#            data = r.getfile().read()
            data = response.read()
            message_len = len(data)
        else:
            message_len = int(content_length)
#            data = r.getfile().read(message_len)
            data = response.read(message_len)

        def startswith(string, val):
            return string[0:len(val)] == val


        if code == 500 and not \
               (startswith(content_type, "text/xml") and message_len > 0):
            raise HTTPError(code, msg)

        if code not in (200, 500):
            raise HTTPError(code, msg)

        #return response payload
#        return data.decode('utf-8')

        import StringIO
        stream = StringIO.StringIO(data)
        import gzip
        gzipper = gzip.GzipFile(fileobj=stream)
        data = gzipper.read()


        return data
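
The last few lines above gunzip the SOAP response through a StringIO buffer. Isolated, the pattern is just the sketch below (gunzip_payload is an illustrative name; the compressed sample is built inline so the snippet is self-contained):

import gzip
import StringIO

def gunzip_payload(data):
    """Decompress a gzip-encoded payload held in memory."""
    stream = StringIO.StringIO(data)
    return gzip.GzipFile(fileobj=stream).read()

# build a compressed sample so the sketch can run on its own
buf = StringIO.StringIO()
gfile = gzip.GzipFile(mode='wb', fileobj=buf)
gfile.write('<s:Envelope/>')
gfile.close()

print(gunzip_payload(buf.getvalue()))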

Example 84

Project: cti-toolkit Source File: test_misp_submission.py
@httpretty.activate
@mock.patch('certau.transform.misp.time.sleep')
def test_misp_publishing(_):
    """Test that the stixtrans module can submit to a MISP server."""
    # STIX file to test against. Place in a StringIO instance so we can
    # close the file.
    with open('tests/CA-TEST-STIX.xml', 'rb') as stix_f:
        stix_io = StringIO.StringIO(stix_f.read())

    # Create a transformer - select 'text' output format and flag MISP
    # publishing (with appropriate settings).
    package = stix.core.STIXPackage.from_xml(stix_io)
    misp_args = {
        'misp_url': 'http://misp.host.tld/',
        'misp_key': '111111111111111111111111111',
    }
    misp_event_args = {
        'distribution': 1,
        'threat_level': 4,
        'analysis': 0,
    }

    # Ensures that non-registered paths fail
    httpretty.HTTPretty.allow_net_connect = False

    # Mock the MISP version retrieval.
    httpretty.register_uri(
        httpretty.GET,
        'http://misp.host.tld/servers/getVersion',
        body=json.dumps({}),
        content_type='application/json',
    )

    # Mock the creation of an event
    httpretty.register_uri(
        httpretty.POST,
        'http://misp.host.tld/events',
        body=json.dumps({'Event': {
            'id': '0',
            'distribution': misp_event_args['distribution'],
        }}),
        content_type='application/json',
    )

    # Mock the adding of a tag to an event
    httpretty.register_uri(
        httpretty.POST,
        'http://misp.host.tld/events/addTag',
        body=json.dumps({'Event': {
            'id': '0',
            'tag': 4,
        }}),
        content_type='application/json',
    )

    # Mock editing of a created event.
    httpretty.register_uri(
        httpretty.POST,
        'http://misp.host.tld/events/0',
        body=json.dumps({}),
        content_type='application/json',
    )

    # Perform the processing and the misp publishing.
    misp = certau.transform.StixMispTransform.get_misp_object(
        **misp_args
    )
    transformer = certau.transform.StixMispTransform(
        package=package,
        misp=misp,
        **misp_event_args
    )
    transformer.publish()

    # Test the correct requests were made
    reqs = list(httpretty.httpretty.latest_requests)

    # The "get version" request includes the MISP key.
    r_get_version = reqs[0]
    assert r_get_version.path == '/servers/getVersion'
    assert r_get_version.headers.dict['authorization'] == misp_args['misp_key']

    # The event creation request includes basic information.
    r_create_event = reqs[1]
    assert r_create_event.path == '/events'
    assert json.loads(r_create_event.body) == {
        u'Event': {
            u'analysis': misp_event_args['analysis'],
            u'published': False,
            u'threat_level_id': misp_event_args['threat_level'],
            u'distribution': misp_event_args['distribution'],
            u'date': '2015-12-23',
            u'info': 'CA-TEST-STIX | Test STIX data'
        }
    }

    # The TLP tag is added to the event.
    r_add_tag = reqs[2]
    assert r_add_tag.path == '/events/addTag'
    assert json.loads(r_add_tag.body) == {
        u'request': {
            u'Event': {
                u'id': '0',
                u'tag': 4,
            }
        }
    }

    # The event is then updated with the observables, over multiple
    # requests. We're only interested in the 'Attribute' key here as that
    # contains the data extracted from the observable.
    obs_attributes = sorted([json.loads(request.body)['Event']['Attribute'][0]
                             for request
                             in reqs[3:]])

    assert obs_attributes == sorted([
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'md5',
            u'value': u'11111111111111112977fa0588bd504a',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'md5',
            u'value': u'ccccccccccccccc33574c79829dc1ccf',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'md5',
            u'value': u'11111111111111133574c79829dc1ccf',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'md5',
            u'value': u'11111111111111111f2601b4d21660fb',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'md5',
            u'value': u'1111111111b42b57f518197d930471d9',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'mutex',
            u'value': u'\\BaseNamedObjects\\MUTEX_0001',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'mutex',
            u'value': u'\\BaseNamedObjects\\WIN_ABCDEF',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'mutex',
            u'value': u'\\BaseNamedObjects\\iurlkjashdk',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'regkey|value',
            u'value': u'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run|hotkey\\%APPDATA%\\malware.exe -st',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'sha1',
            u'value': u'893fb19ac24eabf9b1fe1ddd1111111111111111',
        },
        {
            u'category': u'Artifacts dropped',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'sha256',
            u'value': u'11111111111111119f167683e164e795896be3be94de7f7103f67c6fde667bdf',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'domain',
            u'value': u'bad.domain.org',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'domain',
            u'value': u'dnsupdate.dyn.net',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'domain',
            u'value': u'free.stuff.com',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'ip-dst',
            u'value': u'183.82.180.95',
        },

        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'ip-dst',
            u'value': u'111.222.33.44',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'ip-dst',
            u'value': u'158.164.39.51',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'url',
            u'value': u'http://host.domain.tld/path/file',
        },
        {
            u'category': u'Network activity',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'user-agent',
            u'value': u'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36',
        },
        {
            u'category': u'Payload delivery',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'email-src',
            u'value': u'[email protected]',
        },
        {
            u'category': u'Payload delivery',
            u'distribution': 1,
            u'to_ids': True,
            u'type': u'email-subject',
            u'value': u'Important project details',
        },
    ])
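
The opening lines of this test use StringIO for a small but common reason: reading the fixture into memory lets the real file handle be closed while the STIX parser still receives a file-like object. Reduced to a helper (load_into_memory is an illustrative name, not part of cti-toolkit):

import StringIO

def load_into_memory(path):
    """Return the file's contents wrapped in a seekable in-memory buffer."""
    with open(path, 'rb') as f:
        data = f.read()
    # the on-disk handle is already closed; the buffer lives on independently
    return StringIO.StringIO(data)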

Example 85

Project: python-ilorest-library Source File: v1.py
    def _rest_request(self, path='', method="GET", args=None, body=None,
                      headers=None, optionalpassword=None, providerheader=None):
        """Rest request for blob store client

        :param path: path within tree
        :type path: str
        :param method: method to be implemented
        :type method: str
        :param args: the arguments for method
        :type args: dict
        :param body: body payload for the rest call
        :type body: dict
        :param headers: provide additional headers
        :type headers: dict
        :param optionalpassword: provide password for authentication
        :type optionalpassword: str
        :param providerheader: provider id for the header
        :type providerheader: str
        :return: returns a RestResponse object

        """
        headers = self._get_req_headers(headers, providerheader, \
                                                            optionalpassword)

        if not self.is_redfish and self.default_prefix in path and \
                                                                path[-1] == '/':
            path = path[0:-1]
        elif self.is_redfish and self.default_prefix in path and \
                                                                path[-1] != '/':
            #TODO: Fix back
            path = path# + '/'
        else:
            pass

        reqpath = path.replace('//', '/')

        if body is not None:
            if isinstance(body, dict) or isinstance(body, list):
                headers['Content-Type'] = u'application/json'
                body = json.dumps(body)
            else:
                headers['Content-Type'] = u'application/x-www-form-urlencoded'
                body = urllib.urlencode(body)

            if method == 'PUT':
                resp = self._rest_request(path=path)

                try:
                    if resp.getheader('content-encoding') == 'gzip':
                        buf = StringIO()
                        gfile = gzip.GzipFile(mode='wb', fileobj=buf)

                        try:
                            gfile.write(str(body))
                        finally:
                            gfile.close()

                        compresseddata = buf.getvalue()
                        if compresseddata:
                            data = bytearray()
                            data.extend(buffer(compresseddata))
                            body = data
                except BaseException as excp:
                    LOGGER.error('Error occurred while compressing body: %s', excp)
                    raise

            headers['Content-Length'] = len(body)

        if args:
            if method == 'GET':
                reqpath += '?' + urllib.urlencode(args)
            elif method == 'PUT' or method == 'POST' or method == 'PATCH':
                headers['Content-Type'] = u'application/x-www-form-urlencoded'
                body = urllib.urlencode(args)

        str1 = '%s %s %s\r\n' % (method, reqpath, \
                                            Blobstore2RestClient._http_vsn_str)

        str1 += 'Host: \r\n'
        str1 += 'Accept-Encoding: identity\r\n'
        for header, value in headers.iteritems():
            str1 += '%s: %s\r\n' % (header, value)

        str1 += '\r\n'

        if body and len(body) > 0:
            if isinstance(body, bytearray):
                str1 = str1.encode("ASCII") + body
            else:
                str1 += body

        bs2 = BlobStore2()
        if not isinstance(str1, bytearray):
            str1 = str1.encode("ASCII")
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            try:
                LOGGER.debug('Blobstore REQUEST: %s\n\tPATH: %s\n\tBODY: %s'% \
                         (method, path, body))
            except:
                LOGGER.debug('Blobstore REQUEST: %s\n\tPATH: %s\n\tBODY: %s'% \
                         (method, path, 'binary body'))                

        inittime = time.clock()
        resp_txt = bs2.rest_immediate(str1)
        endtime = time.clock()

        LOGGER.info("iLO Response Time to %s: %s secs."% \
                                                (path, str(endtime-inittime)))
        #Dummy response to support a bad host response
        if len(resp_txt) == 0:
            resp_txt = "HTTP/1.1 500 Not Found\r\nAllow: " \
            "GET\r\nCache-Control: no-cache\r\nContent-length: " \
            "0\r\nContent-type: text/html\r\nDate: Tues, 1 Apr 2025 " \
            "00:00:01 GMT\r\nServer: " \
            "HP-iLO-Server/1.30\r\nX_HP-CHRP-Service-Version: 1.0.3\r\n\r\n\r\n"

        restreq = RestRequest(reqpath, method=method, body=body)
        rest_response = RisRestResponse(restreq, resp_txt)

        if rest_response.status in range(300, 399) and \
                                                    rest_response.status != 304:
            newloc = rest_response.getheader("location")
            newurl = urlparse2.urlparse(newloc)

            rest_response = self._rest_request(newurl.path, method, args, \
                               body, headers, optionalpassword, providerheader)

        try:
            if rest_response.getheader('content-encoding') == 'gzip':
                compressedfile = StringIO(rest_response.text)
                decompressedfile = gzip.GzipFile(fileobj=compressedfile)
                rest_response.text = decompressedfile.read()
        except StandardError:
            pass
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            headerstr = ''
            for header in rest_response._http_response.msg.headers:
                headerstr += '\t' + header.rstrip() + '\n'
            try:
                LOGGER.debug('Blobstore RESPONSE for %s:\nCode: %s\nHeaders:\n%s'\
                         '\nBody of %s: %s'%\
                         (rest_response.request.path,\
                        str(rest_response._http_response.status)+ ' ' + \
                        rest_response._http_response.reason, \
                        headerstr, rest_response.request.path, rest_response.read))
            except:
                LOGGER.debug('Blobstore RESPONSE for %s:\nCode:%s'% \
                             (rest_response.request.path, rest_response))
        return rest_response
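
The PUT branch above re-compresses the request body when the existing resource was served gzip-encoded, using a GzipFile wrapped around a StringIO buffer. On its own, the compression step looks like this (gzip_body and the sample payload are illustrative):

import gzip
from StringIO import StringIO

def gzip_body(body):
    """Compress a request body in memory before sending it."""
    buf = StringIO()
    gfile = gzip.GzipFile(mode='wb', fileobj=buf)
    try:
        gfile.write(str(body))
    finally:
        gfile.close()
    return buf.getvalue()

compressed = gzip_body('{"key": "value"}')
print(len(compressed))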

Example 86

Project: DIRAC Source File: Test_JobAPI.py
  def test_basicJob( self ):
    self.job.setOwner( 'ownerName' )
    self.job.setOwnerGroup( 'ownerGroup' )
    self.job.setName( 'jobName' )
    self.job.setJobGroup( 'jobGroup' )
    self.job.setExecutable( 'someExe' )
    self.job.setType( 'jobType' )
    self.job.setDestination( 'DIRAC.someSite.ch' )

    xml = self.job._toXML()

    expected = '''<Workflow>
<origin></origin>
<description><![CDATA[]]></description>
<descr_short></descr_short>
<version>0.0</version>
<type></type>
<name>jobName</name>
<Parameter name="JobType" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified type"><value><![CDATA[jobType]]></value></Parameter>
<Parameter name="Priority" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User Job Priority"><value><![CDATA[1]]></value></Parameter>
<Parameter name="JobGroup" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified job group"><value><![CDATA[jobGroup]]></value></Parameter>
<Parameter name="JobName" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified name"><value><![CDATA[jobName]]></value></Parameter>
<Parameter name="Site" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified destination site"><value><![CDATA[DIRAC.someSite.ch]]></value></Parameter>
<Parameter name="Origin" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Origin of client"><value><![CDATA[DIRAC]]></value></Parameter>
<Parameter name="StdOutput" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Standard output file"><value><![CDATA[std.out]]></value></Parameter>
<Parameter name="StdError" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Standard error file"><value><![CDATA[std.err]]></value></Parameter>
<Parameter name="InputData" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Default null input data value"><value><![CDATA[]]></value></Parameter>
<Parameter name="LogLevel" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Job Logging Level"><value><![CDATA[info]]></value></Parameter>
<Parameter name="ParametricInputData" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Default null parametric input data value"><value><![CDATA[]]></value></Parameter>
<Parameter name="ParametricInputSandbox" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Default null parametric input sandbox value"><value><![CDATA[]]></value></Parameter>
<Parameter name="ParametricParameters" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Default null parametric input parameters value"><value><![CDATA[]]></value></Parameter>
<Parameter name="Owner" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified ID"><value><![CDATA[ownerName]]></value></Parameter>
<Parameter name="OwnerGroup" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified owner group."><value><![CDATA[ownerGroup]]></value></Parameter>
<ModuleDefinition>
<body><![CDATA[
from DIRAC.Workflow.Modules.Script import Script
]]></body>
<origin></origin>
<description><![CDATA[ The Script class provides a simple way for users to specify an executable
    or file to run (and is also a simple example of a workflow module).
]]></description>
<descr_short></descr_short>
<required></required>
<version>0.0</version>
<type>Script</type>
</ModuleDefinition>
<StepDefinition>
<origin></origin>
<version>0.0</version>
<type>ScriptStep1</type>
<description><![CDATA[]]></description>
<descr_short></descr_short>
<Parameter name="executable" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Executable Script"><value><![CDATA[]]></value></Parameter>
<Parameter name="arguments" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Arguments for executable Script"><value><![CDATA[]]></value></Parameter>
<Parameter name="applicationLog" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Log file name"><value><![CDATA[]]></value></Parameter>
<ModuleInstance>
<type>Script</type>
<name>Script</name>
<descr_short></descr_short>
</ModuleInstance>
</StepDefinition>
<StepInstance>
<type>ScriptStep1</type>
<name>RunScriptStep1</name>
<descr_short></descr_short>
<Parameter name="executable" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Executable Script"><value><![CDATA[someExe]]></value></Parameter>
<Parameter name="arguments" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Arguments for executable Script"><value><![CDATA[]]></value></Parameter>
<Parameter name="applicationLog" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Log file name"><value><![CDATA[Script1_CodeOutput.log]]></value></Parameter>
</StepInstance>
</Workflow>
'''

    expected = """<Workflow>
<origin></origin>
<description><![CDATA[]]></description>
<descr_short></descr_short>
<version>0.0</version>
<type></type>
<name>jobName</name>
<Parameter name="JobType" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified type"><value><![CDATA[jobType]]></value></Parameter>
<Parameter name="Priority" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User Job Priority"><value><![CDATA[1]]></value></Parameter>
<Parameter name="JobGroup" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified job group"><value><![CDATA[jobGroup]]></value></Parameter>
<Parameter name="JobName" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified name"><value><![CDATA[jobName]]></value></Parameter>
<Parameter name="Site" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified destination site"><value><![CDATA[DIRAC.someSite.ch]]></value></Parameter>
<Parameter name="Origin" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Origin of client"><value><![CDATA[DIRAC]]></value></Parameter>
<Parameter name="StdOutput" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Standard output file"><value><![CDATA[std.out]]></value></Parameter>
<Parameter name="StdError" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Standard error file"><value><![CDATA[std.err]]></value></Parameter>
<Parameter name="InputData" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Default null input data value"><value><![CDATA[]]></value></Parameter>
<Parameter name="LogLevel" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="Job Logging Level"><value><![CDATA[info]]></value></Parameter>
<Parameter name="arguments" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Arguments to executable Step"><value><![CDATA[]]></value></Parameter>
<Parameter name="ParametricInputData" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Default null parametric input data value"><value><![CDATA[]]></value></Parameter>
<Parameter name="ParametricInputSandbox" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Default null parametric input sandbox value"><value><![CDATA[]]></value></Parameter>
<Parameter name="Owner" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified ID"><value><![CDATA[ownerName]]></value></Parameter>
<Parameter name="OwnerGroup" type="JDL" linked_module="" linked_parameter="" in="True" out="False" description="User specified owner group."><value><![CDATA[ownerGroup]]></value></Parameter>
<ModuleDefinition>
<body><![CDATA[
from DIRAC.Workflow.Modules.Script import Script
]]></body>
<origin></origin>
<description><![CDATA[ The Script class provides a simple way for users to specify an executable
    or file to run (and is also a simple example of a workflow module).
]]></description>
<descr_short></descr_short>
<required></required>
<version>0.0</version>
<type>Script</type>
</ModuleDefinition>
<StepDefinition>
<origin></origin>
<version>0.0</version>
<type>ScriptStep1</type>
<description><![CDATA[]]></description>
<descr_short></descr_short>
<Parameter name="executable" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Executable Script"><value><![CDATA[]]></value></Parameter>
<Parameter name="arguments" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Arguments for executable Script"><value><![CDATA[]]></value></Parameter>
<Parameter name="applicationLog" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Log file name"><value><![CDATA[]]></value></Parameter>
<ModuleInstance>
<type>Script</type>
<name>Script</name>
<descr_short></descr_short>
</ModuleInstance>
</StepDefinition>
<StepInstance>
<type>ScriptStep1</type>
<name>RunScriptStep1</name>
<descr_short></descr_short>
<Parameter name="executable" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Executable Script"><value><![CDATA[someExe]]></value></Parameter>
<Parameter name="arguments" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Arguments for executable Script"><value><![CDATA[]]></value></Parameter>
<Parameter name="applicationLog" type="string" linked_module="" linked_parameter="" in="True" out="False" description="Log file name"><value><![CDATA[Script1_CodeOutput.log]]></value></Parameter>
</StepInstance>
</Workflow>
"""

    self.assertEqual( xml, expected )

    self.job._toJDL( jobDescriptionObject = StringIO.StringIO( self.job._toXML() ) )
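
The last line of the test is the StringIO-relevant part: _toJDL() expects a readable job description object, so the XML string from _toXML() is wrapped in StringIO.StringIO instead of being written to disk. In isolation (the XML string is a stand-in for job._toXML()):

import StringIO

xml = '<Workflow><name>jobName</name></Workflow>'   # stand-in for job._toXML()
job_description = StringIO.StringIO(xml)
print(job_description.read())   # any API expecting a file-like object can consume this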

Example 87

Project: hydroshare Source File: receivers.py
@receiver(pre_add_files_to_resource, sender=NetcdfResource)
def netcdf_pre_add_files_to_resource(sender, **kwargs):
    nc_res = kwargs['resource']
    files = kwargs['files']
    validate_files_dict = kwargs['validate_files']
    fed_res_fnames = kwargs['fed_res_file_names']

    if len(files) > 1:
        # file number validation
        validate_files_dict['are_files_valid'] = False
        validate_files_dict['message'] = 'Only one file can be uploaded.'

    file_selected = False
    in_file_name = ''
    if files:
        file_selected = True
        in_file_name = files[0].file.name
    elif fed_res_fnames:
        ref_tmpfiles = utils.get_fed_zone_files(fed_res_fnames)
        if ref_tmpfiles:
            in_file_name = ref_tmpfiles[0]
            file_selected = True

    if file_selected and in_file_name:
        # file type validation and existing metadata update and create new ncdump text file
        nc_dataset = nc_utils.get_nc_dataset(in_file_name)
        if isinstance(nc_dataset, netCDF4.Dataset):
            # delete all existing resource files and metadata related
            for f in ResourceFile.objects.filter(object_id=nc_res.id):
                delete_resource_file_only(nc_res, f)

            # update resource modification info
            user = kwargs['user']
            utils.resource_modified(nc_res, user)

            # extract metadata
            try:
                res_dublin_core_meta = nc_meta.get_dublin_core_meta(nc_dataset)
            except Exception:
                res_dublin_core_meta = {}

            try:
                res_type_specific_meta = nc_meta.get_type_specific_meta(nc_dataset)
            except Exception:
                res_type_specific_meta = {}

            # update title info
            if res_dublin_core_meta.get('title'):
                if nc_res.metadata.title:
                    nc_res.metadata.title.delete()
                nc_res.metadata.create_element('title', value=res_dublin_core_meta['title'])

            # update description info
            if res_dublin_core_meta.get('description'):
                if nc_res.metadata.description:
                    nc_res.metadata.description.delete()
                nc_res.metadata.create_element('description', abstract=res_dublin_core_meta.get('description'))

            # update creator info
            if res_dublin_core_meta.get('creator_name'):
                name = res_dublin_core_meta.get('creator_name')
                email = res_dublin_core_meta.get('creator_email', '')
                url = res_dublin_core_meta.get('creator_url', '')
                arguments = dict(name=name, email=email, homepage=url)
                creator = nc_res.metadata.creators.all().filter(name=name).first()
                if creator:
                    order = creator.order
                    if order != 1:
                        creator.delete()
                        arguments['order'] = order
                        nc_res.metadata.create_element('creator', **arguments)
                else:
                    nc_res.metadata.create_element('creator', **arguments)

            # update contributor info
            if res_dublin_core_meta.get('contributor_name'):
                name_list = res_dublin_core_meta['contributor_name'].split(',')
                existing_contributor_names = [contributor.name for contributor in nc_res.metadata.contributors.all()]
                for name in name_list:
                    if name not in existing_contributor_names:
                        nc_res.metadata.create_element('contributor', name=name)

            # update subject info
            if res_dublin_core_meta.get('subject'):
                keywords = res_dublin_core_meta['subject'].split(',')
                existing_keywords = [subject.value for subject in nc_res.metadata.subjects.all()]
                for keyword in keywords:
                    if keyword not in existing_keywords:
                        nc_res.metadata.create_element('subject', value=keyword)

            # update source
            if res_dublin_core_meta.get('source'):
                for source in nc_res.metadata.sources.all():
                    source.delete()
                nc_res.metadata.create_element('source', derived_from=res_dublin_core_meta.get('source'))

            # update license element:
            if res_dublin_core_meta.get('rights'):
                raw_info = res_dublin_core_meta.get('rights')
                b = re.search("(?P<url>https?://[^\s]+)", raw_info)
                url = b.group('url') if b else ''
                statement = raw_info.replace(url, '') if url else raw_info
                if nc_res.metadata.rights:
                    nc_res.metadata.rights.delete()
                nc_res.metadata.create_element('rights', statement=statement, url=url)

            # update relation
            if res_dublin_core_meta.get('references'):
                for cite in nc_res.metadata.relations.all().filter(type='cites'):
                    cite.delete()
                nc_res.metadata.create_element('relation', type='cites', value=res_dublin_core_meta['references'])

            # update box info
            nc_res.metadata.coverages.all().delete()
            if res_dublin_core_meta.get('box'):
                nc_res.metadata.create_element('coverage', type='box', value=res_dublin_core_meta['box'])

            # update period info
            if res_dublin_core_meta.get('period'):
                nc_res.metadata.create_element('coverage', type='period', value=res_dublin_core_meta['period'])

            # update variable info
            nc_res.metadata.variables.all().delete()
            for var_info in res_type_specific_meta.values():
                nc_res.metadata.create_element('variable',
                                               name=var_info['name'],
                                               unit=var_info['unit'],
                                               type=var_info['type'],
                                               shape=var_info['shape'],
                                               missing_value=var_info['missing_value'],
                                               descriptive_name=var_info['descriptive_name'],
                                               method=var_info['method'])

            # update the original spatial coverage meta
            nc_res.metadata.ori_coverage.all().delete()
            if res_dublin_core_meta.get('original-box'):
                if res_dublin_core_meta.get('projection-info'):
                    nc_res.metadata.create_element('originalcoverage',
                                                    value=res_dublin_core_meta['original-box'],
                                                    projection_string_type=res_dublin_core_meta['projection-info']['type'],
                                                    projection_string_text=res_dublin_core_meta['projection-info']['text'])
                else:
                    nc_res.metadata.create_element('originalcoverage', value=res_dublin_core_meta['original-box'])

            # create the ncdump text file
            if nc_dump.get_nc_dump_string_by_ncdump(in_file_name):
                dump_str = nc_dump.get_nc_dump_string_by_ncdump(in_file_name)
            else:
                dump_str = nc_dump.get_nc_dump_string(in_file_name)

            if dump_str:
                # refine dump_str first line
                nc_file_name = files[0].name[:-3]
                first_line = list('netcdf {0} '.format(nc_file_name))
                first_line_index = dump_str.index('{')
                dump_str_list = first_line + list(dump_str)[first_line_index:]
                dump_str = "".join(dump_str_list)

                # write dump_str to temporary file
                io = StringIO.StringIO()
                io.write(dump_str)
                dump_file_name = nc_file_name + '_header_info.txt'
                dump_file = InMemoryUploadedFile(io, None, dump_file_name, 'text', io.len, None)
                files.append(dump_file)

        else:
            validate_files_dict['are_files_valid'] = False
            validate_files_dict['message'] = 'Please check if the uploaded file is in valid NetCDF format.'

        if fed_res_fnames and in_file_name:
            shutil.rmtree(os.path.dirname(in_file_name))

Example 88

Project: powerstrip Source File: powerstrip.py
    def render(self, request, reactor=reactor):
        # We are processing a leaf request.
        # Get the original request body from the client.
        skipPreHooks = False
        if request.requestHeaders.getRawHeaders('content-type') == ["application/json"]:
            originalRequestBody = request.content.read()
            request.content.seek(0) # hee hee
        elif request.requestHeaders.getRawHeaders('content-type') == ["application/tar"]:
            # We can't JSON encode binary data, so don't even try.
            skipPreHooks = True
            originalRequestBody = None
        else:
            originalRequestBody = None
        preHooks = []
        postHooks = []
        d = defer.succeed(None)
        for endpoint in self.parser.match_endpoint(request.method, request.uri.split("?")[0]):
            # It's possible for a request to match multiple endpoint
            # definitions.  Order of matched endpoint is not defined in
            # that case.
            adapters = self.config.endpoint(endpoint)
            preHooks.extend(adapters.pre)
            postHooks.extend(adapters.post)
        def callPreHook(result, hookURL):
            if result is None:
                newRequestBody = originalRequestBody
            else:
                newRequestBody = result["ModifiedClientRequest"]["Body"]
            return self.client.post(hookURL, json.dumps({
                        "PowerstripProtocolVersion": 1,
                        "Type": "pre-hook",
                        "ClientRequest": {
                            "Method": request.method,
                            "Request": request.uri,
                            "Body": newRequestBody,
                        }
                    }), headers={'Content-Type': ['application/json']})
        if not skipPreHooks:
            for preHook in preHooks:
                hookURL = self.config.adapter_uri(preHook)
                d.addCallback(callPreHook, hookURL=hookURL)
                d.addCallback(treq.json_content)
        def doneAllPrehooks(result):
            # Finally pass through the request to actual Docker.  For now we
            # mutate request in-place in such a way that ReverseProxyResource
            # understands it.
            if result is not None:
                requestBody = b""
                bodyFromAdapter = result["ModifiedClientRequest"]["Body"]
                if bodyFromAdapter is not None:
                    requestBody = bodyFromAdapter.encode("utf-8")
                request.content = StringIO.StringIO(requestBody)
                request.requestHeaders.setRawHeaders(b"content-length",
                        [str(len(requestBody))])
            ###########################
            # The following code is copied from t.w.proxy.ReverseProxy so that
            # clientFactory reference can be kept.
            if not self.socket:
                if self.port == 80:
                    host = self.host
                else:
                    host = "%s:%d" % (self.host, self.port)
                request.requestHeaders.setRawHeaders(b"host", [host])
            request.content.seek(0, 0)
            qs = urlparse.urlparse(request.uri)[4]
            if qs:
                rest = self.path + '?' + qs
            else:
                rest = self.path
            allRequestHeaders = request.getAllHeaders()
            if allRequestHeaders.get("transfer-encoding") == "chunked":
                del allRequestHeaders["transfer-encoding"]
            # XXX Streaming the contents of the request body into memory could
            # cause OOM issues for large build contexts POSTed through
            # powerstrip. See https://github.com/ClusterHQ/powerstrip/issues/51
            body = request.content.read()
            allRequestHeaders["content-length"] = str(len(body))
            clientFactory = self.proxyClientFactoryClass(
                request.method, rest, request.clientproto,
                allRequestHeaders, body, request)
            ###########################
            if self.socket:
                self.reactor.connectUNIX(self.socket, clientFactory)
            else:
                self.reactor.connectTCP(self.host, self.port, clientFactory)
            d = defer.Deferred()
            clientFactory.onCreate(d)
            return d
        d.addCallback(doneAllPrehooks)
        def inspect(client):
            # If there are no post-hooks, allow the response to be streamed
            # back to the client, rather than buffered.
            d = defer.Deferred()
            client.registerListener(d)
            if not postHooks:
                client.setStreamingMode(True)
            return d
        d.addCallback(inspect)
        def callPostHook(result, hookURL):
            serverResponse = result["ModifiedServerResponse"]
            return self.client.post(hookURL, json.dumps({
                        # TODO Write tests for the information provided to the adapter.
                        "PowerstripProtocolVersion": 1,
                        "Type": "post-hook",
                        "ClientRequest": {
                            "Method": request.method,
                            "Request": request.uri,
                            "Body": originalRequestBody,
                            },
                        "ServerResponse": {
                            "ContentType": serverResponse["ContentType"],
                            "Body": serverResponse["Body"],
                            "Code": serverResponse["Code"],
                        },
                    }), headers={'Content-Type': ['application/json']})
        # XXX Need to skip post-hooks for tar archives from e.g. docker export.
        # https://github.com/ClusterHQ/powerstrip/issues/52
        for postHook in postHooks:
            hookURL = self.config.adapter_uri(postHook)
            d.addCallback(callPostHook, hookURL=hookURL)
            d.addCallback(treq.json_content)
        def sendFinalResponseToClient(result):            
            resultBody = result["ModifiedServerResponse"]["Body"].encode("utf-8")
            # Update the Content-Length, since we're modifying the request object in-place.
            request.responseHeaders.setRawHeaders(
                b"content-length",
                [str(len(resultBody))]
            )
            # Write the final response to the client.
            request.write(resultBody)
            request.finish()
        d.addCallback(sendFinalResponseToClient)
        def squashNoPostHooks(failure):
            failure.trap(NoPostHooks)
        d.addErrback(squashNoPostHooks)
        d.addErrback(log.err, 'while running chain')
        return NOT_DONE_YET
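
Example 88 uses StringIO.StringIO in two places: the client's JSON body is read and the buffer re-seeked so it can be replayed to each pre-hook, and a body rewritten by an adapter is wrapped in a fresh StringIO.StringIO so the copied ReverseProxy code can read it exactly like the original request content. A stripped-down illustration of that second step, without the Twisted request object (the JSON payloads are made up):

import StringIO

modified_body = u'{"Cmd": ["echo", "modified by adapter"]}'.encode("utf-8")

content = StringIO.StringIO(modified_body)   # stands in for request.content
content.seek(0, 0)                           # rewind before the proxy reads it
body = content.read()
content_length = str(len(body))              # value for the content-length header
print body
print content_length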

Example 89

Project: topic-explorer Source File: server.py
    def _setup_routes(self, **kwargs):
        @self.route('/<k:int>/doc_topics/<doc_id>')
        @_set_acao_headers
        def doc_topic_csv(k, doc_id):
            response.content_type = 'text/csv; charset=UTF8'

            doc_id = unquote(doc_id)

            data = self.v[k].doc_topics(doc_id)

            output = StringIO()
            writer = csv.writer(output)
            writer.writerow(['topic', 'prob'])
            writer.writerows([(t, "%6f" % p) for t, p in data])

            return output.getvalue()

        @self.route('/<k:int>/docs/<doc_id>')
        @_set_acao_headers
        def doc_csv(k, doc_id, threshold=0.2):
            response.content_type = 'text/csv; charset=UTF8'

            doc_id = unquote(doc_id)

            data = self.v[k].dist_doc_doc(doc_id)

            output = StringIO()
            writer = csv.writer(output)
            writer.writerow(['doc', 'prob'])
            writer.writerows([(d, "%6f" % p) for d, p in data if p > threshold])

            return output.getvalue()

        @self.route('/<k:int>/topics/<topic_no:int>.json')
        @_set_acao_headers
        def topic_json(k, topic_no, N=40):
            response.content_type = 'application/json; charset=UTF8'
            try:
                N = int(request.query.n)
            except:
                pass

            if N > 0:
                data = self.v[k].dist_top_doc([topic_no])[:N]
            else:
                data = self.v[k].dist_top_doc([topic_no])[N:]
                data = reversed(data)

            docs = [doc for doc, prob in data]
            doc_topics_mat = self.v[k].doc_topics(docs)
            docs = self.get_docs(docs, id_as_key=True)

            js = []
            for doc_prob, topics in zip(data, doc_topics_mat):
                doc, prob = doc_prob
                struct = docs[doc]
                struct.update({'prob': 1 - prob,
                               'topics': dict([(str(t), float(p)) for t, p in topics])})
                js.append(struct)

            return json.dumps(js)

        @self.route('/<k:int>/docs_topics/<doc_id:path>.json')
        @_set_acao_headers
        def doc_topics(k, doc_id, N=40):
            try:
                N = int(request.query.n)
            except:
                pass

            doc_id = unquote(doc_id)

            response.content_type = 'application/json; charset=UTF8'

            if N > 0:
                data = self.v[k].dist_doc_doc(doc_id)[:N]
            else:
                data = self.v[k].dist_doc_doc(doc_id)[N:]
                data = reversed(data)

            docs = [doc for doc, prob in data]
            doc_topics_mat = self.v[k].doc_topics(docs)
            docs = self.get_docs(docs, id_as_key=True)

            js = []
            for doc_prob, topics in zip(data, doc_topics_mat):
                doc, prob = doc_prob
                struct = docs[doc]
                struct.update({'prob': 1 - prob,
                               'topics': dict([(str(t), float(p)) for t, p in topics])})
                js.append(struct)

            return json.dumps(js)

        @self.route('/<k:int>/word_docs.json')
        @_set_acao_headers
        def word_docs(k, N=40):
            try:
                N = int(request.query.n)
            except:
                pass
            try:
                query = request.query.q.lower().split('|')
            except:
                raise Exception('Must specify a query')

            response.content_type = 'application/json; charset=UTF8'

            query = [word for word in query if word in self.c.words]

            # abort if there are no terms in the query
            if not query:
                response.status = 400  # Bad Request
                return "Search terms not in model"

            topics = self.v[k].dist_word_top(query, show_topics=False)
            data = self.v[k].dist_top_doc(topics['i'],
                                          weights=(topics['value'].max() - topics['value']))

            if N > 0:
                data = data[:N]
            else:
                data = data[N:]
                data = reversed(data)

            docs = [doc for doc, prob in data]
            doc_topics_mat = self.v[k].doc_topics(docs)
            docs = self.get_docs(docs, id_as_key=True)

            js = []
            for doc_prob, topics in zip(data, doc_topics_mat):
                doc, prob = doc_prob
                struct = docs[doc]
                struct.update({'prob': 1 - prob,
                               'topics': dict([(str(t), p) for t, p in topics])})
                js.append(struct)

            return json.dumps(js)

        @self.route('/<k:int>/topics.json')
        @_set_acao_headers
        def topics(k):
            from topicexplorer.lib.color import rgb2hex

            response.content_type = 'application/json; charset=UTF8'
            response.set_header('Expires', _cache_date())
            response.set_header('Cache-Control', 'max-age=86400')
            

            # populate partial jsd values
            data = self.v[k].topic_jsds()

            js = {}
            for rank, topic_H in enumerate(data):
                topic, H = topic_H
                if math.isnan(H): 
                    H = 0.0
                js[str(topic)] = {
                    "H": float(H),
                    "color": rgb2hex(self.colors[k][topic])
                }

            # populate word values
            data = self.v[k].topics()

            wordmax = 10  # for alphabetic languages
            if kwargs.get('lang', None) == 'cn':
                wordmax = 25  # for ideographic languages

            for i, topic in enumerate(data):
                js[str(i)].update({'words': dict([(unicode(w), float(p))
                                                  for w, p in topic[:wordmax]])})

            return json.dumps(js)

        @self.route('/topics.json')
        @_set_acao_headers
        def word_topic_distance():
            import numpy as np
            response.content_type = 'application/json; charset=UTF8'

            # parse query
            try:
                if '|' in request.query.q:
                    query = request.query.q.lower().split('|')
                else:
                    query = request.query.q.lower().split(' ')
            except:
                raise Exception('Must specify a query')

            query = [word for word in query if word in self.c.words]

            # abort if there are no terms in the query
            if not query:
                response.status = 400  # Bad Request
                return "Search terms not in model"


            # calculate distances
            distances = dict()
            for k, viewer in self.v.iteritems():
                d = viewer.dist_word_top(query, show_topics=False)
                distances[k] = np.fromiter(
                    ((k, row['i'], row['value']) for row in d),
                    dtype=[('k', '<i8'), ('i', '<i8'), ('value', '<f8')])

            # merge and sort all topics across all models
            merged_similarity = np.hstack(distances.values())
            sorted_topics = merged_similarity[np.argsort(merged_similarity['value'])]

            # return data
            data = [{'k' : t['k'],
                     't' : t['i'],
                     'distance' : t['value'] } for t in sorted_topics]
            return json.dumps(data)


        @self.route('/topics')
        @_set_acao_headers
        def view_clusters():
            with open(resource_filename(__name__, '../www/master.mustache.html'),
                      encoding='utf-8') as tmpl_file:
                template = tmpl_file.read()

            tmpl_params = {'body' : _render_template('cluster.mustache.html'),
                           'topic_range': self.topic_range}
            return self.renderer.render(template, tmpl_params)


        @self.route('/docs.json')
        @_set_acao_headers
        def docs(docs=None, q=None):
            response.content_type = 'application/json; charset=UTF8'
            response.set_header('Expires', _cache_date())

            try:
                if request.query.q:
                    q = unquote(request.query.q)
            except:
                pass

            try:
                if request.query.id:
                    docs = [unquote(request.query.id)]
            except:
                pass

            try:
                response.set_header('Expires', 0)
                response.set_header('Pragma', 'no-cache')
                response.set_header('Cache-Control', 'no-cache, no-store, must-revalidate')
                if request.query.random:
                    docs = [random.choice(self.labels)]
            except:
                pass

            js = self.get_docs(docs, query=q)

            return json.dumps(js)

        @self.route('/icons.js')
        def icons():
            with open(resource_filename(__name__, '../www/icons.js')) as icons:
                text = '{0}\n var icons = {1};'\
                    .format(icons.read(), json.dumps(self.icons))
            return text

        def _render_template(page):
            response.set_header('Expires', _cache_date())

            with open(resource_filename(__name__, '../www/' + page),
                      encoding='utf-8') as tmpl_file:
                template = tmpl_file.read()

            tmpl_params = {'corpus_name': kwargs.get('corpus_name', ''),
                           'corpus_link': kwargs.get('corpus_link', ''),
                           'context_type': self.context_type,
                           'topic_range': self.topic_range,
                           'doc_title_format': kwargs.get('doc_title_format', '{0}'),
                           'doc_url_format': kwargs.get('doc_url_format', ''),
                           'home_link': kwargs.get('home_link', '/')}
            return self.renderer.render(template, tmpl_params)

        @self.route('/<k:int>/')
        def index(k):
            with open(resource_filename(__name__, '../www/master.mustache.html'),
                      encoding='utf-8') as tmpl_file:
                template = tmpl_file.read()

            tmpl_params = {'body' : _render_template('bars.mustache.html'),
                           'topic_range': self.topic_range}
            return self.renderer.render(template, tmpl_params)

        @self.route('/cluster.csv')
        @_set_acao_headers
        def cluster_csv(second=False):
            filename = kwargs.get('cluster_path')
            print "Retireving cluster.csv:", filename
            if not filename or not os.path.exists(filename):
                import topicexplorer.train
                filename = topicexplorer.train.cluster(10, self.config_file)
                kwargs['cluster_path'] = filename

            root, filename = os.path.split(filename)
            return static_file(filename, root=root)
        
        @self.route('/description.md')
        @_set_acao_headers
        def description():
            filename = kwargs.get('corpus_desc')
            if not filename:
                response.status = 404
                return "File not found"
            root, filename = os.path.split(filename)
            return static_file(filename, root=root)
        
        @self.route('/')
        @_set_acao_headers
        def cluster():
            with open(resource_filename(__name__, '../www/master.mustache.html'),
                      encoding='utf-8') as tmpl_file:
                template = tmpl_file.read()

            tmpl_params = {'body' : _render_template('splash.mustache.html'),
                           'topic_range': self.topic_range}
            return self.renderer.render(template, tmpl_params)

        @self.route('/<filename:path>')
        @_set_acao_headers
        def send_static(filename):
            return static_file(filename, root=resource_filename(__name__, '../www/'))
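
Each CSV route in Example 89 follows the same recipe: point csv.writer at a StringIO buffer, write the header row and the data rows, then return getvalue() as the text/csv response body. A self-contained sketch of that recipe with made-up topic data:

import csv
from StringIO import StringIO

data = [(0, 0.41), (3, 0.22), (7, 0.05)]

output = StringIO()
writer = csv.writer(output)
writer.writerow(['topic', 'prob'])
writer.writerows([(t, "%6f" % p) for t, p in data])

print output.getvalue()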

Example 90

Project: openerp-7.0 Source File: report_aeroo.py
    def create_aeroo_report(self, cr, uid, ids, data, report_xml, context=None, output='odt'):
        """ Returns an aeroo report generated with aeroolib
        """
        pool = pooler.get_pool(cr.dbname)
        if not context:
            context={}
        context = context.copy()
        if self.name=='report.printscreen.list':
            context['model'] = data['model']
            context['ids'] = ids
        
        print_id = context.get('print_id', False)
        aeroo_print = self.active_prints[print_id] # Aeroo print object
        aeroo_print.subreports = []
        #self.oo_subreports[print_id] = []
        objects = self.getObjects_mod(cr, uid, ids, report_xml.report_type, context) or []
        oo_parser = self.parser(cr, uid, self.name2, context=context)
        oo_parser.localcontext.update(context)
        oo_parser.set_context(objects, data, ids, report_xml.report_type)

        self.set_xml_data_fields(objects, oo_parser) # Get/Set XML

        oo_parser.localcontext['data'] = data
        oo_parser.localcontext['user_lang'] = context.get('lang', False)
        if len(objects)>0:
            oo_parser.localcontext['o'] = objects[0]
        xfunc = ExtraFunctions(cr, uid, report_xml.id, oo_parser.localcontext)
        oo_parser.localcontext.update(xfunc.functions)

        #company_id = objects and 'company_id' in objects[0]._table._columns.keys() and \
        #                        objects[0].company_id and objects[0].company_id.id or False # for object company usage
        company_id = False
        style_io=self.get_styles_file(cr, uid, report_xml, company=company_id, context=context)

        if report_xml.tml_source in ('file', 'database'):
            if not report_xml.report_sxw_content or report_xml.report_sxw_content=='False':
                raise osv.except_osv(_('Error!'), _('No template found!'))
            file_data = base64.decodestring(report_xml.report_sxw_content)
        else:
            file_data = self.get_other_template(cr, uid, data, oo_parser)
        if not file_data and not report_xml.report_sxw_content:
            self.logger("End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time), logging.INFO) # debug mode
            return False, output
        #elif file_data:
        #    template_io = StringIO()
        #    template_io.write(file_data or report_xml.report_sxw_content)
        #    basic = Template(source=template_io, styles=style_io)
        else:
            if report_xml.preload_mode == 'preload' and hasattr(self, 'serializer'):
                serializer = copy.copy(self.serializer)
                serializer.apply_style(style_io)
                template_io = serializer.template
            else:
                template_io = StringIO()
                template_io.write(file_data or base64.decodestring(report_xml.report_sxw_content) )
                serializer = OOSerializer(template_io, oo_styles=style_io)
            try:
                basic = Template(source=template_io, serializer=serializer)
            except Exception, e:
                self._raise_exception(e, print_id)

        #if not file_data:
        #    return False, output

        #basic = Template(source=template_io, serializer=serializer)

        aeroo_ooo = context.get('aeroo_ooo', False)
        oo_parser.localcontext['include_subreport'] = self._subreport(cr, uid, aeroo_print, output='odt', aeroo_ooo=aeroo_ooo, context=context)
        oo_parser.localcontext['include_docuement'] = self._include_docuement(aeroo_ooo, print_id)
        deferred = context.get('deferred_process')
        oo_parser.localcontext['progress_update'] = deferred and deferred.progress_update or (lambda:True)
        ####### Add counter functons to localcontext #######
        oo_parser.localcontext.update({'def_inc':self._def_inc(aeroo_print),
                                      'get_inc':self._get_inc(aeroo_print),
                                      'prev':self._prev(aeroo_print),
                                      'next':self._next(aeroo_print)})

        user_name = pool.get('res.users').browse(cr, uid, uid, {}).name
        model_id = pool.get('ir.model').search(cr, uid, [('model','=',context.get('active_model', data['model']) or data['model'])])[0]
        model_name = pool.get('ir.model').browse(cr, uid, model_id).name

        #basic = Template(source=None, filepath=odt_path)

        basic.Serializer.add_title(model_name)
        basic.Serializer.add_creation_user(user_name)
        module_info = load_information_from_description_file('report_aeroo')
        version = module_info['version']
        basic.Serializer.add_generator_info('Aeroo Lib/%s Aeroo Reports/%s' % (aeroolib.__version__, version))
        basic.Serializer.add_custom_property('Aeroo Reports %s' % version, 'Generator')
        basic.Serializer.add_custom_property('OpenERP %s' % release.version, 'Software')
        basic.Serializer.add_custom_property(module_info['website'], 'URL')
        basic.Serializer.add_creation_date(time.strftime('%Y-%m-%dT%H:%M:%S'))

        try:
            data = basic.generate(**oo_parser.localcontext).render().getvalue()
        except osv.except_osv, e:
            raise
        except Exception, e:
            self._raise_exception(e, print_id)

        ######### OpenOffice extras #########
        DC = netsvc.Service._services.get('openoffice')
        #if (output!=report_xml.in_format[3:] or self.oo_subreports.get(print_id)):
        if output!=report_xml.in_format[3:] or aeroo_print.subreports:
            if aeroo_ooo and DC:
                try:
                    data = self._generate_doc(DC, data, report_xml, print_id)
                except Exception, e:
                    self.logger(_("OpenOffice.org related error!")+'\n'+str(e), logging.ERROR)
                    if DC._restart_ooo():
                        # We try again
                        try:
                            data = self._generate_doc(DC, data, report_xml, print_id)
                        except Exception, e:
                            self.logger(_("OpenOffice.org related error!")+'\n'+str(e), logging.ERROR)
                            if not report_xml.fallback_false:
                                output=report_xml.in_format[3:]
                    elif not report_xml.fallback_false:
                        output=report_xml.in_format[3:]
                    aeroo_print.subreports = []
            else:
                if report_xml.fallback_false:
                    if not aeroo_ooo:
                        raise osv.except_osv(_('OpenOffice.org related error!'), _('Module "report_aeroo_ooo" not installed.'))
                    elif not DC:
                        raise osv.except_osv(_('OpenOffice.org related error!'), _('Can not connect to OpenOffice.org.'))
                else:
                    self.logger(_("PDF generator temporarily offline, please wait a minute"), logging.WARNING)
                    output=report_xml.in_format[3:]
        elif output in ('pdf', 'doc', 'xls'):
            output=report_xml.in_format[3:]
        #####################################

        if report_xml.content_fname:
            output = report_xml.content_fname
        self.logger("End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time), logging.INFO) # debug mode
        return data, output
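
When no preloaded serializer is available, Example 90 writes the decoded template bytes into a StringIO buffer and hands that buffer to aeroolib's OOSerializer and Template. A reduced sketch of just the buffer-building step; the base64 payload below is a placeholder, and the OOSerializer/Template calls are omitted because they require a real ODT template:

import base64
from StringIO import StringIO

report_sxw_content = base64.encodestring("placeholder template bytes")

template_io = StringIO()
template_io.write(base64.decodestring(report_sxw_content))
template_io.seek(0)
print "%d bytes buffered for the serializer" % len(template_io.getvalue())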

Example 91

Project: LibrERP Source File: report_aeroo.py
    def create_aeroo_report(self, cr, uid, ids, data, report_xml, context=None, output='odt'):
        """ Returns an aeroo report generated with aeroolib
        """
        pool = pooler.get_pool(cr.dbname)
        if not context:
            context = {}
        context = context.copy()
        if self.name == 'report.printscreen.list':
            context['model'] = data['model']
            context['ids'] = ids
        if context.get('report_toprint_id', False):
            ids = [context['report_toprint_id']]

        print_id = context.get('print_id', False)
        aeroo_print = self.active_prints[print_id]  # Aeroo print object
        aeroo_print.subreports = []
        # self.oo_subreports[print_id] = []
        objects = self.getObjects_mod(cr, uid, ids, report_xml.report_type, context) or []
        oo_parser = self.parser(cr, uid, self.name2, context=context)
        oo_parser.localcontext.update(context)
        oo_parser.set_context(objects, data, ids, report_xml.report_type)

        # oo_parser.objects = objects
        self.set_xml_data_fields(objects, oo_parser)  # Get/Set XML

        oo_parser.localcontext['data'] = data
        oo_parser.localcontext['user_lang'] = context.get('lang', False)
        if len(objects) > 0:
            oo_parser.localcontext['o'] = objects[0]
        xfunc = ExtraFunctions(cr, uid, report_xml.id, oo_parser.localcontext)
        oo_parser.localcontext.update(xfunc.functions)

        # company_id = objects and 'company_id' in objects[0]._table._columns.keys() and \
        #                        objects[0].company_id and objects[0].company_id.id or False # for object company usage
        company_id = False
        style_io = self.get_styles_file(cr, uid, report_xml, company=company_id, context=context)

        if report_xml.tml_source in ('file', 'database'):
            if not report_xml.report_sxw_content or report_xml.report_sxw_content == 'False':
                raise osv.except_osv(_('Error!'), _('No template found!'))
            file_data = base64.decodestring(report_xml.report_sxw_content)
        else:
            file_data = self.get_other_template(cr, uid, data, oo_parser)
        if not file_data and not report_xml.report_sxw_content:
            _logger.info(
                "End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time))  # debug mode
            return False, output
        # elif file_data:
        #    template_io = StringIO()
        #    template_io.write(file_data or report_xml.report_sxw_content)
        #    basic = Template(source=template_io, styles=style_io)
        else:
            if report_xml.preload_mode == 'preload' and hasattr(self, 'serializer'):
                serializer = copy.copy(self.serializer)
                serializer.apply_style(style_io)
                template_io = serializer.template
            else:
                template_io = StringIO()
                template_io.write(file_data or base64.decodestring(report_xml.report_sxw_content))
                serializer = OOSerializer(template_io, oo_styles=style_io)
            try:
                basic = Template(source=template_io, serializer=serializer)
            except Exception, e:
                self._raise_exception(e, print_id)

        # if not file_data:
        #    return False, output

        # basic = Template(source=template_io, serializer=serializer)

        aeroo_ooo = context.get('aeroo_ooo', False)
        oo_parser.localcontext['include_subreport'] = self._subreport(cr, uid, aeroo_print, output='odt',
                                                                      aeroo_ooo=aeroo_ooo, context=context)
        oo_parser.localcontext['include_docuement'] = self._include_docuement(aeroo_ooo, print_id)
        deferred = context.get('deferred_process')
        oo_parser.localcontext['progress_update'] = deferred and deferred.progress_update or (lambda: True)
        ####### Add counter functons to localcontext #######
        oo_parser.localcontext.update({'def_inc': self._def_inc(aeroo_print),
                                       'get_inc': self._get_inc(aeroo_print),
                                       'prev': self._prev(aeroo_print),
                                       'next': self._next(aeroo_print)})

        user_name = pool.get('res.users').browse(cr, uid, uid, {}, context).name
        model_ids = pool.get('ir.model').search(cr, uid, [
            ('model', '=', context.get('active_model', data['model']) or data['model'])], context=context)
        if model_ids:
            model_id = model_ids[0]
            model_name = pool.get('ir.model').browse(cr, uid, model_id, context).name
        else:
            model_name = 'ftp'

        # basic = Template(source=None, filepath=odt_path)

        basic.Serializer.add_title(model_name)
        basic.Serializer.add_creation_user(user_name)
        module_info = load_information_from_description_file('report_aeroo')
        version = module_info['version']
        basic.Serializer.add_generator_info('Aeroo Lib/%s Aeroo Reports/%s' % (aeroolib.__version__, version))
        basic.Serializer.add_custom_property('Aeroo Reports %s' % version, 'Generator')
        basic.Serializer.add_custom_property('OpenERP %s' % release.version, 'Software')
        basic.Serializer.add_custom_property(module_info['website'], 'URL')
        basic.Serializer.add_creation_date(time.strftime('%Y-%m-%dT%H:%M:%S'))

        try:
            data = basic.generate(**oo_parser.localcontext).render().getvalue()
        except osv.except_osv, e:
            raise
        except Exception, e:
            self._raise_exception(e, print_id)

        ######### OpenOffice extras #########
        DC = netsvc.Service._services.get('openoffice')
        # if (output!=report_xml.in_format[3:] or self.oo_subreports.get(print_id)):
        if output != report_xml.in_format[3:] or aeroo_print.subreports:
            if aeroo_ooo and DC:
                try:
                    data = self._generate_doc(DC, data, report_xml, print_id)
                except Exception, e:
                    _logger.error(_("OpenOffice.org related error!") + '\n' + str(e))
                    if DC._restart_ooo():
                        # We try again
                        try:
                            data = self._generate_doc(DC, data, report_xml, print_id)
                        except Exception, e:
                            _logger.error(_("OpenOffice.org related error!") + '\n' + str(e))
                            if not report_xml.fallback_false:
                                output = report_xml.in_format[3:]
                    elif not report_xml.fallback_false:
                        output = report_xml.in_format[3:]
                    aeroo_print.subreports = []
            else:
                if report_xml.fallback_false:
                    if not aeroo_ooo:
                        raise osv.except_osv(_('OpenOffice.org related error!'),
                                             _('Module "report_aeroo_ooo" not installed.'))
                    elif not DC:
                        raise osv.except_osv(_('OpenOffice.org related error!'),
                                             _('Can not connect to OpenOffice.org.'))
                else:
                    _logger.warning(_("PDF generator temporarily offline, please wait a minute"))
                    output = report_xml.in_format[3:]
        elif output in ('pdf', 'doc', 'xls'):
            output = report_xml.in_format[3:]
        #####################################

        if report_xml.content_fname:
            output = report_xml.content_fname
        _logger.info(
            "End process %s (%s), elapsed time: %s" % (self.name, self.table, time.time() - aeroo_print.start_time))  # debug mode
        return data, output

Example 92

Project: canvas Source File: compress.py
    def compress(self, log=None, **options):
        """
        Searches templates containing 'compress' nodes and compresses them
        "offline" -- outside of the request/response cycle.

        The result is cached with a cache-key derived from the content of the
        compress nodes (not the content of the possibly linked files!).
        """
        extensions = options.get('extensions')
        extensions = self.handle_extensions(extensions or ['html'])
        verbosity = int(options.get("verbosity", 0))
        if not log:
            log = StringIO()
        if not settings.TEMPLATE_LOADERS:
            raise OfflineGenerationError("No template loaders defined. You "
                                         "must set TEMPLATE_LOADERS in your "
                                         "settings.")
        paths = set()
        for loader in self.get_loaders():
            try:
                module = import_module(loader.__module__)
                get_template_sources = getattr(module,
                    'get_template_sources', None)
                if get_template_sources is None:
                    get_template_sources = loader.get_template_sources
                paths.update(list(get_template_sources('')))
            except (ImportError, AttributeError):
                # Yeah, this didn't work out so well, let's move on
                pass
        if not paths:
            raise OfflineGenerationError("No template paths found. None of "
                                         "the configured template loaders "
                                         "provided template paths. See "
                                         "http://django.me/template-loaders "
                                         "for more information on template "
                                         "loaders.")
        if verbosity > 1:
            log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
        templates = set()
        for path in paths:
            for root, dirs, files in walk(path,
                    followlinks=options.get('followlinks', False)):
                templates.update(os.path.join(root, name)
                    for name in files if not name.startswith('.') and
                        any(fnmatch(name, "*%s" % glob) for glob in extensions))
        if not templates:
            raise OfflineGenerationError("No templates found. Make sure your "
                                         "TEMPLATE_LOADERS and TEMPLATE_DIRS "
                                         "settings are correct.")
        if verbosity > 1:
            log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")

        compressor_nodes = SortedDict()
        for template_name in templates:
            try:
                template_file = open(template_name)
                try:
                    template = Template(template_file.read().decode(
                                        settings.FILE_CHARSET))
                finally:
                    template_file.close()
            except IOError:  # unreadable file -> ignore
                if verbosity > 0:
                    log.write("Unreadable template at: %s\n" % template_name)
                continue
            except TemplateSyntaxError:  # broken template -> try jinja -> ignore if still broken
                try:
                    template_file = open(template_name)
                    template = jinja_env.parse(template_file.read().decode(settings.FILE_CHARSET))
                    template.is_jinja = True
                    template.name = template_name
                except jinja2.exceptions.TemplateSyntaxError:
                    if verbosity > 0:
                        log.write("Invalid template at: %s\n" % template_name)
                    continue
                finally:
                    template_file.close()
            except UnicodeDecodeError:
                if verbosity > 0:
                    log.write("UnicodeDecodeError while trying to read "
                              "template %s\n" % template_name)
            if getattr(template, 'is_jinja', False):
                nodes = template.find_all(jinja2.nodes.CallBlock)
                for node in nodes:
                    try:
                        compress_node = node.call.node
                        if (compress_node.identifier == 'compressor.contrib.jinja2ext.CompressorExtension'
                                and compress_node.name == '_compress'):
                            template.template_name = template_name
                            compressor_nodes.setdefault(template, []).append(node)
                    except (AttributeError, IndexError):
                        pass
            else:
                nodes = list(self.walk_nodes(template))
                if nodes:
                    template.template_name = template_name
                    compressor_nodes.setdefault(template, []).extend(nodes)

        if not compressor_nodes:
            raise OfflineGenerationError(
                "No 'compress' template tags found in templates.")

        if verbosity > 0:
            log.write("Found 'compress' tags in:\n\t" +
                      "\n\t".join((t.template_name for t in compressor_nodes.keys())) + "\n")

        log.write("Compressing... ")
        count = 0
        results = []
        offline_manifest = {}
        for template, nodes in compressor_nodes.iteritems():
            if getattr(template, 'is_jinja', False):
                for node in nodes:
                    context = settings.COMPRESS_OFFLINE_CONTEXT.copy()
                    old_forced = getattr(jinja_env, '_django_compressor_offline_forced', None)
                    jinja_env._django_compressor_offline_forced = True
                    nodelist = node.body
                    key = get_offline_jinja_hexdigest(nodelist)
                    result = render_jinja_node(node, context, jinja_env)
                    if old_forced is not None:
                        jinja_env._django_compressor_offline_forced = old_forced
                    offline_manifest[key] = result
                    results.append(result)
                    count += 1
                continue

            context = Context(settings.COMPRESS_OFFLINE_CONTEXT)
            extra_context = {}
            firstnode = template.nodelist[0]
            if isinstance(firstnode, ExtendsNode):
                # If this template has a ExtendsNode, we apply our patch to
                # generate the necessary context, and then use it for all the
                # nodes in it, just in case (we don't know which nodes were
                # in a block)
                firstnode._old_get_parent = firstnode.get_parent
                firstnode.get_parent = MethodType(patched_get_parent, firstnode)
                extra_context = firstnode.render(context)
                context.render_context = extra_context.render_context
            for node in nodes:
                context.push()
                if extra_context and node._block_name:
                    context['block'] = context.render_context[BLOCK_CONTEXT_KEY].pop(node._block_name)
                    if context['block']:
                        context['block'].context = context
                key = get_offline_hexdigest(node.nodelist)
                result = node.render(context, forced=True)
                offline_manifest[key] = result
                context.pop()
                results.append(result)
                count += 1

        write_offline_manifest(offline_manifest)

        log.write("done\nCompressed %d block(s) from %d template(s).\n" %
                  (count, len(compressor_nodes)))
        return count, results
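
In Example 92 StringIO is the default log sink: when the caller does not supply a log stream, an in-memory buffer collects every progress message, and the caller can read the whole transcript back with getvalue(). A small sketch of that fallback, with the compression work itself reduced to a couple of hard-coded messages:

from StringIO import StringIO

def compress(log=None):
    if not log:
        log = StringIO()
    log.write("Compressing... ")
    log.write("done\nCompressed 3 block(s) from 2 template(s).\n")
    return log

print compress().getvalue()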

Example 93

Project: mythbox Source File: test_policies.py
    def test_limit(self):
        """
        Full test using a custom server limiting number of connections.
        """
        server = Server()
        c1, c2, c3, c4 = [SimpleProtocol() for i in range(4)]
        tServer = policies.ThrottlingFactory(server, 2)
        wrapTServer = WrappingFactory(tServer)
        wrapTServer.deferred = defer.Deferred()

        # Start listening
        p = reactor.listenTCP(0, wrapTServer, interface="127.0.0.1")
        n = p.getHost().port

        def _connect123(results):
            reactor.connectTCP("127.0.0.1", n, SillyFactory(c1))
            c1.dConnected.addCallback(
                lambda r: reactor.connectTCP("127.0.0.1", n, SillyFactory(c2)))
            c2.dConnected.addCallback(
                lambda r: reactor.connectTCP("127.0.0.1", n, SillyFactory(c3)))
            return c3.dDisconnected

        def _check123(results):
            self.assertEquals([c.connected for c in c1, c2, c3], [1, 1, 1])
            self.assertEquals([c.disconnected for c in c1, c2, c3], [0, 0, 1])
            self.assertEquals(len(tServer.protocols.keys()), 2)
            return results

        def _lose1(results):
            # disconnect one protocol and now another should be able to connect
            c1.transport.loseConnection()
            return c1.dDisconnected

        def _connect4(results):
            reactor.connectTCP("127.0.0.1", n, SillyFactory(c4))
            return c4.dConnected

        def _check4(results):
            self.assertEquals(c4.connected, 1)
            self.assertEquals(c4.disconnected, 0)
            return results

        def _cleanup(results):
            for c in c2, c4:
                c.transport.loseConnection()
            return defer.DeferredList([
                defer.maybeDeferred(p.stopListening),
                c2.dDisconnected,
                c4.dDisconnected])

        wrapTServer.deferred.addCallback(_connect123)
        wrapTServer.deferred.addCallback(_check123)
        wrapTServer.deferred.addCallback(_lose1)
        wrapTServer.deferred.addCallback(_connect4)
        wrapTServer.deferred.addCallback(_check4)
        wrapTServer.deferred.addCallback(_cleanup)
        return wrapTServer.deferred


    def test_writeLimit(self):
        """
        Check the writeLimit parameter: write data, and check for the pause
        status.
        """
        server = Server()
        tServer = TestableThrottlingFactory(task.Clock(), server, writeLimit=10)
        port = tServer.buildProtocol(address.IPv4Address('TCP', '127.0.0.1', 0))
        tr = StringTransportWithDisconnection()
        tr.protocol = port
        port.makeConnection(tr)
        port.producer = port.wrappedProtocol

        port.dataReceived("0123456789")
        port.dataReceived("abcdefghij")
        self.assertEquals(tr.value(), "0123456789abcdefghij")
        self.assertEquals(tServer.writtenThisSecond, 20)
        self.assertFalse(port.wrappedProtocol.paused)

        # at this point server should've written 20 bytes, 10 bytes
        # above the limit so writing should be paused around 1 second
        # from 'now', and resumed a second after that
        tServer.clock.advance(1.05)
        self.assertEquals(tServer.writtenThisSecond, 0)
        self.assertTrue(port.wrappedProtocol.paused)

        tServer.clock.advance(1.05)
        self.assertEquals(tServer.writtenThisSecond, 0)
        self.assertFalse(port.wrappedProtocol.paused)


    def test_readLimit(self):
        """
        Check the readLimit parameter: read data and check for the pause
        status.
        """
        server = Server()
        tServer = TestableThrottlingFactory(task.Clock(), server, readLimit=10)
        port = tServer.buildProtocol(address.IPv4Address('TCP', '127.0.0.1', 0))
        tr = StringTransportWithDisconnection()
        tr.protocol = port
        port.makeConnection(tr)

        port.dataReceived("0123456789")
        port.dataReceived("abcdefghij")
        self.assertEquals(tr.value(), "0123456789abcdefghij")
        self.assertEquals(tServer.readThisSecond, 20)

        tServer.clock.advance(1.05)
        self.assertEquals(tServer.readThisSecond, 0)
        self.assertEquals(tr.producerState, 'paused')

        tServer.clock.advance(1.05)
        self.assertEquals(tServer.readThisSecond, 0)
        self.assertEquals(tr.producerState, 'producing')

        tr.clear()
        port.dataReceived("0123456789")
        port.dataReceived("abcdefghij")
        self.assertEquals(tr.value(), "0123456789abcdefghij")
        self.assertEquals(tServer.readThisSecond, 20)

        tServer.clock.advance(1.05)
        self.assertEquals(tServer.readThisSecond, 0)
        self.assertEquals(tr.producerState, 'paused')

        tServer.clock.advance(1.05)
        self.assertEquals(tServer.readThisSecond, 0)
        self.assertEquals(tr.producerState, 'producing')



class TimeoutTestCase(unittest.TestCase):
    """
    Tests for L{policies.TimeoutFactory}.
    """

    def setUp(self):
        """
        Create a testable, deterministic clock, and a set of
        server factory/protocol/transport.
        """
        self.clock = task.Clock()
        wrappedFactory = protocol.ServerFactory()
        wrappedFactory.protocol = SimpleProtocol
        self.factory = TestableTimeoutFactory(self.clock, wrappedFactory, 3)
        self.proto = self.factory.buildProtocol(
            address.IPv4Address('TCP', '127.0.0.1', 12345))
        self.transport = StringTransportWithDisconnection()
        self.transport.protocol = self.proto
        self.proto.makeConnection(self.transport)


    def test_timeout(self):
        """
        Make sure that when a TimeoutFactory accepts a connection, it will
        time out that connection if no data is read or written within the
        timeout period.
        """
        # Let almost 3 time units pass
        self.clock.pump([0.0, 0.5, 1.0, 1.0, 0.4])
        self.failIf(self.proto.wrappedProtocol.disconnected)

        # Now let the timer elapse
        self.clock.pump([0.0, 0.2])
        self.failUnless(self.proto.wrappedProtocol.disconnected)


    def test_sendAvoidsTimeout(self):
        """
        Make sure that writing data to a transport from a protocol
        constructed by a TimeoutFactory resets the timeout countdown.
        """
        # Let half the countdown period elapse
        self.clock.pump([0.0, 0.5, 1.0])
        self.failIf(self.proto.wrappedProtocol.disconnected)

        # Send some data (self.proto is the /real/ proto's transport, so this
        # is the write that gets called)
        self.proto.write('bytes bytes bytes')

        # More time passes, putting us past the original timeout
        self.clock.pump([0.0, 1.0, 1.0])
        self.failIf(self.proto.wrappedProtocol.disconnected)

        # Make sure writeSequence delays timeout as well
        self.proto.writeSequence(['bytes'] * 3)

        # Tick tock
        self.clock.pump([0.0, 1.0, 1.0])
        self.failIf(self.proto.wrappedProtocol.disconnected)

        # Don't write anything more, just let the timeout expire
        self.clock.pump([0.0, 2.0])
        self.failUnless(self.proto.wrappedProtocol.disconnected)


    def test_receiveAvoidsTimeout(self):
        """
        Make sure that receiving data also resets the timeout countdown.
        """
        # Let half the countdown period elapse
        self.clock.pump([0.0, 1.0, 0.5])
        self.failIf(self.proto.wrappedProtocol.disconnected)

        # Some bytes arrive, they should reset the counter
        self.proto.dataReceived('bytes bytes bytes')

        # We pass the original timeout
        self.clock.pump([0.0, 1.0, 1.0])
        self.failIf(self.proto.wrappedProtocol.disconnected)

        # Nothing more arrives though, the new timeout deadline is passed,
        # the connection should be dropped.
        self.clock.pump([0.0, 1.0, 1.0])
        self.failUnless(self.proto.wrappedProtocol.disconnected)



class TimeoutTester(protocol.Protocol, policies.TimeoutMixin):
    """
    A testable protocol with timeout facility.

    @ivar timedOut: set to C{True} if a timeout has been detected.
    @type timedOut: C{bool}
    """
    timeOut  = 3
    timedOut = False

    def __init__(self, clock):
        """
        Initialize the protocol with a C{task.Clock} object.
        """
        self.clock = clock


    def connectionMade(self):
        """
        Upon connection, set the timeout.
        """
        self.setTimeout(self.timeOut)


    def dataReceived(self, data):
        """
        Reset the timeout on data.
        """
        self.resetTimeout()
        protocol.Protocol.dataReceived(self, data)


    def connectionLost(self, reason=None):
        """
        On connection lost, cancel all timeout operations.
        """
        self.setTimeout(None)


    def timeoutConnection(self):
        """
        Flags the timedOut variable to indicate the timeout of the connection.
        """
        self.timedOut = True


    def callLater(self, timeout, func, *args, **kwargs):
        """
        Override callLater to use the deterministic clock.
        """
        return self.clock.callLater(timeout, func, *args, **kwargs)



class TestTimeout(unittest.TestCase):
    """
    Tests for L{policies.TimeoutMixin}.
    """

    def setUp(self):
        """
        Create a testable, deterministic clock and a C{TimeoutTester} instance.
        """
        self.clock = task.Clock()
        self.proto = TimeoutTester(self.clock)


    def test_overriddenCallLater(self):
        """
        Test that the callLater of the clock is used instead of
        C{reactor.callLater}.
        """
        self.proto.setTimeout(10)
        self.assertEquals(len(self.clock.calls), 1)


    def test_timeout(self):
        """
        Check that the protocol does timeout at the time specified by its
        C{timeOut} attribute.
        """
        self.proto.makeConnection(StringTransport())

        # timeOut value is 3
        self.clock.pump([0, 0.5, 1.0, 1.0])
        self.failIf(self.proto.timedOut)
        self.clock.pump([0, 1.0])
        self.failUnless(self.proto.timedOut)


    def test_noTimeout(self):
        """
        Check that receiving data is delaying the timeout of the connection.
        """
        self.proto.makeConnection(StringTransport())

        self.clock.pump([0, 0.5, 1.0, 1.0])
        self.failIf(self.proto.timedOut)
        self.proto.dataReceived('hello there')
        self.clock.pump([0, 1.0, 1.0, 0.5])
        self.failIf(self.proto.timedOut)
        self.clock.pump([0, 1.0])
        self.failUnless(self.proto.timedOut)


    def test_resetTimeout(self):
        """
        Check that setting a new value for timeout cancel the previous value
        and install a new timeout.
        """
        self.proto.timeOut = None
        self.proto.makeConnection(StringTransport())

        self.proto.setTimeout(1)
        self.assertEquals(self.proto.timeOut, 1)

        self.clock.pump([0, 0.9])
        self.failIf(self.proto.timedOut)
        self.clock.pump([0, 0.2])
        self.failUnless(self.proto.timedOut)


    def test_cancelTimeout(self):
        """
        Setting the timeout to C{None} cancel any timeout operations.
        """
        self.proto.timeOut = 5
        self.proto.makeConnection(StringTransport())

        self.proto.setTimeout(None)
        self.assertEquals(self.proto.timeOut, None)

        self.clock.pump([0, 5, 5, 5])
        self.failIf(self.proto.timedOut)


    def test_return(self):
        """
        setTimeout should return the value of the previous timeout.
        """
        self.proto.timeOut = 5

        self.assertEquals(self.proto.setTimeout(10), 5)
        self.assertEquals(self.proto.setTimeout(None), 10)
        self.assertEquals(self.proto.setTimeout(1), None)
        self.assertEquals(self.proto.timeOut, 1)

        # Clean up the DelayedCall
        self.proto.setTimeout(None)



class LimitTotalConnectionsFactoryTestCase(unittest.TestCase):
    """Tests for policies.LimitTotalConnectionsFactory"""
    def testConnectionCounting(self):
        # Make a basic factory
        factory = policies.LimitTotalConnectionsFactory()
        factory.protocol = protocol.Protocol

        # connectionCount starts at zero
        self.assertEqual(0, factory.connectionCount)

        # connectionCount increments as connections are made
        p1 = factory.buildProtocol(None)
        self.assertEqual(1, factory.connectionCount)
        p2 = factory.buildProtocol(None)
        self.assertEqual(2, factory.connectionCount)

        # and decrements as they are lost
        p1.connectionLost(None)
        self.assertEqual(1, factory.connectionCount)
        p2.connectionLost(None)
        self.assertEqual(0, factory.connectionCount)

    def testConnectionLimiting(self):
        # Make a basic factory with a connection limit of 1
        factory = policies.LimitTotalConnectionsFactory()
        factory.protocol = protocol.Protocol
        factory.connectionLimit = 1

        # Make a connection
        p = factory.buildProtocol(None)
        self.assertNotEqual(None, p)
        self.assertEqual(1, factory.connectionCount)

        # Try to make a second connection, which will exceed the connection
        # limit.  This should return None, because overflowProtocol is None.
        self.assertEqual(None, factory.buildProtocol(None))
        self.assertEqual(1, factory.connectionCount)

        # Define an overflow protocol
        class OverflowProtocol(protocol.Protocol):
            def connectionMade(self):
                factory.overflowed = True
        factory.overflowProtocol = OverflowProtocol
        factory.overflowed = False

        # Try to make a second connection again, now that we have an overflow
        # protocol.  Note that overflow connections count towards the connection
        # count.
        op = factory.buildProtocol(None)
        op.makeConnection(None) # to trigger connectionMade
        self.assertEqual(True, factory.overflowed)
        self.assertEqual(2, factory.connectionCount)

        # Close the connections.
        p.connectionLost(None)
        self.assertEqual(1, factory.connectionCount)
        op.connectionLost(None)
        self.assertEqual(0, factory.connectionCount)


class WriteSequenceEchoProtocol(EchoProtocol):
    def dataReceived(self, bytes):
        if bytes.find('vector!') != -1:
            self.transport.writeSequence([bytes])
        else:
            EchoProtocol.dataReceived(self, bytes)

class TestLoggingFactory(policies.TrafficLoggingFactory):
    openFile = None
    def open(self, name):
        assert self.openFile is None, "open() called too many times"
        self.openFile = StringIO()
        return self.openFile



class LoggingFactoryTestCase(unittest.TestCase):
    """
    Tests for L{policies.TrafficLoggingFactory}.
    """

    def test_thingsGetLogged(self):
        """
        Check the output produced by L{policies.TrafficLoggingFactory}.
        """
        wrappedFactory = Server()
        wrappedFactory.protocol = WriteSequenceEchoProtocol
        t = StringTransportWithDisconnection()
        f = TestLoggingFactory(wrappedFactory, 'test')
        p = f.buildProtocol(('1.2.3.4', 5678))
        t.protocol = p
        p.makeConnection(t)

        v = f.openFile.getvalue()
        self.failUnless('*' in v, "* not found in %r" % (v,))
        self.failIf(t.value())

        p.dataReceived('here are some bytes')

        v = f.openFile.getvalue()
        self.assertIn("C 1: 'here are some bytes'", v)
        self.assertIn("S 1: 'here are some bytes'", v)
        self.assertEquals(t.value(), 'here are some bytes')

        t.clear()
        p.dataReceived('prepare for vector! to the extreme')
        v = f.openFile.getvalue()
        self.assertIn("SV 1: ['prepare for vector! to the extreme']", v)
        self.assertEquals(t.value(), 'prepare for vector! to the extreme')

        p.loseConnection()

        v = f.openFile.getvalue()
        self.assertIn('ConnectionDone', v)


    def test_counter(self):
        """
        Test counter management with the resetCounter method.
        """
        wrappedFactory = Server()
        f = TestLoggingFactory(wrappedFactory, 'test')
        self.assertEqual(f._counter, 0)
        f.buildProtocol(('1.2.3.4', 5678))
        self.assertEqual(f._counter, 1)
        # Reset log file
        f.openFile = None
        f.buildProtocol(('1.2.3.4', 5679))
        self.assertEqual(f._counter, 2)

        f.resetCounter()
        self.assertEqual(f._counter, 0)
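
The tests above pass because TestLoggingFactory.open returns a StringIO instead of opening a real file, so the logged traffic can be read back with getvalue(). A minimal, self-contained sketch of that substitution (the class and method names below are illustrative stand-ins, not Twisted's):

from StringIO import StringIO

class TrafficLogger(object):
    """Stand-in for a factory that logs traffic to a file-like object."""
    def __init__(self, openFunc):
        self.logfile = openFunc('traffic-log')

    def logReceived(self, data):
        self.logfile.write("C 1: %r\n" % (data,))

captured = StringIO()
logger = TrafficLogger(lambda name: captured)   # inject the in-memory "file"
logger.logReceived('here are some bytes')
assert "C 1: 'here are some bytes'" in captured.getvalue()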

Example 94

Project: f5-ansible Source File: bigip_ssl_certificate.py
    def present(self):
        current = self.read()
        changed = False
        do_key = False
        do_cert = False
        chash = None
        khash = None

        check_mode = self.params['check_mode']
        name = self.params['name']
        partition = self.params['partition']
        cert_content = self.params['cert_content']
        key_content = self.params['key_content']
        passphrase = self.params['passphrase']

        # Technically you don't need to provide us with anything in the form
        # of content for your cert, but that's kind of illogical, so we just
        # return saying you didn't "do" anything if you left the cert and keys
        # empty.
        if not cert_content and not key_content:
            return False

        if key_content is not None:
            if 'key_checksum' in current:
                khash = self.get_hash(key_content)
                if khash not in current['key_checksum']:
                    do_key = "update"
            else:
                do_key = "create"

        if cert_content is not None:
            if 'cert_checksum' in current:
                chash = self.get_hash(cert_content)
                if chash not in current['cert_checksum']:
                    do_cert = "update"
            else:
                do_cert = "create"

        if do_cert or do_key:
            changed = True
            params = dict()
            params['cert_name'] = name
            params['key_name'] = name
            params['partition'] = partition
            if khash:
                params['key_checksum'] = khash
            if chash:
                params['cert_checksum'] = chash
            self.cparams = params

            if check_mode:
                return changed

        if not do_cert and not do_key:
            return False

        tx = self.api.tm.transactions.transaction
        with TransactionContextManager(tx) as api:
            if do_cert:
                # Upload the content of a certificate as a StringIO object
                cstring = StringIO.StringIO(cert_content)
                filename = "%s.crt" % (name)
                filepath = os.path.join(self.dlpath, filename)
                api.shared.file_transfer.uploads.upload_stringio(
                    cstring,
                    filename
                )

            if do_cert == "update":
                # Install the certificate
                params = {
                    'name': name,
                    'partition': partition
                }
                cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)

                # This works because, while the source path is the same,
                # calling update causes the file to be re-read
                cert.update()
                changed = True
            elif do_cert == "create":
                # Install the certificate
                params = {
                    'sourcePath': "file://" + filepath,
                    'name': name,
                    'partition': partition
                }
                api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
                changed = True

            if do_key:
                # Upload the content of a certificate key as a StringIO object
                kstring = StringIO.StringIO(key_content)
                filename = "%s.key" % (name)
                filepath = os.path.join(self.dlpath, filename)
                api.shared.file_transfer.uploads.upload_stringio(
                    kstring,
                    filename
                )

            if do_key == "update":
                # Install the key
                params = {
                    'name': name,
                    'partition': partition
                }
                key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)

                params = dict()

                if passphrase:
                    params['passphrase'] = passphrase
                else:
                    params['passphrase'] = None

                key.update(**params)
                changed = True
            elif do_key == "create":
                # Install the key
                params = {
                    'sourcePath': "file://" + filepath,
                    'name': name,
                    'partition': partition
                }
                if passphrase:
                    params['passphrase'] = self.params['passphrase']
                else:
                    params['passphrase'] = None

                api.tm.sys.file.ssl_keys.ssl_key.create(**params)
                changed = True
        return changed
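
In this example the certificate and key strings are wrapped in StringIO.StringIO so that upload_stringio can treat them as file-like objects. A minimal sketch of that pattern, using a hypothetical uploader that accepts any object with a read() method (the f5 SDK call itself is not reproduced here):

import StringIO

def upload_filelike(fileobj, filename, chunk_size=1024):
    """Hypothetical uploader: reads a file-like object in chunks."""
    chunks = []
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        chunks.append(chunk)
    return filename, ''.join(chunks)

cert_content = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"
cstring = StringIO.StringIO(cert_content)          # wrap the string as a stream
name, payload = upload_filelike(cstring, "example.crt")
assert payload == cert_content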

Example 95

Project: github-africa Source File: step3_extend_users.py
def extend_user(user):

    print(user.get('username'))

    def get_activity_from_html(username):
        r = requests.get('https://github.com/%s' % username,
                         headers=headers, auth=TOKEN_AUTH)

        if r.status_code == 404:
            return None

        parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
        dom = parser.parse(StringIO.StringIO(r.content))
        divs = dom.getElementsByTagName('div')

        contrib_columns = [d for d in divs
                           if 'contrib-column' in
                           d.getAttribute('class')]

        if not len(contrib_columns):
            return {'contrib_total_num': 0,
                    'contrib_total_start': None,
                    'contrib_total_end': None,
                    'contrib_long_num': 0,
                    'contrib_long_start': None,
                    'contrib_long_end': None}

        total_str = getElementsByClassName(
            contrib_columns[0], "span",
            "contrib-number")[0].firstChild.nodeValue
        # logger.debug("total_str: {}".format(total_str))
        total_dates_dom = getElementsByClassName(
            contrib_columns[0], "span", "text-muted")[1]
        total_dates = "".join([n.nodeValue
                               for n in total_dates_dom.childNodes])
        # logger.debug("total_dates: {}".format(total_dates))

        total_start = du_parser.parse(total_dates.split(u'–')[0])
        total_end = du_parser.parse(total_dates.split(u'–')[1])
        # logger.debug("total_start: {}".format(total_start))
        # logger.debug("total_end: {}".format(total_end))

        long_str = getElementsByClassName(
            contrib_columns[1], "span",
            "contrib-number")[0].firstChild.nodeValue
        # logger.debug("long_str: {}".format(long_str))
        long_dates_dom = getElementsByClassName(
            contrib_columns[1], "span", "text-muted")[1]
        long_dates = "".join([n.nodeValue
                              for n in long_dates_dom.childNodes])
        # logger.debug("total_dates: {}".format(total_dates))
        # logger.debug("long_dates: {}".format(long_dates))

        if long_dates == "No recent contributions":
            long_start = None
            long_end = None
        else:
            long_start = du_parser.parse(long_dates.split(u'–')[0].strip())
            if long_start.year > total_end.year:
                long_start = datetime(long_start.year - 1,
                                      long_start.month, long_start.day)
            long_end = du_parser.parse(long_dates.split(u'–')[1].strip())
            if long_end.year > total_end.year:
                long_end = datetime(long_end.year - 1, long_end.month,
                                    long_end.day)

        return {
            'contrib_total_num': int(total_str.split()[0].replace(',', '')),
            'contrib_total_start': total_start.isoformat(),
            'contrib_total_end': total_end.isoformat(),
            'contrib_long_num': int(long_str.split()[0].replace(',', '')),
            'contrib_long_start':
                long_start.isoformat() if long_start is not None else None,
            'contrib_long_end':
                long_end.isoformat() if long_end is not None else None}

    def get_profile(user):
        r = requests.get(
            'https://api.github.com/users/%s' % user.get('username'),
            headers=headers, auth=TOKEN_AUTH)

        check_limits(r.headers)

        nd = {}
        data = json.loads(r.content)
        for col in data.keys():
            if 'url' in col and not col == 'avatar_url':
                continue
            if col in user.keys():
                continue
            nd.update({col: data[col]})
        return nd

    def get_orgs(username):
        orgs = {}
        r = requests.get('https://api.github.com/users/%s/orgs' % username,
                         headers=headers, auth=TOKEN_AUTH)

        check_limits(r.headers)

        data = json.loads(r.content)

        orgs.update({'orgs_num': len(data)})
        for i, org in enumerate(data):
            org_name = org.get('login')
            prefix = 'org%d_' % i
            rorg = requests.get('https://api.github.com/orgs/%s' % org_name,
                                headers=headers, auth=TOKEN_AUTH)

            check_limits(rorg.headers)

            data_org = json.loads(rorg.content)
            nd = {}
            for col in data_org.keys():
                if 'url' in col and not col == 'avatar_url':
                    continue
                nd.update({prefix + col: data_org[col]})
            orgs.update(nd)
        return orgs

    try:
        activity = get_activity_from_html(user.get('username'))
    except Exception as e:
        logger.exception(e)
        raise
    from pprint import pprint as pp; pp(activity)

    if activity is None:
        return None

    profile = get_profile(user)

    orgs = get_orgs(user.get('username'))

    user.update(activity)
    user.update(profile)
    user.update(orgs)

    return user
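
Here StringIO.StringIO turns the downloaded HTML string into a stream for html5lib's parser. A dependency-free sketch of the same idea, substituting the standard library's xml.dom.minidom (which also accepts a file-like object) purely for illustration:

import StringIO
from xml.dom import minidom

# Well-formed markup held in memory rather than on disk.
markup = '<div class="contrib-column"><span class="contrib-number">42</span></div>'
dom = minidom.parse(StringIO.StringIO(markup))      # parser expects a file-like object
divs = dom.getElementsByTagName('div')
assert divs[0].getAttribute('class') == 'contrib-column'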

Example 96

Project: ConfigArgParse Source File: configargparse.py
    def parse_known_args(self, args = None, namespace = None,
                         config_file_contents = None, env_vars = os.environ):
        """Supports all the same args as the ArgumentParser.parse_args(..),
        as well as the following additional args.

        Additional Args:
            args: a list of args as in argparse, or a string (eg. "-x -y bla")
            config_file_contents: String. Used for testing.
            env_vars: Dictionary. Used for testing.
        """
        if args is None:
            args = sys.argv[1:]
        elif type(args) == str:
            args = args.split()
        else:
            args = list(args)

        # normalize args by converting args like --key=value to --key value
        normalized_args = list()
        for arg in args:
            if arg and arg[0] in self.prefix_chars and '=' in arg:
                key, value =  arg.split('=', 1)
                normalized_args.append(key)
                normalized_args.append(value)
            else:
                normalized_args.append(arg)
        args = normalized_args

        for a in self._actions:
            a.is_positional_arg = not a.option_strings

        # maps a string describing the source (eg. env var) to a settings dict
        # to keep track of where values came from (used by print_values()).
        # The settings dicts for env vars and config files will then map
        # the config key to an (argparse Action obj, string value) 2-tuple.
        self._source_to_settings = OrderedDict()
        if args:
            a_v_pair = (None, list(args))  # copy args list to isolate changes
            self._source_to_settings[_COMMAND_LINE_SOURCE_KEY] = {'': a_v_pair}

        # handle auto_env_var_prefix __init__ arg by setting a.env_var as needed
        if self._auto_env_var_prefix is not None:
            for a in self._actions:
                config_file_keys = self.get_possible_config_keys(a)
                if config_file_keys and not (a.env_var or a.is_positional_arg
                    or a.is_config_file_arg or a.is_write_out_config_file_arg or
                    type(a) == argparse._HelpAction):
                    stripped_config_file_key = config_file_keys[0].strip(
                        self.prefix_chars)
                    a.env_var = (self._auto_env_var_prefix +
                                 stripped_config_file_key).replace('-', '_').upper()

        # add env var settings to the commandline that aren't there already
        env_var_args = []
        actions_with_env_var_values = [a for a in self._actions
            if not a.is_positional_arg and a.env_var and a.env_var in env_vars
                and not already_on_command_line(args, a.option_strings)]
        for action in actions_with_env_var_values:
            key = action.env_var
            value = env_vars[key]  # TODO parse env var values here to allow lists?
            env_var_args += self.convert_item_to_command_line_arg(
                action, key, value)

        args = env_var_args + args

        if env_var_args:
            self._source_to_settings[_ENV_VAR_SOURCE_KEY] = OrderedDict(
                [(a.env_var, (a, env_vars[a.env_var]))
                    for a in actions_with_env_var_values])


        # before parsing any config files, check if -h was specified.
        supports_help_arg = any(
            a for a in self._actions if type(a) == argparse._HelpAction)
        skip_config_file_parsing = supports_help_arg and (
            "-h" in args or "--help" in args)

        # prepare for reading config file(s)
        known_config_keys = dict((config_key, action) for action in self._actions
            for config_key in self.get_possible_config_keys(action))

        # open the config file(s)
        config_streams = []
        if config_file_contents:
            stream = StringIO(config_file_contents)
            stream.name = "method arg"
            config_streams = [stream]
        elif not skip_config_file_parsing:
            config_streams = self._open_config_files(args)

        # parse each config file
        for stream in reversed(config_streams):
            try:
                config_items = self._config_file_parser.parse(stream)
            except ConfigFileParserException as e:
                self.error(e)
            finally:
                if hasattr(stream, "close"):
                    stream.close()

            # add each config item to the commandline unless it's there already
            config_args = []
            for key, value in config_items.items():
                if key in known_config_keys:
                    action = known_config_keys[key]
                    discard_this_key = already_on_command_line(
                        args, action.option_strings)
                else:
                    action = None
                    discard_this_key = self._ignore_unknown_config_file_keys or \
                        already_on_command_line(
                            args,
                            self.get_command_line_key_for_unknown_config_file_setting(key))

                if not discard_this_key:
                    config_args += self.convert_item_to_command_line_arg(
                        action, key, value)
                    source_key = "%s|%s" %(_CONFIG_FILE_SOURCE_KEY, stream.name)
                    if source_key not in self._source_to_settings:
                        self._source_to_settings[source_key] = OrderedDict()
                    self._source_to_settings[source_key][key] = (action, value)

            args = config_args + args


        # save default settings for use by print_values()
        default_settings = OrderedDict()
        for action in self._actions:
            cares_about_default_value = (not action.is_positional_arg or
                action.nargs in [OPTIONAL, ZERO_OR_MORE])
            if (already_on_command_line(args, action.option_strings) or
                    not cares_about_default_value or
                    action.default is None or
                    action.default == SUPPRESS or
                    type(action) in ACTION_TYPES_THAT_DONT_NEED_A_VALUE):
                continue
            else:
                if action.option_strings:
                    key = action.option_strings[-1]
                else:
                    key = action.dest
                default_settings[key] = (action, str(action.default))

        if default_settings:
            self._source_to_settings[_DEFAULTS_SOURCE_KEY] = default_settings

        # parse all args (including commandline, config file, and env var)
        namespace, unknown_args = argparse.ArgumentParser.parse_known_args(
            self, args=args, namespace=namespace)
        # handle any args that have is_write_out_config_file_arg set to true
        user_write_out_config_file_arg_actions = [a for a in self._actions
            if getattr(a, "is_write_out_config_file_arg", False)]
        if user_write_out_config_file_arg_actions:
            output_file_paths = []
            for action in user_write_out_config_file_arg_actions:
                # check if the user specified this arg on the commandline
                output_file_path = getattr(namespace, action.dest, None)
                if output_file_path:
                    # validate the output file path
                    try:
                        with open(output_file_path, "w") as output_file:
                            output_file_paths.append(output_file_path)
                    except IOError as e:
                        raise ValueError("Couldn't open %s for writing: %s" % (
                            output_file_path, e))

            if output_file_paths:
                # generate the config file contents
                config_items = self.get_items_for_config_file_output(
                    self._source_to_settings, namespace)
                file_contents = self._config_file_parser.serialize(config_items)
                for output_file_path in output_file_paths:
                    with open(output_file_path, "w") as output_file:
                        output_file.write(file_contents)
                if len(output_file_paths) == 1:
                    output_file_paths = output_file_paths[0]
                self.exit(0, "Wrote config file to " + str(output_file_paths))
        return namespace, unknown_args
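
parse_known_args wraps config_file_contents in a StringIO so the rest of the method can treat a test-supplied string exactly like an opened config file, even giving the stream a name for error messages. A minimal sketch of that trick with a hypothetical key=value parser (the helper below is not ConfigArgParse's):

from StringIO import StringIO

def parse_config_stream(stream):
    """Hypothetical parser: reads key = value lines from any file-like object."""
    settings = {}
    for line in stream:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        key, _, value = line.partition('=')
        settings[key.strip()] = value.strip()
    return settings

stream = StringIO("# test config\nport = 8080\nverbose = true\n")
stream.name = "method arg"   # mimic the naming trick used above for error reporting
assert parse_config_stream(stream) == {'port': '8080', 'verbose': 'true'}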

Example 97

Project: miasm Source File: gdbserver.py
    def process_messages(self):

        while self.recv_queue:
            msg = self.recv_queue.pop(0)
            buf = StringIO(msg)
            msg_type = buf.read(1)

            self.send_queue.append("+")

            if msg_type == "q":
                if msg.startswith("qSupported"):
                    self.send_queue.append("PacketSize=3fff")
                elif msg.startswith("qC"):
                    # Current thread
                    self.send_queue.append("")
                elif msg.startswith("qAttached"):
                    # Not supported
                    self.send_queue.append("")
                elif msg.startswith("qTStatus"):
                    # Not supported
                    self.send_queue.append("")
                elif msg.startswith("qfThreadInfo"):
                    # Not supported
                    self.send_queue.append("")
                else:
                    raise NotImplementedError()

            elif msg_type == "H":
                # Set current thread
                self.send_queue.append("OK")

            elif msg_type == "?":
                # Report why the target halted
                self.send_queue.append(self.status)  # TRAP signal

            elif msg_type == "g":
                # Report all general register values
                self.send_queue.append(self.report_general_register_values())

            elif msg_type == "p":
                # Read a specific register
                reg_num = int(buf.read(), 16)
                self.send_queue.append(self.read_register(reg_num))

            elif msg_type == "P":
                # Set a specific register
                reg_num, value = buf.read().split("=")
                reg_num = int(reg_num, 16)
                value = int(value.decode("hex")[::-1].encode("hex"), 16)
                self.set_register(reg_num, value)
                self.send_queue.append("OK")

            elif msg_type == "m":
                # Read memory
                addr, size = map(lambda x: int(x, 16), buf.read().split(","))
                self.send_queue.append(self.read_memory(addr, size))

            elif msg_type == "k":
                # Kill
                self.sock.close()
                self.send_queue = []
                self.sock = None

            elif msg_type == "!":
                # Extending debugging will be used
                self.send_queue.append("OK")

            elif msg_type == "v":
                if msg == "vCont?":
                    # Is vCont supported ?
                    self.send_queue.append("")

            elif msg_type == "s":
                # Step
                self.dbg.step()
                self.send_queue.append("S05")  # TRAP signal

            elif msg_type == "Z":
                # Add breakpoint or watchpoint
                bp_type = buf.read(1)
                if bp_type == "0":
                    # Exec breakpoint
                    assert(buf.read(1) == ",")
                    addr, size = map(
                        lambda x: int(x, 16), buf.read().split(","))

                    if size != 1:
                        raise NotImplementedError("Bigger size")
                    self.dbg.add_breakpoint(addr)
                    self.send_queue.append("OK")

                elif bp_type == "1":
                    # Hardware BP
                    assert(buf.read(1) == ",")
                    addr, size = map(
                        lambda x: int(x, 16), buf.read().split(","))

                    self.dbg.add_memory_breakpoint(addr, size,
                                                   read=True,
                                                   write=True)
                    self.send_queue.append("OK")

                elif bp_type in ["2", "3", "4"]:
                    # Memory breakpoint
                    assert(buf.read(1) == ",")
                    read = bp_type in ["3", "4"]
                    write = bp_type in ["2", "4"]
                    addr, size = map(
                        lambda x: int(x, 16), buf.read().split(","))

                    self.dbg.add_memory_breakpoint(addr, size,
                                                   read=read,
                                                   write=write)
                    self.send_queue.append("OK")

                else:
                    raise ValueError("Impossible value")

            elif msg_type == "z":
                # Remove breakpoint or watchpoint
                bp_type = buf.read(1)
                if bp_type == "0":
                    # Exec breakpoint
                    assert(buf.read(1) == ",")
                    addr, size = map(
                        lambda x: int(x, 16), buf.read().split(","))

                    if size != 1:
                        raise NotImplementedError("Bigger size")
                    dbgsoft = self.dbg.get_breakpoint_by_addr(addr)
                    assert(len(dbgsoft) == 1)
                    self.dbg.remove_breakpoint(dbgsoft[0])
                    self.send_queue.append("OK")

                elif bp_type == "1":
                    # Hardware BP
                    assert(buf.read(1) == ",")
                    addr, size = map(
                        lambda x: int(x, 16), buf.read().split(","))
                    self.dbg.remove_memory_breakpoint_by_addr_access(
                        addr, read=True, write=True)
                    self.send_queue.append("OK")

                elif bp_type in ["2", "3", "4"]:
                    # Memory breakpoint
                    assert(buf.read(1) == ",")
                    read = bp_type in ["3", "4"]
                    write = bp_type in ["2", "4"]
                    addr, size = map(
                        lambda x: int(x, 16), buf.read().split(","))

                    self.dbg.remove_memory_breakpoint_by_addr_access(
                        addr, read=read, write=write)
                    self.send_queue.append("OK")

                else:
                    raise ValueError("Impossible value")

            elif msg_type == "c":
                # Continue
                self.status = ""
                self.send_messages()
                ret = self.dbg.run()
                if isinstance(ret, debugging.DebugBreakpointSoft):
                    self.status = "S05"
                    self.send_queue.append("S05")  # TRAP signal
                elif isinstance(ret, ExceptionHandle):
                    if ret == ExceptionHandle.memoryBreakpoint():
                        self.status = "S05"
                        self.send_queue.append("S05")
                    else:
                        raise NotImplementedError("Unknown Except")
                elif isinstance(ret, debugging.DebugBreakpointTerminate):
                    # The connection should close, but keep it running as a TRAP
                    # The connection will be closed on instance destruction
                    print ret
                    self.status = "S05"
                    self.send_queue.append("S05")
                else:
                    raise NotImplementedError()

            else:
                raise NotImplementedError(
                    "Not implemented: message type '%s'" % msg_type)

Example 98

Project: EmPyre Source File: agent.py
def processPacket(taskingID, data):

    try:
        taskingID = int(taskingID)
    except Exception as e:
        return None

    if taskingID == 1:
        # sysinfo request
        # get_sysinfo should be exposed from stager.py
        return encodePacket(1, get_sysinfo())

    elif taskingID == 2:
        # agent exit

        msg = "[!] Agent %s exiting" %(sessionID)
        sendMessage(encodePacket(2, msg))
        agent_exit()

    elif taskingID == 40:
        # run a command
        resultData = str(run_command(data))
        return encodePacket(40, resultData)

    elif taskingID == 41:
        # file download

        filePath = os.path.abspath(data)
        if not os.path.exists(filePath):
            return encodePacket(40, "file does not exist or cannot be accessed")

        offset = 0
        size = os.path.getsize(filePath)
        partIndex = 0

        while True:

            # get 512kb of the given file starting at the specified offset
            encodedPart = get_file_part(filePath, offset=offset, base64=False)
            c = compress()
            start_crc32 = c.crc32_data(encodedPart)
            comp_data = c.comp_data(encodedPart)
            encodedPart = c.build_header(comp_data, start_crc32)
            encodedPart = base64.b64encode(encodedPart)

            partData = "%s|%s|%s" %(partIndex, filePath, encodedPart)
            if not encodedPart or encodedPart == '' or len(encodedPart) == 16:
                break

            sendMessage(encodePacket(41, partData))

            global delay
            global jitter
            if jitter < 0: jitter = -jitter
            if jitter > 1: jitter = 1/jitter

            minSleep = int((1.0-jitter)*delay)
            maxSleep = int((1.0+jitter)*delay)
            sleepTime = random.randint(minSleep, maxSleep)
            time.sleep(sleepTime)
            partIndex += 1
            offset += 5120000

    elif taskingID == 42:
        # file upload
        try:
            parts = data.split("|")
            filePath = parts[0]
            base64part = parts[1]
            raw = base64.b64decode(base64part)
            d = decompress()
            dec_data = d.dec_data(raw, cheader=True)
            if not dec_data['crc32_check']:
                sendMessage(encodePacket(0, "[!] WARNING: File upload failed crc32 check during decompressing!."))
                sendMessage(encodePacket(0, "[!] HEADER: Start crc32: %s -- Received crc32: %s -- Crc32 pass: %s!." %(dec_data['header_crc32'],dec_data['dec_crc32'],dec_data['crc32_check'])))
            f = open(filePath, 'ab')
            f.write(dec_data['data'])
            f.close()

            sendMessage(encodePacket(42, "[*] Upload of %s successful" %(filePath) ))
        except Exception as e:
            sendMessage(encodePacket(0, "[!] Error in writing file %s during upload: %s" %(filePath, str(e)) ))

    elif taskingID == 50:
        # return the currently running jobs
        msg = ""
        if len(jobs) == 0:
            msg = "No active jobs"
        else:
            msg = "Active jobs:\n"
            for x in xrange(len(jobs)):
                msg += "\t%s" %(x)
        return encodePacket(50, msg)

    elif taskingID == 51:
        # stop and remove a specified job if it's running
        try:
            # Calling join first seems to hang
            # result = jobs[int(data)].join()
            sendMessage(encodePacket(0, "[*] Attempting to stop job thread"))
            result = jobs[int(data)].kill()
            sendMessage(encodePacket(0, "[*] Job thread stoped!"))
            jobs[int(data)]._Thread__stop()
            jobs.pop(int(data))
            if result and result != "":
                sendMessage(encodePacket(51, result))
        except:
            return encodePacket(0, "error stopping job: %s" %(data))

    elif taskingID == 100:
        # dynamic code execution, wait for output, don't save output
        try:
            buffer = StringIO()
            sys.stdout = buffer
            code_obj = compile(data, '<string>', 'exec')
            exec code_obj in globals()
            sys.stdout = sys.__stdout__
            results = buffer.getvalue()
            return encodePacket(100, str(results))
        except Exception as e:
            errorData = str(buffer.getvalue())
            return encodePacket(0, "error executing specified Python data: %s \nBuffer data recovered:\n%s" %(e, errorData))

    elif taskingID == 101:
        # dynamic code execution, wait for output, save output
        prefix = data[0:15].strip()
        extension = data[15:20].strip()
        data = data[20:]
        try:
            buffer = StringIO()
            sys.stdout = buffer
            code_obj = compile(data, '<string>', 'exec')
            exec code_obj in globals()
            sys.stdout = sys.__stdout__
            c = compress()
            start_crc32 = c.crc32_data(buffer.getvalue())
            comp_data = c.comp_data(buffer.getvalue())
            encodedPart = c.build_header(comp_data, start_crc32)
            encodedPart = base64.b64encode(encodedPart)
            return encodePacket(101, '{0: <15}'.format(prefix) + '{0: <5}'.format(extension) + encodedPart )
        except Exception as e:
            # Also return partial code that has been executed
            errorData = str(buffer.getvalue())
            return encodePacket(0, "error executing specified Python data %s \nBuffer data recovered:\n%s" %(e, errorData))

    elif taskingID == 102:
        # on disk code execution for modules that require multiprocessing not supported by exec
        try:
            implantHome = expanduser("~") + '/.Trash/'
            moduleName = ".mac-debug-data"
            implantPath = implantHome + moduleName
            result = "[*] Module disk path: %s \n" %(implantPath) 
            with open(implantPath, 'w') as f:
                f.write(data)
            result += "[*] Module properly dropped to disk \n"
            pythonCommand = "python %s" %(implantPath)
            process = subprocess.Popen(pythonCommand, stdout=subprocess.PIPE, shell=True)
            data = process.communicate()
            result += data[0].strip()
            try:
                os.remove(implantPath)
                result += "\n[*] Module path was properly removed: %s" %(implantPath) 
            except Exception as e:
                print "error removing module filed: %s" %(e)
            fileCheck = os.path.isfile(implantPath)
            if fileCheck:
                result += "\n\nError removing module file, please verify path: " + str(implantPath)
            return encodePacket(100, str(result))
        except Exception as e:
            fileCheck = os.path.isfile(implantPath)
            if fileCheck:
                return encodePacket(0, "error executing specified Python data: %s \nError removing module file, please verify path: %s" %(e, implantPath))
            return encodePacket(0, "error executing specified Python data: %s" %(e))

    elif taskingID == 110:
        start_job(data)
        return encodePacket(110, "job %s started" %(len(jobs)-1))

    elif taskingID == 111:
        # TASK_CMD_JOB_SAVE
        # TODO: implement job structure
        pass

    else:
        return encodePacket(0, "invalid tasking ID: %s" %(taskingID))

Example 99

Project: maze Source File: pdf.py
def render(grid, options):

    draw_with_curves = options['draw_with_curves']
    filename = options['filename']

    use_A4 = options['use_A4']
    width = options['width']
    height = options['height']

    def s_shape_00(p):
        p.moveTo(a, 0)
        if draw_with_curves:
            p.arcTo(-a, -a, a, a, 0, 90)
        else:
            p.lineTo(a, a)
        p.lineTo(0, a)

    def s_shape_01(p):
        p.moveTo(0, b)
        if draw_with_curves:
            p.arcTo(-a, b, a, s + a, 270, 90)
        else:
            p.lineTo(a, b)
        p.lineTo(a, s)

    def s_shape_10(p):
        p.moveTo(s, a)
        if draw_with_curves:
            p.arcTo(b, -a, s + a, a, 90, 90)
        else:
            p.lineTo(b, a)
        p.lineTo(b, 0)

    def s_shape_11(p):
        p.moveTo(s, b)
        if draw_with_curves:
            p.arcTo(b, b, s + a, s + a, 270, -90)
        else:
            p.lineTo(b, b)
        p.lineTo(b, s)

    buffer = StringIO()
    if filename:
        c = Canvas(filename)
    else:
        c = Canvas(buffer)

    c.setTitle('Maze')
    c.setSubject("")
    c.setAuthor("Dale O'Brien")

    if use_A4:
        page_width = 8.3 * 72
        page_height = 11.7 * 72
    else:
        page_width = 8.5 * 72
        page_height = 11.0 * 72

    c.setPageSize((page_width, page_height))

    # 0=butt,1=draw_with_curves,2=square
    c.setLineCap(1)

    left_margin = 15
    top_margin = 15

    # Cells must be square (the geometry below assumes it), so scale the
    # width if the height would otherwise run off the page.

    org_width = width
    ratio = (page_height - 2 * top_margin) / (page_width - 2 * left_margin)
    if (float(height) / width > ratio):
        width = ceil(height / ratio)

    s = (page_width - 2 * left_margin) / width

    # center the maze, looks better for mazes that don't fit the page nicely
    left_margin -= (org_width - width) * s / 2.0
    top_margin -= (s * height - (page_height - 2.0 * top_margin)) / 2.0

    g = s * 0.2
    stroke = s / 7.0
    c.setLineWidth(stroke)

    k = 0.5

    n = -(g / k) + 0.5 * (s - sqrt((g *
        (4.0 * g - 3.0 * g * k + 2 * k * s)) / k))

    r = g / k
    q = n + r
    v = (g * (-1 + k)) / k

    theta = asin((2.0 * g - 2.0 * g * k + k * s) /
        (2.0 * g - g * k + k * s)) * 180 / pi

    delta = theta - 90

    for j, row in enumerate(grid):
        # upper/lower rows
        for i, cell in enumerate(row):

            x_offset = left_margin + i * s
            y_offset = top_margin + j * s

            c.translate(x_offset, y_offset)
            p = c.beginPath()

            a = g
            b = s - g

            # mark start and end
            start = False
            end = False
            if (i == 0 and j == height - 1):
                start = True

            if (i == width - 1 and j == 0):
                end = True

            if start or end:
                c.setStrokeColorRGB(0.9, 0.1, 0.1)
                c.setFillColorRGB(0.9, 0.1, 0.1)
                p.circle(s / 2.0, s / 2.0, g / 1.5)
                c.drawPath(p, fill=True)
                p = c.beginPath()
                c.setStrokeColorRGB(0.0, 0.0, 0.0)

            if cell == 3:

                '│ │'
                '│ │'

                p.moveTo(a, s)
                p.lineTo(a, 0)
                p.moveTo(b, s)
                p.lineTo(b, 0)

            if cell == 1:

                '│ │'
                '└─┘'

                p.moveTo(b, 0)
                if draw_with_curves:

                    p.lineTo(b, q)
                    x = s - v - r
                    y = n
                    p.arcTo(x, y, x + 2 * r, y + 2 * r, 180, delta)

                    p.arcTo(g / 2,
                            g / 2,
                            s - g / 2,
                            s - g / 2, theta - 90, 360 - 2 * theta)

                    x = v - r
                    p.arcTo(x, y,
                            x + 2 * r,
                            y + 2 * r, 90 - theta, delta)

                else:
                    p.lineTo(b, b)
                    p.lineTo(a, b)
                p.lineTo(g, 0)

            if cell == 2:

                '┌─┐'
                '│ │'

                p.moveTo(b, s)
                if draw_with_curves:

                    x = s - v - r
                    y = s - n - 2 * r
                    p.arcTo(x, y, x + 2 * r, y + 2 * r, 180, -delta)

                    p.arcTo(g / 2,
                            g / 2,
                            s - g / 2,
                            s - g / 2, 90 - theta, -360 + 2 * theta)

                    x = v - r
                    p.arcTo(x, y,
                            x + 2 * r,
                            y + 2 * r, 270 + theta, -delta)

                else:
                    p.lineTo(b, a)
                    p.lineTo(a, a)
                p.lineTo(a, s)

            if cell == 4:

                '┌──'
                '└──'

                p.moveTo(s, b)
                if draw_with_curves:
                    x = s - n - 2 * r
                    y = s - v - r
                    p.arcTo(x, y, x + 2 * r, y + 2 * r, 270, delta)

                    p.arcTo(g / 2,
                            g / 2,
                            s - g / 2,
                            s - g / 2, 90 + delta, 360 - 2 * theta)

                    y = v - r
                    p.arcTo(x, y,
                            x + 2 * r,
                            y + 2 * r, 180 - theta, delta)

                else:
                    p.lineTo(g, b)
                    p.lineTo(a, a)
                p.lineTo(s, a)

            if cell == 8:

                '──┐'
                '──┘'

                p.moveTo(0, b)
                if draw_with_curves:
                    x = n
                    y = s - v - r

                    p.arcTo(x, y, x + 2 * r, y + 2 * r, 270, -delta)

                    p.arcTo(g / 2,
                            g / 2,
                            s - g / 2,
                            s - g / 2, 90 - delta, -360 + 2 * theta)

                    y = v - r
                    p.arcTo(x, y,
                            x + 2 * r,
                            y + 2 * r, theta, -delta)
                else:
                    p.lineTo(b, b)
                    p.lineTo(b, a)
                p.lineTo(0, a)

            if cell == 5:

                '│ └'
                '└──'

                s_shape_10(p)

                p.moveTo(s, b)
                if draw_with_curves:
                    if start:
                        p.arcTo(a, a, b, b, 90, 90)
                    else:
                        p.arcTo(a, 2 * a - b, 2 * b - a, b, 90, 90)
                else:
                    p.lineTo(a, b)
                p.lineTo(a, 0)

            if cell == 6:

                '┌──'
                '│ ┌'

                s_shape_11(p)

                p.moveTo(s, a)
                if draw_with_curves:
                    p.arcTo(a, a, 2 * b + a, 2 * b + a, 270, -90)
                else:
                    p.lineTo(a, a)
                p.lineTo(a, s)

            if cell == 7:

                '│ └'
                '│ ┌'

                p.moveTo(a, s)
                p.lineTo(a, 0)

                s_shape_10(p)
                s_shape_11(p)

            if cell == 9:

                '┘ │'
                '──┘'

                s_shape_00(p)

                p.moveTo(b, 0)
                if draw_with_curves:
                    p.arcTo(2 * a - b, 2 * a - b, b, b, 0, 90)
                else:
                    p.lineTo(b, b)
                p.lineTo(0, b)

            if cell == 10:

                '──┐'
                '┐ │'

                s_shape_01(p)

                p.moveTo(0, a)
                if draw_with_curves:
                    if end:
                        p.arcTo(a, a, b, b, 270, 90)
                    else:
                        p.arcTo(2 * a - b, a, b, 2 * b + a, 270, 90)
                else:
                    p.lineTo(b, a)
                p.lineTo(b, s)

            if cell == 11:

                '┘ │'
                '┐ │'

                p.moveTo(b, s)
                p.lineTo(b, 0)

                s_shape_00(p)
                s_shape_01(p)

            if cell == 12:

                '───'
                '───'

                p.moveTo(0, b)
                p.lineTo(s, b)
                p.moveTo(0, a)
                p.lineTo(s, a)

            if cell == 13:

                '┘ └'
                '───'

                p.moveTo(0, b)
                p.lineTo(s, b)

                s_shape_00(p)
                s_shape_10(p)

            if cell == 14:

                '───'
                '┐ ┌'

                p.moveTo(0, a)
                p.lineTo(s, a)

                s_shape_01(p)
                s_shape_11(p)

            if cell == 15:

                '┘ └'
                '┐ ┌'

                s_shape_00(p)
                s_shape_10(p)
                s_shape_01(p)
                s_shape_11(p)

            if cell == 19:

                '┤ ├'
                '┤ ├'

                p.moveTo(a, s)
                p.lineTo(a, 0)
                p.moveTo(b, s)
                p.lineTo(b, 0)

                p.moveTo(0, a)
                p.lineTo(a, a)
                p.moveTo(0, b)
                p.lineTo(a, b)

                p.moveTo(s, a)
                p.lineTo(b, a)
                p.moveTo(s, b)
                p.lineTo(b, b)

            if cell == 28:

                '┴─┴'
                '┬─┬'

                p.moveTo(0, b)
                p.lineTo(s, b)
                p.moveTo(0, a)
                p.lineTo(s, a)

                p.moveTo(a, a)
                p.lineTo(a, 0)
                p.moveTo(a, b)
                p.lineTo(a, s)

                p.moveTo(b, a)
                p.lineTo(b, 0)
                p.moveTo(b, b)
                p.lineTo(b, s)

            c.drawPath(p)
            c.translate(-x_offset, -y_offset)

    c.save()
    pdf = ""
    if not filename:
        pdf = buffer.getvalue()
        buffer.close()

    return pdf
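
render writes the PDF either to a named file or into a StringIO buffer, so the caller can get the finished document back as a string. A minimal sketch of the in-memory variant (assumes reportlab is installed; Canvas accepts any file-like object in place of a filename):

from StringIO import StringIO
from reportlab.pdfgen.canvas import Canvas

buffer = StringIO()
c = Canvas(buffer)            # render into memory instead of onto disk
c.drawString(72, 720, "Maze")
c.save()

pdf = buffer.getvalue()       # raw PDF bytes, ready to return or stream
buffer.close()
assert pdf.startswith('%PDF')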

Example 100

Project: django-powerdns-manager Source File: utils.py
def generate_zone_file(origin):
    """Generates a zone file.
    
    Accepts the zone origin as string (no trailing dot).
     
    Returns the contents of a zone file that contains all the resource records
    associated with the domain with the provided origin.
    
    """
    Domain = get_model('powerdns_manager', 'Domain')
    Record = get_model('powerdns_manager', 'Record')
    
    the_domain = Domain.objects.get(name__exact=origin)
    the_rrs = Record.objects.filter(domain=the_domain).order_by('-type')
    
    # Generate the zone file
    
    origin = Name((origin.rstrip('.') + '.').split('.'))
    
    # Create an empty dns.zone object.
    # We set check_origin=False because the zone contains no records.
    zone = dns.zone.from_text('', origin=origin, relativize=False, check_origin=False)
    
    rdclass = dns.rdataclass._by_text.get('IN')
    
    for rr in the_rrs:
        
        # Add trailing dot to rr.name
        record_name = rr.name.rstrip('.') + '.'
        
        if rr.type == 'SOA':
            # Add SOA Resource Record
            
            # SOA content:  primary hostmaster serial refresh retry expire default_ttl
            bits = rr.content.split()
            # Primary nameserver of SOA record
            primary = bits[0].rstrip('.') + '.'
            mname = Name(primary.split('.'))
            # Responsible hostmaster from SOA record
            hostmaster = bits[1].rstrip('.') + '.'
            rname = Name(hostmaster.split('.'))
            
            rdtype = dns.rdatatype._by_text.get('SOA')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.SOA.SOA(rdclass, rdtype,
                mname = mname,
                rname = rname,
                serial = int(bits[2]),
                refresh = int(bits[3]),
                retry = int(bits[4]),
                expire = int(bits[5]),
                minimum = int(bits[6])
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'NS':
            # Add NS Resource Record
            rdtype = dns.rdatatype._by_text.get('NS')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.NS.NS(rdclass, rdtype,
                target = Name((rr.content.rstrip('.') + '.').split('.'))
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'MX':
            # Add MX Resource Record
            rdtype = dns.rdatatype._by_text.get('MX')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.MX.MX(rdclass, rdtype,
                preference = int(rr.prio),
                exchange = Name((rr.content.rstrip('.') + '.').split('.'))
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'TXT':
            # Add TXT Resource Record
            rdtype = dns.rdatatype._by_text.get('TXT')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.TXT.TXT(rdclass, rdtype,
                strings = [rr.content.strip('"')]
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'CNAME':
            # Add CNAME Resource Record
            rdtype = dns.rdatatype._by_text.get('CNAME')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.CNAME.CNAME(rdclass, rdtype,
                target = Name((rr.content.rstrip('.') + '.').split('.'))
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'A':
            # Add A Resource Record
            rdtype = dns.rdatatype._by_text.get('A')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.IN.A.A(rdclass, rdtype,
                address = rr.content
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'AAAA':
            # Add AAAA Resource Record
            rdtype = dns.rdatatype._by_text.get('AAAA')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype,
                address = rr.content
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'SPF':
            # Add SPF Resource Record
            rdtype = dns.rdatatype._by_text.get('SPF')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.SPF.SPF(rdclass, rdtype,
                strings = [rr.content.strip('"')]
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'PTR':
            # Add PTR Resource Record
            rdtype = dns.rdatatype._by_text.get('PTR')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.ANY.PTR.PTR(rdclass, rdtype,
                target = Name((rr.content.rstrip('.') + '.').split('.'))
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
        
        elif rr.type == 'SRV':
            # Add SRV Resource Record
            
            # weight port target
            weight, port, target = rr.content.split()
            
            rdtype = dns.rdatatype._by_text.get('SRV')
            rdataset = zone.find_rdataset(record_name, rdtype=rdtype, create=True)
            rdata = dns.rdtypes.IN.SRV.SRV(rdclass, rdtype,
                priority = int(rr.prio),
                weight = int(weight),
                port = int(port),
                target = Name((target.rstrip('.') + '.').split('.'))
            )
            rdataset.add(rdata, ttl=int(rr.ttl))
            
    
    # Export text (from the source code of http://www.dnspython.org/docs/1.10.0/html/dns.zone.Zone-class.html#to_file)
    EOL = '\n'
    f = StringIO.StringIO()
    f.write('$ORIGIN %s%s' % (origin, EOL))
    zone.to_file(f, sorted=True, relativize=False, nl=EOL)
    data = f.getvalue()
    f.close()
    return data
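
The zone is serialized by handing a StringIO.StringIO to zone.to_file and then reading the buffer back with getvalue(). A minimal sketch of that export pattern, using a hypothetical stand-in for the dnspython zone object (any object exposing a to_file(fileobj) method works the same way):

import StringIO

class Zone(object):
    """Hypothetical stand-in for an object with a file-oriented export method."""
    def __init__(self, records):
        self.records = records

    def to_file(self, f, nl='\n'):
        for record in self.records:
            f.write(record + nl)

zone = Zone(['example.org. 3600 IN SOA ns1.example.org. host.example.org. 1 1 1 1 1',
             'example.org. 3600 IN NS ns1.example.org.'])
f = StringIO.StringIO()
f.write('$ORIGIN example.org.\n')
zone.to_file(f)
data = f.getvalue()
f.close()
assert data.startswith('$ORIGIN')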