datetime.datetime.now

Here are examples of the Python API datetime.datetime.now, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

200 Examples
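
Most of the examples below lean on two facts about this call: datetime.datetime.now() returns a naive datetime for the current local time, and subtracting two such values yields a datetime.timedelta. A minimal sketch:

import datetime

now = datetime.datetime.now()            # naive local time, e.g. 2016-05-04 13:37:00.123456
print(now.year, now.month, now.day)      # individual components are attributes
later = datetime.datetime.now()
elapsed = later - now                    # subtracting two datetimes gives a datetime.timedelta
print(elapsed.total_seconds())           # elapsed wall-clock time in seconds, as a float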

Example 1

Project: kingpin
Source File: thrift_client_mixin.py
def ensure_connection(service_name, method_name, method):
    """Ensure that client is connected before executing method.

    .. note:: Class to which this decorator is applied **must** have
       ``get_connection_exception_class()`` method that would return the
       class of the exception to be thrown when retryable connection
       exception is repacked.

    This decorator can only be applied to class methods,
    not a non-class function.

    Args:
        service_name: A string, the service name used in the stats key.
        method_name: A string, the name of the method to be ensured.
        method: The method (callable) to be ensured.

    Returns:
        Whatever the executed method returns.

    """
    @functools.wraps(method)
    def method_wrapper(self, *args, **kwargs):

        req_timeout_ms = kwargs.pop('rpc_timeout_ms', self.timeout)
        conn_timeout_ms = kwargs.pop('rpc_timeout_ms',
                                     self.socket_connection_timeout)
        if conn_timeout_ms is None:
            conn_timeout_ms = req_timeout_ms
        retries_left = self.retry_count
        while retries_left:
            start_time = datetime.datetime.now()
            try:
                # Ensure connection.
                try:
                    self.connect(conn_timeout_ms, req_timeout_ms)
                except socket.timeout:
                    raise ThriftConnectionTimeoutError()
                result = method(self._client, *args, **kwargs)
                time_taken = datetime.datetime.now() - start_time
                # compute time taken into milliseconds
                time_taken_ms = time_taken.total_seconds() * 1000
                self.statsd_client.timing(
                    "client.requests.{0}.{1}".format(service_name, method_name),
                    time_taken_ms, sample_rate=0.001)
                self.refresh_connection_if_needed()
                return result
            except TApplicationException as e:
                handler_args = args_to_str(*args, **kwargs)
                time_taken = datetime.datetime.now() - start_time
                # compute time taken into milliseconds
                time_taken_ms = time_taken.total_seconds() * 1000
                log.info(
                    "Thrift call failed TApplicationException : %s(%s) : "
                    "%s:%d : time_taken_ms : %s : %s" % (
                        method_name, handler_args, self.host,
                        self.port, time_taken_ms, e))
                raise
            except Exception as e:
                t, v, tb = sys.exc_info()
                retries_left -= 1
                handler_args = args_to_str(*args, **kwargs)
                time_taken = datetime.datetime.now() - start_time
                # compute time taken into milliseconds
                time_taken_ms = time_taken.total_seconds() * 1000

                # application exception, if it is retriable as determined by
                # RetryPolicy then we simply raise the exception, no connection
                # teardown is needed, because the exception was thrown by the
                # server and transported back to the client.
                if _is_application_exception(e):
                    retry_policy_to_apply = self.retry_policy
                    if not retry_policy_to_apply:
                        retry_policy_to_apply = DEFAULT_RETRY_POLICY
                    if not retry_policy_to_apply.should_retry(e):
                        if random.random() < self.failed_retry_policy_log_sample_rate:
                            # Sample logging in case logging is too overwhelming.
                            log.info(
                                "Thrift call failed retry policy : %s(%s) :"
                                "%s:%d : time_taken_ms : %s : %s" % (
                                    method_name, handler_args, self.host,
                                    self.port, time_taken_ms, e))
                            # raise exception to stop it from being retried
                        raise t, v, tb
                elif _is_rpc_timeout(e):
                    # rpc socket timeout, not connection socket timeout
                    log.info(
                        "Thrift call failed rpc timeout : %s(%s) :"
                        "%s:%d : time_taken_ms : %s : %s" % (
                            method_name, handler_args, self.host,
                            self.port, time_taken_ms, e))
                    self.statsd_client.increment(
                        "errors.thriftclient.RpcTimeoutError",
                        sample_rate=0.01,
                        tags={'client': self.host})
                    # socket timeout, only reliable way to recover is to tear
                    # down the connection, it is probably good to select a
                    # new host, regardless whether we should retry this request
                    # or not.
                    self.teardown_connection(select_new_host=True)
                    # TODO(Yongsheng): temporarily disable this feature, we need
                    # a way to gauge the server healthiness before we can bring
                    # this feature back.
                    # raise exception to keep it from being retried.
                    # raise self.get_connection_exception_class()(e)
                else:
                    # at this point, we assume it is connectivity issue,
                    # socket read/write errors, or failing to establish
                    # connection, we will need to tear down the connection
                    # and re-establish it for subsequent calls
                    log.info(
                        "Thrift client connection fail : %s(%s) : %s:%d : "
                        "retries_left=%d : time_taken_ms : %s  %r",
                        method_name, handler_args, self.host,
                        self.port, retries_left, time_taken_ms, e)
                    self.statsd_client.increment(
                        "errors.thriftclient.ConnectionError",
                        sample_rate=0.01,
                        tags={'client': self.host})
                    # By default, for the first two retries, try the same host
                    # to rule out intermittent connectivity issue. For the last
                    # retry select a new host randomly.
                    # If ``always_retry_on_new_host`` is set True, always retry
                    # on a new host.
                    if self.always_retry_on_new_host or retries_left == 1:
                        # turn this on when we are ready to penalize bad hosts
                        # self._host_selector.invalidate()
                        self.teardown_connection(select_new_host=True)
                    else:
                        self.teardown_connection(select_new_host=False)

                # Retriable errors, but no retries left, bail.
                if not retries_left:
                    log.info(
                        "Thrift call failed all retries : %s(%s) : "
                        "%s:%d : time_taken_ms: %s %s" % (
                            method_name, handler_args, self.host, self.port,
                            time_taken_ms, e))
                    self.statsd_client.increment(
                        "errors.thriftclient.AllConnectionError",
                        sample_rate=0.01,
                        tags={'client': self.host})
                    # Repack the message and raise as a different exception.
                    raise self.get_connection_exception_class()(e), None, tb

    return method_wrapper
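
The timing logic in the example above reduces to a small pattern: record datetime.datetime.now() before the call, subtract it from a second reading afterwards, and convert the resulting timedelta to milliseconds with total_seconds() * 1000. A minimal sketch of just that pattern, with print standing in for the statsd_client.timing call:

import datetime
import functools

def timed(method):
    """Report how long each call to method takes, in milliseconds."""
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        start = datetime.datetime.now()
        try:
            return method(*args, **kwargs)
        finally:
            elapsed_ms = (datetime.datetime.now() - start).total_seconds() * 1000
            print('%s took %.3f ms' % (method.__name__, elapsed_ms))  # stand-in for statsd reporting
    return wrapper

@timed
def work():
    return sum(range(100000))

work()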

Example 2

Project: batch-shipyard
Source File: cascade.py
    def _pull_and_save(self) -> None:
        """Thread main logic for pulling and saving docker image"""
        if _REGISTRY is None:
            raise RuntimeError(
                ('{} image specified for global resource, but there are '
                 'no registries available').format(self.resource))
        file = None
        resource_hash = compute_resource_hash(self.resource)
        image = get_docker_image_name_from_resource(self.resource)
        _record_perf('pull-start', 'img={}'.format(image))
        start = datetime.datetime.now()
        logger.info('pulling image {} from {}'.format(image, _REGISTRY))
        if _REGISTRY == 'registry.hub.docker.com':
            subprocess.check_output(
                'docker pull {}'.format(image), shell=True)
        else:
            _pub = False
            try:
                subprocess.check_output(
                    'docker pull {}/{}'.format(_REGISTRY, image),
                    shell=True)
            except subprocess.CalledProcessError:
                if _ALLOW_PUBLIC_PULL_WITH_PRIVATE:
                    logger.warning(
                        'could not pull from private registry, attempting '
                        'Docker Public Hub instead')
                    subprocess.check_output(
                        'docker pull {}'.format(image), shell=True)
                    _pub = True
                else:
                    raise
            # tag image to remove registry ip
            if not _pub:
                subprocess.check_call(
                    'docker tag {}/{} {}'.format(_REGISTRY, image, image),
                    shell=True)
            del _pub
        diff = (datetime.datetime.now() - start).total_seconds()
        logger.debug('took {} sec to pull docker image {} from {}'.format(
            diff, image, _REGISTRY))
        # register service
        _merge_service(
            self.table_client, self.resource, self.nglobalresources)
        # save docker image to seed to torrent
        if _ENABLE_P2P:
            _record_perf('pull-end', 'img={},diff={}'.format(
                image, diff))
            _record_perf('save-start', 'img={}'.format(image))
            start = datetime.datetime.now()
            if _COMPRESSION:
                # need to create reproducible compressed tarballs
                # 1. untar docker save file
                # 2. re-tar files sorted by name and set mtime/user/group
                #    to known values
                # 3. fast compress with parallel gzip ignoring certain file
                #    properties
                # 4. remove temporary directory
                tmpdir = _TORRENT_DIR / '{}-tmp'.format(resource_hash)
                tmpdir.mkdir(parents=True, exist_ok=True)
                file = _TORRENT_DIR / '{}.{}'.format(
                    resource_hash, _SAVELOAD_FILE_EXTENSION)
                logger.info('saving docker image {} to {} for seeding'.format(
                    image, file))
                subprocess.check_call(
                    ('(docker save {} | tar -xf -) '
                     '&& (tar --sort=name --mtime=\'1970-01-01\' '
                     '--owner=0 --group=0 -cf - . '
                     '| pigz --fast -n -T -c > {})').format(image, file),
                    cwd=str(tmpdir), shell=True)
                shutil.rmtree(str(tmpdir), ignore_errors=True)
                del tmpdir
                fsize = file.stat().st_size
            else:
                # tarball generated by docker save is not reproducible
                # we need to untar it and torrent the contents instead
                file = _TORRENT_DIR / '{}'.format(resource_hash)
                file.mkdir(parents=True, exist_ok=True)
                logger.info('saving docker image {} to {} for seeding'.format(
                    image, file))
                subprocess.check_call(
                    'docker save {} | tar -xf -'.format(image),
                    cwd=str(file), shell=True)
                fsize = 0
                for entry in scantree(str(file)):
                    if entry.is_file(follow_symlinks=False):
                        fsize += entry.stat().st_size
            diff = (datetime.datetime.now() - start).total_seconds()
            logger.debug('took {} sec to save docker image {} to {}'.format(
                diff, image, file))
            _record_perf('save-end', 'img={},size={},diff={}'.format(
                image, fsize, diff))
            # generate torrent file
            start = datetime.datetime.now()
            torrent_file, torrent_sha1 = generate_torrent(file, resource_hash)
            # check if blob exists and is non-zero length prior to uploading
            try:
                _bp = self.blob_client.get_blob_properties(
                    _STORAGE_CONTAINERS['blob_torrents'],
                    str(torrent_file.name))
                if _bp.properties.content_length == 0:
                    raise ValueError()
            except Exception:
                self.blob_client.create_blob_from_path(
                    _STORAGE_CONTAINERS['blob_torrents'],
                    str(torrent_file.name), str(torrent_file))
            diff = (datetime.datetime.now() - start).total_seconds()
            logger.debug(
                'took {} sec to generate and upload torrent file: {}'.format(
                    diff, torrent_file))
            start = datetime.datetime.now()
            # add to torrent dict (effectively enqueues for torrent start)
            entity = {
                'PartitionKey': _PARTITION_KEY,
                'RowKey': resource_hash,
                'Resource': self.resource,
                'TorrentFileLocator': '{},{}'.format(
                    _STORAGE_CONTAINERS['blob_torrents'],
                    str(torrent_file.name)),
                'TorrentFileSHA1': torrent_sha1,
                'TorrentIsDir': file.is_dir(),
                'TorrentContentSizeBytes': fsize,
            }
            with _PT_LOCK:
                _PENDING_TORRENTS[self.resource] = {
                    'entity': entity,
                    'torrent_file': torrent_file,
                    'started': False,
                    'seed': True,
                    'loaded': True,
                    'loading': False,
                    'registered': True,
                }
                _TORRENT_REVERSE_LOOKUP[resource_hash] = self.resource
            # wait until torrent has started
            logger.info(
                'waiting for torrent {} to start'.format(self.resource))
            while (self.resource not in _TORRENTS or
                   not _TORRENTS[self.resource]['started']):
                time.sleep(0.1)
            diff = (datetime.datetime.now() - start).total_seconds()
            logger.debug('took {} sec for {} torrent to start'.format(
                diff, self.resource))
        else:
            # get docker image size
            try:
                output = subprocess.check_output(
                    'docker images {}'.format(image), shell=True)
                size = ' '.join(output.decode('utf-8').split()[-2:])
                _record_perf('pull-end', 'img={},diff={},size={}'.format(
                    image, diff, size))
            except subprocess.CalledProcessError as ex:
                logger.exception(ex)
                _record_perf('pull-end', 'img={},diff={}'.format(image, diff))
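
Each docker step in the example above is wrapped in the same bookkeeping: take a start timestamp, run the subprocess, and log the difference in seconds. A stripped-down sketch of that pattern (the echo command is only a placeholder for the docker pull):

import datetime
import subprocess

start = datetime.datetime.now()
subprocess.check_output('echo hello', shell=True)        # placeholder for 'docker pull ...'
diff = (datetime.datetime.now() - start).total_seconds()
print('took {} sec to run command'.format(diff))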

Example 3

Project: dokomoforms
Source File: demo.py
def _create_demo_user(session):
    with session.begin():
        user = Administrator(
            name='demo_user',
            emails=[Email(address='[email protected]')],
        )
        survey = models.construct_survey(
            title={'English': 'Demo Education Survey'},
            survey_type='public',
            url_slug='demo',
            nodes=[
                models.construct_survey_node(
                    node=models.construct_node(
                        type_constraint='photo',
                        title={'English': 'Photo of Facility Exterior'}
                    )
                ),
                models.construct_survey_node(
                    node=models.construct_node(
                        type_constraint='facility',
                        title={'English': 'Facility'},
                        hint={'English': (
                            'Select the facility from the list, or add'
                            ' a new one.'
                        )},
                        logic={
                            'slat': -85,
                            'nlat': 85,
                            'wlng': -180,
                            'elng': 180,
                        }
                    )
                ),
                models.construct_survey_node(
                    node=models.construct_node(
                        type_constraint='multiple_choice',
                        title={'English': 'Education Type'},
                        choices=[
                            models.Choice(
                                choice_text={
                                    'English': 'public',
                                }
                            ),
                            models.Choice(
                                choice_text={
                                    'English': 'private',
                                }
                            )
                        ]
                    )
                ),
                models.construct_survey_node(
                    node=models.construct_node(
                        type_constraint='multiple_choice',
                        title={'English': 'Education Level'},
                        allow_other=True,
                        choices=[
                            models.Choice(
                                choice_text={
                                    'English': 'primary',
                                }
                            ),
                            models.Choice(
                                choice_text={
                                    'English': 'secondary',
                                }
                            ),
                            models.Choice(
                                choice_text={
                                    'English': 'both',
                                }
                            )
                        ]
                    )
                ),
                models.construct_survey_node(
                    node=models.construct_node(
                        type_constraint='integer',
                        title={'English': 'Number of Students'},
                        logic={'min': 0}
                    )
                ),
            ],
        )
        user.surveys.append(survey)
        session.add(user)
        session.flush()
        survey.submissions.extend([
            models.construct_submission(
                submission_type='public_submission',
                submitter_name='Demo Submitter 1',
                submission_time=(
                    datetime.datetime.now() - datetime.timedelta(days=1)
                ),
                save_time=(
                    datetime.datetime.now() - datetime.timedelta(days=1)
                ),
                answers=[
                    models.construct_answer(
                        survey_node=survey.nodes[1],
                        type_constraint='facility',
                        answer={
                            'facility_id': 1,
                            'lat': 40.8,
                            'lng': -73.9,
                            'facility_name': 'Demo Facility 1',
                            'facility_sector': 'Demo',
                        },
                    ),
                    models.construct_answer(
                        survey_node=survey.nodes[2],
                        type_constraint='multiple_choice',
                        answer=survey.nodes[2].node.choices[1].id,
                    ),
                    models.construct_answer(
                        survey_node=survey.nodes[3],
                        type_constraint='multiple_choice',
                        other='Technical',
                    ),
                    models.construct_answer(
                        survey_node=survey.nodes[4],
                        type_constraint='integer',
                        answer=200,
                    ),
                ],
            ),
            models.construct_submission(
                submission_type='public_submission',
                submitter_name='Demo Submitter 2',
                submission_time=(
                    datetime.datetime.now() - datetime.timedelta(days=4)
                ),
                save_time=(
                    datetime.datetime.now() - datetime.timedelta(days=4)
                ),
                answers=[
                    models.construct_answer(
                        survey_node=survey.nodes[1],
                        type_constraint='facility',
                        answer={
                            'facility_id': 2,
                            'lat': 42,
                            'lng': -74,
                            'facility_name': 'Demo Facility 2',
                            'facility_sector': 'Demo',
                        },
                    ),
                    models.construct_answer(
                        survey_node=survey.nodes[2],
                        type_constraint='multiple_choice',
                        answer=survey.nodes[2].node.choices[0].id,
                    ),
                    models.construct_answer(
                        survey_node=survey.nodes[3],
                        type_constraint='multiple_choice',
                        answer=survey.nodes[3].node.choices[0].id,
                    ),
                    models.construct_answer(
                        survey_node=survey.nodes[4],
                        type_constraint='integer',
                        answer=300,
                    ),
                ],
            ),
        ])
        session.add(survey)
    return user
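
The demo submissions above are backdated by subtracting a datetime.timedelta from datetime.datetime.now(); the arithmetic in isolation:

import datetime

now = datetime.datetime.now()
yesterday = now - datetime.timedelta(days=1)
four_days_ago = now - datetime.timedelta(days=4)
print(four_days_ago < yesterday < now)   # True: datetimes compare chronologically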

Example 4

    def forwards(self, orm):

        # Deleting model 'Notification'
        db.delete_table('core_notification')

        # Adding model 'Comment'
        db.create_table('core_comment', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['core.User'], null=True, blank=True)),
            ('body', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')
             (default=datetime.datetime.now)),
        ))
        db.send_create_signal('core', ['Comment'])

        # Adding M2M table for field likes on 'Comment'
        db.create_table('core_comment_likes', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('comment', models.ForeignKey(orm['core.comment'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique('core_comment_likes', ['comment_id', 'user_id'])

        # Adding M2M table for field dislikes on 'Comment'
        db.create_table('core_comment_dislikes', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('comment', models.ForeignKey(orm['core.comment'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique('core_comment_dislikes', ['comment_id', 'user_id'])

        # Adding model 'Tag'
        db.create_table('core_tag', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')
             (max_length=512)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')
             (default=datetime.datetime.now)),
        ))
        db.send_create_signal('core', ['Tag'])

        # Adding model 'RevisionField'
        db.create_table('core_revisionfield', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('revision', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['core.Revision'])),
            ('field_type', self.gf('django.db.models.fields.CharField')
             (max_length=512, null=True, blank=True)),
            ('field', self.gf('django.db.models.fields.CharField')
             (max_length=512, null=True, blank=True)),
            ('value', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('value_key', self.gf('django.db.models.fields.related.ForeignKey')(
                blank=True, related_name='revisionfield_key', null=True, to=orm['core.Object'])),
            ('value_key_acc', self.gf('django.db.models.fields.related.ForeignKey')(
                blank=True, related_name='revisionfield_key_acc', null=True, to=orm['core.AccessEntity'])),
        ))
        db.send_create_signal('core', ['RevisionField'])

        # Adding M2M table for field value_m2m on 'RevisionField'
        db.create_table('core_revisionfield_value_m2m', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('revisionfield', models.ForeignKey(
                orm['core.revisionfield'], null=False)),
            ('object', models.ForeignKey(orm['core.object'], null=False))
        ))
        db.create_unique(
            'core_revisionfield_value_m2m', ['revisionfield_id', 'object_id'])

        # Adding M2M table for field value_m2m_acc on 'RevisionField'
        db.create_table('core_revisionfield_value_m2m_acc', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('revisionfield', models.ForeignKey(
                orm['core.revisionfield'], null=False)),
            ('accessentity', models.ForeignKey(
                orm['core.accessentity'], null=False))
        ))
        db.create_unique(
            'core_revisionfield_value_m2m_acc', ['revisionfield_id', 'accessentity_id'])

        # Adding model 'Revision'
        db.create_table('core_revision', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('previous', self.gf('django.db.models.fields.related.OneToOneField')(
                blank=True, related_name='next_set', unique=True, null=True, to=orm['core.Revision'])),
            ('object', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['core.Object'])),
            ('change_type', self.gf('django.db.models.fields.CharField')
             (max_length=512, null=True, blank=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')
             (default=datetime.datetime.now)),
        ))
        db.send_create_signal('core', ['Revision'])

        # Adding model 'Invitation'
        db.create_table('core_invitation', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('email', self.gf('django.db.models.fields.EmailField')
             (max_length=75)),
            ('key', self.gf('django.db.models.fields.CharField')
             (max_length=256)),
            ('sender', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['core.User'], null=True, blank=True)),
            ('default_group', self.gf('django.db.models.fields.related.ForeignKey')(
                to=orm['core.Group'], null=True, blank=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')
             (default=datetime.datetime.now)),
        ))
        db.send_create_signal('core', ['Invitation'])

        # Adding model 'AccessEntity'
        db.create_table('core_accessentity', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')
             (auto_now=True, blank=True)),
        ))
        db.send_create_signal('core', ['AccessEntity'])

        # Adding model 'UpdateRecord'
        db.create_table('core_updaterecord', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(
                blank=True, related_name='sent_updates', null=True, to=orm['core.User'])),
            ('sender', self.gf('django.db.models.fields.related.ForeignKey')(
                blank=True, related_name='sent_updates', null=True, to=orm['core.Object'])),
            ('record_type', self.gf(
                'django.db.models.fields.CharField')(max_length=32)),
            ('url', self.gf('django.db.models.fields.CharField')
             (max_length=512, null=True, blank=True)),
            ('body', self.gf('django.db.models.fields.TextField')
             (default='', null=True, blank=True)),
            ('score', self.gf(
                'django.db.models.fields.IntegerField')(default=0)),
            ('format_message', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('format_strings', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('date_created', self.gf('django.db.models.fields.DateTimeField')
             (default=datetime.datetime.now)),
        ))
        db.send_create_signal('core', ['UpdateRecord'])

        # Adding M2M table for field about on 'UpdateRecord'
        db.create_table('core_updaterecord_about', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('updaterecord', models.ForeignKey(
                orm['core.updaterecord'], null=False)),
            ('object', models.ForeignKey(orm['core.object'], null=False))
        ))
        db.create_unique(
            'core_updaterecord_about', ['updaterecord_id', 'object_id'])

        # Adding M2M table for field recipients on 'UpdateRecord'
        db.create_table('core_updaterecord_recipients', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('updaterecord', models.ForeignKey(
                orm['core.updaterecord'], null=False)),
            ('accessentity', models.ForeignKey(
                orm['core.accessentity'], null=False))
        ))
        db.create_unique(
            'core_updaterecord_recipients', ['updaterecord_id', 'accessentity_id'])

        # Adding M2M table for field comments on 'UpdateRecord'
        db.create_table('core_updaterecord_comments', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('updaterecord', models.ForeignKey(
                orm['core.updaterecord'], null=False)),
            ('comment', models.ForeignKey(orm['core.comment'], null=False))
        ))
        db.create_unique(
            'core_updaterecord_comments', ['updaterecord_id', 'comment_id'])

        # Adding M2M table for field likes on 'UpdateRecord'
        db.create_table('core_updaterecord_likes', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('updaterecord', models.ForeignKey(
                orm['core.updaterecord'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique(
            'core_updaterecord_likes', ['updaterecord_id', 'user_id'])

        # Adding M2M table for field dislikes on 'UpdateRecord'
        db.create_table('core_updaterecord_dislikes', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('updaterecord', models.ForeignKey(
                orm['core.updaterecord'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique(
            'core_updaterecord_dislikes', ['updaterecord_id', 'user_id'])

        # Deleting field 'Group.last_updated'
        db.delete_column('core_group', 'last_updated')

        # Adding field 'Group.accessentity_ptr'
        db.add_column('core_group', 'accessentity_ptr', self.gf('django.db.models.fields.related.OneToOneField')(
            to=orm['core.AccessEntity'], unique=True, null=True, blank=True), keep_default=False)

        # Deleting field 'Object.group_read'
        db.delete_column('core_object', 'group_read')

        # Deleting field 'Object.user_write'
        db.delete_column('core_object', 'user_write')

        # Deleting field 'Object.group'
        db.delete_column('core_object', 'group_id')

        # Deleting field 'Object.everybody_execute'
        db.delete_column('core_object', 'everybody_execute')

        # Deleting field 'Object.user_execute'
        db.delete_column('core_object', 'user_execute')

        # Deleting field 'Object.user_read'
        db.delete_column('core_object', 'user_read')

        # Deleting field 'Object.everybody_write'
        db.delete_column('core_object', 'everybody_write')

        # Deleting field 'Object.group_write'
        db.delete_column('core_object', 'group_write')

        # Deleting field 'Object.group_execute'
        db.delete_column('core_object', 'group_execute')

        # Deleting field 'Object.everybody_read'
        db.delete_column('core_object', 'everybody_read')

        # Adding field 'Object.creator'
        db.add_column('core_object', 'creator', self.gf('django.db.models.fields.related.ForeignKey')(
            blank=True, related_name='objects_created', null=True, to=orm['core.User']), keep_default=False)

        # Adding M2M table for field read_access on 'Object'
        db.create_table('core_object_read_access', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('object', models.ForeignKey(orm['core.object'], null=False)),
            ('accessentity', models.ForeignKey(
                orm['core.accessentity'], null=False))
        ))
        db.create_unique(
            'core_object_read_access', ['object_id', 'accessentity_id'])

        # Adding M2M table for field full_access on 'Object'
        db.create_table('core_object_full_access', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('object', models.ForeignKey(orm['core.object'], null=False)),
            ('accessentity', models.ForeignKey(
                orm['core.accessentity'], null=False))
        ))
        db.create_unique(
            'core_object_full_access', ['object_id', 'accessentity_id'])

        # Adding M2M table for field tags on 'Object'
        db.create_table('core_object_tags', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('object', models.ForeignKey(orm['core.object'], null=False)),
            ('tag', models.ForeignKey(orm['core.tag'], null=False))
        ))
        db.create_unique('core_object_tags', ['object_id', 'tag_id'])

        # Adding M2M table for field comments on 'Object'
        db.create_table('core_object_comments', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('object', models.ForeignKey(orm['core.object'], null=False)),
            ('comment', models.ForeignKey(orm['core.comment'], null=False))
        ))
        db.create_unique('core_object_comments', ['object_id', 'comment_id'])

        # Adding M2M table for field likes on 'Object'
        db.create_table('core_object_likes', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('object', models.ForeignKey(orm['core.object'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique('core_object_likes', ['object_id', 'user_id'])

        # Adding M2M table for field dislikes on 'Object'
        db.create_table('core_object_dislikes', (
            ('id', models.AutoField(
                verbose_name='ID', primary_key=True, auto_created=True)),
            ('object', models.ForeignKey(orm['core.object'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique('core_object_dislikes', ['object_id', 'user_id'])

        # Changing field 'Object.user'
        db.alter_column('core_object', 'user_id', self.gf(
            'django.db.models.fields.related.ForeignKey')(to=orm['core.User'], null=True))

        # Deleting field 'User.last_updated'
        db.delete_column('core_user', 'last_updated')

        # Adding field 'User.accessentity_ptr'
        db.add_column('core_user', 'accessentity_ptr', self.gf('django.db.models.fields.related.OneToOneField')(
            to=orm['core.AccessEntity'], unique=True, null=True, blank=True), keep_default=False)

        # Adding field 'User.disabled'
        db.add_column('core_user', 'disabled', self.gf(
            'django.db.models.fields.BooleanField')(default=False), keep_default=False)

        # Adding field 'User.last_access'
        db.add_column('core_user', 'last_access', self.gf('django.db.models.fields.DateTimeField')(
            default=datetime.datetime.now), keep_default=False)

        # Changing field 'User.default_group'
        db.alter_column('core_user', 'default_group_id', self.gf(
            'django.db.models.fields.related.ForeignKey')(null=True, to=orm['core.AccessEntity']))
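
Note that the migration above passes datetime.datetime.now itself, without parentheses, as the column default, so the timestamp is evaluated each time a row is created rather than once when the module is imported. The same callable-versus-value distinction can be shown outside Django; a minimal sketch with a hypothetical make_record helper:

import datetime

def make_record(clock=datetime.datetime.now):
    # clock is the callable itself; calling it here yields a fresh timestamp per record
    return {'date_created': clock()}

first = make_record()
second = make_record()
print(first['date_created'] <= second['date_created'])   # True: each record got its own timestamp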

Example 5

Project: theconversation
Source File: posts.py
    @tornado.web.authenticated
    def post(self):
        sort_by = self.get_argument('sort_by', 'hot')
        page = abs(int(self.get_argument('page', '1')))
        per_page = abs(int(self.get_argument('per_page', '9')))
        is_blacklisted = False
        msg = 'success'
        if self.current_user:
            is_blacklisted = self.is_blacklisted(self.current_user)

        post = {}
        post['slug'] = self.get_argument('slug', None)
        post['title'] = self.get_argument('title', '')
        post['url'] = self.get_argument('url', '')
        post['body_raw'] = self.get_argument('body_raw', '')
        post['tags'] = self.get_argument('tags', '').split(',')
        post['featured'] = self.get_argument('featured', '')
        post['has_hackpad'] = self.get_argument('has_hackpad', '')
        post['slug'] = self.get_argument('slug', '')
        post['sort_score'] = 0
        post['daily_sort_score'] = 0
        if post['has_hackpad'] != '':
            post['has_hackpad'] = True
        else:
            post['has_hackpad'] = False

        deleted = self.get_argument('deleted', '')
        if deleted != '':
            post['deleted'] = True
            post['date_deleted'] = datetime.datetime.now()

        bypass_dup_check = self.get_argument('bypass_dup_check', '')
        is_edit = False
        if post['slug']:
            bypass_dup_check = "true"
            is_edit = True

        dups = []

        # make sure user isn't blacklisted
        if not self.is_blacklisted(self.current_user):
            # check if there is an existing URL
            if post['url'] != '':
                url = urlparse(post['url'])
                netloc = url.netloc.split('.')
                if netloc[0] == 'www':
                    del netloc[0]
                path = url.path
                if path and path[-1] == '/':
                    path = path[:-1]
                url = '%s%s' % ('.'.join(netloc), path)
                post['normalized_url'] = url

                long_url = post['url']
                if long_url.find('goo.gl') > -1:
                    long_url = google.expand_url(post['url'])
                if long_url.find('bit.ly') > -1 or long_url.find('bitly.com') > -1:
                    long_url = bitly.expand_url(post['url'].replace('http://bitly.com','').replace('http://bit.ly',''))
                post['domain'] = urlparse(long_url).netloc

            ok_to_post = True
            dups = postsdb.get_posts_by_normalized_url(post.get('normalized_url', ""), 1)
            if post['url'] != '' and len(dups) > 0 and bypass_dup_check != "true":
                ##
                ## If there are dupes, kick them back to the post add form
                ##
                return (self.render('post/new_post.html', post=post, dups=dups))

            # Handle tags
            post['tags'] = [t.strip().lower() for t in post['tags']]
            post['tags'] = [t for t in post['tags'] if t]
            userdb.add_tags_to_user(self.current_user, post['tags'])
            for tag in post['tags']:
                tagsdb.save_tag(tag)

            # format the content as needed
            post['body_html'] = sanitize.html_sanitize(post['body_raw'], media=self.current_user_can('post_rich_media'))
            post['body_text'] = sanitize.html_to_text(post['body_html'])
            post['body_truncated'] = sanitize.truncate(post['body_text'], 500)

            # determine if this should be a featured post or not
            if self.current_user_can('feature_posts') and post['featured'] != '':
                post['featured'] = True
                post['date_featured'] = datetime.datetime.now()
            else:
                post['featured'] = False
                post['date_featured'] = None

            user = userdb.get_user_by_screen_name(self.current_user)

            if not post['slug']:
                # No slug -- this is a new post.
                # initiate fields that are new
                post['disqus_shortname'] = settings.get('disqus_short_code')
                post['muted'] = False
                post['comment_count'] = 0
                post['disqus_thread_id_str'] = ''
                post['sort_score'] = 0.0
                post['downvotes'] = 0
                post['hackpad_url'] = ''
                post['date_created'] = datetime.datetime.now()
                post['user_id_str'] = user['user']['id_str']
                post['username'] = self.current_user
                post['user'] = user['user']
                post['votes'] = 1
                post['voted_users'] = [user['user']]
                #save it
                post['slug'] = postsdb.insert_post(post)
                msg = 'success'
            else:
                # this is an existing post.
                # attempt to edit the post (make sure they are the author)
                saved_post = postsdb.get_post_by_slug(post['slug'])
                if saved_post and self.current_user == saved_post['user']['screen_name']:
                    # looks good - let's update the saved_post values to new values
                    for key in post.keys():
                        saved_post[key] = post[key]
                    # finally let's save the updates
                    postsdb.save_post(saved_post)
                    msg = 'success'

            # log any @ mentions in the post
            mentions = re.findall(r'@([^\s]+)', post['body_raw'])
            for mention in mentions:
                mentionsdb.add_mention(mention.lower(), post['slug'])

        # Send email to USVers if OP is staff
        if self.current_user in settings.get('staff'):
            subject = 'USV.com: %s posted "%s"' % (self.current_user, post['title'])
            if 'url' in post and post['url']: # post.url is the link to external content (if any)
                post_link = 'External Link: %s \n\n' % post['url']
            else:
                post_link = ''
            post_url = "http://%s/posts/%s" % (settings.get('base_url'), post['slug'])
            text = '"%s" ( %s ) posted by %s. \n\n %s %s' % (post['title'].encode('ascii', errors='ignore'), post_url, self.current_user, post_link, post.get('body_text', ""))
            # now attempt to actually send the emails
            for u in settings.get('staff'):
                if u != self.current_user:
                    acc = userdb.get_user_by_screen_name(u)
                    if acc:
                        self.send_email('[email protected]', acc['email_address'], subject, text)

        # Subscribe to Disqus
        # Attempt to create the post's thread
        acc = userdb.get_user_by_screen_name(self.current_user)
        thread_id = 0
        try:
            # Attempt to create the thread.
            thread_details = disqus.create_thread(post, acc['disqus']['access_token'])
            thread_id = thread_details['response']['id']
        except:
            try:
                # trouble creating the thread, try to just get the thread via the slug
                thread_details = disqus.get_thread_details(post)
                thread_id = thread_details['response']['id']
            except:
                thread_id = 0
        if thread_id != 0:
            # Subscribe a user to the thread specified in response
            disqus.subscribe_to_thread(thread_id, acc['disqus']['access_token'])
            # update the thread with the disqus_thread_id_str
            saved_post = postsdb.get_post_by_slug(post['slug'])
            saved_post['disqus_thread_id_str'] = thread_id
            postsdb.save_post(saved_post)

        if is_edit:
            self.redirect('/posts/%s?msg=updated' % post['slug'])
        else:
            self.redirect('/?msg=success&slug=%s' % post['slug'])
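
The handler above stamps date_deleted, date_featured and date_created with datetime.datetime.now() at the moment each state change happens. A reduced sketch of that bookkeeping (the dictionary keys mirror the example; nothing else from the handler is implied):

import datetime

post = {'title': 'hello world', 'featured': True, 'deleted': False}

if post['featured']:
    post['date_featured'] = datetime.datetime.now()
else:
    post['date_featured'] = None

if post['deleted']:
    post['date_deleted'] = datetime.datetime.now()

# a new post gets its creation time once, when it is first saved
post.setdefault('date_created', datetime.datetime.now())
print(post['date_created'], post['date_featured'])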

Example 6

Project: sony_camera_api
Source File: pyLiveView.py
   def run(self):
      global options, grabber, decoder, display, image_copy

      if options.debug:
         print "using LiveView grabber"
   
      self.active = False
      self.photomode = False

      # grabber control signals
      self.event_start_stream = threading.Event()
      self.event_stop_stream = threading.Event()
      self.event_stopped_stream = threading.Event()
      self.event_terminate = threading.Event()
      self.event_terminated = threading.Event()

      # decoder control signals
      self.event_decoding = threading.Event()
      self.event_decoder_terminated = threading.Event()

      # display control signals
      self.lock_offscreen = threading.Semaphore()

      # export to other threads
      self.frame_count = 0
      grabber = self

      # Search for available camera
      if options.debug:
         print "searching for camera"

      search = ControlPoint()
      cameras =  search.discover(1)

      if len(cameras):
         camera = SonyAPI(QX_ADDR=cameras[0])
      else:
         print "No camera found, aborting"
         return

      # Check if we need to do 'startRecMode'
      mode = camera.getAvailableApiList()

      # Need a better method to check for the presence of a camera
      if type(mode) != dict:
         print "No camera found, aborting"
         display.terminate_clicked()
         self.event_terminated.set()
         return

      # For those cameras which need it
      if 'startRecMode' in (mode['result'])[0]:
         camera.startRecMode()
         time.sleep(5)

         # and re-read capabilities
         mode = camera.getAvailableApiList()

      if options.debug:
         print "Versions:", camera.getVersions()
         print "API List:", mode

      if 'setLiveviewFrameInfo' in (mode['result'])[0]:
         if options.info:
            camera.setLiveviewFrameInfo([{"frameInfo": True}])
         else:
            camera.setLiveviewFrameInfo([{"frameInfo": False}])

      if 'getAvailableLiveviewSize' in (mode['result'])[0]:
         if options.large and len((camera.getAvailableLiveviewSize()['result'])[0]) > 1:
            incoming = camera.liveview(["L"])
         else:
            incoming = camera.liveview()
      else:
         incoming = camera.liveview()

      incoming_image = None
      frame_sequence = None
      frame_info = None
      frame_data = None

      # Ensure that we're in correct mode (movie by default)
      mode = camera.getAvailableShootMode()
      if type(mode) == dict:
         if options.still:
            if (mode['result'])[0] != 'still':
               if 'still' in (mode['result'])[1]:
                  camera.setShootMode(["still"])
                  self.photomode = True
            else:
               self.photomode = True
         else:
            if (mode['result'])[0] != 'movie':
               if 'movie' in (mode['result'])[1]:
                  camera.setShootMode(["movie"])
               else:
                  self.photomode = True

      while not self.event_terminate.isSet():
         # Handle events from the camera (record start/stop)
         if self.frame_count % 50 == 0:
            mode = camera.getEvent(["false"])
         else:
            mode = None

         if mode and type(mode) == dict:
            status = mode['result'][1]
            if self.active == False and status['cameraStatus'] == 'MovieRecording':
               self.frame_count = 0
               self.start_time = datetime.datetime.now()
               self.active = True
               if options.debug:
                  print "started capture", self.start_time
            elif self.active == True and status['cameraStatus'] == 'IDLE':
               self.active = False
               self.end_time = datetime.datetime.now()
               if options.debug:
                  elapsed = self.end_time - self.start_time
                  print "Stopped capture: frames = ", self.frame_count,
                  print ", delta = ", elapsed.seconds + (float(elapsed.microseconds) / 1000000),
                  print ", fps = ", self.frame_count / (elapsed.seconds + (float(elapsed.microseconds) / 1000000))

         # read next image
         data = incoming.read(8)
         common = common_header(data)
         data = incoming.read(128)

         if common['payload_type']==1:
            payload = payload_header(data)
            image_file = io.BytesIO(incoming.read(payload['jpeg_data_size']))
            incoming_image = Image.open(image_file)
            incoming.read(payload['padding_size'])
         elif common['payload_type']==2:
            frame_info = payload_header(data, 2)
            if frame_info['jpeg_data_size']:
               frame_sequence = common['sequence_number']
               frame_data =  incoming.read(frame_info['jpeg_data_size'])
               incoming.read(frame_info['padding_size'])

         if options.gui == True :
            # Correct display size if changed
            if incoming_image and ((incoming_image.size)[0] != display.width):
               if options.debug:
                  print "adjusted width from", display.width, "to", (incoming_image.size)[0]
               display.width = (incoming_image.size)[0]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            if incoming_image and ((incoming_image.size)[1] != display.height):
               if options.debug:
                  print "adjusted height from", display.height, "to", (incoming_image.size)[1]
               display.height = (incoming_image.size)[1]
               display.offscreen = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8,
                  display.width, display.height)

            # copy image to the display
            if incoming_image:
               image_copy = incoming_image.convert("RGB")

               # only recent frame info to image
               if frame_info and frame_sequence >= common['sequence_number']-1 \
                     and frame_info['jpeg_data_size']:
                  for x in range(frame_info['frame_count']):
                     x = x * frame_info['frame_size']
                     (left, top, right, bottom) = struct.unpack(">HHHH", frame_data[x:x+8])
                     left = left * display.width / 10000
                     top = top * display.height / 10000
                     right = right * display.width / 10000
                     bottom = bottom * display.height / 10000

                     dr = ImageDraw.Draw(image_copy)
                     dr.line((left, top, left, bottom), fill="white", width=3)
                     dr.line((right, top, right, bottom), fill="white", width=3)
                     dr.line((left, top, right, top), fill="white", width=3)
                     dr.line((left, bottom, right, bottom), fill="white", width=3)

               display.copy_to_offscreen(image_copy)

         if options.debug:
            print "Frame:", common['sequence_number'], common['time_stemp'], datetime.datetime.now()

         # count frames
         self.frame_count = self.frame_count + 1

         # handle events
         if self.event_start_stream.isSet():
            if self.photomode == True:
               camera.actTakePicture()
            else:
               camera.startMovieRec()
            self.event_start_stream.clear()

         if self.event_stop_stream.isSet():
            camera.stopMovieRec()
            self.event_stop_stream.clear()

         # give OS a breather
         #time.sleep(0.01)

      # declare that we're done...
      self.event_terminated.set()
      self.event_terminate.clear()
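
The frame-rate calculation above keeps a start_time and an end_time from datetime.datetime.now() and divides the frame count by elapsed.seconds + elapsed.microseconds / 1000000, which for intervals shorter than a day is the same value total_seconds() returns. A minimal sketch:

import datetime
import time

start_time = datetime.datetime.now()
frame_count = 0
for _ in range(5):              # pretend five frames were grabbed
    time.sleep(0.01)
    frame_count += 1
end_time = datetime.datetime.now()

elapsed = end_time - start_time
seconds = elapsed.seconds + float(elapsed.microseconds) / 1000000   # equals elapsed.total_seconds() here
print('fps =', frame_count / seconds)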

Example 7

    def forwards(self, orm):
        rephist = dict([(t, []) for t in range(-8, 6) if t != 0])

        r_count = orm.Repute.objects.count()
        print "\nCalculating rep gain/losses history through %d records:" % r_count
        progress = ProgressBar(r_count)

        for r in orm.Repute.objects.all():
            l = rephist.get(r.reputation_type, None)
            if l is None: continue

            if (len(l) == 0) or (l[-1][1] != r.value):
                l.append((r.reputed_at, r.value))

            progress.update()

        print "\n...done\n"


        def repval_at(reptype, repdate, default):
            l = rephist.get(reptype, None)

            if l is None: return 0
            if len(l) == 0: return default

            for r in l:
                if r[0] <= repdate:
                    return r[1] or default


        q_count = orm.Question.objects.count()
        print "\nConverting %d questions:" % q_count
        progress = ProgressBar(q_count)

        for q in orm.Question.objects.all():
            n = q.node_ptr
            n.last_activity_at = q.last_activity_at
            n.last_activity_by = q.last_activity_by

            if q.accepted_answer:
                n.extra_ref = q.accepted_answer.node_ptr
                
            n.extra_count = q.view_count

            n.marked = q.closed
            n.wiki = q.wiki

            n.save()

            ask = orm.Action(
                user = n.author,
                action_date = n.added_at,
                node = n,
                action_type = "ask",
                extra = ''
            )

            ask.save()

            if n.deleted:
                action = orm.Action(
                    user = n.deleted_by,
                    node = n,
                    action_type = "delete",
                    action_date = n.deleted_at or datetime.datetime.now(),
                    extra = ''
                )

                action.save()


            if n.marked:
                action = orm.Action(
                    user = q.closed_by,
                    node = n,
                    action_type = "close",
                    extra = q.close_reason,
                    action_date = q.closed_at or datetime.datetime.now(),
                )

                action.save()

            if n.wiki:
                action = orm.Action(
                    user = n.author,
                    node = n,
                    action_type = "wikify",
                    action_date = q.wikified_at or datetime.datetime.now(),
                    extra = ''
                )

                action.save()

            progress.update()

        print "\n...done\n"

        a_count = orm.Answer.objects.count()
        print "\nConverting %d answers:" % a_count
        progress = ProgressBar(a_count)

        for a in orm.Answer.objects.all():
            n = a.node_ptr

            n.marked = a.accepted
            n.wiki = a.wiki

            n.save()

            ans = orm.Action(
                user = n.author,
                action_date = n.added_at,
                node = n,
                action_type = "answer",
                extra = ''
            )

            ans.save()

            if n.deleted:
                action = orm.Action(
                    user = n.deleted_by,
                    node = n,
                    action_type = "delete",
                    action_date = n.deleted_at or datetime.datetime.now(),
                    extra = ''
                )

                action.save()

            if a.accepted:
                action = orm.Action(
                    user = a.accepted_by,
                    node = n,
                    action_type = "acceptanswer",
                    action_date = a.accepted_at or datetime.datetime.now(),
                    extra = ''
                )

                action.save()

                if not a.wiki or a.wikified_at > action.action_date:
                    if action.user == n.author:
                        rep = orm.ActionRepute(
                            action = action,
                            user = action.user,
                            value = repval_at(GAIN_BY_ACCEPTING_ANSWER, action.action_date, 2)
                        )
                        rep.save()

                    if n.author != n.parent.author:
                        rep = orm.ActionRepute(
                            action = action,
                            user = n.author,
                            value = repval_at(GAIN_BY_ANSWER_ACCEPTED, action.action_date, 15)
                        )
                        rep.save()

            if n.wiki:
                action = orm.Action(
                    user = n.author,
                    node = n,
                    action_type = "wikify",
                    action_date = a.wikified_at or datetime.datetime.now(),
                    extra = ''
                )

                action.save()

            progress.update()

        print "\n...done\n"

        v_count = orm.Vote.objects.count()
        print "\nConverting %d votes:" % v_count
        progress = ProgressBar(v_count)

        for v in orm.Vote.objects.exclude(canceled=True):
            a = orm.Action(
                action_type = (v.vote == 1) and ((v.node.node_type == "comment") and "voteupcomment" or "voteup") or "votedown",
                user = v.user,
                node = v.node,
                action_date = v.voted_at,
                canceled = v.canceled,
                extra = ''
            )

            a.save()

            def impl(node):
                if node.node_type == "question":
                    return orm.Question.objects.get(node_ptr=node)
                else:
                    return orm.Answer.objects.get(node_ptr=node)

            if a.node.node_type in ("question", "answer") and (not a.node.wiki or impl(a.node).wikified_at > a.action_date):
                reptype, default = (v.vote == 1) and (GAIN_BY_UPVOTED, 10) or (LOST_BY_DOWNVOTED, 2)
                rep = orm.ActionRepute(
                    action = a,
                    user = a.node.author,
                    value = repval_at(reptype, a.action_date, default) or default
                )
                rep.save()

                if v.vote == -1:
                    rep = orm.ActionRepute(
                        action = a,
                        user = a.node.author,
                        value = repval_at(LOST_BY_DOWNVOTING, a.action_date, 1) or default
                    )
                    rep.save()

            progress.update()

        print "\n...done\n"

        f_count = orm.FlaggedItem.objects.count()
        print "\nConverting %d flags:" % f_count
        progress = ProgressBar(f_count)

        for f in orm.FlaggedItem.objects.all():
            a = orm.Action(
                action_type = "flag",
                user = f.user,
                node = f.node,
                action_date = f.flagged_at,
                extra = f.reason or ''
            )

            a.save()

            rep = orm.ActionRepute(
                action = a,
                user = a.node.author,
                value = repval_at(LOST_BY_FLAGGED, a.action_date, 2) or 2
            )
            rep.save()

            progress.update()

        print "\n...done\n"

        n_count = orm.Node.objects.all().count()
        print "\nChecking flag count of %d nodes:" % n_count
        progress = ProgressBar(n_count)

        for n in orm.Node.objects.all():
            flags = list(orm.Action.objects.filter(action_type="flag", node=n, canceled=False).order_by('-action_date'))

            if len(flags) >= 3:
                a = flags[2]
                rep = orm.ActionRepute(
                    action = a,
                    user = n.author,
                    value = repval_at(LOST_BY_FLAGGED_3_TIMES, a.action_date, 30)
                )
                rep.save()


            if len(flags) >= 5:
                a = flags[4]
                rep = orm.ActionRepute(
                    action = a,
                    user = n.author,
                    value = repval_at(LOST_BY_FLAGGED_5_TIMES, a.action_date, 100)
                )
                rep.save()

            progress.update()

        print "\n...done\n"

        c_count = orm.Node.objects.filter(node_type="comment").count()
        print "\nCreating %d comment actions:" % c_count
        progress = ProgressBar(c_count)

        for c in orm.Node.objects.filter(node_type="comment").all():
            a = orm.Action(
                action_type = "comment",
                user = c.author,
                node = c,
                action_date = c.added_at,
                extra = ''
            )

            a.save()

            if c.deleted:
                action = orm.Action(
                    user = c.deleted_by,
                    node = c,
                    action_type = "delete",
                    action_date = c.deleted_at or datetime.datetime.now(),
                    extra = ''
                )

                action.save()

            progress.update()

        print "\n...done\n"


        r_count = orm.NodeRevision.objects.exclude(revision=1).count()
        print "\nCreating %d edit actions:" % r_count
        progress = ProgressBar(r_count)

        for r in orm.NodeRevision.objects.exclude(revision=1):
            a = orm.Action(
                action_type = "revise",
                user = r.author,
                node = r.node,
                action_date = r.revised_at,
                extra = r.revision
            )

            a.save()
            progress.update()

        print "\n...done\n"

        u_count = orm.User.objects.all().count()
        print "\nCreating %d user join actions and reputation recalculation:" % u_count
        progress = ProgressBar(u_count)

        for u in orm.User.objects.all():
            a = orm.Action(
                user = u,
                action_date = u.date_joined,
                action_type = "userjoins",
            )

            a.save()

            rep = orm.ActionRepute(
                action = a,
                user = u,
                value = 1
            )
            rep.save()

            new_rep = orm.ActionRepute.objects.filter(user=u).aggregate(reputation=models.Sum('value'))['reputation']

            if new_rep < 0:
                new_rep = 1

            u.reputation = new_rep
            u.save()

            progress.update()

        print "\n...done\n"

Example 8

Project: crunchy-xml-decoder
Source File: manga.py
View license
def login():
    # Load Persistent Vars
    global userdata
    try:
        change_language = "0"
        userdata['username'] = 'username'
        userdata['password'] = 'password'

        if change_language == "0":
            userdata.setdefault('API_LOCALE', "enUS")
        elif change_language == "1":
            userdata['API_LOCALE'] = "enUS"
        elif change_language == "2":
            userdata['API_LOCALE'] = "enGB"
        elif change_language == "3":
            userdata['API_LOCALE'] = "jaJP"
        elif change_language == "4":
            userdata['API_LOCALE'] = "frFR"
        elif change_language == "5":
            userdata['API_LOCALE'] = "deDE"
        elif change_language == "6":
            userdata['API_LOCALE'] = "ptBR"
        elif change_language == "7":
            userdata['API_LOCALE'] = "ptPT"
        elif change_language == "8":
            userdata['API_LOCALE'] = "esLA"
        elif change_language == "9":
            userdata['API_LOCALE'] = "esES"

        if not 'device_id' in userdata:
            char_set = string.ascii_letters + string.digits
            device_id = ''.join(random.sample(char_set, 15))
            userdata["device_id"] = device_id
            print 'Crunchyroll ----> New device_id created. New device_id is: ' + str(device_id)
        userdata['API_URL'] = 'http://api-manga.crunchyroll.com'
        userdata['API_HEADERS'] = {'User-Agent': 'Manga/2.1.2.2 (iPod touch; iOS 6.1.6; Scale/2.00)',
                                   'Host': 'api-manga.crunchyroll.com', 'Accept-Encoding': 'gzip, deflate',
                                   'Content-Type': 'application/x-www-form-urlencoded', 'Connection': 'keep-alive'}
        ### Android ###
        # userdata['API_VERSION'] = "1.0"
        # userdata['API_ACCESS_TOKEN'] = 'FLpcfZH4CbW4muO'  # formerly '1M8BbXptBS4VhMP'
        # userdata['API_DEVICE_TYPE'] = 'com.crunchyroll.manga.android'  # formerly 'com.crunchyroll.manga.crunchyroid'

        ### Flash ###
        # userdata['API_VERSION'] = "1"
        # userdata['API_ACCESS_TOKEN'] # none, refactor code for this
        # userdata['API_DEVICE_TYPE'] = 'com.crunchyroll.manga.flash'

        ### iOS ###
        userdata['API_VERSION'] = "1.0"
        userdata['API_ACCESS_TOKEN'] = 'Ge9rurkgXzzmzZQ'
        userdata['API_DEVICE_TYPE'] = 'com.crunchyroll.manga.iphone'

        userdata.setdefault('premium_type', 'UNKNOWN')
        current_datetime = datetime.datetime.now(dateutil.tz.tzutc())

    except:
        current_datetime = datetime.datetime.now(dateutil.tz.tzutc())
        print "Unexpected error:", sys.exc_info()
        userdata['session_id'] = ''
        userdata['auth_expires'] = current_datetime - dateutil.relativedelta.relativedelta(hours=+24)
        userdata['premium_type'] = 'unknown'
        userdata['auth_token'] = ''
        userdata['session_expires'] = current_datetime - dateutil.relativedelta.relativedelta(hours=+24)
        print "Crunchyroll Catch"
        return False

    # Create unique device_id or receive the existing device_id
    try:
        # userdata['username'] = __settings__.getSetting("crunchy_username")
        # userdata['password'] = __settings__.getSetting("crunchy_password")
        if not 'device_id' in userdata:
            char_set = string.ascii_letters + string.digits
            device_id = ''.join(random.sample(char_set, 15))
            userdata["device_id"] = device_id
            print "Crunchyroll ----> New device_id created. New device_id is: " + str(device_id)
        userdata.setdefault('premium_type', 'UNKNOWN')
        current_datetime = datetime.datetime.now(dateutil.tz.tzutc())
    except:
        print "Unexpected error:", sys.exc_info()
        userdata['session_id'] = ''
        userdata['auth_expires'] = current_datetime - dateutil.relativedelta.relativedelta(hours=+24)
        userdata['premium_type'] = 'unknown'
        userdata['auth_token'] = ''
        userdata['session_expires'] = current_datetime - dateutil.relativedelta.relativedelta(hours=+24)
        print "Crunchyroll Catch"
        return False

    # Check to see if a session_id doesn't exist or if the current auth token is invalid
    # and if so start a new session and log it in.
    if (not 'session_id' in userdata) or (not 'auth_expires' in userdata)\
            or current_datetime > userdata['auth_expires']:
        # Start new session
        print "Crunchyroll ----> Starting new session"
        request = makeapi('cr_start_session', {'device_id': userdata["device_id"],
                                               'access_token': userdata['API_ACCESS_TOKEN']})
        # print request
        if request['error'] is False:
            # userdata['session_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
            userdata['session_id'] = request['data']['session_id']
            userdata['session_expires'] = (current_datetime + dateutil.relativedelta.relativedelta(hours=+4))
            userdata['test_session'] = current_datetime
            print "Crunchyroll ----> New session created! Session ID is: " + str(userdata['session_id'])
        elif request['error'] is True:
            print "Crunchyroll ----> Error starting new session. Error message is: " + str(request['message'])
            return False
        # Login the session we just started.
        if not userdata['username'] or not userdata['password']:
            print "Crunchyroll ----> No Username or Password set"
            print "Crunchyroll ----> NO CRUNCHYROLL ACCOUNT FOUND!"
            return False
        else:
            print "Crunchyroll ----> Logging in the new session we just created."
            char_set = string.ascii_letters + string.digits
            hash_id = ''.join(random.sample(char_set, 40))
            userdata["hash_id"] = hash_id
            request = makeapi('cr_login', {'session_id': userdata['session_id'],
                                           'password': userdata['password'],
                                           'account': userdata['username'],
                                           'hash_id': userdata["hash_id"]})
            # print request
            if request['error'] is False:
                userdata['auth_token'] = request['data']['auth']
                userdata['auth_expires'] = dateutil.parser.parse(request['data']['expires'])
                userdata['user_id'] = request['data']['user']['user_id']
                userdata['premium_type'] = 'free'\
                    if not request['data']['user']['premium'] else request['data']['user']['premium']
                print "Crunchyroll ----> Login successful."
            elif request['error'] is True:
                print "Crunchyroll ----> Error logging in new session. Error message was: " + str(request['message'])
                return False
        # Verify user is premium
        if userdata['premium_type'] in 'anime|drama|manga':
            print "Crunchyroll ----> User is a premium "+str(userdata['premium_type'])+" member."
            return True
        else:
            print "Crunchyroll ----> User is not premium. "
            return True

    # Check to see if a valid session and auth token exist and if so reinitialize a new session using the auth token.
    elif "session_id" in userdata and "auth_expires" in userdata\
            and userdata['auth_expires'] > current_datetime > userdata['session_expires']:

        # Restart new session
        print "Crunchyroll ----> Valid auth token was detected. Restarting session."
        request = makeapi('cr_start_session', {'device_id': userdata["device_id"],
                                               'access_token': userdata['API_ACCESS_TOKEN'],
                                               'auth': userdata['auth_token']})
        try:
            if request['error'] is False:
                # userdata['session_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
                userdata['session_id'] = request['data']['session_id']
                userdata['auth_expires'] = dateutil.parser.parse(request['data']['expires'])
                userdata['premium_type'] = 'free'\
                    if not request['data']['user']['premium'] else request['data']['user']['premium']
                userdata['auth_token'] = request['data']['auth']
                # 4 hours is a guess. Might be +/- 4.
                userdata['session_expires'] = (current_datetime + dateutil.relativedelta.relativedelta(hours=+4))
                userdata['test_session'] = current_datetime
                print "Crunchyroll ----> Session restart successful. New session_id is: " + str(userdata['session_id'])

                # Verify user is premium
                if userdata['premium_type'] in 'anime|drama|manga':
                    print "Crunchyroll ----> User is a premium "+str(userdata['premium_type'])+" member."
                    return True
                else:
                    print "Crunchyroll ----> User is not premium."
                    return True

            elif request['error'] is True:
                # Remove userdata so we start a new session next time around.
                del userdata['session_id']
                del userdata['auth_expires']
                del userdata['premium_type']
                del userdata['auth_token']
                del userdata['session_expires']
                print "Crunchyroll ----> Error restarting session. Error message was: " + str(request['message'])
                userdata.Save()
                return False
        except:
            userdata['session_id'] = ''
            userdata['auth_expires'] = current_datetime - dateutil.relativedelta.relativedelta(hours=+24)
            userdata['premium_type'] = 'unknown'
            userdata['auth_token'] = ''
            userdata['session_expires'] = current_datetime - dateutil.relativedelta.relativedelta(hours=+24)
            print "Crunchyroll ----> Error restarting session. Error message was: " + str(request['message'])
            userdata.Save()
            return False

    # If we got to this point that means a session exists and it's still valid, we don't need to do anything.
    elif "session_id" in userdata and current_datetime < userdata['session_expires']:
        # This section below is Stupid Slow
        # return True
        if userdata['test_session'] is None or current_datetime > userdata['test_session']:
            # Test once every 10 min
            userdata['test_session'] = (current_datetime + dateutil.relativedelta.relativedelta(minutes=+10))
            print "Crunchyroll ----> A valid session was detected. Using existing session_id of: "\
                  + str(userdata['session_id'])

    # This is here as a catch all in case something gets messed up along the way.
    # Remove userdata variables so we start a new session next time around.
    else:
        del userdata['session_id']
        del userdata['auth_expires']
        del userdata['premium_type']
        del userdata['auth_token']
        del userdata['session_expires']
        print "Crunchyroll ----> Something in the login process went wrong."
        return False
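
The session logic above works with timezone-aware timestamps: `datetime.datetime.now(dateutil.tz.tzutc())` returns an aware UTC datetime that is compared against stored expiry times and shifted with `relativedelta`. A reduced sketch of that expiry check, assuming python-dateutil is installed; the field names and the four-hour lifetime mirror the snippet, but the dict itself is illustrative:

import datetime
import dateutil.tz
from dateutil.relativedelta import relativedelta

now = datetime.datetime.now(dateutil.tz.tzutc())         # aware UTC timestamp

# Illustrative stand-in for the persisted userdata dict.
userdata = {
    'session_expires': now + relativedelta(hours=+4),    # lifetime is a guess in the original too
    'auth_expires': now + relativedelta(hours=+24),
}

def session_is_valid(data):
    current = datetime.datetime.now(dateutil.tz.tzutc())
    # Both sides are timezone-aware, so the comparison is well defined.
    return current < data['session_expires']

print(session_is_valid(userdata))   # True until the four hours elapse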

Example 9

Project: ReadableWebProxy
Source File: Engine.py
View license
	def upsertResponseLinks(self, job, plain=[], resource=[]):
		self.log.info("Updating database with response links")
		plain    = set(plain)
		resource = set(resource)

		unfiltered = len(plain)+len(resource)

		badwords = self.getBadWords(job)

		plain    = self.filterContentLinks(job,  plain,    badwords)
		resource = self.filterResourceLinks(job, resource, badwords)

		filtered = len(plain)+len(resource)
		self.log.info("Upserting %s links (%s filtered)" % (filtered, unfiltered))

		items = []
		[items.append((link, True))  for link in plain]
		[items.append((link, False)) for link in resource]

		self.log.info("Page had %s unfiltered content links, %s unfiltered resource links.", len(plain), len(resource))



		new_starturl = job.starturl,
		new_distance = job.distance+1
		new_priority = job.priority
		new_type     = job.type

		raw_cur = self.db_sess.connection().connection.cursor()
		full_printer = 100
		if self.resp_q != None:
			for link, istext in items:
				start = urllib.parse.urlsplit(link).netloc

				assert link.startswith("http")
				assert start
				new = {
						'url'             : link,
						'starturl'        : new_starturl,
						'netloc'          : start,
						'distance'        : new_distance,
						'is_text'         : istext,
						'priority'        : new_priority,
						'type'            : new_type,
						'state'           : "new",
						'addtime'         : datetime.datetime.now(),

						# Don't retrigger unless the ignore time has elaped.
						'ignoreuntiltime' : datetime.datetime.now(),
					}
				self.resp_q.put(("new_link", new))

				while self.resp_q.qsize() > 1000:
					time.sleep(0.1)
					full_printer -= 1
					if full_printer <= 0:
						self.log.error("NewLinkQueue seems to have too many URLs in it (%s). Sleeping until it drains.", self.resp_q.qsize())
						full_printer = 25

			self.log.info("Links upserted. Items in processing queue: %s", self.resp_q.qsize())
		else:

			#  Fucking huzzah for ON CONFLICT!
			cmd = """
					INSERT INTO
						web_pages
						(url, starturl, netloc, distance, is_text, priority, type, addtime, state)
					VALUES
						(%(url)s, %(starturl)s, %(netloc)s, %(distance)s, %(is_text)s, %(priority)s, %(type)s, %(addtime)s, %(state)s)
					ON CONFLICT (url) DO
						UPDATE
							SET
								state           = EXCLUDED.state,
								starturl        = EXCLUDED.starturl,
								netloc          = EXCLUDED.netloc,
								is_text         = EXCLUDED.is_text,
								distance        = LEAST(EXCLUDED.distance, web_pages.distance),
								priority        = GREATEST(EXCLUDED.priority, web_pages.priority),
								addtime         = LEAST(EXCLUDED.addtime, web_pages.addtime)
							WHERE
							(
									web_pages.ignoreuntiltime < %(ignoreuntiltime)s
								AND
									web_pages.url = EXCLUDED.url
								AND
									(web_pages.state = 'complete' OR web_pages.state = 'error')
							)
						;
					""".replace("	", " ").replace("\n", " ")

			# cmd = text("""
			# 		INSERT INTO
			# 			web_pages
			# 			(url, starturl, netloc, distance, is_text, priority, type, addtime, state)
			# 		VALUES
			# 			(:url, :starturl, :netloc, :distance, :is_text, :priority, :type, :addtime, :state)
			# 		ON CONFLICT DO NOTHING
			# 			;
			# 		""".replace("	", " ").replace("\n", " "))

			# Only commit per-URL if we're tried to do the update in batch, and failed.
			commit_each = False
			for link, istext in items:
				while 1:
					try:
						start = urllib.parse.urlsplit(link).netloc

						assert link.startswith("http")
						assert start


						# Forward-data the next walk, time, rather then using now-value for the thresh.
						data = {
							'url'             : link,
							'starturl'        : new_starturl,
							'netloc'          : start,
							'distance'        : new_distance,
							'is_text'         : istext,
							'priority'        : new_priority,
							'type'            : new_type,
							'state'           : "new",
							'addtime'         : datetime.datetime.now(),

							# Don't retrigger unless the ignore time has elaped.
							'ignoreuntiltime' : datetime.datetime.now(),
							}
						raw_cur.execute(cmd, data)
						if commit_each:
							raw_cur.execute("COMMIT;")
						break
					except psycopg2.Error:
						if commit_each is False:
							self.log.warn("psycopg2.Error - Retrying with commit each.")
						else:
							self.log.warn("psycopg2.Error - Retrying.")
							traceback.print_exc()

						raw_cur.execute("ROLLBACK;")
						commit_each = True

			raw_cur.execute("COMMIT;")

Example 10

Project: freeipa
Source File: migration.py
View license
    def migrate(self, ldap, config, ds_ldap, ds_base_dn, options):
        """
        Migrate objects from DS to LDAP.
        """
        assert isinstance(ds_base_dn, DN)
        migrated = {} # {'OBJ': ['PKEY1', 'PKEY2', ...], ...}
        failed = {} # {'OBJ': {'PKEY1': 'Failed 'cos blabla', ...}, ...}
        search_bases = self._get_search_bases(options, ds_base_dn, self.migrate_order)
        migration_start = datetime.datetime.now()

        scope = _supported_scopes[options.get('scope')]

        for ldap_obj_name in self.migrate_order:
            ldap_obj = self.api.Object[ldap_obj_name]

            template = self.migrate_objects[ldap_obj_name]['filter_template']
            oc_list = options[to_cli(self.migrate_objects[ldap_obj_name]['oc_option'])]
            search_filter = construct_filter(template, oc_list)

            exclude = options['exclude_%ss' % to_cli(ldap_obj_name)]
            context = dict(ds_ldap = ds_ldap)

            migrated[ldap_obj_name] = []
            failed[ldap_obj_name] = {}

            try:
                entries, truncated = ds_ldap.find_entries(
                    search_filter, ['*'], search_bases[ldap_obj_name],
                    scope,
                    time_limit=0, size_limit=-1,
                    search_refs=True    # migrated DS may contain search references
                )
            except errors.NotFound:
                if not options.get('continue',False):
                    raise errors.NotFound(
                        reason=_('%(container)s LDAP search did not return any result '
                                 '(search base: %(search_base)s, '
                                 'objectclass: %(objectclass)s)')
                                 % {'container': ldap_obj_name,
                                    'search_base': search_bases[ldap_obj_name],
                                    'objectclass': ', '.join(oc_list)}
                    )
                else:
                    truncated = False
                    entries = []
            if truncated:
                self.log.error(
                    '%s: %s' % (
                        ldap_obj.name, self.truncated_err_msg
                    )
                )

            blacklists = {}
            for blacklist in ('oc_blacklist', 'attr_blacklist'):
                blacklist_option = self.migrate_objects[ldap_obj_name][blacklist+'_option']
                if blacklist_option is not None:
                    blacklists[blacklist] = options.get(blacklist_option, tuple())
                else:
                    blacklists[blacklist] = tuple()

            # get default primary group for new users
            if 'def_group_dn' not in context and options.get('use_def_group'):
                def_group = config.get('ipadefaultprimarygroup')
                context['def_group_dn'] = api.Object.group.get_dn(def_group)
                try:
                    ldap.get_entry(context['def_group_dn'], ['gidnumber', 'cn'])
                except errors.NotFound:
                    error_msg = _('Default group for new users not found')
                    raise errors.NotFound(reason=error_msg)

            context['has_upg'] = ldap.has_upg()

            valid_gids = set()
            invalid_gids = set()
            migrate_cnt = 0
            context['migrate_cnt'] = 0
            for entry_attrs in entries:
                context['migrate_cnt'] = migrate_cnt
                s = datetime.datetime.now()

                ava = entry_attrs.dn[0][0]
                if ava.attr == ldap_obj.primary_key.name:
                    # In case if pkey attribute is in the migrated object DN
                    # and the original LDAP is multivalued, make sure that
                    # we pick the correct value (the unique one stored in DN)
                    pkey = ava.value.lower()
                else:
                    pkey = entry_attrs[ldap_obj.primary_key.name][0].lower()

                if pkey in exclude:
                    continue

                entry_attrs.dn = ldap_obj.get_dn(pkey)
                entry_attrs['objectclass'] = list(
                    set(
                        config.get(
                            ldap_obj.object_class_config, ldap_obj.object_class
                        ) + [o.lower() for o in entry_attrs['objectclass']]
                    )
                )
                entry_attrs[ldap_obj.primary_key.name][0] = entry_attrs[ldap_obj.primary_key.name][0].lower()

                callback = self.migrate_objects[ldap_obj_name]['pre_callback']
                if callable(callback):
                    try:
                        entry_attrs.dn = callback(
                            ldap, pkey, entry_attrs.dn, entry_attrs,
                            failed[ldap_obj_name], config, context,
                            schema=options['schema'],
                            search_bases=search_bases,
                            valid_gids=valid_gids,
                            invalid_gids=invalid_gids,
                            **blacklists
                        )
                        if not entry_attrs.dn:
                            continue
                    except errors.NotFound as e:
                        failed[ldap_obj_name][pkey] = unicode(e.reason)
                        continue

                try:
                    ldap.add_entry(entry_attrs)
                except errors.ExecutionError as e:
                    callback = self.migrate_objects[ldap_obj_name]['exc_callback']
                    if callable(callback):
                        try:
                            callback(
                                ldap, entry_attrs.dn, entry_attrs, e, options)
                        except errors.ExecutionError as e:
                            failed[ldap_obj_name][pkey] = unicode(e)
                            continue
                    else:
                        failed[ldap_obj_name][pkey] = unicode(e)
                        continue

                migrated[ldap_obj_name].append(pkey)

                callback = self.migrate_objects[ldap_obj_name]['post_callback']
                if callable(callback):
                    callback(
                        ldap, pkey, entry_attrs.dn, entry_attrs,
                        failed[ldap_obj_name], config, context)
                e = datetime.datetime.now()
                d = e - s
                total_dur = e - migration_start
                migrate_cnt += 1
                if migrate_cnt > 0 and migrate_cnt % 100 == 0:
                    api.log.info("%d %ss migrated. %s elapsed." % (migrate_cnt, ldap_obj_name, total_dur))
                api.log.debug("%d %ss migrated, duration: %s (total %s)" % (migrate_cnt, ldap_obj_name, d, total_dur))

        if 'def_group_dn' in context:
            _update_default_group(ldap, context, True)

        return (migrated, failed)
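
migrate() calls datetime.datetime.now() twice per entry, once before processing (s) and once after (e), deriving both the per-entry duration and the total elapsed time since migration_start. A minimal sketch of that timing scaffold around a dummy work loop:

import datetime
import time

migration_start = datetime.datetime.now()

for i in range(3):
    s = datetime.datetime.now()
    time.sleep(0.01)                 # stand-in for migrating one entry
    e = datetime.datetime.now()
    per_entry = e - s                # timedelta for this entry
    total = e - migration_start      # timedelta since the run started
    print("entry %d took %s (total %s)" % (i, per_entry, total))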

Example 11

Project: gnowsys-studio
Source File: 0001_initial.py
View license
    def forwards(self, orm):
        
        # Adding model 'Gbobject'
        db.create_table('objectapp_gbobject', (
            ('node_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['gstudio.Node'], unique=True, primary_key=True)),
            ('content', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
            ('excerpt', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('tags', self.gf('tagging.fields.TagField')()),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, db_index=True)),
            ('status', self.gf('django.db.models.fields.IntegerField')(default=2)),
            ('featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('pingback_enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('last_update', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('start_publication', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('end_publication', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2042, 3, 15, 0, 0))),
            ('login_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('template', self.gf('django.db.models.fields.CharField')(default='objectapp/gbobject_detail.html', max_length=250)),
        ))
        db.send_create_signal('objectapp', ['Gbobject'])

        # Adding M2M table for field prior_nodes on 'Gbobject'
        db.create_table('objectapp_gbobject_prior_nodes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False)),
            ('to_gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False))
        ))
        db.create_unique('objectapp_gbobject_prior_nodes', ['from_gbobject_id', 'to_gbobject_id'])

        # Adding M2M table for field posterior_nodes on 'Gbobject'
        db.create_table('objectapp_gbobject_posterior_nodes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False)),
            ('to_gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False))
        ))
        db.create_unique('objectapp_gbobject_posterior_nodes', ['from_gbobject_id', 'to_gbobject_id'])

        # Adding M2M table for field objecttypes on 'Gbobject'
        db.create_table('objectapp_gbobject_objecttypes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False)),
            ('nodetype', models.ForeignKey(orm['gstudio.nodetype'], null=False))
        ))
        db.create_unique('objectapp_gbobject_objecttypes', ['gbobject_id', 'nodetype_id'])

        # Adding M2M table for field authors on 'Gbobject'
        db.create_table('objectapp_gbobject_authors', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('objectapp_gbobject_authors', ['gbobject_id', 'user_id'])

        # Adding M2M table for field sites on 'Gbobject'
        db.create_table('objectapp_gbobject_sites', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False)),
            ('site', models.ForeignKey(orm['sites.site'], null=False))
        ))
        db.create_unique('objectapp_gbobject_sites', ['gbobject_id', 'site_id'])

        # Adding model 'Process'
        db.create_table('objectapp_process', (
            ('gbobject_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['objectapp.Gbobject'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('objectapp', ['Process'])

        # Adding M2M table for field processtypes on 'Process'
        db.create_table('objectapp_process_processtypes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('process', models.ForeignKey(orm['objectapp.process'], null=False)),
            ('processtype', models.ForeignKey(orm['gstudio.processtype'], null=False))
        ))
        db.create_unique('objectapp_process_processtypes', ['process_id', 'processtype_id'])

        # Adding M2M table for field priorstate_attribute_set on 'Process'
        db.create_table('objectapp_process_priorstate_attribute_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('process', models.ForeignKey(orm['objectapp.process'], null=False)),
            ('attribute', models.ForeignKey(orm['gstudio.attribute'], null=False))
        ))
        db.create_unique('objectapp_process_priorstate_attribute_set', ['process_id', 'attribute_id'])

        # Adding M2M table for field priorstate_relation_set on 'Process'
        db.create_table('objectapp_process_priorstate_relation_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('process', models.ForeignKey(orm['objectapp.process'], null=False)),
            ('relation', models.ForeignKey(orm['gstudio.relation'], null=False))
        ))
        db.create_unique('objectapp_process_priorstate_relation_set', ['process_id', 'relation_id'])

        # Adding M2M table for field poststate_attribute_set on 'Process'
        db.create_table('objectapp_process_poststate_attribute_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('process', models.ForeignKey(orm['objectapp.process'], null=False)),
            ('attribute', models.ForeignKey(orm['gstudio.attribute'], null=False))
        ))
        db.create_unique('objectapp_process_poststate_attribute_set', ['process_id', 'attribute_id'])

        # Adding M2M table for field poststate_relation_set on 'Process'
        db.create_table('objectapp_process_poststate_relation_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('process', models.ForeignKey(orm['objectapp.process'], null=False)),
            ('relation', models.ForeignKey(orm['gstudio.relation'], null=False))
        ))
        db.create_unique('objectapp_process_poststate_relation_set', ['process_id', 'relation_id'])

        # Adding model 'System'
        db.create_table('objectapp_system', (
            ('gbobject_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['objectapp.Gbobject'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('objectapp', ['System'])

        # Adding M2M table for field systemtypes on 'System'
        db.create_table('objectapp_system_systemtypes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('system', models.ForeignKey(orm['objectapp.system'], null=False)),
            ('systemtype', models.ForeignKey(orm['gstudio.systemtype'], null=False))
        ))
        db.create_unique('objectapp_system_systemtypes', ['system_id', 'systemtype_id'])

        # Adding M2M table for field gbobject_set on 'System'
        db.create_table('objectapp_system_gbobject_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('system', models.ForeignKey(orm['objectapp.system'], null=False)),
            ('gbobject', models.ForeignKey(orm['objectapp.gbobject'], null=False))
        ))
        db.create_unique('objectapp_system_gbobject_set', ['system_id', 'gbobject_id'])

        # Adding M2M table for field relation_set on 'System'
        db.create_table('objectapp_system_relation_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('system', models.ForeignKey(orm['objectapp.system'], null=False)),
            ('relation', models.ForeignKey(orm['gstudio.relation'], null=False))
        ))
        db.create_unique('objectapp_system_relation_set', ['system_id', 'relation_id'])

        # Adding M2M table for field attribute_set on 'System'
        db.create_table('objectapp_system_attribute_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('system', models.ForeignKey(orm['objectapp.system'], null=False)),
            ('attribute', models.ForeignKey(orm['gstudio.attribute'], null=False))
        ))
        db.create_unique('objectapp_system_attribute_set', ['system_id', 'attribute_id'])

        # Adding M2M table for field process_set on 'System'
        db.create_table('objectapp_system_process_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('system', models.ForeignKey(orm['objectapp.system'], null=False)),
            ('process', models.ForeignKey(orm['objectapp.process'], null=False))
        ))
        db.create_unique('objectapp_system_process_set', ['system_id', 'process_id'])

        # Adding M2M table for field system_set on 'System'
        db.create_table('objectapp_system_system_set', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_system', models.ForeignKey(orm['objectapp.system'], null=False)),
            ('to_system', models.ForeignKey(orm['objectapp.system'], null=False))
        ))
        db.create_unique('objectapp_system_system_set', ['from_system_id', 'to_system_id'])
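
Unlike the other examples, this migration passes `datetime.datetime.now` itself (no parentheses) as the DateTimeField default, so the framework calls it each time a row is created, whereas `datetime.datetime(2042, 3, 15, 0, 0)` is a constant evaluated once. A small sketch of the difference between a callable default and a value default, without any Django dependency (the helper is hypothetical):

import datetime

def make_record(created=datetime.datetime.now):
    # Callable default: invoke it per record; value default: use it as-is.
    created_at = created() if callable(created) else created
    return {'created_at': created_at}

print(make_record())                                        # fresh timestamp per call
print(make_record(created=datetime.datetime(2042, 3, 15)))  # fixed constant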

Example 12

Project: Icarra
Source File: autoUpdater.py
View license
	def run(self):
		global haveKeyring
		app = appGlobal.getApp()
		while self.running:
			self.freshStart = False
			now = datetime.datetime.now()

			#print "Check for new stock data at %s" % now.strftime("%Y-%m-%d %H:%M:%S")

			# Determine the cutoff date for downloading new stock data
			# If before 7pm EST then go back to 7pm EST of the previous day
			# Go back by one day until we hit a week day
			# Then zero out H:M:S
			cutoffTime = datetime.datetime.utcnow() - datetime.timedelta(hours = 5)
			if cutoffTime.hour < 19:
				cutoffTime -= datetime.timedelta(days = 1)
			while cutoffTime.weekday() >= 5:
				cutoffTime -= datetime.timedelta(days = 1)
			cutoffTime = datetime.datetime(cutoffTime.year, cutoffTime.month, cutoffTime.day, 0, 0, 0)

			# Load all portfolios
			# Build list of tickers, update stock data
			names = self.prefs.getPortfolios()
			tickers = []
			max = len(names)
			tickerPorts = {}
			ports = {}
			for name in names:
				p = portfolio.Portfolio(name)
				p.readFromDb()
				ports[name] = p
				
			# Auto update
			if app.prefs.getBackgroundImport() and haveKeyring:
				# Import once every 20 hours
				diff = now - app.prefs.getLastBackgroundImport()
				#print "time since last auto import", diff
				if diff > datetime.timedelta(hours = 20):
					print "Background import transactions at %s" % now.strftime("%Y-%m-%d %H:%M:%S")
					for name, p in ports.items():
						if not self.running:
							break

						if p.isBrokerage() and p.brokerage != "" and p.username != "" and p.account != "":
							# check for password and brokerage
							brokerage = app.plugins.getBrokerage(p.brokerage)
							if not brokerage:
								continue
							try:
								password = keyring.get_password("Icarra-ofx-" + name, p.username)
								if not password:
									continue
							except:
								haveKeyring = False
								continue
							print "import from", name
							# Get ofx data, update if not empty
							# It may be an error string, in which case it's ignored
							ofx = getOfx(p.username, password, brokerage, p.account)
							if ofx != "":
								(numNew, numOld, newTickers) = p.updateFromFile(ofx, app)
								if numNew > 0 or newTickers:
									p.portPrefs.setDirty(True)
							print "imported"

					# Update time only if not aborted
					if self.running:
						app.prefs.setLastBackgroundImport()

			# Build list of tickers
			for name, p in ports.items():
				pTickers = p.getTickers(includeAllocation = True)
				for ticker in pTickers:
					if ticker in ["__CASH__", "__COBMINED__"]:
						continue
					
					# Add to tickerPorts map
					if ticker in tickerPorts:
						tickerPorts[ticker].append(p)
					else:
						tickerPorts[ticker] = [p]
					
					# Add to list of tickers
					if not ticker in tickers:
						tickers.append(ticker)

			# Remove tickers that do not need to be updated
			for ticker in tickers[:]:
				# Check if we do not have data after the cutoffTime
				last = self.stockData.getLastDate(ticker)
				if 0 and last and last >= cutoffTime:
					tickers.remove(ticker)
					continue
				
				# Check if we tried downloading within one hour
				lastDownload = self.stockData.getLastDownload(ticker)
				if lastDownload and datetime.datetime.now() - lastDownload < datetime.timedelta(hours = 1):
					tickers.remove(ticker)
					continue
					
			portsToUpdate = {}
			self.tickersToImport = len(tickers)
			
			# Lump all tickers that are less than 2 weeks old into one request
			updateNow = []
			for ticker in tickers[:]:
				lastDownload = self.stockData.getLastDownload(ticker)
				if lastDownload and datetime.datetime.now() - lastDownload < datetime.timedelta(days = 14):
					updateNow.append(ticker)
					tickers.remove(ticker)
					continue
			
			# Download the 2 week lump 10 tickers at a time
			while updateNow and self.running and not appGlobal.getFailConnected():
				downloadPart = updateNow[0:10]
				updateNow = updateNow[10:]
				try:
					new = self.stockData.updateStocks(downloadPart)
					appGlobal.setConnected(True)
				except Exception, e:
					appGlobal.setFailConnected(True)
					break
				for ticker in downloadPart:
					self.tickerCount += 1
					if new:
						for p in tickerPorts[ticker]:
							portsToUpdate[p.name] = True

			# Update each remaining ticker while still running
			for ticker in tickers:
				if self.running and not appGlobal.getFailConnected():
					try:
						new = self.stockData.updateStocks([ticker])
						appGlobal.setConnected(True)
					except Exception, e:
						appGlobal.setFailConnected(True)
						break
					if new:
						for p in tickerPorts[ticker]:
							# Add 3 for every port
							if not p.name in portsToUpdate:
								if app.prefs.getBackgroundRebuild():
									self.tickersToImport += 3
								portsToUpdate[p.name] = True

				self.tickerCount += 1
			
			# Mark every portfolio as dirty
			for name in portsToUpdate:
				ports[name].portPrefs.setDirty(True)
			
			# Next rebuild if the user has it configured
			if app.prefs.getBackgroundRebuild():
				self.rebuilding = True
				# Rebuild benchmarks, portfolios, combined portfolios
				for name, p in ports.items():
					if not self.running:
						break
					if p.portPrefs.getDirty() and p.isBenchmark():
						try:
							#print "Rebuilding benchmark", name
							ports[name].rebuildPositionHistory(self.stockData)
						except Exception, e:
							print traceback.format_exc()
						self.tickerCount += 3
				for name, p in ports.items():
					if not self.running:
						break
					if p.portPrefs.getDirty() and p.isBrokerage():
						try:
							#print "Rebuilding brokerage", name
							ports[name].rebuildPositionHistory(self.stockData)
						except Exception, e:
							print traceback.format_exc()
						self.tickerCount += 3
				for name, p in ports.items():
					if not self.running:
						break
					if p.portPrefs.getDirty() and p.isCombined():
						try:
							#print "Rebuilding combined", name
							ports[name].rebuildPositionHistory(self.stockData)
						except Exception, e:
							print traceback.format_exc()
						self.tickerCount += 3
				self.rebuilding = False
			
			# Close all opened portfolios
			for name, p in ports.items():
				p.close()
			
			now = datetime.datetime.now()
			#print "Finished checking for new stock data at %s" % now.strftime("%Y-%m-%d %H:%M:%S")

			# Sleep for up to one hour unless we are notified to wake up
			# Do not sleep if asked to do a fresh start
			if self.running and not self.freshStart:
				self.sleepCond.acquire()
				self.sleeping = True
				self.tickerCount = self.tickersToImport # Percent done = 100%
				self.sleepCond.wait(60 * 60)
				self.sleeping = False
				self.tickerCount = 0
				self.tickersToImport = 1
				self.sleepCond.release()

		self.finished = True
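
The updater throttles network work by comparing `datetime.datetime.now() - lastDownload` against a timedelta (one hour before retrying a ticker, 14 days for the batched lump). A compact sketch of that throttle check, with a hypothetical last_download map in place of the stock database:

import datetime

# Hypothetical cache of last download times, standing in for stockData.getLastDownload().
last_download = {
    'AAPL': datetime.datetime.now() - datetime.timedelta(minutes=20),
    'MSFT': datetime.datetime.now() - datetime.timedelta(hours=3),
}

def needs_update(ticker, min_age=datetime.timedelta(hours=1)):
    last = last_download.get(ticker)
    if last is None:
        return True                                    # never downloaded
    return datetime.datetime.now() - last >= min_age   # old enough to retry

print(needs_update('AAPL'))   # False: fetched 20 minutes ago
print(needs_update('MSFT'))   # True: fetched 3 hours ago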

Example 13

Project: quantdsl
Source File: services.py
View license
def dsl_eval(dsl_source, filename='<unknown>', is_parallel=None, dsl_classes=None, compile_kwds=None,
             evaluation_kwds=None, price_process_name=None, is_multiprocessing=False, pool_size=0, is_verbose=False,
             is_show_source=False, **extra_evaluation_kwds):
    """
    Returns the result of evaluating a compiled module (an expression, or a user defined function).

    An expression (with optional function defs) will evaluate to a simple value.

    A function def will evaluate to a DSL expression, will may then be evaluated (more than one
    function def without an expression is an error).
    """
    if price_process_name is None:
        price_process_name = DEFAULT_PRICE_PROCESS_NAME

    if evaluation_kwds is None:
        evaluation_kwds = DslNamespace()
    assert isinstance(evaluation_kwds, dict)
    evaluation_kwds.update(extra_evaluation_kwds)

    if is_show_source:
        print_("Reading DSL source:")
        print_()
        print_('"""')
        print_(dsl_source.strip())
        print_('"""')
        print_()

    if is_verbose:
        print_("Compiling DSL source, please wait...")
        print_()
    compile_start_time = datetime.datetime.now()

    # Compile the source into a primitive DSL expression, with optional dependency graph.
    dsl_expr = dsl_compile(dsl_source, filename=filename, is_parallel=is_parallel, dsl_classes=dsl_classes,
                           compile_kwds=compile_kwds)

    # Measure the compile_dsl_module time.
    compile_time_delta = datetime.datetime.now() - compile_start_time

    # Check the result of the compilation.
    # Todo: This feels unnecessary?
    if is_parallel:
        assert isinstance(dsl_expr, DependencyGraph), type(dsl_expr)
    else:
        assert isinstance(dsl_expr, DslExpression), type(dsl_expr)

    if is_verbose:
        if isinstance(dsl_expr, DependencyGraph):

            print_("Compiled DSL source into %d partial expressions (root ID: %s)." % (
                len(dsl_expr.stubbed_calls), dsl_expr.root_stub_id))
            print_()

        print_("Duration of compilation: %s" % compile_time_delta)
        print_()

        if isinstance(dsl_expr, DependencyGraph):
            if is_show_source:
                print_("Expression stack:")
                for stubbed_exprData in dsl_expr.stubbed_calls:
                    print_("  " + str(stubbed_exprData[0]) + ": " + str(stubbed_exprData[1]))
                print_()

    # If the expression has any stochastic elements, the evaluation kwds must have an 'observation_date' (datetime).
    if dsl_expr.has_instances(dsl_type=StochasticObject):
        observation_date = evaluation_kwds['observation_date']
        assert isinstance(observation_date, datetime.date)

        if is_verbose:
            print_("Observation time: %s" % observation_date)
            print_()

        # Avoid any confusion with the internal 'present_time' variable.
        if 'present_time' in evaluation_kwds:
            msg = ("Don't set present_time here, set observation_date instead. "
                   "Hint: Adjust effective present time with Fixing or Wait elements.")
            raise DslError(msg)

        # Initialise present_time as observation_date.
        evaluation_kwds['present_time'] = observation_date

        # If the expression has any Market elements, a market simulation is required
        if dsl_expr.has_instances(dsl_type=Market):

            # If a market simulation is required, evaluation kwds must have 'path_count' (integer).
            if 'path_count' not in evaluation_kwds:
                evaluation_kwds['path_count'] = DEFAULT_PATH_COUNT
            path_count = evaluation_kwds['path_count']
            assert isinstance(path_count, int)

            # If a market simulation is required, evaluation_kwds must have 'market_calibration' (integer).
            market_calibration = evaluation_kwds['market_calibration']
            assert isinstance(market_calibration, dict)

            # If a market simulation is required, generate the simulated prices using the price process.
            if not 'all_market_prices' in evaluation_kwds:

                if is_verbose:
                    print_("Price process: %s" % price_process_name)
                    print_()

                price_process = get_price_process(price_process_name)

                if is_verbose:
                    print_("Path count: %d" % path_count)
                    print_()

                if is_verbose:
                    print_("Finding all Market names and Fixing dates...")
                    print_()

                # Extract market names from the expression.
                # Todo: Avoid doing this on the dependency graph, when all the Market elements must be in the original.
                market_names = find_market_names(dsl_expr)

                # Extract fixing dates from the expression.
                # Todo: Perhaps collect the fixing dates?
                fixing_dates = list_fixing_dates(dsl_expr)

                if is_verbose:
                    print_("Simulating future prices for Market%s '%s' from observation time %s through fixing dates: %s." % (
                        '' if len(market_names) == 1 else 's',
                        ", ".join(market_names),
                        "'%04d-%02d-%02d'" % (observation_date.year, observation_date.month, observation_date.day),
                        # Todo: Only print first and last few, if there are loads.
                        ", ".join(["'%04d-%02d-%02d'" % (d.year, d.month, d.day) for d in fixing_dates[:8]]) + \
                        (", [...]" if len(fixing_dates) > 9 else '') + \
                        ((", '%04d-%02d-%02d'" % (fixing_dates[-1].year, fixing_dates[-1].month, fixing_dates[-1].day)) if len(fixing_dates) > 8 else '')
                    ))
                    print_()

                # Simulate the future prices.
                all_market_prices = price_process.simulate_future_prices(market_names, fixing_dates, observation_date, path_count, market_calibration)

                # Add future price simulation to evaluation_kwds.
                evaluation_kwds['all_market_prices'] = all_market_prices

    # Initialise the evaluation timer variable (needed by showProgress thread).
    evalStartTime = None

    if is_parallel:
        if is_verbose:

            len_stubbed_exprs = len(dsl_expr.stubbed_calls)
            lenLeafIds = len(dsl_expr.leaf_ids)

            msg = "Evaluating %d expressions (%d %s) with " % (len_stubbed_exprs, lenLeafIds, 'leaf' if lenLeafIds == 1 else 'leaves')
            if is_multiprocessing and pool_size:
                msg += "a multiprocessing pool of %s workers" % pool_size
            else:
                msg += "a single thread"
            msg += ", please wait..."

            print_(msg)
            print_()

            # Define showProgress() thread.
            def showProgress(stop):
                progress = 0
                movingRates = []
                while progress < 100 and not stop.is_set():
                    time.sleep(0.3)
                    if evalStartTime is None:
                        continue
                    # Avoid race condition.
                    if not hasattr(dsl_expr, 'runner') or not hasattr(dsl_expr.runner, 'resultIds'):
                        continue
                    if stop.is_set():
                        break

                    try:
                        lenResults = len(dsl_expr.runner.resultIds)
                    except IOError:
                         break
                    resultsTime = datetime.datetime.now()
                    movingRates.append((lenResults, resultsTime))
                    if len(movingRates) >= 15:
                        movingRates.pop(0)
                    if len(movingRates) > 1:
                        firstLenResults, firstTimeResults = movingRates[0]
                        lastLenResults, lastTimeResults = movingRates[-1]
                        lenDelta = lastLenResults - firstLenResults
                        resultsTimeDelta = lastTimeResults - firstTimeResults
                        timeDeltaSeconds = resultsTimeDelta.seconds + resultsTimeDelta.microseconds * 0.000001
                        rateStr = "%.2f expr/s" % (lenDelta / timeDeltaSeconds)
                    else:
                        rateStr = ''
                    progress = 100.0 * lenResults / len_stubbed_exprs
                    sys.stdout.write("\rProgress: %01.2f%% (%s/%s) %s " % (progress, lenResults, len_stubbed_exprs, rateStr))
                    sys.stdout.flush()
                sys.stdout.write("\r")
                sys.stdout.flush()
            stop = threading.Event()
            thread = threading.Thread(target=showProgress, args=(stop,))

            # Start showProgress() thread.
            thread.start()

    # Start timing the evaluation.
    evalStartTime = datetime.datetime.now()
    try:
        # Evaluate the primitive DSL expression.
        if is_parallel:
            if is_multiprocessing:
                dependency_graph_runner_class = MultiProcessingDependencyGraphRunner
            else:
                dependency_graph_runner_class = SingleThreadedDependencyGraphRunner
            value = dsl_expr.evaluate(dependency_graph_runner_class=dependency_graph_runner_class, pool_size=pool_size, **evaluation_kwds)
        else:
            value = dsl_expr.evaluate(**evaluation_kwds)
    except:
        if is_parallel:
            if is_verbose:
                if thread.isAlive():
                    # print "Thread is alive..."
                    stop.set()
                    # print "Waiting to join with thread..."
                    thread.join(timeout=1)
                    # print "Joined with thread..."
        raise

    # Stop timing the evaluation.
    evalTimeDelta = datetime.datetime.now() - evalStartTime

    if isinstance(dsl_expr, DependencyGraph):
        if is_verbose:
            # Join with showProgress thread.
            thread.join(timeout=3)

    if is_verbose:
        timeDeltaSeconds = evalTimeDelta.seconds + evalTimeDelta.microseconds * 0.000001
        if is_parallel:
            len_stubbed_exprs = len(dsl_expr.stubbed_calls)
            rateStr = "(%.2f expr/s)" % (len_stubbed_exprs / timeDeltaSeconds)
        else:
            rateStr = ''
        print_("Duration of evaluation: %s    %s" % (evalTimeDelta, rateStr))
        print_()

    # Prepare the result.
    import scipy
    if isinstance(value, scipy.ndarray):
        mean = value.mean()
        stderr = value.std() / math.sqrt(path_count)
        return {
            'mean': mean,
            'stderr': stderr
        }
    else:
        return value
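
The evaluation above is timed by capturing datetime.datetime.now() before and after the work and subtracting the two values. A minimal sketch of that pattern, using a hypothetical timed() helper and timedelta.total_seconds() in place of the manual seconds/microseconds arithmetic:

import datetime
import time

def timed(work, *args, **kwargs):
    # Hypothetical helper: record a start time, run the work, report the elapsed timedelta.
    start_time = datetime.datetime.now()
    result = work(*args, **kwargs)
    elapsed = datetime.datetime.now() - start_time
    # total_seconds() covers the elapsed.seconds + elapsed.microseconds * 0.000001 arithmetic.
    print("Duration of evaluation: %s (%.3f s)" % (elapsed, elapsed.total_seconds()))
    return result

timed(time.sleep, 0.1)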

Example 15

Project: openjumo
Source File: views.py
View license
@PostOnly
def update_org(request):
    try:
        org = json.loads(request.POST.get('org', {}))
        org_id = int(org['id'])
    except AttributeError:
        json_error(INVALID_ORG_ID_ERROR)

    str_fields = [
                    'name', 'email', 'phone_number', 'url', 'img_url',
                    'revenue', 'size', 'vision_statement', 'mission_statement',
                    'blog_url', 'twitter_id', 'flickr_id', 'vimeo_id', 'youtube_id',
                 ]

    int_fields = [
                    'year_founded', 'facebook_id', # changed 'year' to 'year_founded' here -b
                 ]

    bool_fields = [
                    'fb_fetch_enabled', 'twitter_fetch_enabled',
                  ]

    original = Org.objects.get(id = org_id)


    if 'parent_orgs' in org:
        if org['parent_orgs']:
            original.parent_org = Org.objects.get(id = org['parent_orgs'][0])

    if 'ein' in org and org['ein'] != original.ein:
        original.donation_enabled = False
        if org['ein'] == '':
            original.ein = ''
        else:
            original.ein = org['ein']
            try:
                original.donation_enable = False
              #  if nfg_api.npo_is_donation_enabled(org['ein']):
              #      original.donation_enabled = True
            except Exception, inst:
                logging.exception("Error checking donation status with nfs")


    if 'child_orgs' in org:
        org['child_orgs'] = [int(o) for o in org['child_orgs']]
        for o in org['child_orgs']:
            if o not in [l.id for l in original.parentorg.all()]:
                original.parentorg.add(Org.objects.get(id = o))
        for o in original.parentorg.all():
            if o.id not in org['child_orgs']:
                original.parentorg.remove(Org.objects.get(id = o.id))

    # this probably needs to change down the road because i can't imagine this is very sustainable.
    for i in org['tags']['context']:
        iss = Issue.objects.get(name__iexact = i['name'])

        try:
            r = OrgIssueRelationship.objects.get(issue = iss, org = original)
            r.rank = i['tag_rank']
            r.date_updated = datetime.datetime.now()
            r.save()
        except:
            r = OrgIssueRelationship()
            r.issue = iss
            r.org = original
            r.date_created = datetime.datetime.now()
            r.date_updated = datetime.datetime.now()
            r.rank = i['tag_rank']
            r.save()

    '''
    {u'locality': u'New York', u'region': u'Brooklyn', u'longitude': u'-73.948883', u'country_name': u'United States', u'postal_code': u'', u'address': u'', u'latitude': u'40.655071', u'type': u'County', u'raw_geodata': {u'lang': u'en-US', u'popRank': u'0', u'name': u'Brooklyn', u'woeid': u'12589335', u'uri': u'http://where.yahooapis.com/v1/place/12589335', u'admin1': {u'content': u'New York', u'code': u'US-NY', u'type': u'State'}, u'admin3': None, u'admin2': {u'content': u'Brooklyn', u'code': u'', u'type': u'County'}, u'centroid': {u'latitude': u'40.655071', u'longitude': u'-73.948883'}, u'locality1': {u'content': u'New York', u'type': u'Town'}, u'locality2': None, u'country': {u'content': u'United States', u'code': u'US', u'type': u'Country'}, u'boundingBox': {u'northEast': {u'latitude': u'40.739471', u'longitude': u'-73.833359'}, u'southWest': {u'latitude': u'40.570679', u'longitude': u'-74.042068'}}, u'areaRank': u'5', u'postal': None, u'placeTypeName': {u'content': u'County', u'code': u'9'}}}
    '''

    if 'location' in org and org['location']:
        loc = org['location']
        raw_geodata = json.dumps(loc["raw_geodata"]) if isinstance(loc.get("raw_geodata"), dict) else loc.get("raw_geodata")
        #Until we fix duplicate locations we have to do the following...lame.
        _locs = Location.objects.filter(raw_geodata = raw_geodata,
            longitude = loc.get('longitude', None),
            latitude = loc.get('latitude', None),
            address = loc.get('address', ' '),
            region = loc.get('region', ' '),
            locality = loc.get('locality', ' '),
            postal_code = loc.get('postal_code', ' '),
            country_name = loc.get('country_name', ' '))

        if len(_locs) > 0:
            _loc = _locs[0]
        else:
            _loc = Location(raw_geodata = raw_geodata,
                longitude = loc.get('longitude', None),
                latitude = loc.get('latitude', None),
                address = loc.get('address', ' '),
                region = loc.get('region', ' '),
                locality = loc.get('locality', ' '),
                postal_code = loc.get('postal_code', ' '),
                country_name = loc.get('country_name', ' '),)
            _loc.save()
        original.location = _loc
    else:
        original.location = None

    if 'working_locations' in org:
        for loc in org['working_locations']:
            raw_geodata = json.dumps(loc["raw_geodata"]) if isinstance(loc.get("raw_geodata"), dict) else loc.get("raw_geodata")
            if raw_geodata not in [l.raw_geodata for l in original.working_locations.all()]:
                locs = Location.objects.filter(raw_geodata = raw_geodata,
                        longitude = loc.get('longitude', None),
                        latitude = loc.get('latitude', None),
                        address = loc.get('address', ' '),
                        region = loc.get('region', ' '),
                        locality = loc.get('locality', ' '),
                        postal_code = loc.get('postal_code', ' '),
                        country_name = loc.get('country_name', ' '),
                        )

                if len(locs) > 0:
                    new_l = locs[0]
                else:
                    new_l = Location(raw_geodata = raw_geodata,
                        longitude = loc.get('longitude', None),
                        latitude = loc.get('latitude', None),
                        address = loc.get('address', ' '),
                        region = loc.get('region', ' '),
                        locality = loc.get('locality', ' '),
                        postal_code = loc.get('postal_code', ' '),
                        country_name = loc.get('country_name', ' '),
                        )
                    new_l.save()


                #Until we clean up the location DB we can't use get.
                """
                new_l, created = Location.objects.get_or_create(
                        raw_geodata = json.dumps(loc["raw_geodata"]) if isinstance(loc.get("raw_geodata"), dict) else loc.get("raw_geodata"),
                        longitude = loc.get('longitude', None),
                        latitude = loc.get('latitude', None),
                        address = loc.get('address', ' '),
                        region = loc.get('region', ' '),
                        locality = loc.get('locality', ' '),
                        postal_code = loc.get('postal_code', ' '),
                        country_name = loc.get('country_name', ' '),
                        )
                """
                original.working_locations.add(new_l)
                original.save()

        raw_geos = []
        for new_loc in org['working_locations']:
            raw_geodata = json.dumps(new_loc["raw_geodata"]) if isinstance(new_loc.get("raw_geodata"), dict) else new_loc.get("raw_geodata")
            raw_geos.append(raw_geodata)

        for old_loc in original.working_locations.all():
            if old_loc.raw_geodata not in raw_geos:
                original.working_locations.remove(old_loc)



    for issue in original.issues.all():
        if issue.name.lower() not in [l['name'].lower() for l in org['tags']['context']]:
            r = OrgIssueRelationship.objects.get(issue = issue, org = original)
            r.delete()

    for f in str_fields:
        if f in org and org[f] != getattr(original, f):
            setattr(original, f, smart_str(org[f], encoding='utf-8'))

    for f in int_fields:
        if f in org and org[f] != getattr(original, f):
            if org[f]:
                setattr(original, f, int(org[f]))
            else:
                setattr(original, f, None)

    for f in bool_fields:
        if f in org and org[f] != getattr(original, f):
            setattr(original, f, org[f])

    if 'handle' in org and org['handle'] != original.handle:
        _handle = original.handle
        original.handle = create_handle(org['handle'])
        cache.bust_on_handle(original, _handle, False)

    if 'methods' in org:
        for method in org['methods']:
            if method not in [l.method for l in original.method_set.all()]:
                m = Method()
                m.method = method
                m.date_created = datetime.datetime.now()
                m.date_updated = datetime.datetime.now()
                m.org = original
                m.save()

        for method in original.method_set.all():
            if method.method not in org['methods']:
                method.delete()

    if 'accomplishments' in org:
        for acc in org['accomplishments']:
            if acc['text'] not in [l.description for l in original.accomplishment_set.all()]:
                a = Accomplishment()
                a.org = original
                a.header = acc.get('year', '')
                a.description = acc.get('text', '')
                a.save()

        for acc in original.accomplishment_set.all():
            acc_header = acc.header
            acc_description = acc.description
            delete = True
            for new_acc in org["accomplishments"]:
                if new_acc["year"] == acc_header and new_acc["text"] == acc_description:
                    delete = False
            if delete:
                acc.delete()

    original.save()
    try:
        cache.bust_on_handle(original, original.handle)
    except:
        pass
    return json_response({'result' : original.handle})
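
When this view creates or updates an OrgIssueRelationship or Method row, it stamps date_created and date_updated with datetime.datetime.now(). A minimal sketch of that create-or-update timestamping pattern, using a hypothetical plain class in place of a Django model:

import datetime

class Record(object):
    # Hypothetical stand-in for a model row with created/updated timestamps.
    def __init__(self, rank):
        self.rank = rank
        self.date_created = datetime.datetime.now()
        self.date_updated = self.date_created

    def update(self, rank):
        # On update, only the 'updated' stamp is refreshed.
        self.rank = rank
        self.date_updated = datetime.datetime.now()

r = Record(rank=1)
r.update(rank=2)
print(r.date_created <= r.date_updated)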

Example 17

Project: Mycodo
Source File: controller_relay.py
View license
    def relay_on_off(self, relay_id, state,
                     duration=0.0, trigger_conditionals=True,
                     datetime_now=datetime.datetime.now()):
        """
        Turn a relay on or off
        The GPIO may be either HIGH or LOW to activate a relay. This trigger
        state will be referenced to determine if the GPIO needs to be high or
        low to turn the relay on or off.

        Conditionals will be checked for each action requested of a relay, and
        if true, those conditional actions will be executed. For example:
            'If relay 1 turns on, turn relay 3 off'

        :param relay_id: Unique ID for relay
        :type relay_id: str
        :param state: What state is desired? 'on' or 'off'
        :type state: str
        :param duration: If state is 'on', a duration can be set to turn the relay off after
        :type duration: float
        :param trigger_conditionals: Whether to trigger conditionals to act or not
        :type trigger_conditionals: bool
        """
        # Check if relay exists
        if relay_id not in self.relay_id:
            self.logger.warning("[Relay] Cannot turn {} Relay with ID {}. It "
                                "doesn't exist".format(state, relay_id))
            return 1
        if state == 'on':
            if not self.relay_pin[relay_id]:
                self.logger.warning("[Relay] Invalid pin for relay "
                                    "{} ({}).".format(self.relay_id[relay_id],
                                                      self.relay_name[relay_id]))
                return 1

            current_amps = self.current_amp_load()
            if current_amps+self.relay_amps[relay_id] > MAX_AMPS:
                self.logger.warning("[Relay] Cannot turn relay {} "
                                    "({}) On. If this relay turns on, "
                                    "there will be {} amps being drawn, "
                                    "which exceeds the maximum set draw of {}"
                                    " amps.".format(self.relay_id[relay_id],
                                                    self.relay_name[relay_id],
                                                    current_amps,
                                                    MAX_AMPS))
                return 1

            else:
                if duration:
                    time_now = datetime.datetime.now()
                    if self.is_on(relay_id) and self.relay_on_duration[relay_id]:
                        if self.relay_on_until[relay_id] > time_now:
                            remaining_time = (self.relay_on_until[relay_id]-time_now).seconds
                        else:
                            remaining_time = 0
                        time_on = self.relay_last_duration[relay_id] - remaining_time
                        self.logger.debug("[Relay] Relay {} ({}) is already "
                                            "on for a duration of {:.1f} seconds (with "
                                            "{:.1f} seconds remaining). Recording the "
                                            "amount of time the relay has been on ({:.1f} "
                                            "sec) and updating the on duration to {:.1f} "
                                            "seconds.".format(self.relay_id[relay_id],
                                                              self.relay_name[relay_id],
                                                              self.relay_last_duration[relay_id],
                                                              remaining_time,
                                                              time_on,
                                                              duration))
                        if time_on > 0:
                            # Write the duration the relay was ON to the
                            # database at the timestamp it turned ON
                            duration = float(time_on)
                            timestamp = datetime.datetime.utcnow()-datetime.timedelta(seconds=duration)
                            write_db = threading.Thread(
                                target=write_influxdb_value,
                                args=(self.logger, INFLUXDB_HOST,
                                      INFLUXDB_PORT, INFLUXDB_USER,
                                      INFLUXDB_PASSWORD, INFLUXDB_DATABASE,
                                      'relay', relay_id, 'duration_sec',
                                      duration, timestamp,))
                            write_db.start()

                        self.relay_on_until[relay_id] = time_now+datetime.timedelta(seconds=duration)
                        self.relay_last_duration[relay_id] = duration
                        return 0
                    elif self.is_on(relay_id) and not self.relay_on_duration:
                        self.relay_on_duration[relay_id] = True
                        self.relay_on_until[relay_id] = time_now+datetime.timedelta(seconds=duration)
                        self.relay_last_duration[relay_id] = duration

                        self.logger.debug("[Relay] Relay {} ({}) is currently"
                                          " on without a duration. Turning "
                                          "into a duration  of {:.1f} "
                                          "seconds.".format(self.relay_id[relay_id],
                                                            self.relay_name[relay_id],
                                                            duration))
                        return 0
                    else:
                        self.relay_on_until[relay_id] = time_now+datetime.timedelta(seconds=duration)
                        self.relay_on_duration[relay_id] = True
                        self.relay_last_duration[relay_id] = duration
                        self.logger.debug("[Relay] Relay {} ({}) on for {:.1f} "
                                          "seconds.".format(self.relay_id[relay_id],
                                                             self.relay_name[relay_id],
                                                             duration))
                        GPIO.output(self.relay_pin[relay_id], self.relay_trigger[relay_id])

                else:
                    if self.is_on(relay_id):
                        self.logger.warning("[Relay] Relay {} ({}) is already on.".format(
                                self.relay_id[relay_id],
                                self.relay_name[relay_id]))
                        return 1
                    else:
                        # Record the time the relay was turned on in order to
                        # calculate and log the total duration it was on, when
                        # it eventually turns off.
                        self.relay_time_turned_on[relay_id] = datetime.datetime.now()
                        self.logger.debug("[Relay] Relay {rid} ({rname}) ON "
                            "at {timeon}.".format(rid=self.relay_id[relay_id],
                                                  rname=self.relay_name[relay_id],
                                                  timeon=self.relay_time_turned_on[relay_id]))
                        GPIO.output(self.relay_pin[relay_id],
                                    self.relay_trigger[relay_id])

        else:
            # Turn relay off
            if self._is_setup(self.relay_pin[relay_id]) and self.relay_pin[relay_id]:  # if pin not 0
                self.relay_on_duration[relay_id] = False
                self.relay_on_until[relay_id] = datetime.datetime.now()
                GPIO.output(self.relay_pin[relay_id], not self.relay_trigger[relay_id])
                self.logger.debug("[Relay] Relay {} ({}) turned off.".format(
                        self.relay_id[relay_id],
                        self.relay_name[relay_id]))

                if self.relay_time_turned_on[relay_id] != None:
                    # Write the duration the relay was ON to the database
                    # at the timestamp it turned ON
                    duration = (datetime.datetime.now()-self.relay_time_turned_on[relay_id]).total_seconds()
                    timestamp = datetime.datetime.utcnow()-datetime.timedelta(seconds=duration)
                    write_db = threading.Thread(
                        target=write_influxdb_value,
                        args=(self.logger, INFLUXDB_HOST,
                              INFLUXDB_PORT, INFLUXDB_USER,
                              INFLUXDB_PASSWORD, INFLUXDB_DATABASE,
                              'relay', relay_id, 'duration_sec',
                              duration, timestamp,))
                    write_db.start()
                    self.relay_time_turned_on[relay_id] = None

        if trigger_conditionals:
            if state == 'on' and duration != 0:
                self.checkConditionals(relay_id, 0)
            self.checkConditionals(relay_id, duration)
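
Note the signature at the top of this example: datetime_now=datetime.datetime.now() is evaluated once, when the method is defined, so any call that relies on the default sees that single definition-time timestamp rather than the current time. A minimal sketch of the usual workaround, a None default resolved at call time (relay_event() is a hypothetical stand-in):

import datetime

def relay_event(state, datetime_now=None):
    # Resolve the default at call time, not at definition time.
    if datetime_now is None:
        datetime_now = datetime.datetime.now()
    return "relay turned %s at %s" % (state, datetime_now.isoformat())

print(relay_event('on'))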

Example 18

Project: galaxy
Source File: 0001_initial.py
View license
    def forwards(self, orm):
        # Adding model 'Category'
        db.create_table(u'main_category', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=512, db_index=True)),
        ))
        db.send_create_signal(u'main', ['Category'])

        # Adding model 'Platform'
        db.create_table(u'main_platform', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512, db_index=True)),
            ('release', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ))
        db.send_create_signal(u'main', ['Platform'])

        # Adding model 'Role'
        db.create_table(u'main_role', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512, db_index=True)),
            ('owner', self.gf('django.db.models.fields.related.ForeignKey')(related_name='roles', to=orm['accounts.CustomUser'])),
            ('github_user', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('github_repo', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('min_ansible_version', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
            ('issue_tracker_url', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
            ('license', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('company', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('is_valid', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('featured', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('bayesian_score', self.gf('django.db.models.fields.FloatField')(default=0.0, db_index=True)),
        ))
        db.send_create_signal(u'main', ['Role'])

        # Adding unique constraint on 'Role', fields ['owner', 'name']
        db.create_unique(u'main_role', ['owner_id', 'name'])

        # Adding M2M table for field authors on 'Role'
        m2m_table_name = db.shorten_name(u'main_role_authors')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('role', models.ForeignKey(orm[u'main.role'], null=False)),
            ('customuser', models.ForeignKey(orm[u'accounts.customuser'], null=False))
        ))
        db.create_unique(m2m_table_name, ['role_id', 'customuser_id'])

        # Adding M2M table for field dependencies on 'Role'
        m2m_table_name = db.shorten_name(u'main_role_dependencies')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_role', models.ForeignKey(orm[u'main.role'], null=False)),
            ('to_role', models.ForeignKey(orm[u'main.role'], null=False))
        ))
        db.create_unique(m2m_table_name, ['from_role_id', 'to_role_id'])

        # Adding M2M table for field categories on 'Role'
        m2m_table_name = db.shorten_name(u'main_role_categories')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('role', models.ForeignKey(orm[u'main.role'], null=False)),
            ('category', models.ForeignKey(orm[u'main.category'], null=False))
        ))
        db.create_unique(m2m_table_name, ['role_id', 'category_id'])

        # Adding model 'RoleVersion'
        db.create_table(u'main_roleversion', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=512, db_index=True)),
            ('role', self.gf('django.db.models.fields.related.ForeignKey')(related_name='versions', to=orm['main.Role'])),
            ('release_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('loose_version', self.gf('galaxy.main.fields.LooseVersionField')(db_index=True)),
        ))
        db.send_create_signal(u'main', ['RoleVersion'])

        # Adding M2M table for field platforms on 'RoleVersion'
        m2m_table_name = db.shorten_name(u'main_roleversion_platforms')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('roleversion', models.ForeignKey(orm[u'main.roleversion'], null=False)),
            ('platform', models.ForeignKey(orm[u'main.platform'], null=False))
        ))
        db.create_unique(m2m_table_name, ['roleversion_id', 'platform_id'])

        # Adding model 'RoleImport'
        db.create_table(u'main_roleimport', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('role', self.gf('django.db.models.fields.related.ForeignKey')(related_name='imports', to=orm['main.Role'])),
            ('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, db_index=True, blank=True)),
            ('released', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
        ))
        db.send_create_signal(u'main', ['RoleImport'])

        # Adding model 'RoleRating'
        db.create_table(u'main_rolerating', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, auto_now=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=512, db_index=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['accounts.CustomUser'])),
            ('role', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['main.Role'])),
            ('ease_of_use', self.gf('django.db.models.fields.IntegerField')(default=5)),
            ('documentation', self.gf('django.db.models.fields.IntegerField')(default=5)),
            ('best_practices', self.gf('django.db.models.fields.IntegerField')(default=5)),
            ('repeatability', self.gf('django.db.models.fields.IntegerField')(default=5)),
            ('platform_support', self.gf('django.db.models.fields.IntegerField')(default=5)),
            ('overall', self.gf('django.db.models.fields.IntegerField')(default=5)),
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('created_on', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
            ('last_edited', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, auto_now_add=True, db_index=True, blank=True)),
            ('score', self.gf('django.db.models.fields.IntegerField')(default=0, db_index=True)),
        ))
        db.send_create_signal(u'main', ['RoleRating'])

        # Adding M2M table for field up_votes on 'RoleRating'
        m2m_table_name = db.shorten_name(u'main_rolerating_up_votes')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('rolerating', models.ForeignKey(orm[u'main.rolerating'], null=False)),
            ('customuser', models.ForeignKey(orm[u'accounts.customuser'], null=False))
        ))
        db.create_unique(m2m_table_name, ['rolerating_id', 'customuser_id'])

        # Adding M2M table for field down_votes on 'RoleRating'
        m2m_table_name = db.shorten_name(u'main_rolerating_down_votes')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('rolerating', models.ForeignKey(orm[u'main.rolerating'], null=False)),
            ('customuser', models.ForeignKey(orm[u'accounts.customuser'], null=False))
        ))
        db.create_unique(m2m_table_name, ['rolerating_id', 'customuser_id'])
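
This migration passes datetime.datetime.now itself (no parentheses) as the default for the modified columns, so the framework calls it whenever a default value is needed. Writing datetime.datetime.now() instead would evaluate once at import time and freeze that moment into every row. A quick sketch of the difference, using hypothetical plain dicts in place of real field definitions:

import datetime

# Passing the callable: a fresh timestamp is produced each time the default is applied.
field_with_callable = {'default': datetime.datetime.now}
# Passing the call result: one timestamp, computed right now, reused forever.
field_with_value = {'default': datetime.datetime.now()}

print(callable(field_with_callable['default']))  # True
print(callable(field_with_value['default']))     # False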

Example 19

Project: Cerebrum
Source File: perception.py
View license
	@staticmethod
	def start(video_input, vision_perception_stimulated):

		if video_input == "0": # If the video_input is None, then we are reading from webcam
			camera = cv2.VideoCapture(0)
			time.sleep(0.25)
		else: # Otherwise, we are reading from a video file
			time.sleep(0.25)
			camera = cv2.VideoCapture(video_input)

		referenceFrame = None # Initialize the reference frame in the video stream

		starting_time = None
		memory_data_thresh = []
		memory_data_frameDeltaColored = []

		(grabbed, first_frame) = camera.read() # Grab the first frame

		height, width = first_frame.shape[:2] # Get video height and width  from first frame(size)
		#if not height == 720 or not width == 1280:
		if float(width) / float(height) != float(16) / float(9):
			if video_input == "0":
				# There is a STUPIDTY in here
				pass
			else:
				raise ValueError('Aspect ratio of input stream must be [16:9]')
			#warnings.warn("Aspect ratio of input stream must be [16:9]")

		frame_counter = 1 # Define frame counter variable
		motion_detected = 0 # Delta situation checking variable
		delta_value_stack = [] # List of delta values
		non_stationary_camera = 0
		motion_counter = 0
		nonzero_toolow = 0

		beginning_of_stream = datetime.datetime.now()
		while True: # Loop over the frames of the video

			(grabbed, frame) = camera.read() # Grab the current frame and initialize the occupied/unoccupied
			if not grabbed: # If the frame could not be grabbed, then we have reached the end of the video
				break
			frame_counter += 1 # Increase frame counter's value

			if video_input == "0":
				# If we are capturing from camera fuck Time Correction, there is also a STUPIDTY in here
				pass
			else:
				# -------------------- TIME CORRECTION --------------------
				time_delta = datetime.datetime.now() - beginning_of_stream
				current_time_of_realworld = time_delta.seconds + time_delta.microseconds / float(1000000)
				current_time_of_stream = frame_counter / camera.get(cv2.cv.CV_CAP_PROP_FPS)
				diff_of_time = current_time_of_stream - current_time_of_realworld
				if abs(diff_of_time) > (1 / camera.get(cv2.cv.CV_CAP_PROP_FPS)):
					if diff_of_time > 0:
						time.sleep(1 / camera.get(cv2.cv.CV_CAP_PROP_FPS))
					else:
						(grabbed, frame) = camera.read() # Grab the current frame and initialize the occupied/unoccupied
						if not grabbed: # If the frame could not be grabbed, then we have reached the end of the video
							break
						frame_counter += 1 # Increase frame counter's value
						continue
				# -------------------- TIME CORRECTION --------------------

			delta_value = 0 # Delta Value for storing max continuous contour area for current frame

			frame = imutils.resize(frame, height=TARGET_HEIGHT) # Resize frame to 360p. Alternative resizing method:
			height, width = frame.shape[:2] # Get video height and width  from first frame(size)

			gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert frame to grayscale

			gray = cv2.bilateralFilter(gray,9,75,75) # Blur current frame with Bilateral Filter for noise reduction

			if referenceFrame is None: # If Reference Frame is None, initialize it
				referenceFrame = gray
				continue

			frameDelta = cv2.absdiff(referenceFrame, gray) # Compute the absolute difference between the current frame and reference frame
			thresh = cv2.threshold(frameDelta, 12, 255, cv2.THRESH_BINARY)[1] # Apply OpenCV's threshold function to get binary frame

			thresh = cv2.dilate(thresh, None, iterations=1) # Dilate the thresholded image to fill in holes
			frameDeltaColored = cv2.bitwise_and(frame,frame, mask= thresh) # Bitwise and - to get delta frame

			# Find contours on thresholded image
			(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
				cv2.CHAIN_APPROX_SIMPLE)

			contour_area_stack = [] # List of contour areas's values

			# Loop over the contours
			if cnts:
				for c in cnts: # Contour in Contours
					contour_area_stack.append(cv2.contourArea(c)) # Calculate contour area and append to contour stack
					if cv2.contourArea(c) > MIN_AREA: # If contour area greater than min area
						(x, y, w, h) = cv2.boundingRect(c) # Compute the bounding box for this contour
						cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) # Draw it on the frame
				delta_value = max(contour_area_stack) # Assign max contour area to delta value

				if delta_value > MIN_AREA: # If max contour area (delta value) greater than min area
					motion_detected = 1 # Initialize delta situation

				if delta_value > (height * width * float(NON_STATIONARY_PERCENTAGE) / float(100)): # If delta value is too large (camera itself probably moved)
					non_stationary_camera = 1
					status_text = "WARNING: NON-STATIONARY CAMERA"
					frameDeltaColored = numpy.zeros_like(frame)
				else:
					non_stationary_camera = 0

				if cv2.countNonZero(thresh) < (height * width * float(NON_ZERO_PERCENTAGE) / float(100)): # If Non Zero count is too low
					nonzero_toolow = 1
					status_text = "WARNING: NON-ZERO TOO LOW"
					frameDeltaColored = numpy.zeros_like(frame)
				else:
					nonzero_toolow = 0

			if motion_detected: # If we are on delta situation

				if starting_time is None:
					starting_time = datetime.datetime.now() # Starting time of the memory
					vision_perception_stimulated.value = 1 # Vision perception stimulated

				if random.randint(0,2) == 1: # IMPORTANT
					memory_data_thresh.append(thresh.tostring())
					memory_data_frameDeltaColored.append(frameDeltaColored.tostring())
					#print type(memory_data_thresh[0])

				if not non_stationary_camera:
					status_text = "MOTION DETECTED"
				delta_value_stack.append(delta_value) # Append max contour area (delta value) to delta value stack

				if len(delta_value_stack) >= STABILIZATION_DETECTION: # If length of delta value stack is greater than or equal to STABILIZATION_DETECTION constant
					delta_value_stack.pop(0) # Pop first element of delta value stack
					# If the delta values have stabilized (min and max of the stack are within MIN_AREA / 2 of its mean)
					if min(delta_value_stack) > (numpy.mean(delta_value_stack) - MIN_AREA / 2) and max(delta_value_stack) < (numpy.mean(delta_value_stack) + MIN_AREA / 2):
						ending_time = datetime.datetime.now() # Ending time of the memory
						vision_perception_stimulated.value = 0 # Vision perception NOT stimulated

						if memory_data_thresh and memory_data_frameDeltaColored:
							process4 = multiprocessing.Process(target=VisionMemoryUtil.add_memory, args=(memory_data_thresh, memory_data_frameDeltaColored, starting_time, ending_time)) # Define write memory process
							process4.start() # Start write memory process
						memory_data_thresh = []
						memory_data_frameDeltaColored = []
						starting_time = None

						motion_detected = 0 # Then video STABILIZED
						delta_value_stack = [] # Empty delta value stack
						referenceFrame = None  # Clear reference frame
						if not non_stationary_camera and not nonzero_toolow:
							motion_counter += 1
			else:
				if not non_stationary_camera and not nonzero_toolow:
					status_text = "MOTION UNDETECTED"
					frameDeltaColored = numpy.zeros_like(frame)

			# Draw the text and timestamp on the frame
			cv2.putText(frame, "Diff    : {}".format(delta_value), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
			cv2.putText(frame, "Thresh : {}".format(MIN_AREA), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
			cv2.putText(frame, "Frame : {}".format(frame_counter), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
			cv2.putText(frame, "Status  : {}".format(status_text), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
			cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)

			# Show the frames and record if the user presses ESC or q
			cv2.imshow("Original Frame", frame)
			cv2.moveWindow("Original Frame",50 * SCREEN_WIDTH / 1920,100 * SCREEN_HEIGHT / 1080)
			cv2.imshow("Frame Threshhold", thresh)
			cv2.moveWindow("Frame Threshhold",50 * SCREEN_WIDTH / 1920,550 * SCREEN_HEIGHT / 1080)
			cv2.imshow("Frame Delta", frameDelta)
			cv2.moveWindow("Frame Delta",1200 * SCREEN_WIDTH / 1920,550 * SCREEN_HEIGHT / 1080)
			cv2.imshow("Frame Delta Colored", frameDeltaColored)
			cv2.moveWindow("Frame Delta Colored",1200 * SCREEN_WIDTH / 1920,100 * SCREEN_HEIGHT / 1080)
			key = cv2.waitKey(1) & 0xFF

			# if the `ESC` or `q` key is pressed, break the loop
			if key == ord("q") or key == ord("\x1b"):
				os.system("killall python") # Temporary line for practicality in DEVELOPMENT
				break

		cv2.destroyAllWindows() # Close any open windows
		camera.release() # Release the capture device

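The time-correction block in the example above boils down to comparing wall-clock time (datetime.datetime.now() minus the stream's start) against the stream's own clock (frame count divided by FPS). The sketch below is not part of the Cerebrum project; it restates that idea in isolation, with a hypothetical FPS constant and a plain frame iterable standing in for cv2.VideoCapture.

import datetime
import time

FPS = 25.0  # assumed frame rate of the recorded stream (placeholder)

def play_in_real_time(frames):
    """Yield frames no faster than real time; drop frames when falling behind."""
    beginning_of_stream = datetime.datetime.now()
    frame_counter = 0
    frames = iter(frames)
    for frame in frames:
        frame_counter += 1
        elapsed = datetime.datetime.now() - beginning_of_stream
        real_world_time = elapsed.total_seconds()
        stream_time = frame_counter / FPS
        drift = stream_time - real_world_time
        if drift > 1.0 / FPS:
            time.sleep(1.0 / FPS)   # ahead of real time: wait one frame period
        elif drift < -1.0 / FPS:
            next(frames, None)      # behind real time: skip the next frame
            frame_counter += 1
        yield frame

The example applies the same comparison per frame, but sleeps or skips using the capture device's reported rate (cv2.cv.CV_CAP_PROP_FPS) instead of a constant.
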
Example 21

Project: edx2bigquery
Source File: run_external.py
View license
def run_external_script(extcmd, param, ecinfo, course_id):
    """
    Run external script on specified course.

    extcmd = string specifying external command to run
    param = command line parameters, including extparam
    ecinfo = external command info from edx2bigquery_config
    course_id = course_id to run external command on
    """
    # use default for base set of parameters
    ed_name = ecinfo.get('default_parameters', 'DEFAULT')
    settings = ecinfo.get(ed_name, {})
    settings.update(ecinfo.get(extcmd))
    # print "settings: ", json.dumps(settings, indent=4)
    
    print settings['name']
    
    if param.verbose:
        print settings.get('description', '')

    cidns = course_id.replace('/', '__')
    cidns_nodots = course_id.replace('/', '__').replace('.', '_').replace('-', '_')

    mypath = path(os.path.realpath(__file__)).dirname()
    edx2bigquery_context = {'lib': mypath / "lib",
                            'bin': mypath / "bin",
                        }

    the_template = settings['template'].format(**edx2bigquery_context)
    fnpre = settings['filename_prefix']
    lfn = "%s-%s.log" % (fnpre, cidns)
    if settings.get('logs_dir'):
        lfn = path(settings['logs_dir']) / lfn

    try:
        ofn = settings['script_fn'].format(filename_prefix=fnpre, cidns=cidns)
    except Exception as err:
        print "oops, errr %s" % str(err)
        print "settings=", json.dumps(settings, indent=4)
        raise
    cwd = os.getcwd()

    the_date = str(datetime.datetime.now())

    dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=param.use_dataset_latest)
    table_prefix = dataset

    if param.force_recompute:
        param.force_recompute = 1
    else:
        param.force_recompute = 0

    context = {'course_id': course_id,
               'script_name': ofn,
               'the_date': the_date,
               'cidns': cidns,
               'cidns_nodots': cidns_nodots,
               'template_file': the_template,
               'log_file': lfn,
               'filename_prefix': fnpre,
               'filename_prefix_cidns': "%s__%s" % (fnpre, cidns),
               'working_dir': cwd,
               'table_prefix': table_prefix,
               'lib_dir': edx2bigquery_context['lib'],
               'bin_dir': edx2bigquery_context['bin'],
    }
    context.update(settings)
    context.update(param.__dict__)

    rundir = settings['run_dir'].format(**context)
    runcmd = settings['script_cmd'].format(**context)

    tem = codecs.open(the_template).read()
    tem = unicode(tem)
    try:
        # script_file = tem.format(**context)
        script_file = Template(tem).render(**context)
    except Exception as err:
        print "Oops, cannot properly format template %s" % the_template
        print "Error %s" % str(err)
        print "context: ", json.dumps(context, indent=4)
        raise
    ofndir = path(ofn).dirname()
    if not os.path.exists(ofndir):
        print "[Warning] Directory %s doesn't exist - creating it" % ofndir
        os.mkdir(ofndir)
    fp = codecs.open(ofn, 'w', encoding="utf8")
    fp.write(script_file)
    fp.close()
    print "Generated %s" % ofn

    # if depends_on is defined, and force_recompute is not true, then skip
    # run if output already exists and is newer than all depends_on tables.

    depends_on = settings.get('depends_on')
    output_table = settings.get('output_table')
    if depends_on and not type(depends_on)==list:
        depends_on = [ depends_on ]
    do_compute = param.force_recompute
    if (not param.force_recompute) and depends_on and output_table:
        # does output already exist?
        has_output = False
        try:
            tinfo = bqutil.get_bq_table_info(dataset, output_table)
            if tinfo:
                has_output = True
        except:
            pass
        if not has_output:
            print "Output table %s.%s doesn't exist: running" % (dataset, output_table)
            do_compute = True
        else:
            table_date = tinfo['lastModifiedTime']
            for deptab in depends_on:
                try:
                    dtab_date = bqutil.get_bq_table_last_modified_datetime(dataset, deptab)
                except Exception as err:
                    raise Exception("[run_external] missing dependent table %s.%s" % (dataset, deptab))
                if not dtab_date:
                    raise Exception("[run_external] missing dependent table %s.%s" % (dataset, deptab))
                if table_date and dtab_date > table_date:
                    do_compute = True
                    break
            if not do_compute:
                print "Output table %s.%s exists and is newer than %s, skipping" % (dataset, output_table, depends_on)
            
    if do_compute:
        os.chdir(rundir)
        print "Working directory: %s" % rundir
        print "Logging to %s" % lfn
        print "Run command: %s" % runcmd
        sys.stdout.flush()
        if not param.skiprun:
            start = datetime.datetime.now()

            if param.submit_condor:
                condor_template_fn = settings.get('condor_job_template', '').format(**edx2bigquery_context)
                if not condor_template_fn:
                    raise Exception("[run_external] missing condor_job_template specification for %s" % (extcmd))
                condor_submit_fn = "CONDOR/{filename_prefix}-{cidns}.submit".format(**context)
                context.update({ 'MEMORY': 32768,
                                 'arguments': '{script_name}'.format(**context),
                                 'executable': context['script_cmd'],
                                 'input_file': '',
                                 'filename': condor_submit_fn,
                                 })
                condor_template = Template(open(condor_template_fn).read()).render(**context)
                dirs = ['CONDOR', 'JOBS']
                for dir in dirs:
                    if not os.path.exists(dir):
                        os.mkdir(dir)
                fp = open(condor_submit_fn, 'w')
                fp.write(condor_template)
                fp.close()
                cmd = "condor_submit %s" % condor_submit_fn
                print cmd
                jobid = None
                for k in os.popen(cmd):
                    m = re.search('submitted to cluster ([0-9]+)', k)
                    if m:
                        jobid = m.group(1)
                dt = str(datetime.datetime.now())
                jobfile = 'condor_jobs.csv'
                open(jobfile, 'a').write("%s,%s,%s,%s\n" % (course_id, dt, jobid, lfn))
                print "[%s] Submitted as condor job %s at %s" % (course_id, jobid, dt)
                # print "[run_external] submitted %s, job=%s" % (extcmd, jobnum)
                return
            else:
                os.system(runcmd)

            if settings.get('type')=="stata":
                # cleanup leftover log file after stata batch run
                batch_log = ofn.split('.')[0] + ".log"
                if os.path.exists(batch_log):
                    os.unlink(batch_log)
                    print "Removed old log file %s" % batch_log

            end = datetime.datetime.now()
            has_output = False
            try:
                tinfo = bqutil.get_bq_table_info(dataset, output_table)
                if tinfo:
                    has_output = True
            except:
                pass
            success = has_output
            dt = end-start
            print "[run_external] DONE WITH %s, success=%s, dt=%s" % (extcmd, success, dt)
            sys.stdout.flush()
            if param.parallel and not success:
                raise Exception("[run_external] External command %s failed on %s" % (extcmd, course_id))

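Stripped of the BigQuery and Condor specifics, the example above uses datetime.datetime.now() in two ways: as a human-readable timestamp written into the generated script's context, and as a start/end pair whose difference (a timedelta) is reported when the run finishes. Below is a minimal sketch of that timing pattern, with a placeholder shell command rather than the project's templated script_cmd.

import datetime
import subprocess

def run_and_time(cmd):
    the_date = str(datetime.datetime.now())   # timestamp for logging
    start = datetime.datetime.now()
    returncode = subprocess.call(cmd, shell=True)
    end = datetime.datetime.now()
    dt = end - start                          # datetime.timedelta
    print("[run_and_time] started %s, exit=%s, dt=%s" % (the_date, returncode, dt))
    return returncode, dt

run_and_time("echo hello")
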
Example 23

Project: mongo-python-driver
Source File: cursor.py
View license
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        from_command = False

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
                from_command = response.from_command
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                listeners.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except Exception as exc:
                if publish:
                    duration = datetime.datetime.now() - start
                    listeners.publish_command_failure(
                        duration, _convert_exception(exc), cmd_name, rqst_id,
                        self.__address)
                if isinstance(exc, ConnectionFailure):
                    self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
            if from_command:
                helpers._check_command_response(doc['data'][0])
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, _convert_exception(exc), cmd_name, rqst_id,
                    self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if from_command:
                res = doc['data'][0]
            elif cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {"cursor": {"id": doc["cursor_id"],
                                  "ns": self.__collection.full_name},
                       "ok": 1}
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            listeners.publish_command_success(
                duration, res, cmd_name, rqst_id, self.__address)

        if from_command and cmd_name != "explain":
            cursor = doc['data'][0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = doc["cursor_id"]
            self.__data = deque(doc["data"])
            self.__retrieved += doc["number_returned"]

        if self.__id == 0:
            self.__killed = True


        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()

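In the cursor example above, datetime.datetime.now() samples bracket each phase of the operation so a command-monitoring listener can be told the total duration, including time already spent on the network (cmd_duration). The sketch below reduces that bookkeeping to its core; it is not PyMongo's actual API, and the listener is a hypothetical object with publish_success/publish_failure methods.

import datetime

def decode_with_monitoring(data, decode, listener, cmd_duration):
    start = datetime.datetime.now()
    try:
        doc = decode(data)
    except Exception as exc:
        # Report network time plus however long decoding ran before failing.
        duration = (datetime.datetime.now() - start) + cmd_duration
        listener.publish_failure(duration, exc)
        raise
    duration = (datetime.datetime.now() - start) + cmd_duration
    listener.publish_success(duration, doc)
    return doc
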
Example 25

Project: openode
Source File: thread.py
View license
@csrf.csrf_protect
#@cache_page(60 * 5)
def thread(request, node_id, node_slug, module, thread_id, thread_slug):
    # TODO: refactor - long subroutine. display question body, answers and comments
    """view that displays body of the question and
    all answers to it
    """
    node = get_object_or_404(Node, pk=node_id)
    thread = get_object_or_404(Thread, pk=thread_id, node=node)

    # raise not found if module is disabled
    if not getattr(node, "module_%s" % const.NODE_MODULE_BY_THREAD_TYPE[thread.thread_type], False):
        raise Http404

    if not request.user.has_openode_perm('%s_read' % thread.thread_type, thread):
        return render_forbidden(request)

    if module not in const.THREAD_TYPE_BY_NODE_MODULE or const.THREAD_TYPE_BY_NODE_MODULE[module] != thread.thread_type:
        raise Http404()

    if module == const.NODE_MODULE_LIBRARY:
        return document_detail_view(request, node, thread)

    if node.slug != node_slug or thread.slug != thread_slug:
        return HttpResponseRedirect(reverse('thread', kwargs={
            'node_id': node_id,
            'node_slug': node.slug,
            'module': module,
            'thread_id': thread_id,
            'thread_slug': thread.slug
        }))

    # process url parameters
    # todo: fix inheritance of sort method from questions
    # before = datetime.datetime.now()
    default_sort_method = request.session.get('questions_sort_method', thread.get_default_sort_method())
    form = ShowQuestionForm(request.GET, default_sort_method)
    form.full_clean()  # always valid
    show_answer = form.cleaned_data['show_answer']
    show_comment = form.cleaned_data['show_comment']
    show_page = form.cleaned_data['show_page']
    answer_sort_method = form.cleaned_data['answer_sort_method']

    main_post = thread._main_post()
    try:
        main_post.assert_is_visible_to(request.user)
    except openode_exceptions.QuestionHidden, error:
        request.user.message_set.create(message=unicode(error))
        return HttpResponseRedirect(reverse('index'))

    # redirect if slug in the url is wrong
    # if request.path.split('/')[-2] != question_post.slug:
    #     logging.debug('no slug match!')
    #     question_url = '?'.join((
    #                         question_post.get_absolute_url(),
    #                         urllib.urlencode(request.GET)
    #                     ))
    #     return HttpResponseRedirect(question_url)

    # resolve comment and answer permalinks
    # they go first because in theory both can be moved to another question
    # this block "returns" show_post and assigns actual comment and answer
    # to show_comment and show_answer variables
    # in the case if the permalinked items or their parents are gone - redirect
    # redirect also happens if id of the object's origin post != requested id
    show_post = None  # used for permalinks

    if show_comment:
        # if url calls for display of a specific comment,
        # check that comment exists, that it belongs to
        # the current question
        # if it is an answer comment and the answer is hidden -
        # redirect to the default view of the question
        # if the question is hidden - redirect to the main page
        # in addition - if url points to a comment and the comment
        # is for the answer - we need the answer object
        try:
            show_comment = models.Post.objects.get_comments().get(id=show_comment)
        except models.Post.DoesNotExist:
            error_message = _(
                'Sorry, the comment you are looking for has been '
                'deleted and is no longer accessible'
            )
            request.user.message_set.create(message=error_message)
            return HttpResponseRedirect(thread.get_absolute_url())

        if str(show_comment.thread.id) != str(thread_id):
            return HttpResponseRedirect(show_comment.get_absolute_url())
        show_post = show_comment.parent

        try:
            show_comment.assert_is_visible_to(request.user)
        except openode_exceptions.AnswerHidden, error:
            request.user.message_set.create(message=unicode(error))
            #use reverse function here because question is not yet loaded
            return HttpResponseRedirect(thread.get_absolute_url())
        except openode_exceptions.QuestionHidden, error:
            request.user.message_set.create(message=unicode(error))
            return HttpResponseRedirect(reverse('index'))

    elif show_answer:
        # if the url calls to view a particular answer to
        # question - we must check whether the question exists
        # whether answer is actually corresponding to the current question
        # and that the visitor is allowed to see it
        show_post = get_object_or_404(models.Post, post_type='answer', id=show_answer)
        if str(show_post.thread.id) != str(thread_id):
            return HttpResponseRedirect(show_post.get_absolute_url())

        try:
            show_post.assert_is_visible_to(request.user)
        except django_exceptions.PermissionDenied, error:
            request.user.message_set.create(message=unicode(error))
            return HttpResponseRedirect(thread.get_absolute_url())

    # logging.debug('answer_sort_method=' + unicode(answer_sort_method))

    # load answers and post id's->athor_id mapping
    # posts are pre-stuffed with the correctly ordered comments

    # authors = request.GET.get("authors", "")
    from openode.utils.text import extract_numbers
    authors_ids = extract_numbers(request.GET.get("authors", ""))
    authors = []

    qs = None
    if authors_ids:
        authors = User.objects.filter(
            pk__in=authors_ids,
            is_active=True,
            is_hidden=False
        )
        qs = thread.posts.filter(
            author__in=authors,
            deleted=False
        )

    # Question flow: show only published answers
    if node.is_question_flow_enabled and (request.user not in thread.node.get_responsible_persons()):
        qs = qs or thread.posts.all()
        qs = qs.filter(pk=thread.accepted_answer_id
            # question_flow_is_published=True
            # | Q(thread__question_flow_responsible_user=request.user)
            # | Q(thread__question_flow_interviewee_user=request.user)
        )

    updated_main_post, answers, post_to_author = thread.get_cached_post_data(
        sort_method=answer_sort_method,
        user=request.user,
        qs=qs
    )

    if updated_main_post:
        main_post.set_cached_comments(
            updated_main_post.get_cached_comments()
        )

    # Post.objects.precache_comments(for_posts=[question_post] + answers, visitor=request.user)

    user_votes = {}
    user_post_id_list = list()
    # TODO: cache this query set, but again takes only 3ms!
    if request.user.is_authenticated():
        user_votes = Vote.objects.filter(
            user=request.user,
            voted_post__id__in=post_to_author.keys()
        ).values_list(
            'voted_post_id',
            'vote'
        )
        user_votes = dict(user_votes)
        # we can avoid making this query by iterating through
        # already loaded posts
        user_post_id_list = [
            post_id for post_id in post_to_author if post_to_author[post_id] == request.user.id
        ]

    # resolve page number and comment number for permalinks
    show_comment_position = None
    if show_comment:
        show_page = show_comment.get_page_number(answer_posts=answers)
        show_comment_position = show_comment.get_order_number()
    elif show_answer:
        show_page = show_post.get_page_number(answer_posts=answers)

    ###################################
    # paginator
    ###################################

    if thread.is_question():
        per_page = maxint
    else:
        per_page = const.ANSWERS_PAGE_SIZE

    # define posts position on paginator pages
    posts_per_pages = {}
    for i, post in enumerate(answers):
        posts_per_pages[post.pk] = 1 + (i // per_page)

    objects_list = Paginator(answers, per_page)
    if show_page > objects_list.num_pages:
        return HttpResponseRedirect(main_post.get_absolute_url())
    page_objects = objects_list.page(show_page)

    count_visit(request, thread, main_post)

    base_url = request.path + '?sort=%s&' % answer_sort_method
    if authors:
        base_url = "%sauthors=%s&amp;" % (
            base_url,
            ",".join([str(pk) for pk in authors.values_list("pk", flat=True)])
        )

    paginator_data = {
        'is_paginated': (objects_list.count > per_page),
        'pages': objects_list.num_pages,
        'page': show_page,
        'has_previous': page_objects.has_previous(),
        'has_next': page_objects.has_next(),
        'previous': page_objects.previous_page_number(),
        'next': page_objects.next_page_number(),
        'base_url': base_url,
    }
    paginator_context = functions.setup_paginator(paginator_data)

    ###################################

    initial = {
        'email_notify': thread.is_subscribed_by(request.user)
    }

    # maybe load draft
    if request.user.is_authenticated():
        # todo: refactor into methor on thread
        drafts = models.DraftAnswer.objects.filter(
            author=request.user,
            thread=thread
        )
        if drafts.count() > 0:
            initial['text'] = drafts[0].text

    # answer form
    if request.method == "POST":

        if not thread.has_response_perm(request.user):
            return render_forbidden(request)

        answer_form = AnswerForm(request.POST, node=node)
        if answer_form.is_valid():
            text = answer_form.cleaned_data['text']
            update_time = datetime.datetime.now()

            if request.user.is_authenticated():
                drafts = models.DraftAnswer.objects.filter(
                    author=request.user,
                    thread=thread
                    )
                drafts.delete()
                try:
                    follow = answer_form.cleaned_data['email_notify']

                    user = request.user

                    if thread.node.is_question_flow_enabled:
                        question_flow_state_original = thread.question_flow_state


                    answer = user.post_answer(
                        question=main_post,
                        body_text=text,
                        follow=follow,
                        timestamp=update_time,
                        )

                    if thread.node.is_question_flow_enabled:
                        if (answer.thread.question_flow_state == const.QUESTION_FLOW_STATE_ANSWERED) \
                            and (question_flow_state_original != const.QUESTION_FLOW_STATE_ANSWERED):
                            request.user.message_set.create(
                                message=_(u"Your answer was sent to the club manager and will be published after approval.")
                            )


                    return HttpResponseRedirect(answer.get_absolute_url())
                except openode_exceptions.AnswerAlreadyGiven, e:
                    request.user.message_set.create(message=unicode(e))
                    answer = thread.get_answers_by_user(request.user)[0]
                    return HttpResponseRedirect(answer.get_absolute_url())
                except django_exceptions.PermissionDenied, e:
                    request.user.message_set.create(message=unicode(e))
            else:
                request.session.flush()
                models.AnonymousAnswer.objects.create(
                    question=main_post,
                    text=text,
                    summary=strip_tags(text)[:120],
                    session_key=request.session.session_key,
                    ip_addr=request.META['REMOTE_ADDR'],
                )
                return HttpResponseRedirect(url_utils.get_login_url())
    else:
        answer_form = AnswerForm(initial=initial, node=node)

    user_can_post_comment = (
        request.user.is_authenticated() and request.user.can_post_comment()
    )

    user_already_gave_answer = False
    previous_answer = None
    if request.user.is_authenticated():
        if openode_settings.LIMIT_ONE_ANSWER_PER_USER and module == const.NODE_MODULE_QA:
            for answer in answers:
                if answer.author == request.user:
                    user_already_gave_answer = True
                    previous_answer = answer
                    break

    from openode.views.readers import SearchUserForm

    search_user_form = SearchUserForm()

    # authors

    context = {
        "search_user_form": search_user_form,
        "authors": authors,

        'is_cacheable': False,  # is_cacheable, #temporary, until invalidation fix
        'long_time': const.LONG_TIME,  # "forever" caching
        'page_class': 'question-page',
        'active_tab': 'questions',
        'main_post': main_post,
        'thread': thread,
        'answer_form': answer_form,
        'answers': page_objects.object_list,
        'answer_count': thread.get_answer_count(request.user),
        'user_votes': user_votes,
        'user_post_id_list': user_post_id_list,
        'user_can_post_comment': user_can_post_comment,  # in general
        'user_already_gave_answer': user_already_gave_answer,
        'previous_answer': previous_answer,
        'tab_id': answer_sort_method,
        'similar_threads': thread.get_similar_threads(),
        'language_code': translation.get_language(),
        'paginator_context': paginator_context,
        'show_post': show_post,
        'show_comment': show_comment,
        'show_comment_position': show_comment_position,
        'enable_comments': module == const.NODE_MODULE_QA,
        'module': module,
        "posts_per_pages": posts_per_pages,
    }

    # show last visit for posts (comments, ...)
    try:
        thread_view = thread.viewed.get(user=request.user)
        thread_view_last_visit = thread_view.last_visit

    except (ObjectDoesNotExist, TypeError):
        # no per-user view record (e.g. anonymous user): fall back to "now"
        thread_view = None
        thread_view_last_visit = datetime.datetime.now()

    context.update({
        "thread_view": thread_view,
        "thread_view_last_visit": thread_view_last_visit
    })

    context.update(views_context.get_for_tag_editor())

    thread.visit(request.user)

    # pick the detail template for this thread type
    template = 'node/%s/detail.html' % thread.thread_type

    return render_into_skin(template, context, request)
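
A minimal sketch of the fallback pattern used above, where datetime.datetime.now() stands in for the "last visit" time when no per-user view record exists (the names here are illustrative, not from the project):

import datetime

def last_visit_for(visits, user_id):
    # Hypothetical lookup: with no stored record, treat "now" as the last
    # visit so nothing gets highlighted as new for that user.
    try:
        return visits[user_id]
    except KeyError:
        return datetime.datetime.now()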

Example 26

Project: poppy
Source File: certificates.py
View license
    def create_certificate(self, cert_obj, enqueue=True, https_upgrade=False):
        if cert_obj.cert_type == 'san':
            try:
                found, found_cert = (
                    self._check_domain_already_exists_on_san_certs(
                        cert_obj.domain_name
                    )
                )
                if found is True:
                    return self.responder.ssl_certificate_provisioned(None, {
                        'status': 'failed',
                        'san cert': None,
                        'created_at': str(datetime.datetime.now()),
                        'action': (
                            'Domain {0} already exists '
                            'on san cert {1}.'.format(
                                cert_obj.domain_name, found_cert
                            )
                        )
                    })

                if enqueue:
                    self.mod_san_queue.enqueue_mod_san_request(
                        json.dumps(cert_obj.to_dict()))
                    extras = {
                        'status': 'create_in_progress',
                        'san cert': None,
                        # Add logging so it is easier for testing
                        'created_at': str(datetime.datetime.now()),
                        'action': (
                            'San cert request for {0} has been '
                            'enqueued.'.format(cert_obj.domain_name)
                        )
                    }
                    if https_upgrade is True:
                        extras['https upgrade notes'] = (
                            "This domain was upgraded from HTTP to HTTPS SAN."
                            "Take note of the domain name. Where applicable, "
                            "delete the old HTTP policy after the upgrade is "
                            "complete or the old policy is no longer in use."
                        )
                    return self.responder.ssl_certificate_provisioned(
                        None,
                        extras
                    )

                san_cert_hostname_limit = (
                    self.cert_info_storage.get_san_cert_hostname_limit()
                )

                for san_cert_name in self.san_cert_cnames:
                    enabled = (
                        self.cert_info_storage.get_enabled_status(
                            san_cert_name
                        )
                    )
                    if not enabled:
                        LOG.info("SAN cert {0} is disabled.".format(
                            san_cert_name))
                        continue

                    # if the limit provided as an arg to this function is None
                    # default san_cert_hostname_limit to the value provided in
                    # the config file.
                    san_cert_hostname_limit = (
                        san_cert_hostname_limit or
                        self.driver.san_cert_hostname_limit
                    )

                    # Check san_cert to enforce number of hosts hasn't
                    # reached the limit. If the current san_cert is at max
                    # capacity continue to the next san_cert
                    san_hosts = utils.get_ssl_number_of_hosts(
                        '.'.join(
                            [
                                san_cert_name,
                                self.driver.akamai_https_access_url_suffix
                            ]
                        )
                    )
                    if san_hosts >= san_cert_hostname_limit:
                        LOG.info(
                            "SAN cert {0} has {1} hosts, "
                            "limit is {2}.".format(
                                san_cert_name,
                                san_hosts,
                                san_cert_hostname_limit))
                        continue

                    last_sps_id = (
                        self.cert_info_storage.get_cert_last_spsid(
                            san_cert_name
                        )
                    )
                    if last_sps_id not in [None, ""]:
                        LOG.info('Latest spsId for {0} is: {1}'.format(
                            san_cert_name,
                            last_sps_id)
                        )
                        resp = self.sps_api_client.get(
                            self.sps_api_base_url.format(spsId=last_sps_id),
                        )
                        if resp.status_code != 200:
                            raise RuntimeError(
                                'SPS API Request Failed. '
                                'Exception: {0}'.format(resp.text)
                            )
                        sps_request_info = json.loads(resp.text)[
                            'requestList'][0]
                        status = sps_request_info['status']
                        work_flow_progress = (
                            sps_request_info['workflowProgress']
                        )
                        if status == 'edge host already created or pending':
                            if work_flow_progress is not None and \
                                    'error' in work_flow_progress.lower():
                                LOG.info("SPS Pending with Error: {0}".format(
                                    work_flow_progress))
                                continue
                            else:
                                pass
                        elif status == 'CPS cancelled':
                            pass
                        elif status != 'SPS Request Complete':
                            LOG.info("SPS Not completed for {0}...".format(
                                     san_cert_name))
                            continue
                    # issue modify san_cert sps request
                    cert_info = self.cert_info_storage.get_cert_info(
                        san_cert_name)
                    cert_info['add.sans'] = cert_obj.domain_name
                    string_post_data = '&'.join(
                        ['%s=%s' % (k, v) for (k, v) in cert_info.items()])
                    LOG.info(
                        'Post modSan request with request data: {0}'.format(
                            string_post_data
                        )
                    )
                    resp = self.sps_api_client.post(
                        self.sps_api_base_url.format(spsId=""),
                        data=string_post_data.encode('utf-8')
                    )
                    if resp.status_code != 202:
                        raise RuntimeError(
                            'SPS Request failed. '
                            'Exception: {0}'.format(resp.text)
                        )
                    else:
                        resp_dict = json.loads(resp.text)
                        LOG.info(
                            'modSan request submitted. Response: {0}'.format(
                                resp_dict
                            )
                        )
                        this_sps_id = resp_dict['spsId']
                        # get last item in results array and use its jobID
                        results = resp_dict['Results']['data']
                        this_job_id = results[0]['results']['jobID']
                        self.cert_info_storage.save_cert_last_ids(
                            san_cert_name,
                            this_sps_id,
                            this_job_id
                        )
                        cert_copy = copy.deepcopy(cert_obj.to_dict())
                        (
                            cert_copy['cert_details']
                            [self.driver.provider_name]
                        ) = {
                            'extra_info': {
                                'akamai_spsId': this_sps_id,
                                'san cert': san_cert_name
                            }
                        }

                        self.san_mapping_queue.enqueue_san_mapping(
                            json.dumps(cert_copy)
                        )
                        return self.responder.ssl_certificate_provisioned(
                            san_cert_name, {
                                'status': 'create_in_progress',
                                'san cert': san_cert_name,
                                'akamai_spsId': this_sps_id,
                                'created_at': str(datetime.datetime.now()),
                                'action': 'Waiting for customer domain '
                                          'validation for {0}'.format(
                                    cert_obj.domain_name)
                            })
                else:
                    self.mod_san_queue.enqueue_mod_san_request(
                        json.dumps(cert_obj.to_dict()))
                    return self.responder.ssl_certificate_provisioned(None, {
                        'status': 'create_in_progress',
                        'san cert': None,
                        # Add logging so it is easier for testing
                        'created_at': str(datetime.datetime.now()),
                        'action': 'No available san cert for {0} right now, '
                                  'or no san cert info available. Support: '
                                  'Please write down the domain and keep an '
                                  'eye on the next available freed-up SAN '
                                  'certs. More provisioning might be needed.'.format(
                            cert_obj.domain_name)
                    })
            except Exception as e:
                LOG.exception(
                    "Error {0} during certificate creation for {1} "
                    "sending the request sent back to the queue.".format(
                        e, cert_obj.domain_name
                    )
                )
                try:
                    self.mod_san_queue.enqueue_mod_san_request(
                        json.dumps(cert_obj.to_dict()))
                    return self.responder.ssl_certificate_provisioned(None, {
                        'status': 'create_in_progress',
                        'san cert': None,
                        # Add logging so it is easier for testing
                        'created_at': str(datetime.datetime.now()),
                        'action': (
                            'San cert request for {0} has been '
                            'enqueued.'.format(cert_obj.domain_name)
                        )
                    })
                except Exception as exc:
                    LOG.exception("Unable to enqueue {0}, Error: {1}".format(
                        cert_obj.domain_name,
                        exc
                    ))
                    return self.responder.ssl_certificate_provisioned(None, {
                        'status': 'failed',
                        'san cert': None,
                        'created_at': str(datetime.datetime.now()),
                        'action': 'Waiting for action... Provisioning '
                                  'san cert for {0} failed.'.format(
                            cert_obj.domain_name)
                    })
        elif cert_obj.cert_type == 'sni':
            # create a DV SAN SNI certificate using Akamai CPS API
            return self.create_sni_certificate(
                cert_obj, enqueue, https_upgrade)
        else:
            return self.responder.ssl_certificate_provisioned(None, {
                'status': 'failed',
                'reason': "Cert type : {0} hasn't been implemented".format(
                    cert_obj.cert_type
                )
            })
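
Example 26 stamps every status payload it returns with str(datetime.datetime.now()) as a human-readable created_at value. A minimal sketch of that pattern under assumed names (provisioning_status is not part of poppy):

import datetime

def provisioning_status(action, status='create_in_progress'):
    # Naive local time rendered as a string, as in the example above.
    return {
        'status': status,
        'created_at': str(datetime.datetime.now()),
        'action': action,
    }

print(provisioning_status('San cert request for example.com has been enqueued.'))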

Example 27

Project: poppy
Source File: certificates.py
View license
    def create_sni_certificate(self, cert_obj, enqueue, https_upgrade):
        try:
            found, found_cert = (
                self._check_domain_already_exists_on_sni_certs(
                    cert_obj.domain_name
                )
            )
            if found is True:
                return self.responder.ssl_certificate_provisioned(None, {
                    'status': 'failed',
                    'sni_cert': None,
                    'created_at': str(datetime.datetime.now()),
                    'action': (
                        'Domain {0} already exists '
                        'on sni cert {1}.'.format(
                            cert_obj.domain_name, found_cert
                        )
                    )
                })
            if enqueue:
                self.mod_san_queue.enqueue_mod_san_request(
                    json.dumps(cert_obj.to_dict()))
                extras = {
                    'status': 'create_in_progress',
                    'sni_cert': None,
                    # Add logging so it is easier for testing
                    'created_at': str(datetime.datetime.now()),
                    'action': (
                        'SNI cert request for {0} has been '
                        'enqueued.'.format(cert_obj.domain_name)
                    )
                }
                if https_upgrade is True:
                    extras['https upgrade notes'] = (
                        "This domain was upgraded from HTTP to HTTPS SNI."
                        "Take note of the domain name. Where applicable, "
                        "delete the old HTTP policy after the upgrade is "
                        "complete or the old policy is no longer in use."
                    )
                return self.responder.ssl_certificate_provisioned(
                    None,
                    extras
                )
            cert_hostname_limit = (
                self.cert_info_storage.get_san_cert_hostname_limit()
            )
            for cert_name in self.sni_cert_cnames:
                enabled = (
                    self.cert_info_storage.get_enabled_status(
                        cert_name, info_type='sni'
                    )
                )
                if not enabled:
                    LOG.info("SNI cert {0} is disabled.".format(
                        cert_name))
                    continue
                cert_hostname_limit = (
                    cert_hostname_limit or
                    self.driver.san_cert_hostname_limit
                )

                host_names_count = utils.get_ssl_number_of_hosts_alternate(
                    cert_name
                )
                if host_names_count >= cert_hostname_limit:
                    LOG.info(
                        "SNI cert {0} has {1} hosts, "
                        "limit is {2}.".format(
                            cert_name,
                            host_names_count,
                            cert_hostname_limit))
                    continue

                try:
                    enrollment_id = (
                        self.cert_info_storage.get_cert_enrollment_id(
                            cert_name))
                    # GET the enrollment by ID
                    headers = {
                        'Accept': ('application/vnd.akamai.cps.enrollment.v1+'
                                   'json')
                    }
                    resp = self.cps_api_client.get(
                        self.cps_api_base_url.format(
                            enrollmentId=enrollment_id),
                        headers=headers
                    )
                    if resp.status_code not in [200, 202]:
                        raise RuntimeError(
                            'CPS Request failed. Unable to GET enrollment '
                            'with id {0} Exception: {1}'.format(
                                enrollment_id, resp.text))
                    resp_json = json.loads(resp.text)
                    # check enrollment does not have any pending changes
                    if len(resp_json['pendingChanges']) > 0:
                        LOG.info("{0} has pending changes, skipping...".format(
                            cert_name))
                        continue

                    # adding sans should get them cloned into sni host names;
                    # list.append() mutates in place (its None return value
                    # must not be assigned back, or the sans list is lost)
                    resp_json['csr']['sans'].append(cert_obj.domain_name)

                    # PUT the enrollment including the modifications
                    headers = {
                        'Content-Type': (
                            'application/vnd.akamai.cps.enrollment.v1+json'),
                        'Accept': (
                            'application/vnd.akamai.cps.enrollment-status.v1+'
                            'json')
                    }
                    resp = self.cps_api_client.put(
                        self.cps_api_base_url.format(
                            enrollmentId=enrollment_id),
                        data=json.dumps(resp_json),
                        headers=headers
                    )
                    if resp.status_code not in [200, 202]:
                        raise RuntimeError(
                            'CPS Request failed. Unable to modify enrollment '
                            'with id {0} Exception: {1}'.format(
                                enrollment_id, resp.text))

                    # resp code 200 means PUT didn't create a change
                    # resp code 202 means PUT created a change
                    if resp.status_code == 202:
                        # save the change id for future reference
                        change_url = json.loads(resp.text)['changes'][0]
                        cert_copy = copy.deepcopy(cert_obj.to_dict())
                        (
                            cert_copy['cert_details']
                            [self.driver.provider_name]
                        ) = {
                            'extra_info': {
                                'change_url': change_url,
                                'sni_cert': cert_name
                            }
                        }
                        self.san_mapping_queue.enqueue_san_mapping(
                            json.dumps(cert_copy)
                        )
                        return self.responder.ssl_certificate_provisioned(
                            cert_name, {
                                'status': 'create_in_progress',
                                'sni_cert': cert_name,
                                'change_url': change_url,
                                'created_at': str(datetime.datetime.now()),
                                'action': 'Waiting for customer domain '
                                          'validation for {0}'.format(
                                    cert_obj.domain_name)
                            })
                except Exception as exc:
                    LOG.exception(
                        "Unable to provision certificate {0}, "
                        "Error: {1}".format(cert_obj.domain_name, exc))
                    return self.responder.ssl_certificate_provisioned(None, {
                        'status': 'failed',
                        'sni_cert': None,
                        'created_at': str(datetime.datetime.now()),
                        'action': 'Waiting for action... CPS API provisioning '
                                  'of DV SNI cert for {0} failed.'.format(
                            cert_obj.domain_name)
                    })
            else:
                self.mod_san_queue.enqueue_mod_san_request(
                    json.dumps(cert_obj.to_dict()))
                return self.responder.ssl_certificate_provisioned(None, {
                    'status': 'create_in_progress',
                    'sni_cert': None,
                    # Add logging so it is easier for testing
                    'created_at': str(datetime.datetime.now()),
                    'action': 'No available sni cert for {0} right now, '
                              'or no sni cert info available. Support: '
                              'Please write down the domain and keep an '
                              'eye on the next available freed-up SNI '
                              'certs. More provisioning might be needed.'.format(
                        cert_obj.domain_name)
                })
        except Exception as e:
            LOG.exception(
                "Error {0} during SNI certificate creation for {1} "
                "sending the request sent back to the queue.".format(
                    e, cert_obj.domain_name
                )
            )
            try:
                self.mod_san_queue.enqueue_mod_san_request(
                    json.dumps(cert_obj.to_dict()))
                return self.responder.ssl_certificate_provisioned(None, {
                    'status': 'create_in_progress',
                    'sni_cert': None,
                    # Add logging so it is easier for testing
                    'created_at': str(datetime.datetime.now()),
                    'action': (
                        'SNI cert request for {0} has been '
                        'enqueued.'.format(cert_obj.domain_name)
                    )
                })
            except Exception as exc:
                LOG.exception("Unable to enqueue {0}, Error: {1}".format(
                    cert_obj.domain_name,
                    exc
                ))
                return self.responder.ssl_certificate_provisioned(None, {
                    'status': 'failed',
                    'sni_cert': None,
                    'created_at': str(datetime.datetime.now()),
                    'action': 'Waiting for action... Provisioning '
                              'sni cert for {0} failed.'.format(
                        cert_obj.domain_name)
                })
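
The enrollment update in Example 27 works by mutating the CSR's sans list in place and re-serializing the whole document before the PUT. A standalone sketch of that step with made-up data:

import copy
import json

enrollment = {'csr': {'sans': ['www.example.com']}}

updated = copy.deepcopy(enrollment)
updated['csr']['sans'].append('shop.example.com')  # append mutates in place and returns None

payload = json.dumps(updated)
print(payload)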

Example 28

Project: yadapy
Source File: cursor.py
View license
    def __send_message(self, operation):
        """Send a query or getmore operation and handles the response.

        If operation is ``None`` this is an exhaust cursor, which reads
        the next result batch off the exhaust socket instead of
        sending getMore messages to the server.

        Can raise ConnectionFailure.
        """
        client = self.__collection.database.client
        listeners = client._event_listeners
        publish = listeners.enabled_for_commands
        from_command = False

        if operation:
            kwargs = {
                "read_preference": self.__read_preference,
                "exhaust": self.__exhaust,
            }
            if self.__address is not None:
                kwargs["address"] = self.__address

            try:
                response = client._send_message_with_response(operation,
                                                              **kwargs)
                self.__address = response.address
                if self.__exhaust:
                    # 'response' is an ExhaustResponse.
                    self.__exhaust_mgr = _SocketManager(response.socket_info,
                                                        response.pool)

                cmd_name = operation.name
                data = response.data
                cmd_duration = response.duration
                rqst_id = response.request_id
                from_command = response.from_command
            except AutoReconnect:
                # Don't try to send kill cursors on another socket
                # or to another server. It can cause a _pinValue
                # assertion on some server releases if we get here
                # due to a socket timeout.
                self.__killed = True
                raise
        else:
            # Exhaust cursor - no getMore message.
            rqst_id = 0
            cmd_name = 'getMore'
            if publish:
                # Fake a getMore command.
                cmd = SON([('getMore', self.__id),
                           ('collection', self.__collection.name)])
                if self.__batch_size:
                    cmd['batchSize'] = self.__batch_size
                if self.__max_time_ms:
                    cmd['maxTimeMS'] = self.__max_time_ms
                listeners.publish_command_start(
                    cmd, self.__collection.database.name, 0, self.__address)
                start = datetime.datetime.now()
            try:
                data = self.__exhaust_mgr.sock.receive_message(1, None)
            except Exception as exc:
                if publish:
                    duration = datetime.datetime.now() - start
                    listeners.publish_command_failure(
                        duration, _convert_exception(exc), cmd_name, rqst_id,
                        self.__address)
                if isinstance(exc, ConnectionFailure):
                    self.__die()
                raise
            if publish:
                cmd_duration = datetime.datetime.now() - start

        if publish:
            start = datetime.datetime.now()
        try:
            doc = helpers._unpack_response(response=data,
                                           cursor_id=self.__id,
                                           codec_options=self.__codec_options)
            if from_command:
                helpers._check_command_response(doc['data'][0])
        except OperationFailure as exc:
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            # If this is a tailable cursor the error is likely
            # due to capped collection roll over. Setting
            # self.__killed to True ensures Cursor.alive will be
            # False. No need to re-raise.
            if self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]:
                return
            raise
        except NotMasterError as exc:
            # Don't send kill cursors to another server after a "not master"
            # error. It's completely pointless.
            self.__killed = True

            # Make sure exhaust socket is returned immediately, if necessary.
            self.__die()

            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, exc.details, cmd_name, rqst_id, self.__address)

            client._reset_server_and_request_check(self.__address)
            raise
        except Exception as exc:
            if publish:
                duration = (datetime.datetime.now() - start) + cmd_duration
                listeners.publish_command_failure(
                    duration, _convert_exception(exc), cmd_name, rqst_id,
                    self.__address)
            raise

        if publish:
            duration = (datetime.datetime.now() - start) + cmd_duration
            # Must publish in find / getMore / explain command response format.
            if from_command:
                res = doc['data'][0]
            elif cmd_name == "explain":
                res = doc["data"][0] if doc["number_returned"] else {}
            else:
                res = {"cursor": {"id": doc["cursor_id"],
                                  "ns": self.__collection.full_name},
                       "ok": 1}
                if cmd_name == "find":
                    res["cursor"]["firstBatch"] = doc["data"]
                else:
                    res["cursor"]["nextBatch"] = doc["data"]
            listeners.publish_command_success(
                duration, res, cmd_name, rqst_id, self.__address)

        if from_command and cmd_name != "explain":
            cursor = doc['data'][0]['cursor']
            self.__id = cursor['id']
            if cmd_name == 'find':
                documents = cursor['firstBatch']
            else:
                documents = cursor['nextBatch']
            self.__data = deque(documents)
            self.__retrieved += len(documents)
        else:
            self.__id = doc["cursor_id"]
            self.__data = deque(doc["data"])
            self.__retrieved += doc["number_returned"]

        if self.__id == 0:
            self.__killed = True


        if self.__limit and self.__id and self.__limit <= self.__retrieved:
            self.__die()

        # Don't wait for garbage collection to call __del__, return the
        # socket to the pool now.
        if self.__exhaust and self.__id == 0:
            self.__exhaust_mgr.close()
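
Example 28 measures command durations by capturing datetime.datetime.now() before the work and subtracting it afterwards, which yields a datetime.timedelta. A minimal, self-contained version of that timing pattern:

import datetime

start = datetime.datetime.now()
_ = sum(range(10 ** 6))                      # stand-in for the receive/unpack work
duration = datetime.datetime.now() - start   # datetime.timedelta
print('took %.1f ms' % (duration.total_seconds() * 1000))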

Example 29

View license
def get_window(window_type):

    class DialogYoutubeList(DialogBaseList, window_type):

        TYPES = ["video", "playlist", "channel"]

        FILTERS = {"channelId": addon.LANG(19029),
                   "publishedAfter": addon.LANG(172),
                   "regionCode": addon.LANG(248),
                   "videoDimension": addon.LANG(32057),
                   "videoDuration": addon.LANG(180),
                   "videoCaption": addon.LANG(287),
                   "videoDefinition": addon.LANG(32058),
                   "videoType": "Type",
                   "relatedToVideoId": addon.LANG(32058)}

        TRANSLATIONS = {"video": addon.LANG(157),
                        "playlist": addon.LANG(559),
                        "channel": addon.LANG(19029)}

        SORTS = {"video": {"date": addon.LANG(552),
                           "rating": addon.LANG(563),
                           "relevance": addon.LANG(32060),
                           "title": addon.LANG(369),
                           "viewCount": addon.LANG(567)},
                 "playlist": {"date": addon.LANG(552),
                              "rating": addon.LANG(563),
                              "relevance": addon.LANG(32060),
                              "title": addon.LANG(369),
                              "videoCount": addon.LANG(32068),
                              "viewCount": addon.LANG(567)},
                 "channel": {"date": addon.LANG(552),
                             "rating": addon.LANG(563),
                             "relevance": addon.LANG(32060),
                             "title": addon.LANG(369),
                             "videoCount": addon.LANG(32068),
                             "viewCount": addon.LANG(567)}}

        LABEL2 = {"date": lambda x: x.get_info("date"),
                  "relevance": lambda x: x.get_property("relevance"),
                  "title": lambda x: x.get_info("title"),
                  "viewCount": lambda x: x.get_property("viewCount"),
                  "videoCount": lambda x: x.get_property("videoCount"),
                  "rating": lambda x: x.get_info("rating")}

        @busy.set_busy
        def __init__(self, *args, **kwargs):
            self.type = kwargs.get('type', "video")
            super(DialogYoutubeList, self).__init__(*args, **kwargs)

        def onClick(self, control_id):
            super(DialogYoutubeList, self).onClick(control_id)
            ch.serve(control_id, self)

        def onAction(self, action):
            super(DialogYoutubeList, self).onAction(action)
            ch.serve_action(action, self.getFocusId(), self)

        @ch.click_by_type("video")
        def main_list_click(self, control_id):
            listitem = self.FocusedItem(control_id)
            youtube_id = listitem.getProperty("youtube_id")
            media_type = listitem.getProperty("type")
            if media_type == "channel":
                filter_ = [{"id": youtube_id,
                            "type": "channelId",
                            "label": listitem.getLabel().decode("utf-8")}]
                wm.open_youtube_list(filters=filter_)
            else:
                wm.play_youtube_video(youtube_id=youtube_id,
                                      listitem=listitem)

        @ch.click(ID_BUTTON_PUBLISHEDFILTER)
        def set_published_filter(self, control_id):
            options = [(1, addon.LANG(32062)),
                       (7, addon.LANG(32063)),
                       (31, addon.LANG(32064)),
                       (365, addon.LANG(32065)),
                       ("custom", addon.LANG(636))]
            deltas = [i[0] for i in options]
            labels = [i[1] for i in options]
            index = xbmcgui.Dialog().select(heading=addon.LANG(32151),
                                            list=labels)
            if index == -1:
                return None
            delta = deltas[index]
            if delta == "custom":
                delta = xbmcgui.Dialog().input(heading=addon.LANG(32067),
                                               type=xbmcgui.INPUT_NUMERIC)
            if not delta:
                return None
            d = datetime.datetime.now() - datetime.timedelta(int(delta))
            self.add_filter(key="publishedAfter",
                            value=d.isoformat('T')[:-7] + "Z",
                            label=labels[index])

        @ch.click(ID_BUTTON_LANGUAGEFILTER)
        def set_language_filter(self, control_id):
            options = [("en", "en"),
                       ("de", "de"),
                       ("fr", "fr")]
            self.choose_filter("regionCode", 32151, options)

        @ch.click(ID_BUTTON_DIMENSIONFILTER)
        def set_dimension_filter(self, control_id):
            options = [("2d", "2D"),
                       ("3d", "3D"),
                       ("any", addon.LANG(593))]
            self.choose_filter("videoDimension", 32151, options)

        @ch.click(ID_BUTTON_DURATIONFILTER)
        def set_duration_filter(self, control_id):
            options = [("long", addon.LANG(33013)),
                       ("medium", addon.LANG(601)),
                       ("short", addon.LANG(33012)),
                       ("any", addon.LANG(593))]
            self.choose_filter("videoDuration", 32151, options)

        @ch.click(ID_BUTTON_CAPTIONFILTER)
        def set_caption_filter(self, control_id):
            options = [("closedCaption", addon.LANG(107)),
                       ("none", addon.LANG(106)),
                       ("any", addon.LANG(593))]
            self.choose_filter("videoCaption", 287, options)

        @ch.click(ID_BUTTON_DEFINITIONFILTER)
        def set_definition_filter(self, control_id):
            options = [("high", addon.LANG(419)),
                       ("standard", addon.LANG(602)),
                       ("any", addon.LANG(593))]
            self.choose_filter("videoDefinition", 169, options)

        @ch.click(ID_BUTTON_TYPEFILTER)
        def set_type_filter(self, control_id):
            options = [("movie", addon.LANG(20338)),
                       ("episode", addon.LANG(20359)),
                       ("any", addon.LANG(593))]
            self.choose_filter("videoType", 32151, options)

        @ch.click(ID_BUTTON_SORTTYPE)
        def get_sort_type(self, control_id):
            if not self.choose_sort_method(self.type):
                return None
            self.update()

        @ch.context("video")
        def context_menu(self, control_id):
            listitem = self.FocusedItem(control_id)
            if self.type == "video":
                more_vids = "{} [B]{}[/B]".format(addon.LANG(32081),
                                                  listitem.getProperty("channel_title"))
                index = xbmcgui.Dialog().contextmenu(list=[addon.LANG(32069), more_vids])
                if index < 0:
                    return None
                elif index == 0:
                    filter_ = [{"id": listitem.getProperty("youtube_id"),
                                "type": "relatedToVideoId",
                                "label": listitem.getLabel()}]
                    wm.open_youtube_list(filters=filter_)
                elif index == 1:
                    filter_ = [{"id": listitem.getProperty("channel_id"),
                                "type": "channelId",
                                "label": listitem.getProperty("channel_title")}]
                    wm.open_youtube_list(filters=filter_)

        def update_ui(self):
            is_video = self.type == "video"
            self.getControl(ID_BUTTON_DIMENSIONFILTER).setVisible(is_video)
            self.getControl(ID_BUTTON_DURATIONFILTER).setVisible(is_video)
            self.getControl(ID_BUTTON_CAPTIONFILTER).setVisible(is_video)
            self.getControl(ID_BUTTON_DEFINITIONFILTER).setVisible(is_video)
            super(DialogYoutubeList, self).update_ui()

        @property
        def default_sort(self):
            return "relevance"

        def add_filter(self, **kwargs):
            kwargs["typelabel"] = self.FILTERS[kwargs["key"]]
            super(DialogYoutubeList, self).add_filter(force_overwrite=True,
                                                      **kwargs)

        def fetch_data(self, force=False):
            self.set_filter_label()
            if self.search_str:
                self.filter_label = addon.LANG(32146) % (self.search_str) + "  " + self.filter_label
            return youtube.search(search_str=self.search_str,
                                  orderby=self.sort,
                                  extended=True,
                                  filters={item["type"]: item["id"] for item in self.filters},
                                  media_type=self.type,
                                  page=self.page_token)

    return DialogYoutubeList
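
The publishedAfter filter in Example 29 is built by subtracting a timedelta from datetime.datetime.now() and rendering the result as an RFC 3339-style string for the YouTube API. A rough equivalent that avoids slicing off the microsecond suffix but, like the dialog above, appends 'Z' to a naive local timestamp:

import datetime

days_back = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days_back)
published_after = cutoff.replace(microsecond=0).isoformat() + 'Z'  # e.g. 2016-07-01T12:34:56Z
print(published_after)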

Example 30

View license
    def _do_catch(self, pokemon, encounter_id, catch_rate_by_ball, is_vip=False):
        # settings that may be exposed at some point
        """

        :type pokemon: Pokemon
        """
        berry_id = ITEM_RAZZBERRY
        maximum_ball = ITEM_ULTRABALL if is_vip else ITEM_GREATBALL
        ideal_catch_rate_before_throw = self.vip_berry_threshold if is_vip else self.berry_threshold

        berry_count = self.inventory.get(ITEM_RAZZBERRY).count
        ball_count = {}
        for ball_id in [ITEM_POKEBALL, ITEM_GREATBALL, ITEM_ULTRABALL]:
            ball_count[ball_id] = self.inventory.get(ball_id).count

        # use `min_ultraball_to_keep` from config if is not None
        min_ultraball_to_keep = ball_count[ITEM_ULTRABALL]
        if self.min_ultraball_to_keep is not None:
            if self.min_ultraball_to_keep >= 0 and self.min_ultraball_to_keep < min_ultraball_to_keep:
                min_ultraball_to_keep = self.min_ultraball_to_keep

        used_berry = False
        original_catch_rate_by_ball = catch_rate_by_ball
        while True:

            # find lowest available ball
            current_ball = ITEM_POKEBALL
            while ball_count[current_ball] == 0 and current_ball < maximum_ball:
                current_ball += 1
            if ball_count[current_ball] == 0:
                # use an ultraball if there are no other balls, subject to `min_ultraball_to_keep`
                if maximum_ball != ITEM_ULTRABALL and ball_count[ITEM_ULTRABALL] > min_ultraball_to_keep:
                    maximum_ball = ITEM_ULTRABALL
                    self.emit_event('enough_ultraballs', formatted='No regular balls left! Trying ultraball.')
                    continue
                else:
                    self.emit_event('no_pokeballs', formatted='No pokeballs left! Fleeing...')
                    return WorkerResult.ERROR

            # check future ball count
            num_next_balls = 0
            next_ball = current_ball
            while next_ball < maximum_ball:
                next_ball += 1
                num_next_balls += ball_count[next_ball]

            # check if we've got berries to spare
            berries_to_spare = berry_count > 0 if is_vip else berry_count > num_next_balls + 30

            # use a berry if we are under our ideal rate and have berries to spare
            changed_ball = False
            if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and berries_to_spare and not used_berry:
                new_catch_rate_by_ball = self._use_berry(berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball)
                if new_catch_rate_by_ball != catch_rate_by_ball:
                    catch_rate_by_ball = new_catch_rate_by_ball
                    self.inventory.get(ITEM_RAZZBERRY).remove(1)
                    berry_count -= 1
                    used_berry = True

            # pick the best ball to catch with
            best_ball = current_ball
            while best_ball < maximum_ball:
                best_ball += 1
                if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and ball_count[best_ball] > 0:
                    # if current ball chance to catch is under our ideal rate, and player has better ball - then use it
                    current_ball = best_ball
                    changed_ball = True

            # if the rate is still low and we didn't throw a berry before, throw one
            if catch_rate_by_ball[current_ball] < ideal_catch_rate_before_throw and berry_count > 0 and not used_berry:
                new_catch_rate_by_ball = self._use_berry(berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball)
                if new_catch_rate_by_ball != catch_rate_by_ball:
                    catch_rate_by_ball = new_catch_rate_by_ball
                    self.inventory.get(ITEM_RAZZBERRY).remove(1)
                    berry_count -= 1
                    used_berry = True

            # If we change ball then wait to simulate user selecting it
            if changed_ball:
                action_delay(self.catchsim_changeball_wait_min, self.catchsim_changeball_wait_max)

            # Randomize the quality of the throw
            # Default structure
            throw_parameters = {'normalized_reticle_size': 1.950,
                                'spin_modifier': 1.0,
                                'normalized_hit_position': 1.0,
                                'throw_type_label': 'Excellent'}
            self.generate_spin_parameter(throw_parameters)
            self.generate_throw_quality_parameters(throw_parameters)

            # try to catch pokemon!
            ball_count[current_ball] -= 1
            self.inventory.get(current_ball).remove(1)
            # Take some time to throw the ball from config options
            action_delay(self.catchsim_catch_wait_min, self.catchsim_catch_wait_max)
            self.emit_event(
                'threw_pokeball',
                formatted='{throw_type}{spin_label} throw! Used {ball_name}, with chance {success_percentage} ({count_left} left)',
                data={
                    'throw_type': throw_parameters['throw_type_label'],
                    'spin_label': throw_parameters['spin_label'],
                    'ball_name': self.inventory.get(current_ball).name,
                    'success_percentage': self._pct(catch_rate_by_ball[current_ball]),
                    'count_left': ball_count[current_ball]
                }
            )

            hit_pokemon = 1
            if random() >= self.catch_throw_parameters_hit_rate and not is_vip:
                hit_pokemon = 0

            response_dict = self.api.catch_pokemon(
                encounter_id=encounter_id,
                pokeball=current_ball,
                normalized_reticle_size=throw_parameters['normalized_reticle_size'],
                spawn_point_id=self.spawn_point_guid,
                hit_pokemon=hit_pokemon,
                spin_modifier=throw_parameters['spin_modifier'],
                normalized_hit_position=throw_parameters['normalized_hit_position']
            )

            try:
                catch_pokemon_status = response_dict['responses']['CATCH_POKEMON']['status']
            except KeyError:
                break

            # retry failed pokemon
            if catch_pokemon_status == CATCH_STATUS_FAILED:
                self.emit_event(
                    'pokemon_capture_failed',
                    formatted='{pokemon} capture failed.. trying again!',
                    data={'pokemon': pokemon.name}
                )
                used_berry = False
                catch_rate_by_ball = original_catch_rate_by_ball

                # sleep according to flee_count and flee_duration config settings
                # randomly chooses a number of times to 'show' wobble animation between 1 and flee_count
                # multiplies this by flee_duration to get total sleep
                if self.catchsim_flee_count:
                    sleep((randrange(self.catchsim_flee_count)+1) * self.catchsim_flee_duration)

                continue

            # abandon if pokemon vanished
            elif catch_pokemon_status == CATCH_STATUS_VANISHED:
                #insert into DB
                with self.bot.database as conn:
                    c = conn.cursor()
                    c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='vanish_log'")
                result = c.fetchone()

                # log the vanish when the table exists; otherwise emit a
                # notice and stop this catch attempt
                if result[0] == 1:
                    conn.execute('''INSERT INTO vanish_log (pokemon, cp, iv, encounter_id, pokemon_id) VALUES (?, ?, ?, ?, ?)''', (pokemon.name, pokemon.cp, pokemon.iv, str(encounter_id), pokemon.pokemon_id))
                else:
                    self.emit_event(
                        'vanish_log',
                        sender=self,
                        level='info',
                        formatted="vanish_log table not found, skipping log"
                    )
                    break

                self.emit_event(
                    'pokemon_vanished',
                    formatted='{pokemon} vanished!',
                    data={
                        'pokemon': pokemon.name,
                        'encounter_id': self.pokemon['encounter_id'],
                        'latitude': self.pokemon['latitude'],
                        'longitude': self.pokemon['longitude'],
                        'pokemon_id': pokemon.pokemon_id
                    }
                )

                with self.bot.database as conn:
                    c = conn.cursor()
                    c.execute("SELECT DISTINCT COUNT(encounter_id) FROM vanish_log WHERE dated > (SELECT dated FROM catch_log WHERE dated IN (SELECT MAX(dated) FROM catch_log))")

                result = c.fetchone()
                self.consecutive_vanishes_so_far = result[0]

                if self.rest_completed == False and self.consecutive_vanishes_so_far >= self.consecutive_vanish_limit:
                    self.start_rest()

                if self._pct(catch_rate_by_ball[current_ball]) == 100:
                    self.bot.softban = True

            # pokemon caught!
            elif catch_pokemon_status == CATCH_STATUS_SUCCESS:
                if self.rest_completed == True:
                    self.rest_completed = False
                pokemon.unique_id = response_dict['responses']['CATCH_POKEMON']['captured_pokemon_id']
                self.bot.metrics.captured_pokemon(pokemon.name, pokemon.cp, pokemon.iv_display, pokemon.iv)

                awards = response_dict['responses']['CATCH_POKEMON']['capture_award']
                exp_gain, candy_gain, stardust_gain = self.extract_award(awards)

                self.emit_event(
                    'pokemon_caught',
                    formatted='Captured {pokemon}! [CP {cp}] [NCP {ncp}] [Potential {iv}] [{iv_display}] ({caught_last_24_hour}/{daily_catch_limit}) [+{exp} exp] [+{stardust} stardust]',
                    data={
                        'pokemon': pokemon.name,
                        'ncp': round(pokemon.cp_percent, 2),
                        'cp': pokemon.cp,
                        'iv': pokemon.iv,
                        'iv_display': pokemon.iv_display,
                        'exp': exp_gain,
                        'stardust': stardust_gain,
                        'encounter_id': self.pokemon['encounter_id'],
                        'latitude': self.pokemon['latitude'],
                        'longitude': self.pokemon['longitude'],
                        'pokemon_id': pokemon.pokemon_id,
                        'caught_last_24_hour': self.caught_last_24_hour + 1,
                        'daily_catch_limit': self.daily_catch_limit
                    }
                )

                inventory.pokemons().add(pokemon)
                inventory.player().exp += exp_gain
                self.bot.stardust += stardust_gain
                candy = inventory.candies().get(pokemon.pokemon_id)
                candy.add(candy_gain)

                self.emit_event(
                    'gained_candy',
                    formatted='You now have {quantity} {type} candy!',
                    data = {
                        'quantity': candy.quantity,
                        'type': candy.type,
                    },
                )

                self.bot.softban = False


                try:
                    with self.bot.database as conn:
                        c = conn.cursor()
                        c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='catch_log'")
                    result = c.fetchone()

                    # log the catch when the table exists; otherwise emit a
                    # notice and skip the rest of the logging
                    if result[0] == 1:
                        conn.execute('''INSERT INTO catch_log (pokemon, cp, iv, encounter_id, pokemon_id) VALUES (?, ?, ?, ?, ?)''', (pokemon.name, pokemon.cp, pokemon.iv, str(encounter_id), pokemon.pokemon_id))
                    else:
                        self.emit_event(
                            'catch_log',
                            sender=self,
                            level='info',
                            formatted="catch_log table not found, skipping log"
                        )
                        break
                    user_data_caught = os.path.join(_base_dir, 'data', 'caught-%s.json' % self.bot.config.username)
                    with open(user_data_caught, 'ab') as outfile:
                        outfile.write(str(datetime.now()))
                        json.dump({
                            'pokemon': pokemon.name,
                            'cp': pokemon.cp,
                            'iv': pokemon.iv,
                            'encounter_id': self.pokemon['encounter_id'],
                            'pokemon_id': pokemon.pokemon_id
                        }, outfile)
                        outfile.write('\n')

                    # if it is a new pokemon to our dex, simulate app animation delay
                    if exp_gain >= 500:
                        sleep (randrange(self.catchsim_newtodex_wait_min, self.catchsim_newtodex_wait_max))

                except IOError as e:
                    self.logger.info('[x] Error while opening location file: %s' % e)

            elif catch_pokemon_status == CATCH_STATUS_MISSED:
                self.emit_event(
                    'pokemon_capture_failed',
                    formatted='Pokeball thrown to {pokemon} missed.. trying again!',
                    data={'pokemon': pokemon.name}
                )
                # Take some time to throw the ball from config options
                action_delay(self.catchsim_catch_wait_min, self.catchsim_catch_wait_max)
                continue

            break
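
Example 30 timestamps each caught-pokemon record by writing str(datetime.now()) ahead of a JSON blob in an append-only file. A minimal sketch of that log format (append_catch_record and the record fields are illustrative):

import datetime
import json

def append_catch_record(path, record):
    # One timestamp-prefixed JSON line per catch, as in the example above.
    with open(path, 'a') as outfile:
        outfile.write(str(datetime.datetime.now()))
        json.dump(record, outfile)
        outfile.write('\n')

append_catch_record('caught.json', {'pokemon': 'Pidgey', 'cp': 10, 'iv': 0.47})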

Example 31

Project: PseudoTV_Live
Source File: Migrate.py
View license
    def autoTune(self):
        self.log("autoTune")
        if REAL_SETTINGS.getSetting("Autotune") == "true" and REAL_SETTINGS.getSetting("Warning1") == "true":
            self.log('starting autoTune')
            Youtube = self.chanlist.youtube_player_ok()
            self.chanlist.background = True
            self.chanlist.makenewlists = True
            self.chanlist.forceReset = True
            self.myOverlay.setBackgroundStatus("Initializing: Autotuning",0,string2=" ")
            
            #Reserve channel check 
            channelNum = 1       
            if REAL_SETTINGS.getSetting("reserveChannels") == "true":
                self.log('autoTune, using reserve Channels')
                channelNum = 500
            baseNum = channelNum
            self.log('autoTune, Starting channelNum = ' + str(baseNum))
            
            # LiveTV - PVR
            if REAL_SETTINGS.getSetting("autoFindLivePVR") == "true":
                self.log("autoTune, adding Live PVR Channels")
                channelNum = baseNum
                PVRChannels = self.chanlist.getPVRChannels()
                for i in range(len(PVRChannels)):
                    try:
                        CHid = PVRChannels[i][0]
                        CHname = self.chanlist.cleanLabels(PVRChannels[i][1])
                        CHthmb = PVRChannels[i][2]
                        if REAL_SETTINGS.getSetting("respectChannels") == "true":
                            channelNum = self.chkChannelNum(int(CHid))
                        else:
                            channelNum = self.chkChannelNum(channelNum)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "8")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", CHid)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", self.chanlist.getPVRLink(i))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", 'pvr')
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", CHname + ' PVR')
                        rulecnt = 1
                        if isLowPower() == True:
                            rulecnt = 2
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_id", "23")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_opt_1", 'No')
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "%s" %str(rulecnt))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")  
                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding PVR channels",string2=CHname,progress=int(channelNum*100//CHANNEL_LIMIT))
                    except:
                        pass
            
            # LiveTV - HDHomeRun           
            if REAL_SETTINGS.getSetting("autoFindLiveHDHR")  == "true":
                self.log("autoTune, adding Live HDHomeRun Channels")
                channelNum = baseNum
                self.chanlist.cached_readXMLTV = []
                HDHRChannels = self.chanlist.getHDHRChannels(True)
                for i in range(len(HDHRChannels)):
                    try:
                        CHid = HDHRChannels[i][0]
                        CHname = self.chanlist.cleanLabels(HDHRChannels[i][1])
                        link = HDHRChannels[i][4]
                        if REAL_SETTINGS.getSetting("respectChannels") == "true":
                            channelNum = self.chkChannelNum(int(CHid))
                        else:
                            channelNum = self.chkChannelNum(channelNum)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "8")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", CHid)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", link)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", "hdhomerun")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", CHname + ' HDHR')
                        rulecnt = 1
                        if isLowPower() == True:
                            rulecnt = 2
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_id", "23")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_opt_1", 'No')
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "%s" %str(rulecnt))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")     
                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding HDHomeRun channels",string2=CHname,progress=int(channelNum*100//CHANNEL_LIMIT))
                    except Exception as e:
                        self.log("autoFindLiveHD 2, Failed! " + str(e))
             
            # LiveTV - USTVnow
            if REAL_SETTINGS.getSetting("autoFindUSTVNOW") == "true" and isUSTVnow() != False:
                self.log("autoTune, adding USTVnow Channels")
                channelNum = baseNum
                USTVChannels = self.chanlist.getUSTVChannels()
                for i in range(len(USTVChannels)):
                    try:
                        CHname, path, thumb = USTVChannels[i]
                        channelNum = self.chkChannelNum(channelNum)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "8")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", CHname)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", path)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", "ustvnow")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", CHname + ' USTV')    
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_id", "13")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_opt_1", "2") 
                        rulecnt = 2
                        if isLowPower() == True:
                            rulecnt = 3
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_3_id", "23")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_3_opt_1", 'No')
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "%s" %str(rulecnt))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")                 
                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding USTVnow channels",string2=CHname,progress=int(channelNum*100//CHANNEL_LIMIT))
                    except Exception as e:
                        self.log("autoFindUSTVNOW, Failed! " + str(e))

            # Custom Playlists
            if REAL_SETTINGS.getSetting("autoFindCustom") == "true":
                self.log("autoTune, adding Custom SmartPlaylists")
                channelNum = baseNum
                Music_path = 'special://profile/playlists/music'
                Mixed_path = 'special://profile/playlists/mixed'
                Video_path = 'special://profile/playlists/video'
                xsp_path = [Music_path, Mixed_path, Video_path]
                i = 0
                for path in xsp_path:
                    xspLst = self.chanlist.walk(path,['.xsp'])
                    for xsp in xspLst:
                        if xsp.endswith('.xsp') and len(re.findall("channel_",xsp)) != 0:
                            i += 1
                            if REAL_SETTINGS.getSetting("respectChannels") == "true":
                                channelNum = self.chkChannelNum(int(re.findall(r"channel_(\d+)", xsp)[0]))
                            else:
                                channelNum = self.chkChannelNum(channelNum)
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "0")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", xbmc.translatePath(xsp))
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                            self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Custom SmartPlaylists",string2=uni(self.chanlist.getSmartPlaylistName(xsp)),progress=int(channelNum*100//CHANNEL_LIMIT))

            # Custom SuperFavs
            
            if REAL_SETTINGS.getSetting("autoFindSuperFav") == "true" :
                self.log("autoTune, adding Super Favourites")
                channelNum = baseNum
                plugin_details = self.chanlist.requestList('plugin://plugin.program.super.favourites')
                
                for i in plugin_details:
                    include = False
                    
                    try:
                        filetypes = re.search('"filetype" *: *"(.*?)"', i)
                        labels = re.search('"label" *: *"(.*?)"', i)
                        files = re.search('"file" *: *"(.*?)"', i)

                        #if core variables have info proceed
                        if filetypes and files and labels:
                            filetype = filetypes.group(1)
                            file = (files.group(1))
                            label = (labels.group(1))
                            
                            if label.lower() not in SF_FILTER:
                                if filetype == 'directory':
                                    if label.lower() in ['pseudotv']:
                                        plugin_details = self.chanlist.requestList(file)
                                        include = True
                                    
                                    elif label.lower().startswith('channel'):
                                        plugin_details = self.chanlist.requestList(file)
                                        include = True

                                    if include == True:
                                        channelNum = self.chkChannelNum(channelNum)         
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "15")
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", file)
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "")
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", str(MEDIA_LIMIT))
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_4", "0")
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "1")
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", label)
                                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Super Favourites",string2=label,progress=int(channelNum*100//CHANNEL_LIMIT))
                    except Exception as e:
                        self.log("autoFindSuperFav, Failed! " + str(e))
                      
            #TV - Networks/Genres
            
            if (REAL_SETTINGS.getSetting("autoFindNetworks") == "true" or REAL_SETTINGS.getSetting("autoFindTVGenres") == "true"):
                self.log("autoTune, Searching for TV Channels")
                self.chanlist.fillTVInfo()

            # need to add check for auto find network channels
            
            if REAL_SETTINGS.getSetting("autoFindNetworks") == "true":
                self.log("autoTune, adding TV Networks")
                channelNum = baseNum
                for i in range(len(self.chanlist.networkList)):
                    channelNum = self.chkChannelNum(channelNum)
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "1")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1",uni(self.chanlist.networkList[i]))
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "4")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                    self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding TV Networks",string2=uni(self.chanlist.networkList[i]),progress=int(channelNum*100//CHANNEL_LIMIT))
            
            
            if REAL_SETTINGS.getSetting("autoFindTVGenres") == "true":
                self.log("autoTune, adding TV Genres")
                channelNum = baseNum
                for i in range(len(self.chanlist.showGenreList)):
                    if self.chanlist.showGenreList[i] != '':
                        channelNum = self.chkChannelNum(channelNum)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "3")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", uni(self.chanlist.showGenreList[i]))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "4")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding TV Genres",string2=uni(self.chanlist.showGenreList[i]),progress=int(channelNum*100//CHANNEL_LIMIT))
            
            
            if (REAL_SETTINGS.getSetting("autoFindStudios") == "true" or REAL_SETTINGS.getSetting("autoFindMovieGenres") == "true"):
                self.chanlist.fillMovieInfo()

            if REAL_SETTINGS.getSetting("autoFindStudios") == "true":
                self.log("autoTune, adding Movie Studios")
                channelNum = baseNum
                for i in range(len(self.chanlist.studioList)):
                    channelNum = self.chkChannelNum(channelNum)
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "2")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", uni(self.chanlist.studioList[i]))
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                    self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Movie Studios",string2=uni(self.chanlist.studioList[i]),progress=int(channelNum*100//CHANNEL_LIMIT))
                    
            
            if REAL_SETTINGS.getSetting("autoFindMovieGenres") == "true":
                self.log("autoTune, adding Movie Genres")
                channelNum = baseNum
                for i in range(len(self.chanlist.movieGenreList)):
                    if self.chanlist.movieGenreList[i] != '':
                        channelNum = self.chkChannelNum(channelNum)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "4")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", uni(self.chanlist.movieGenreList[i]))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Movie Genres",string2=uni(self.chanlist.movieGenreList[i]),progress=int(channelNum*100//CHANNEL_LIMIT))
                     
            if REAL_SETTINGS.getSetting("autoFindMixGenres") == "true":
                self.chanlist.fillMixedGenreInfo()
            
            if REAL_SETTINGS.getSetting("autoFindMixGenres") == "true":
                self.log("autoTune, adding Mixed Genres")
                channelNum = baseNum
                for i in range(len(self.chanlist.mixedGenreList)):
                    if self.chanlist.mixedGenreList[i] != '':
                        channelNum = self.chkChannelNum(channelNum)
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "5")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", uni(self.chanlist.mixedGenreList[i]))
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "4")
                        ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                        self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Mixed Genres",string2=uni(self.chanlist.mixedGenreList[i]),progress=int(channelNum*100//CHANNEL_LIMIT))
            
            #recent movie/tv
             
            if REAL_SETTINGS.getSetting("autoFindRecent") == "true":
                self.log("autoTune, adding Recent TV/Movies")
                channelNum = baseNum
                channelNum = self.chkChannelNum(channelNum)
                TVflename = self.chanlist.createRecentlyAddedTV()
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", TVflename)
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "3")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", "Recent TV")  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_id", "12")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_3_id", "13")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_3_opt_1", "4")  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Recent TV",string2=' ')
                channelNum = self.chkChannelNum(channelNum)
                Movieflename = self.chanlist.createRecentlyAddedMovies()     
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", Movieflename)
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "2")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", "Recent Movies")  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_id", "13")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_opt_1", "4")  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Recent Movies",string2=' ')
               
            #3D movies
             
            if REAL_SETTINGS.getSetting("autoFind3DMovies") == "true":
                self.log("autoTune, adding 3D Movies")
                channelNum = baseNum
                if len(self.chanlist.movie3Dlist) >= MEDIA_LIMIT:
                    channelNum = self.chkChannelNum(channelNum)
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", "")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "0")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                    self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding 3D Movies",string2=' ')
                    
            #Music Genre
            if REAL_SETTINGS.getSetting("autoFindMusicGenres") == "true":
                self.log("autoTune, adding Music Genres")
                channelNum = baseNum
                self.chanlist.fillMusicInfo()
                for i in range(len(self.chanlist.musicGenreList)):
                    channelNum = self.chkChannelNum(channelNum)
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "12")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", uni(self.chanlist.musicGenreList[i]))
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "4")
                    ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                    self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Music Genres",string2=uni(self.chanlist.musicGenreList[i]),progress=int(channelNum*100//CHANNEL_LIMIT))
            
            #Local Directory
            if REAL_SETTINGS.getSetting("autoFindVideosLocal") != "":
                self.log("autoTune, adding Local Videos")
                channelNum = baseNum 
                channelNum = self.chkChannelNum(channelNum)
                LocalVideo = str(REAL_SETTINGS.getSetting('autoFindVideosLocal'))  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "7")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", "" +LocalVideo+ "")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", str(MEDIA_LIMIT))
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_4", "1")     
                self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Local Directory",string2=self.chanlist.getChannelName(7, channelNum, LocalVideo))

            #Youtube - PseudoNetwork
            if REAL_SETTINGS.getSetting("autoFindCommunity_PseudoNetworks") == "true" and isCompanionInstalled() == True:
                self.log("autoTune, adding PseudoNetworks")
                channelNum = baseNum
                detail = uni(self.chanlist.requestList('plugin://plugin.video.pseudo.companion/?mode=3000&name=PseudoNetworks&previous=getOnlineMedia&url'))
                show_busy_dialog()
                for i in detail:
                    files = re.search('"file" *: *"(.*?)",', i)
                    filetypes = re.search('"filetype" *: *"(.*?)",', i)
                    labels = re.search('"label" *: *"(.*?)",', i)
                    if filetypes and labels and files:
                        filetype = filetypes.group(1)
                        name = self.chanlist.cleanLabels(labels.group(1))
                        file = (files.group(1).replace("\\\\", "\\"))
                        if filetype == 'directory':
                            channelNum = self.chkChannelNum(channelNum)
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "15")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", file)
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", str(MEDIA_LIMIT))
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_4", "0")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "1")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", name)  
                            ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                            self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding PseudoNetworks",string2=name)
                    hide_busy_dialog()

            #Youtube - Seasonal
            
            if REAL_SETTINGS.getSetting("autoFindCommunity_Youtube_Seasonal") == "true":
                channelNum = baseNum
                today = datetime.datetime.now()
                month = today.strftime('%B')
                channelNum = self.chkChannelNum(channelNum)
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_type", "10")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_time", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_1", month)
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_2", "31")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_3", str(MEDIA_LIMIT))
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_4", "0")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rulecount", "2")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_id", "1")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_1_opt_1", "Seasonal Channel")  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_id", "13")
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_rule_2_opt_1", "168")  
                ADDON_SETTINGS.setSetting("Channel_" + str(channelNum) + "_changed", "true")
                self.myOverlay.setBackgroundStatus("Initializing: Autotuning adding Seasonal Youtube",string2=" ")

            # Reserve channels - clear old
            if REAL_SETTINGS.getSetting("reserveChannels") == "true":
                self.clearReserved(baseNum)
            
            # reset auto tune settings        
            REAL_SETTINGS.setSetting('Autotune', "false")
            REAL_SETTINGS.setSetting('Warning1', "false") 
            REAL_SETTINGS.setSetting("autoFindCustom","false")
            REAL_SETTINGS.setSetting("autoFindSuperFav","false") 
            REAL_SETTINGS.setSetting('autoFindLivePVR', "false")
            REAL_SETTINGS.setSetting('autoFindLiveHDHR', "0")
            REAL_SETTINGS.setSetting('autoFindUSTVNOW', "false")  
            REAL_SETTINGS.setSetting("autoFindNetworks","false")
            REAL_SETTINGS.setSetting("autoFindStudios","false")
            REAL_SETTINGS.setSetting("autoFindTVGenres","false")
            REAL_SETTINGS.setSetting("autoFindMovieGenres","false")
            REAL_SETTINGS.setSetting("autoFindMixGenres","false")
            REAL_SETTINGS.setSetting("autoFind3DMovies","false")    
            REAL_SETTINGS.setSetting("autoFindRecent","false")      
            REAL_SETTINGS.setSetting("autoFindMusicGenres","false")
            REAL_SETTINGS.setSetting("autoFindVideosLocal","")
            REAL_SETTINGS.setSetting("autoFindCommunity_PseudoNetworks","false")  
            REAL_SETTINGS.setSetting("ForceChannelReset","true")
            ADDON_SETTINGS.setSetting('LastExitTime', str(int(time.time())))
            ADDON_SETTINGS.writeSettings()
            self.log('autoTune, return')
            self.myOverlay.setBackgroundStatus("Initializing: Autotuning Complete",string2=' ')
            return True
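
In the autoTune example above, datetime.datetime.now appears in the Seasonal Youtube block: the current month name is read with strftime('%B') and stored as the seasonal channel's source setting. A minimal sketch of just that pattern, assuming only the standard library (current_month_name is an illustrative helper, not part of the addon):

import datetime

def current_month_name():
    # Month name of the local current time, e.g. 'March' or 'December'.
    return datetime.datetime.now().strftime('%B')

print(current_month_name())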

Example 32

Project: pycalphad
Source File: tdb.py
View license
def write_tdb(dbf, fd, groupby='subsystem'):
    """
    Write a TDB file from a pycalphad Database object.

    Parameters
    ----------
    dbf : Database
        A pycalphad Database.
    fd : file-like
        File descriptor.
    groupby : ['subsystem', 'phase'], optional
        Desired grouping of parameters in the file.
    """
    writetime = datetime.datetime.now()
    maxlen = 78
    output = ""
    # Comment header block
    # Import here to prevent circular imports
    from pycalphad import __version__
    output += ("$" * maxlen) + "\n"
    output += "$ Date: {}\n".format(writetime.strftime("%Y-%m-%d %H:%M"))
    output += "$ Components: {}\n".format(', '.join(sorted(dbf.elements)))
    output += "$ Phases: {}\n".format(', '.join(sorted(dbf.phases.keys())))
    output += "$ Generated by {} (pycalphad {})\n".format(getpass.getuser(), __version__)
    output += ("$" * maxlen) + "\n\n"
    for element in sorted(dbf.elements):
        output += "ELEMENT {0} BLANK 0 0 0 !\n".format(element.upper())
    if len(dbf.elements) > 0:
        output += "\n"
    for species in sorted(dbf.species):
        output += "SPECIES {0} !\n".format(species.upper())
    if len(dbf.species) > 0:
        output += "\n"
    # Write FUNCTION block
    for name, expr in sorted(dbf.symbols.items()):
        if not isinstance(expr, Piecewise):
            # Non-piecewise exprs need to be wrapped to print
            # Otherwise TC's TDB parser will complain
            expr = Piecewise((expr, And(v.T >= 1, v.T < 10000)))
        expr = TCPrinter().doprint(expr).upper()
        if ';' not in expr:
            expr += '; N'
        output += "FUNCTION {0} {1} !\n".format(name.upper(), expr)
    output += "\n"
    # Boilerplate code
    output += "TYPE_DEFINITION % SEQ * !\n"
    output += "DEFINE_SYSTEM_DEFAULT ELEMENT 2 !\n"
    default_elements = [i.upper() for i in sorted(dbf.elements) if i.upper() == 'VA' or i.upper() == '/-']
    if len(default_elements) > 0:
        output += 'DEFAULT_COMMAND DEFINE_SYSTEM_ELEMENT {} !\n'.format(' '.join(default_elements))
    output += "\n"
    typedef_chars = list("^&*()'ABCDEFGHIJKLMNOPQSRTUVWXYZ")[::-1]
    #  Write necessary TYPE_DEF based on model hints
    typedefs = defaultdict(lambda: ["%"])
    for name, phase_obj in sorted(dbf.phases.items()):
        model_hints = phase_obj.model_hints.copy()
        if ('ordered_phase' in model_hints.keys()) and (model_hints['ordered_phase'] == name):
            new_char = typedef_chars.pop()
            typedefs[name].append(new_char)
            typedefs[model_hints['disordered_phase']].append(new_char)
            output += 'TYPE_DEFINITION {} GES AMEND_PHASE_DESCRIPTION {} DISORDERED_PART {} !\n'\
                .format(new_char, model_hints['ordered_phase'].upper(),
                        model_hints['disordered_phase'].upper())
            del model_hints['ordered_phase']
            del model_hints['disordered_phase']
        if ('disordered_phase' in model_hints.keys()) and (model_hints['disordered_phase'] == name):
            # We handle adding the correct typedef when we write the ordered phase
            del model_hints['ordered_phase']
            del model_hints['disordered_phase']
        if 'ihj_magnetic_afm_factor' in model_hints.keys():
            new_char = typedef_chars.pop()
            typedefs[name].append(new_char)
            output += 'TYPE_DEFINITION {} GES AMEND_PHASE_DESCRIPTION {} MAGNETIC {} {} !\n'\
                .format(new_char, name.upper(), model_hints['ihj_magnetic_afm_factor'],
                        model_hints['ihj_magnetic_structure_factor'])
            del model_hints['ihj_magnetic_afm_factor']
            del model_hints['ihj_magnetic_structure_factor']
        if len(model_hints) > 0:
            # Some model hints were not properly consumed
            raise ValueError('Not all model hints are supported: {}'.format(model_hints))
    # Perform a second loop now that all typedefs / model hints are consistent
    for name, phase_obj in sorted(dbf.phases.items()):
        output += "PHASE {0} {1}  {2} {3} !\n".format(name.upper(), ''.join(typedefs[name]),
                                                      len(phase_obj.sublattices),
                                                      ' '.join([str(i) for i in phase_obj.sublattices]))
        constituents = ':'.join([','.join(sorted(subl)) for subl in phase_obj.constituents])
        output += "CONSTITUENT {0} :{1}: !\n".format(name.upper(), constituents)
        output += "\n"

    # PARAMETERs by subsystem
    param_sorted = defaultdict(lambda: list())
    paramtuple = namedtuple('ParamTuple', ['phase_name', 'parameter_type', 'complexity', 'constituent_array',
                                           'parameter_order', 'diffusing_species', 'parameter', 'reference'])
    for param in dbf._parameters.all():
        if groupby == 'subsystem':
            components = set()
            for subl in param['constituent_array']:
                components |= set(subl)
            if param['diffusing_species'] is not None:
                components |= {param['diffusing_species']}
            # Wildcard operator is not a component
            components -= {'*'}
            # Remove vacancy if it's not the only component (pure vacancy endmember)
            if len(components) > 1:
                components -= {'VA'}
            components = tuple(sorted([c.upper() for c in components]))
            grouping = components
        elif groupby == 'phase':
            grouping = param['phase_name'].upper()
        else:
            raise ValueError('Unknown groupby attribute \'{}\''.format(groupby))
        # We use the complexity parameter to help with sorting the parameters logically
        param_sorted[grouping].append(paramtuple(param['phase_name'], param['parameter_type'],
                                                 sum([len(i) for i in param['constituent_array']]),
                                                 param['constituent_array'], param['parameter_order'],
                                                 param['diffusing_species'], param['parameter'],
                                                 param['reference']))

    def write_parameter(param_to_write):
        constituents = ':'.join([','.join(sorted([i.upper() for i in subl]))
                         for subl in param_to_write.constituent_array])
        # TODO: Handle references
        paramx = param_to_write.parameter
        if not isinstance(paramx, Piecewise):
            # Non-piecewise parameters need to be wrapped to print correctly
            # Otherwise TC's TDB parser will fail
            paramx = Piecewise((paramx, And(v.T >= 1, v.T < 10000)))
        exprx = TCPrinter().doprint(paramx).upper()
        if ';' not in exprx:
            exprx += '; N'
        if param_to_write.diffusing_species is not None:
            ds = "&" + param_to_write.diffusing_species
        else:
            ds = ""
        return "PARAMETER {}({}{},{};{}) {} !\n".format(param_to_write.parameter_type.upper(),
                                                        param_to_write.phase_name.upper(),
                                                        ds,
                                                        constituents,
                                                        param_to_write.parameter_order,
                                                        exprx)
    if groupby == 'subsystem':
        for num_elements in range(1, 5):
            subsystems = list(itertools.combinations(sorted([i.upper() for i in dbf.elements]), num_elements))
            for subsystem in subsystems:
                parameters = sorted(param_sorted[subsystem])
                if len(parameters) > 0:
                    output += "\n\n"
                    output += "$" * maxlen + "\n"
                    output += "$ {}".format('-'.join(sorted(subsystem)).center(maxlen, " ")[2:-1]) + "$\n"
                    output += "$" * maxlen + "\n"
                    output += "\n"
                    for parameter in parameters:
                        output += write_parameter(parameter)
        # Don't generate combinatorics for multi-component subsystems or we'll run out of memory
        if len(dbf.elements) > 4:
            subsystems = [k for k in param_sorted.keys() if len(k) > 4]
            for subsystem in subsystems:
                parameters = sorted(param_sorted[subsystem])
                for parameter in parameters:
                    output += write_parameter(parameter)
    elif groupby == 'phase':
        for phase_name in sorted(dbf.phases.keys()):
            parameters = sorted(param_sorted[phase_name])
            if len(parameters) > 0:
                output += "\n\n"
                output += "$" * maxlen + "\n"
                output += "$ {}".format(phase_name.upper().center(maxlen, " ")[2:-1]) + "$\n"
                output += "$" * maxlen + "\n"
                output += "\n"
                for parameter in parameters:
                    output += write_parameter(parameter)
    else:
        raise ValueError('Unknown groupby attribute {}'.format(groupby))
    # Reflow text to respect character limit per line
    fd.write(reflow_text(output, linewidth=maxlen))
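
In write_tdb above, datetime.datetime.now is captured once as writetime and formatted with strftime("%Y-%m-%d %H:%M") for the comment header of the generated TDB file. A minimal sketch of just that stamp, assuming only the standard library (header_line is an illustrative name, not part of pycalphad):

import datetime

writetime = datetime.datetime.now()
header_line = "$ Date: {}".format(writetime.strftime("%Y-%m-%d %H:%M"))
print(header_line)  # e.g. $ Date: 2016-05-04 13:37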

Example 33

Project: pyspace
Source File: trainer.py
View license
    def prepare_training(self, training_files, potentials, operation, nullmarker_stride_ms = None):
        """ Prepares pyspace live for training.

        Prepares everything for training of pyspace live,
        i.e. creates flows based on the dataflow specs
        and configures them.
        """
        online_logger.info( "Preparing Training")
        self.potentials = potentials
        self.operation = operation
        self.nullmarker_stride_ms = nullmarker_stride_ms
        if self.nullmarker_stride_ms is None:
            online_logger.warn( 'Nullmarker stride interval is %s. You can specify it in your parameter file.' % self.nullmarker_stride_ms)
        else:
            online_logger.info( 'Nullmarker stride interval is set to %s ms ' % self.nullmarker_stride_ms)

        online_logger.info( "Creating flows..")
        for key in self.potentials.keys():
            spec_base = self.potentials[key]["configuration"].spec_dir
            if self.operation == "train":
                self.potentials[key]["node_chain"] = os.path.join(spec_base, self.potentials[key]["node_chain"])
                online_logger.info( "node_chain_spec:" + self.potentials[key]["node_chain"])

            elif self.operation in ("prewindowing", "prewindowing_offline"):
                self.potentials[key]["prewindowing_flow"] = os.path.join(spec_base, self.potentials[key]["prewindowing_flow"])
                online_logger.info( "prewindowing_dataflow_spec: " + self.potentials[key]["prewindowing_flow"])

            elif self.operation == "prewindowed_train":
                self.potentials[key]["postprocess_flow"] = os.path.join(spec_base, self.potentials[key]["postprocess_flow"])
                online_logger.info( "postprocessing_dataflow_spec: " + self.potentials[key]["postprocess_flow"])

            self.training_active_potential[key] = multiprocessing.Value("b",False)

        online_logger.info("Path variables set for NodeChains")

        # check if multiple potentials are given for training
        if isinstance(training_files, list):
            self.training_data = training_files
        else:
            self.training_data = [training_files]

        # Training is done in separate processes, we send the time series
        # windows to these threads via two queues
        online_logger.info( "Initializing Queues")
        for key in self.potentials.keys():
            self.queue[key] = multiprocessing.Queue()


        def flow_generator(key):
            """create a generator to yield all the abri flow windows"""
            # Yield all windows until a None item is found in the queue
            while True:
                window = self.queue[key].get(block = True, timeout = None)
                if window is None: break
                yield window

        # Create the actual data flows
        for key in self.potentials.keys():

            if self.operation == "train":
                self.node_chains[key] = NodeChainFactory.flow_from_yaml(Flow_Class = NodeChain,
                                                         flow_spec = file(self.potentials[key]["node_chain"]))
                self.node_chains[key][0].set_generator(flow_generator(key))
                flow = open(self.potentials[key]["node_chain"])
            elif self.operation in ("prewindowing", "prewindowing_offline"):
                online_logger.info("loading prewindowing flow..")
                online_logger.info("file: " + str(self.potentials[key]["prewindowing_flow"]))

                self.node_chains[key] = NodeChainFactory.flow_from_yaml(Flow_Class = NodeChain,
                                                             flow_spec = file(self.potentials[key]["prewindowing_flow"]))
                self.node_chains[key][0].set_generator(flow_generator(key))
                flow = open(self.potentials[key]["prewindowing_flow"])
            elif self.operation == "prewindowed_train":
                self.node_chains[key] = NodeChainFactory.flow_from_yaml(Flow_Class = NodeChain, flow_spec = file(self.potentials[key]["postprocess_flow"]))
                replace_start_and_end_markers = False

                final_collection = TimeSeriesDataset()
                final_collection_path = os.path.join(self.prewindowed_data_directory, key, "all_train_data")
                # delete previous training collection
                if os.path.exists(final_collection_path):
                    online_logger.info("deleting old training data collection for " + key)
                    shutil.rmtree(final_collection_path)

                # load all prewindowed collections and
                # append data to the final collection
                prewindowed_sets = \
                    glob.glob(os.path.join(self.prewindowed_data_directory, key, "*"))
                if len(prewindowed_sets) == 0:
                    online_logger.error("Couldn't find data, please do prewindowing first!")
                    raise Exception
                online_logger.info("concatenating prewindowed data from " + str(prewindowed_sets))

                for s,d in enumerate(prewindowed_sets):
                    collection = BaseDataset.load(d)
                    data = collection.get_data(0, 0, "train")
                    for d,(sample,label) in enumerate(data):
                        if replace_start_and_end_markers:
                            # in case we concatenate multiple 'Window' labeled
                            # sets we have to remove every start- and endmarker
                            for k in sample.marker_name.keys():
                                # find '{S,s}  8' or '{S,s}  9'
                                m = re.match(r"^s\s{0,2}[89]$", k, re.IGNORECASE)
                                if m is not None:
                                    online_logger.info(str("remove %s from %d %d" % (m.group(), s, d)))
                                    del(sample.marker_name[m.group()])

                            if s == len(prewindowed_sets)-1 and \
                                d == len(data)-1:
                                # insert endmarker
                                sample.marker_name["S  9"] = [0.0]
                                online_logger.info("added endmarker" + str(s) + " " + str(d))

                            if s == 0 and d == 0:
                                # insert startmarker
                                sample.marker_name["S  8"] = [0.0]
                                online_logger.info("added startmarker" + str(s) + " " + str(d))

                        final_collection.add_sample(sample, label, True)

                # save final collection (just for debugging)
                os.mkdir(final_collection_path)
                final_collection.store(final_collection_path)

                online_logger.info("stored final collection at " + final_collection_path)

                # load final collection again for training
                online_logger.info("loading data from " + final_collection_path)
                self.prewindowed_data[key] =  BaseDataset.load(final_collection_path)
                self.node_chains[key][0].set_input_dataset(self.prewindowed_data[key])

                flow = open(self.potentials[key]["postprocess_flow"])

            # create window_stream for every potential

            if self.operation in ("prewindowing"):
                window_spec_file = os.path.join(spec_base,"node_chains","windower",
                             self.potentials[key]["windower_spec_path_train"])

                self.window_stream[key] = \
                        self.stream_manager.request_window_stream(window_spec_file,
                                                              nullmarker_stride_ms = self.nullmarker_stride_ms)
            elif self.operation in ("prewindowing_offline"):
                pass
            elif self.operation in ("train"):
                pass

            self.node_chain_definitions[key] = yaml.load(flow)
            flow.close()

        # TODO: check if the prewindowing flow is still needed when using the stream mode!
        if self.operation in ("train"):
            online_logger.info( "Removing old flows...")
            try:
                shutil.rmtree(self.flow_storage)
            except:
                online_logger.info("Could not delete flow storage directory")
            os.mkdir(self.flow_storage)
        elif self.operation in ("prewindowing", "prewindowing_offline"):
            # follow this policy:
            # - delete prewindowed data older than 12 hours
            # - always delete trained/stored flows
            now = datetime.datetime.now()
            then = now - datetime.timedelta(hours=12)

            if not os.path.exists(self.prewindowed_data_directory):
                os.mkdir(self.prewindowed_data_directory)
            if not os.path.exists(self.flow_storage):
                os.mkdir(self.flow_storage)

            for key in self.potentials.keys():
                found = self.find_files_older_than(then, \
                        os.path.join(self.prewindowed_data_directory, key))
                if found is not None:
                    for f in found:
                        online_logger.info(str("recursively deleting files in \'%s\'" % f))
                        try:
                            shutil.rmtree(os.path.abspath(f))
                        except Exception as e:
                            # TODO: find a smart solution for this!
                            pass # dir was probably already deleted..

                if os.path.exists(os.path.join(self.prewindowed_data_directory, key, "all_train_data")):
                    shutil.rmtree(os.path.join(self.prewindowed_data_directory, key, "all_train_data"))
                    online_logger.info("deleted concatenated training data for " + key)


        online_logger.info( "Training preparations finished")
        return 0
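
prepare_training above pairs datetime.datetime.now with datetime.timedelta(hours=12) to build a cutoff and then deletes prewindowed data directories older than that cutoff. find_files_older_than is specific to the pyspace live code, so the sketch below substitutes a plain mtime comparison (files_older_than and the directory name are illustrative only):

import datetime
import os

def files_older_than(cutoff, directory):
    # Yield entries in `directory` whose modification time is before `cutoff`.
    for name in os.listdir(directory):
        path = os.path.join(directory, name)
        if datetime.datetime.fromtimestamp(os.path.getmtime(path)) < cutoff:
            yield path

cutoff = datetime.datetime.now() - datetime.timedelta(hours=12)
# for path in files_older_than(cutoff, "prewindowed_data"):
#     print(path)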

Example 34

Project: spladder
Source File: spladder_test.py
View license
def main():

    ### get command line options
    options = parse_options(sys.argv)

    ### parse parameters from options object
    CFG = settings.parse_args(options, identity='test')
    CFG['use_exon_counts'] = False

    ### generate output directory
    outdir = os.path.join(options.outdir, 'testing')
    if options.timestamp == 'y':
        outdir = '%s_%s' % (outdir, str(datetime.datetime.now()).replace(' ', '_'))
    if CFG['diagnose_plots']:
        CFG['plot_dir'] = os.path.join(options.outdir, 'plots')
        if not os.path.exists(CFG['plot_dir']):
            os.makedirs(CFG['plot_dir'])

    if options.labelA != 'condA' and options.labelB != 'condB':
        outdir = '%s_%s_vs_%s' % (outdir, options.labelA, options.labelB)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    if CFG['debug']:

        print "Generating simulated dataset"

        npr.seed(23)
        CFG['is_matlab'] = False
        #cov = npr.permutation(20000-20).astype('float').reshape(999, 20)
        #cov = sp.r_[cov, sp.c_[sp.ones((1, 10)) *10, sp.ones((1, 10)) * 500000] + npr.normal(10, 1, 20)]
        #sf = sp.ones((cov.shape[1], ), dtype='float')

        setsize = 50
        ### diff event counts
        cov = sp.zeros((500, 2 * setsize), dtype='int')
        for i in range(10):
            cov[i, :setsize] = nbinom.rvs(30, 0.8, size=setsize)
            cov[i, setsize:] = nbinom.rvs(10, 0.8, size=setsize)
        for i in range(10, cov.shape[0]):
            cov[i, :] = nbinom.rvs(30, 0.8, size=2*setsize)

        ### diff gene expression
        cov2 = sp.zeros((500, 2 * setsize), dtype='int')
        for i in range(20):
            cov2[i, :setsize] = nbinom.rvs(2000, 0.2, size=setsize)
            cov2[i, setsize:] = nbinom.rvs(2000, 0.3, size=setsize)
        for i in range(20, cov2.shape[0]):
            cov2[i, :] = nbinom.rvs(2000, 0.3, size=2*setsize)

        cov = sp.c_[cov, cov2] * 10000

        tidx = sp.arange(setsize)

        sf = npr.uniform(0, 5, 2*setsize)
        sf = sp.r_[sf, sf]

        #dmatrix0 = sp.ones((cov.shape[1], 3), dtype='bool')
        dmatrix1 = sp.zeros((cov.shape[1], 4), dtype='float')
        dmatrix1[:, 0] = 1
        dmatrix1[tidx, 1] = 1
        #dmatrix1[tidx, 2] = 1
        dmatrix1[tidx + (2*setsize), 2] = 1
        dmatrix1[(2*setsize):, 3] = 1
        #dmatrix1[:, 4] = sp.log(sf)
        dmatrix0 = dmatrix1[:, [0, 2, 3]]

        cov = cov * sf
        #sf = sp.ones((cov.shape[1], ), dtype='float')

        pvals = run_testing(cov, dmatrix0, dmatrix1, sf, CFG)
        pvals_adj = adj_pval(pvals, CFG) 
        pdb.set_trace()
    else:
        val_tag = ''
        if CFG['validate_splicegraphs']:
            val_tag = '.validated'

        if CFG['is_matlab']:
            CFG['fname_genes'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.mat' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
            CFG['fname_count_in'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.count.mat' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
        else:
            CFG['fname_genes'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.pickle' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
            CFG['fname_count_in'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.count.hdf5' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))

        condition_strains = None
        CFG['fname_exp_hdf5'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.gene_exp.hdf5' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
        if os.path.exists(CFG['fname_exp_hdf5']):
            if CFG['verbose']:
                print 'Loading expression counts from %s' % CFG['fname_exp_hdf5']
            IN = h5py.File(CFG['fname_exp_hdf5'], 'r')
            gene_counts = IN['raw_count'][:]
            gene_strains = IN['strains'][:]
            gene_ids = IN['genes'][:]
            IN.close()
        else:
            if options.subset_samples == 'y':
                condition_strains = sp.unique(sp.r_[sp.array(CFG['conditionA']), sp.array(CFG['conditionB'])])
                CFG['fname_exp_hdf5'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.gene_exp.%i.hdf5' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag, hash(tuple(sp.unique(condition_strains))) * -1))
            if os.path.exists(CFG['fname_exp_hdf5']):
                if CFG['verbose']:
                    print 'Loading expression counts from %s' % CFG['fname_exp_hdf5']
                IN = h5py.File(CFG['fname_exp_hdf5'], 'r')
                gene_counts = IN['raw_count'][:]
                gene_strains = IN['strains'][:]
                gene_ids = IN['genes'][:]
                IN.close()
            else:
                gene_counts, gene_strains, gene_ids = get_gene_expression(CFG, fn_out=CFG['fname_exp_hdf5'], strain_subset=condition_strains)

        gene_strains = sp.array([x.split(':')[1] if ':' in x else x for x in gene_strains])

        ### estimate size factors for library size normalization
        sf_ge = get_size_factors(gene_counts, CFG)

        ### get index of samples for difftest
        idx1 = sp.where(sp.in1d(gene_strains, CFG['conditionA']))[0]
        idx2 = sp.where(sp.in1d(gene_strains, CFG['conditionB']))[0]

        ### for TESTING
        #setsize = 100
        #idx1 = sp.arange(0, setsize / 2)
        #idx2 = sp.arange(setsize / 2, setsize)

        ### subset expression counts to tested samples
        gene_counts = gene_counts[:, sp.r_[idx1, idx2]]
        sf_ge = sf_ge[sp.r_[idx1, idx2]]
        #sf = sp.r_[sf, sf]

        ### test each event type individually
        for event_type in CFG['event_types']:

            if CFG['verbose']:
                print 'Testing %s events' % event_type

            CFG['fname_events'] = os.path.join(CFG['out_dirname'], 'merge_graphs_%s_C%i.counts.hdf5' % (event_type, CFG['confidence_level']))

            ### quantify events
            (cov, gene_idx, event_idx, event_ids, event_strains) = quantify.quantify_from_counted_events(CFG['fname_events'], sp.r_[idx1, idx2], event_type, CFG)

            ### estimate size factors
            sf_ev = get_size_factors(sp.vstack(cov), CFG)

            sf = sp.r_[sf_ev, sf_ge]

            assert(sp.all(gene_strains == event_strains))

            ### map gene expression to event order
            curr_gene_counts = gene_counts[gene_idx, :]

            ### filter for min expression
            if event_type == 'intron_retention':
                k_idx = sp.where((sp.mean(cov[0] == 0, axis=1) < CFG['max_0_frac']) | \
                                 (sp.mean(cov[1] == 0, axis=1) < CFG['max_0_frac']))[0]
            else:
                k_idx = sp.where(((sp.mean(cov[0] == 0, axis=1) < CFG['max_0_frac']) | \
                                  (sp.mean(cov[1] == 0, axis=1) < CFG['max_0_frac'])) & \
                                 (sp.mean(sp.c_[cov[0][:, :idx1.shape[0]], cov[1][:, :idx1.shape[0]]] == 0, axis=1) < CFG['max_0_frac']) & \
                                 (sp.mean(sp.c_[cov[0][:, idx2.shape[0]:], cov[1][:, idx2.shape[0]:]] == 0, axis=1) < CFG['max_0_frac']))[0]
            if CFG['verbose']:
                print 'Exclude %i of %i %s events (%.2f percent) from testing due to low coverage' % (cov[0].shape[0] - k_idx.shape[0], cov[0].shape[0], event_type, (1 - float(k_idx.shape[0]) / cov[0].shape[0]) * 100)
            if k_idx.shape[0] == 0:
                print 'All events of type %s were filtered out due to low coverage. Please try re-running with less stringent filter criteria' % event_type
                continue
           # k_idx = sp.where((sp.mean(sp.c_[cov[0], cov[1]], axis=1) > 2))[0]
           # k_idx = sp.where((sp.mean(cov[0], axis=1) > 2) & (sp.mean(cov[1], axis=1) > 2))[0]
            cov[0] = cov[0][k_idx, :]
            cov[1] = cov[1][k_idx, :]
            curr_gene_counts = curr_gene_counts[k_idx, :]
            event_idx = event_idx[k_idx]
            gene_idx = gene_idx[k_idx]
            event_ids = [x[k_idx] for x in event_ids]

            cov[0] = sp.around(sp.hstack([cov[0], curr_gene_counts]))
            cov[1] = sp.around(sp.hstack([cov[1], curr_gene_counts]))
            cov = sp.vstack(cov)
            event_ids = sp.hstack(event_ids)

            tidx = sp.arange(idx1.shape[0])

        #if CFG['debug']:
        #    for i in range(cov.shape[0]):
        #        fig = plt.figure(figsize=(8, 6), dpi=100)
        #        ax = fig.add_subplot(111)
        #        ax.hist(cov[i, :] * sf, 50, histtype='bar', rwidth=0.8)
        #        #ax.plot(sp.arange(cov.shape[1]), sorted(cov[i, :]), 'bo')
        #        ax.set_title('Count Distribution - Sample %i' % i )
        #        plt.savefig('count_dist.%i.pdf' % i, format='pdf', bbox_inches='tight')
        #        plt.close(fig)

            ### build design matrix for testing
            dmatrix1 = sp.zeros((cov.shape[1], 4), dtype='bool')
            dmatrix1[:, 0] = 1                      # intercept
            dmatrix1[tidx, 1] = 1                   # delta a
            dmatrix1[tidx, 2] = 1                   # delta g
            dmatrix1[tidx + (idx1.shape[0] + idx2.shape[0]), 2] = 1         # delta g
            dmatrix1[(idx1.shape[0] + idx2.shape[0]):, 3] = 1         # is g
            dmatrix0 = dmatrix1[:, [0, 2, 3]]

            ### make event splice forms unique to prevent unnecessary tests
            event_ids, u_idx, r_idx = sp.unique(event_ids, return_index=True, return_inverse=True)
            if CFG['verbose']:
                print 'Consider %i unique event splice forms for testing' % u_idx.shape[0]

            ### run testing
            #pvals = run_testing(cov[u_idx, :], dmatrix0, dmatrix1, sf, CFG, r_idx)
            pvals = run_testing(cov, dmatrix0, dmatrix1, sf, CFG)
            pvals_adj = adj_pval(pvals, CFG) 

            ### write output
            out_fname = os.path.join(outdir, 'test_results_C%i_%s.tsv' % (options.confidence, event_type))
            if CFG['verbose']:
                print 'Writing test results to %s' % out_fname
            s_idx = sp.argsort(pvals_adj)
            header = sp.array(['event_id', 'gene', 'p_val', 'p_val_adj']) 
            event_ids = sp.array(['%s_%i' % (event_type, i + 1) for i in event_idx], dtype='str')
            if CFG['is_matlab']:
                data_out = sp.c_[event_ids[s_idx], gene_ids[gene_idx[s_idx], 0], pvals[s_idx].astype('str'), pvals_adj[s_idx].astype('str')]
            else:
                data_out = sp.c_[event_ids[s_idx], gene_ids[gene_idx[s_idx]], pvals[s_idx].astype('str'), pvals_adj[s_idx].astype('str')]
            data_out = sp.r_[header[sp.newaxis, :], data_out]
            sp.savetxt(out_fname, data_out, delimiter='\t', fmt='%s')
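
The spladder example above uses datetime.datetime.now to timestamp the testing output directory when the timestamp option is enabled: str(datetime.datetime.now()).replace(' ', '_') is appended to the directory name. A minimal sketch of that naming with an illustrative base path; note that the default str() form keeps colons, which are not allowed in Windows paths, so an explicit strftime format is a common alternative:

import datetime
import os

outdir = os.path.join("results", "testing")
outdir = '%s_%s' % (outdir, str(datetime.datetime.now()).replace(' ', '_'))
print(outdir)  # e.g. results/testing_2016-05-04_13:37:42.123456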

Example 35

View license
def run(stdout, stderr, argv, theano_nose, batch_size, time_profile,
        display_batch_output):

    # Setting aside current working directory for later saving
    sav_dir = os.getcwd()
    # The first argument is the called script.
    argv = argv[1:]

    # It seems safer to fully regenerate the list of tests on each call.
    if os.path.isfile('.noseids'):
        os.remove('.noseids')

    # Collect test IDs.
    print("""\
####################
# COLLECTING TESTS #
####################""")
    stdout.flush()
    stderr.flush()
    dummy_in = open(os.devnull)
    # We need to call 'python' on Windows, because theano-nose is not a
    # native Windows app; and it does not hurt to call it on Unix.
    # Using sys.executable, so that the same Python version is used.
    python = sys.executable
    rval = subprocess.call(
        ([python, theano_nose, '--collect-only', '--with-id']
         + argv),
        stdin=dummy_in.fileno(),
        stdout=stdout.fileno(),
        stderr=stderr.fileno())
    stdout.flush()
    stderr.flush()
    assert rval == 0
    noseids_file = '.noseids'

    with open(noseids_file, 'rb') as f:
        data = pickle.load(f)

    ids = data['ids']
    n_tests = len(ids)
    if n_tests == 0:
        raise Exception("0 test selected")
    assert n_tests == max(ids)

    # Standard batch testing is called for
    if not time_profile:
        failed = set()
        print("""\
###################################
# RUNNING TESTS IN BATCHES OF %s #
###################################""" % batch_size)
        # When `display_batch_output` is False, we suppress all output because
        # we want the user to focus only on the failed tests, which are re-run
        # (with output) below.
        dummy_out = open(os.devnull, 'w')
        for test_id in xrange(1, n_tests + 1, batch_size):
            stdout.flush()
            stderr.flush()
            test_range = list(range(test_id,
                                    min(test_id + batch_size, n_tests + 1)))
            cmd = ([python, theano_nose, '--with-id'] +
                   list(map(str, test_range)) +
                   argv)
            subprocess_extra_args = dict(stdin=dummy_in.fileno())
            if not display_batch_output:
                # Use quiet mode in nosetests.
                cmd.append('-q')
                # Suppress all output.
                subprocess_extra_args.update(dict(
                    stdout=dummy_out.fileno(),
                    stderr=dummy_out.fileno()))
            t0 = time.time()
            subprocess.call(cmd, **subprocess_extra_args)
            t1 = time.time()
            # Recover failed test indices from the 'failed' field of the
            # '.noseids' file. We need to do it after each batch because
            # otherwise this field may get erased. We use a set because it
            # seems like it is not systematically erased though, and we want
            # to avoid duplicates.
            with open(noseids_file, 'rb') as f:
                failed = failed.union(pickle.load(f)['failed'])

            print('%s%% done in %.3fs (failed: %s)' % (
                (test_range[-1] * 100) // n_tests, t1 - t0, len(failed)))
        # Sort for cosmetic purpose only.
        failed = sorted(failed)
        if failed:
            # Re-run only failed tests
            print("""\
################################
# RE-RUNNING FAILED TESTS ONLY #
################################""")
            stdout.flush()
            stderr.flush()
            subprocess.call(
                ([python, theano_nose, '-v', '--with-id']
                 + failed
                 + argv),
                stdin=dummy_in.fileno(),
                stdout=stdout.fileno(),
                stderr=stderr.fileno())
            stdout.flush()
            stderr.flush()
            return 0
        else:
            print("""\
####################
# ALL TESTS PASSED #
####################""")

    # Time-profiling is called for
    else:
        print("""\
########################################
# RUNNING TESTS IN TIME-PROFILING MODE #
########################################""")

        # finds first word of list l containing string s
        def getIndexOfFirst(l, s):
            for pos, word in enumerate(l):
                if s in word:
                    return pos

        # finds last word of list l containing string s
        def getIndexOfLast(l, s):
            for pos, word in enumerate(reversed(l)):
                if s in word:
                    return (len(l) - pos - 1)

        # iterating through tests
        # initializing master profiling list and raw log
        prof_master_nosort = []
        prof_rawlog = []
        dummy_out = open(os.devnull, 'w')
        path_rawlog = os.path.join(sav_dir, 'timeprof_rawlog')
        stamp = str(datetime.datetime.now()) + '\n\n'
        f_rawlog = open(path_rawlog, 'w')
        f_rawlog.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                       ' (raw log)\n\n' + stamp)
        f_rawlog.flush()

        stamp = str(datetime.datetime.now()) + '\n\n'
        fields = ('Fields: computation time; nosetests sequential id;'
                  ' test name; parent class (if any); outcome\n\n')
        path_nosort = os.path.join(sav_dir, 'timeprof_nosort')
        # probably this part can be extracted into a function with many args
        with open(path_nosort, 'w') as f_nosort:
            # begin of saving nosort
            f_nosort.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                           ' (by sequential id)\n\n' + stamp + fields)
            f_nosort.flush()
            for test_floor in xrange(1, n_tests + 1, batch_size):
                for test_id in xrange(test_floor, min(test_floor + batch_size,
                                                     n_tests + 1)):
                    # Print the test we will start in the raw log to help
                    # debug tests that are too long.
                    f_rawlog.write("\n%s Will run test #%d %s\n" % (
                        time.ctime(), test_id, data["ids"][test_id]))
                    f_rawlog.flush()

                    p_out = output_subprocess_Popen(
                        ([python, theano_nose, '-v', '--with-id']
                         + [str(test_id)] + argv +
                         ['--disabdocstring']))
                        # the previous option calls a custom Nosetests plugin
                        # precluding automatic substitution of doc. string for
                        # test name in display
                        # (see class 'DisabDocString' in file theano-nose)

                    # recovering and processing data from pipe
                    err = p_out[1]
                    # print the raw log
                    f_rawlog.write(err)
                    f_rawlog.flush()

                    # parsing the output
                    l_err = err.split()
                    try:
                        pos_id = getIndexOfFirst(l_err, '#')
                        prof_id = l_err[pos_id]
                        pos_dot = getIndexOfFirst(l_err, '...')
                        prof_test = ''
                        for s in l_err[pos_id + 1: pos_dot]:
                            prof_test += s + ' '
                        if 'OK' in err:
                            pos_ok = getIndexOfLast(l_err, 'OK')
                            if len(l_err) == pos_ok + 1:
                                prof_time = float(l_err[pos_ok - 1][0:-1])
                                prof_pass = 'OK'
                            elif 'SKIP' in l_err[pos_ok + 1]:
                                prof_time = 0.
                                prof_pass = 'SKIPPED TEST'
                            elif 'KNOWNFAIL' in l_err[pos_ok + 1]:
                                prof_time = float(l_err[pos_ok - 1][0:-1])
                                prof_pass = 'OK'
                            else:
                                prof_time = 0.
                                prof_pass = 'FAILED TEST'
                        else:
                            prof_time = 0.
                            prof_pass = 'FAILED TEST'
                    except Exception:
                        prof_time = 0
                        prof_id = '#' + str(test_id)
                        prof_test = ('FAILED PARSING, see raw log for details'
                                     ' on test')
                        prof_pass = ''
                    prof_tuple = (prof_time, prof_id, prof_test, prof_pass)

                    # appending tuple to master list
                    prof_master_nosort.append(prof_tuple)

                    # write the no sort file
                    s_nosort = ((str(prof_tuple[0]) + 's').ljust(10) +
                     " " + prof_tuple[1].ljust(7) + " " +
                     prof_tuple[2] + prof_tuple[3] +
                     "\n")
                    f_nosort.write(s_nosort)
                    f_nosort.flush()

                print('%s%% time-profiled' % ((test_id * 100) // n_tests))
            f_rawlog.close()

            # sorting tests according to running-time
            prof_master_sort = sorted(prof_master_nosort,
                                      key=lambda test: test[0], reverse=True)

            # saving results to readable files
            path_sort = os.path.join(sav_dir, 'timeprof_sort')
            with open(path_sort, 'w') as f_sort:
                f_sort.write('TIME-PROFILING OF THEANO\'S NOSETESTS'
                             ' (sorted by computation time)\n\n' + stamp + fields)
                for i in xrange(len(prof_master_nosort)):
                    s_sort = ((str(prof_master_sort[i][0]) + 's').ljust(10) +
                         " " + prof_master_sort[i][1].ljust(7) + " " +
                         prof_master_sort[i][2] + prof_master_sort[i][3] +
                         "\n")
                    f_sort.write(s_sort)

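In this example, datetime.datetime.now() is converted to a string and written as a header into the raw and per-test profiling logs. A minimal sketch of that stamping pattern, with a hypothetical log name:

import datetime

# Hypothetical log name; the real script joins it onto the saved working directory.
path_rawlog = 'timeprof_rawlog'
stamp = str(datetime.datetime.now()) + '\n\n'
with open(path_rawlog, 'w') as f_rawlog:
    # Stamp the raw log with a human-readable start time before any test output.
    f_rawlog.write('TIME-PROFILING RUN (raw log)\n\n' + stamp)
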
Example 36

Project: CumulusCI
Source File: run_apex_tests.py
View license
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS',False) in ['true','True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)
    
    if namespace:
        namespace = "'{0}'".format(namespace,)
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern,))
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(namespace,)
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name),)
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude),)

    print "Running Query: {0}".format(query,)
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'],)
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        res_user = sf.query("Select Id from User where Username = '{0}'".format(username,))
        user_id = res_user['records'][0]['Id']
        
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=sf.sf_instance,
                             object_name='TraceFlag',
                             sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])
    
        expiration = datetime.datetime.now() + datetime.timedelta(seconds=60*60*12)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'".format(job_id,))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'".format(job_id,))
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format("','".join([str(id) for id in classes_by_log_id.keys()]),)
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}".format(log_ids,))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            
            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name,)
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })
            
            # Output result for method
            if debug and json_output and result.get('stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u'   {0}: {1} ({2}s)'.format(
                    result['Outcome'], 
                    result['MethodName'], 
                    result['stats']['duration']
                )
            else:
                print u'   {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u'     DEBUG LOG INFO:'
                stats = result.get('stats',None)
                if not stats:
                    print u'       No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        try:
                            value = stats[stat]
                            output = u'       {0} / {1}'.format(value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except:
                            output = u'       {0}'.format(stats[stat],)
                            print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print u'   Message: {Message}'.format(**result)
                print u'   StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()
    
    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()
    
    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail','CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'], result['Method'], result['Outcome'])
            print u'  Message: {0}'.format(result['Message'],)
            print u'  StackTrace: {0}'.format(result['StackTrace'],)
            sys.stdout.flush()

    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)),)
        for result in test_results:
            testcase = '  <testcase classname="{0}" name="{1}"'.format(result['ClassName'], result['Method'])
            if 'Stats' in result and result['Stats'] and 'duration' in result['Stats']:
                testcase = '{0} time="{1}"'.format(testcase, result['Stats']['duration'])
            if result['Outcome'] in ['Fail','CompileFail']:
                testcase = '{0}>\n'.format(testcase,)
                testcase = '{0}    <failure type="{1}">{2}</failure>\n'.format(
                    testcase, 
                    cgi.escape(result['StackTrace']), 
                    cgi.escape(result['Message']),
                )
                testcase = '{0}  </testcase>\n'.format(testcase,)
            else:
                testcase = '{0} />\n'.format(testcase,)
            f.write(testcase)

        f.write('</testsuite>')
        f.close()
        

    return counts

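Example 36 adds a datetime.timedelta to datetime.datetime.now() to build a TraceFlag expiration 12 hours in the future, serialized via isoformat(). A minimal sketch of that pattern, with an illustrative payload rather than the real Tooling API call:

import datetime

# Hypothetical payload; the real code sends this through the Salesforce Tooling API.
expiration = datetime.datetime.now() + datetime.timedelta(hours=12)
trace_flag = {'ApexCode': 'Info', 'ExpirationDate': expiration.isoformat()}
print(trace_flag['ExpirationDate'])
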
Example 37

Project: ice
Source File: feed.py
View license
def main():
	parser = OptionParser(usage="%prog: [options] name,address[:port] ...")
	
	parser.add_option("-p", "--port", type="int", default=net._PORT, help="default upstream port [default=%default]")
	parser.add_option("-l", "--listen", type="int", default=12876, help="default server listen port [default=%default]")
	parser.add_option("-b", "--buffer-size", type="int", default=1024*4, help="receive buffer size [default=%default]")
	parser.add_option("-L", "--limit", type="int", default=-1, help="async send buffer limit (-1: unlimited) [default=%default]")
	parser.add_option("-B", "--blocking-send", action="store_true", default=False, help="disable async send thread [default=%default]")
	parser.add_option("-w", "--switch-wait-time", type="float", default=5.0, help="time to wait until switching to superior feed [default=%default]")
	parser.add_option("-a", "--ave-period", type="float", default=10.0, help="rate averaging window [default=%default]")
	parser.add_option("-A", "--min-ave-factor", type="float", default=0.25, help="averaging window fullness [default=%default]")
	parser.add_option("-i", "--imm-switch-factor", type="float", default=0.75, help="immediate switch factor (percentage of superior's rate underwhich active must be) [default=%default]")
	parser.add_option("-u", "--update-interval", type="float", default=0.25, help="UI update interval [default=%default]")
	parser.add_option("-H", "--history-length", type="float", default=30.0, help="sent frame history length (s) [default=%default]")
	parser.add_option("", "--disable-ui", action="store_true", default=False, help="disable UI [default=%default]")
	
	(options, args) = parser.parse_args()
	
	if len(args) == 0:
		print "Supply at least one feed source"
		return
	
	feeds = []
	data_event = threading.Event()
	
	max_name_length = 0
	max_destination_length = 0
	for arg in args:
		parts = arg.split(",")
		if len(parts) < 2:
			print "Ignoring invalid feed source:", arg
			continue
		destination = parts[1]
		idx = destination.find(':')
		if idx > -1:
			destination = (destination[:idx], int(destination[idx+1:]))
		else:
			destination = (destination, options.port)
		max_name_length = max(max_name_length, len(parts[0]))
		max_destination_length = max(max_destination_length, len("%s:%d" % (destination[0], destination[1])))
		feeds += [FeedSource(
			name=parts[0],
			destination=destination,
			data_event=data_event,
			averaging_period=options.ave_period,
			minimum_averaging_factor=options.min_ave_factor,
			history_length=options.history_length
		)]	# buffer_size, timeout
	
	if len(feeds) == 0:
		print "No valid feeds"
		return
	
	server = None
	scr = None
	ex = None
	feed_thread = None
	ex_str = ""
	ui_timeout = 10	#ms	# MAGIC
	log = Log(options.disable_ui, options.disable_ui)
	
	try:
		server = tcp_server.ThreadedTCPServer(("", options.listen), buffer_size=options.buffer_size, blocking_mode=options.blocking_send, send_limit=options.limit, silent=not options.disable_ui)
		
		def _log_listen_retry(e, msg):
			print "    Socket error:", msg
			if (e == 98):
				print "    Waiting, then trying again..."
		
		server.start(retry=True, wait=LISTEN_RETRY_INTERVAL, log=_log_listen_retry)
		
		print "==> TCP server running in thread:", server.server_thread.getName()
		
		print "Starting feeds..."
		for feed in feeds:
			feed.start()	# Feeds will automatically reconnect if connection fails
		
		feed_thread = FeedThread(
			feeds=feeds,
			server=server,
			options=options,
			data_event=data_event,
			history_length=options.history_length,
			log=log
		)
		
		print "Starting feed thread..."
		feed_thread.start()
		
		################################
		
		if options.disable_ui:
			while True:
				raw_input()
		else:
			scr = curses.initscr()
			scr.timeout(ui_timeout)	# -1 for blocking
			scr.keypad(1)	# Otherwise app will end when pressing arrow keys
			curses.noecho()
			scr.erase()
			
			max_y, max_x = None, None
			
			while True:
				try:
					_max_y, _max_x = scr.getmaxyx()
					if _max_y != max_y or _max_x != max_x:
						scr.erase()
					
					max_y, max_x = _max_y, _max_x
					
					scr.move(0, 0)
					scr.clrtoeol()
					scr.addstr(str(datetime.datetime.now()))
					
					y = 2
					for feed in feeds:
						x = 0
						scr.move(y, x)
						scr.clrtoeol()
						if feed == feed_thread.active_feed:
							scr.addstr(">>>")
						elif feed == feed_thread.next_best:
							scr.addstr(" > ")
						
						x = 4
						scr.move(y, x)
						scr.addstr(feed.name)
						x += max_name_length+3
						
						scr.move(y, x)
						scr.clrtoeol()
						scr.addstr("%s:%d" % (feed.destination[0], feed.destination[1]))
						x += max_destination_length+3
						
						scr.move(y, x)
						scr.clrtoeol()
						scr.addstr("%04d" % (len(feed.stats_history)))
						x += 7
						
						scr.move(y, x)
						scr.clrtoeol()
						scr.addstr("%.0f" % (feed.get_ave_rate(calculate_now=False)))
						
						y += 1
						x = 0
						scr.move(y, x)
						scr.clrtoeol()
						scr.addstr(feed.get_status_string())
						
						y += 1
						
						y += 1
					
					log_buffer = log.get_buffer()
					if len(log_buffer) > 0:
						for log_msg in log_buffer[::-1]:
							scr.move(y, 0)
							scr.insertln()
							scr.addstr(log_msg)
							#y += 1
					
					scr.refresh()
					
					ch = scr.getch()
					if ch > -1:
						if ch == 27:	# ESC (quit)
							break
						elif ch >= ord('0') and ch <= ord('9'):
							idx = (ch - ord('0') - 1) % 10
							if idx < len(feeds):
								feed_thread.switch(feeds[idx])
				except:
					pass
				
				time.sleep(options.update_interval)
	except KeyboardInterrupt:
		pass
	except Exception, e:
		ex = e
		ex_str = traceback.format_exc()
	
	if scr:
		scr.erase()
		scr.refresh()
		
		curses.nocbreak()
		scr.keypad(0)
		curses.echo()
		curses.endwin()
	
	if ex:
		print "Unhandled exception:", ex
		if len(ex_str) > 0: print ex_str
	
	try:
		print "Shutting down..."
		
		if server:
			def _log_shutdown(client):
				print "Disconnecting client:", client.client_address
			
			server.shutdown(True, log=_log_shutdown)
		
		if feed_thread:
			print "Stopping feed thread..."
			feed_thread.stop()
		
		print "Stopping feeds..."
		for feed in feeds:
			feed.stop()
	except Exception, e:
		print "Unhandled exception during shutdown:", e
	
	return 0

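In Example 37, str(datetime.datetime.now()) is redrawn at the top of the curses screen on every update cycle as a live clock. A minimal sketch of the same idea without curses, assuming a hypothetical update interval:

import datetime
import time

update_interval = 0.25  # seconds; hypothetical, the script reads it from --update-interval
for _ in range(3):
    # Render the current wall-clock time, as the curses loop does on each refresh.
    print(str(datetime.datetime.now()))
    time.sleep(update_interval)
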
Example 38

View license
    def __init__(self,
                interactive = False,
                select_first = False,
                debug = False,
                cache = True,
                banners = False,
                actors = False,
                custom_ui = None,
                language = None,
                search_all_languages = False,
                apikey = None,
                forceConnect=False,
                useZip=False):

        """interactive (True/False):
            When True, the built-in console UI is used to select the correct show.
            When False, the first search result is used.

        select_first (True/False):
            Automatically selects the first series search result (rather
            than showing the user a list of more than one series).
            Is overridden by interactive = False, or specifying a custom_ui

        debug (True/False) DEPRECATED:
             Replaced with proper use of logging module. To show debug messages:

                 >>> import logging
                 >>> logging.basicConfig(level = logging.DEBUG)

        cache (True/False/str/unicode/urllib2 opener):
            Retrieved XML are persisted to disc. If True, stores in
            tvdb_api folder under your systems TEMP_DIR, if set to
            str/unicode instance it will use this as the cache
            location. If False, disables caching.  Can also be passed
            an arbitrary Python object, which is used as a urllib2
            opener, which should be created by urllib2.build_opener

        banners (True/False):
            Retrieves the banners for a show. These are accessed
            via the _banners key of a Show(), for example:

            >>> Tvdb(banners=True)['scrubs']['_banners'].keys()
            ['fanart', 'poster', 'series', 'season']

        actors (True/False):
            Retrieves a list of the actors for a show. These are accessed
            via the _actors key of a Show(), for example:

            >>> t = Tvdb(actors=True)
            >>> t['scrubs']['_actors'][0]['name']
            u'Zach Braff'

        custom_ui (tvdb_ui.BaseUI subclass):
            A callable subclass of tvdb_ui.BaseUI (overrides interactive option)

        language (2 character language abbreviation):
            The language of the returned data. Is also the language search
            uses. Default is "en" (English). For full list, run..

            >>> Tvdb().config['valid_languages'] #doctest: +ELLIPSIS
            ['da', 'fi', 'nl', ...]

        search_all_languages (True/False):
            By default, Tvdb will only search in the language specified using
            the language option. When this is True, it will search for the
            show in any language.
        
        apikey (str/unicode):
            Override the default thetvdb.com API key. By default it will use
            tvdb_api's own key (fine for small scripts), but you can use your
            own key if desired - this is recommended if you are embedding
            tvdb_api in a larger application.
            See http://thetvdb.com/?tab=apiregister to get your own key

        forceConnect (bool):
            If true it will always try to connect to theTVDB.com even if we
            recently timed out. By default it will wait one minute before
            trying again, and any requests within that one minute window will
            return an exception immediately.

        useZip (bool):
            Download the zip archive where possible, instead of the xml.
            This is only used when all episodes are pulled.
            And only the main language xml is used, the actor and banner xml are lost.
        """
        
        global lastTimeout
        
        # if we timed out less than 1 minute ago, give up early
        if not forceConnect and lastTimeout != None and datetime.datetime.now() - lastTimeout < datetime.timedelta(minutes=1):
            raise tvdb_error("We recently timed out, so giving up early this time")
        
        self.shows = ShowContainer() # Holds all Show classes
        self.corrections = {} # Holds show-name to show_id mapping

        self.config = {}

        if apikey is not None:
            self.config['apikey'] = apikey
        else:
            self.config['apikey'] = "0629B785CE550C8D" # tvdb_api's API key

        self.config['debug_enabled'] = debug # show debugging messages

        self.config['custom_ui'] = custom_ui

        self.config['interactive'] = interactive # prompt for correct series?

        self.config['select_first'] = select_first

        self.config['search_all_languages'] = search_all_languages

        self.config['useZip'] = useZip


        if cache is True:
            self.config['cache_enabled'] = True
            self.config['cache_location'] = self._getTempDir()
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif cache is False:
            self.config['cache_enabled'] = False
            self.urlopener = urllib2.build_opener() # default opener with no caching

        elif isinstance(cache, basestring):
            self.config['cache_enabled'] = True
            self.config['cache_location'] = cache
            self.urlopener = urllib2.build_opener(
                CacheHandler(self.config['cache_location'])
            )

        elif isinstance(cache, urllib2.OpenerDirector):
            # If passed something from urllib2.build_opener, use that
            log().debug("Using %r as urlopener" % cache)
            self.config['cache_enabled'] = True
            self.urlopener = cache

        else:
            raise ValueError("Invalid value for Cache %r (type was %s)" % (cache, type(cache)))

        self.config['banners_enabled'] = banners
        self.config['actors_enabled'] = actors

        if self.config['debug_enabled']:
            warnings.warn("The debug argument to tvdb_api.__init__ will be removed in the next version. "
            "To enable debug messages, use the following code before importing: "
            "import logging; logging.basicConfig(level=logging.DEBUG)")
            logging.basicConfig(level=logging.DEBUG)


        # List of language from http://www.thetvdb.com/api/0629B785CE550C8D/languages.xml
        # Hard-coded here as it is relatively static, and saves another HTTP request, as
        # recommended on http://thetvdb.com/wiki/index.php/API:languages.xml
        self.config['valid_languages'] = [
            "da", "fi", "nl", "de", "it", "es", "fr","pl", "hu","el","tr",
            "ru","he","ja","pt","zh","cs","sl", "hr","ko","en","sv","no"
        ]

        # thetvdb.com should be based around numeric language codes,
        # but to link to a series like http://thetvdb.com/?tab=series&id=79349&lid=16
        # requires the language ID, thus this mapping is required (mainly
        # for usage in tvdb_ui - internally tvdb_api will use the language abbreviations)
        self.config['langabbv_to_id'] = {'el': 20, 'en': 7, 'zh': 27,
        'it': 15, 'cs': 28, 'es': 16, 'ru': 22, 'nl': 13, 'pt': 26, 'no': 9,
        'tr': 21, 'pl': 18, 'fr': 17, 'hr': 31, 'de': 14, 'da': 10, 'fi': 11,
        'hu': 19, 'ja': 25, 'he': 24, 'ko': 32, 'sv': 8, 'sl': 30}

        if language is None:
            self.config['language'] = 'en'
        else:
            if language not in self.config['valid_languages']:
                raise ValueError("Invalid language %s, options are: %s" % (
                    language, self.config['valid_languages']
                ))
            else:
                self.config['language'] = language

        # The following url_ configs are based of the
        # http://thetvdb.com/wiki/index.php/Programmers_API
        self.config['base_url'] = "http://thetvdb.com"

        if self.config['search_all_languages']:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=all" % self.config
        else:
            self.config['url_getSeries'] = u"%(base_url)s/api/GetSeries.php?seriesname=%%s&language=%(language)s" % self.config

        self.config['url_epInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.xml" % self.config
        self.config['url_epInfo_zip'] = u"%(base_url)s/api/%(apikey)s/series/%%s/all/%%s.zip" % self.config

        self.config['url_seriesInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/%%s.xml" % self.config
        self.config['url_actorsInfo'] = u"%(base_url)s/api/%(apikey)s/series/%%s/actors.xml" % self.config

        self.config['url_seriesBanner'] = u"%(base_url)s/api/%(apikey)s/series/%%s/banners.xml" % self.config
        self.config['url_artworkPrefix'] = u"%(base_url)s/banners/%%s" % self.config

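Example 38 compares datetime.datetime.now() against the time of the last timeout to impose a one-minute cooldown before reconnecting. A minimal sketch of that check, with a hypothetical module-level marker:

import datetime

last_timeout = None  # hypothetical marker, set to datetime.datetime.now() when a request times out

def recently_timed_out(force_connect=False):
    # Give up early if the last timeout happened less than one minute ago.
    if force_connect or last_timeout is None:
        return False
    return datetime.datetime.now() - last_timeout < datetime.timedelta(minutes=1)

print(recently_timed_out())  # False until last_timeout is set
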
Example 39

Project: pygbe
Source File: main.py
View license
def main(argv=sys.argv, log_output=True, return_output_fname=False,
         return_results_dict=False):
    """
    Run a PyGBe problem, write outputs to STDOUT and to log file in
    problem directory

    Arguments
    ----------
    log_output         : Bool, default True.
                         If False, output is written only to STDOUT and not
                         to a log file.
    return_output_fname: Bool, default False.
                         If True, function main() returns the name of the
                         output log file. This is used for the regression tests.

    Returns
    --------
    output_fname       : str, if kwarg is True.
                         The name of the log file containing problem output
    """

    args = read_inputs(argv[1:])
    configFile, paramfile = find_config_files(args)
    full_path = os.environ.get('PYGBE_PROBLEM_FOLDER')
    #check if a custom geometry location has been specified
    #if it has, add an ENV_VAR to handle it
    if args.geometry:
        geo_path = os.path.abspath(args.geometry)
        if os.path.isdir(geo_path):
            os.environ['PYGBE_GEOMETRY'] = geo_path
        else:
            sys.exit('Invalid geometry prefix provided (Folder not found)')
    else:
        geo_path = os.path.join(full_path, 'geometry')

    #try to expand ~ if present in output path
    args.output = os.path.expanduser(args.output)
    #if output path is absolute, use that, otherwise prepend
    #problem path
    if not os.path.isdir(args.output):
        output_dir = os.path.join(full_path, args.output)
    else:
        output_dir = args.output
    # create output directory if it doesn't already exist
    try:
        os.makedirs(output_dir)
    except OSError:
        pass

    results_dict = {}
    timestamp = time.localtime()
    outputfname = '{:%Y-%m-%d-%H%M%S}-output.log'.format(datetime.now())
    results_dict['output_file'] = outputfname
    if log_output:
        sys.stdout = Logger(os.path.join(output_dir, outputfname))
    # Time stamp
    print('Run started on:')
    print('\tDate: {}/{}/{}'.format(timestamp.tm_year, timestamp.tm_mon,
                                    timestamp.tm_mday))
    print('\tTime: {}:{}:{}'.format(timestamp.tm_hour, timestamp.tm_min,
                                    timestamp.tm_sec))
    TIC = time.time()

    print('Config file: {}'.format(configFile))
    print('Parameter file: {}'.format(paramfile))
    print('Geometry folder: {}'.format(geo_path))
    print('Running in: {}'.format(full_path))
    results_dict['config_file'] = configFile
    results_dict['param_file'] = paramfile
    results_dict['geo_file'] = geo_path
    results_dict['full_path'] = full_path

    ### Read parameters
    param = Parameters()
    precision = readParameters(param, paramfile)

    param.Nm = (param.P + 1) * (param.P + 2) * (
        param.P + 3) // 6  # Number of terms in Taylor expansion
    param.BlocksPerTwig = int(numpy.ceil(param.NCRIT / float(param.BSZ))
                              )  # CUDA blocks that fit per twig

    HAS_GPU = check_for_nvcc()
    if param.GPU == 1 and not HAS_GPU:
        print('\n\n\n\n')
        print('{:-^{}}'.format('No GPU DETECTED', 60))
        print("Your param file has `GPU = 1` but CUDA was not detected.\n"
              "Continuing using CPU.  If you do not want this, use Ctrl-C\n"
              "to stop the program and check that your CUDA installation\n"
              "is on your $PATH")
        print('{:-^{}}'.format('No GPU DETECTED', 60))
        print('\n\n\n\n')
        param.GPU = 0

    ### Generate array of fields
    field_array = initialize_field(configFile, param)

    ### Generate array of surfaces and read in elements
    surf_array = initialize_surface(field_array, configFile, param)

    ### Fill surface class
    time_sort = 0.
    for i in range(len(surf_array)):
        time_sort += surf_array[i].fill_surface(param)

    ### Output setup summary
    param.N = 0
    param.Neq = 0
    for s in surf_array:
        N_aux = len(s.triangle)
        param.N += N_aux
        if s.surf_type == 'dirichlet_surface' or s.surf_type == 'neumann_surface' or s.surf_type == 'asc_surface':
            param.Neq += N_aux
        else:
            param.Neq += 2 * N_aux
    print('\nTotal elements : {}'.format(param.N))
    print('Total equations: {}'.format(param.Neq))

    results_dict['total_elements'] = param.N
    results_dict['N_equation'] = param.Neq

    results_dict = print_summary(surf_array, field_array, param, results_dict)

    ### Precomputation
    ind0 = IndexConstant()
    computeIndices(param.P, ind0)
    precomputeTerms(param.P, ind0)

    ### Load CUDA code
    if param.GPU == 1:
        kernel = kernels(param.BSZ, param.Nm, param.K_fine, param.P, precision)
    else:
        kernel = 1

    ### Generate interaction list
    print('Generate interaction list')
    tic = time.time()
    generateList(surf_array, field_array, param)
    toc = time.time()
    list_time = toc - tic

    ### Transfer data to GPU
    print('Transfer data to GPU')
    tic = time.time()
    if param.GPU == 1:
        dataTransfer(surf_array, field_array, ind0, param, kernel)
    toc = time.time()
    transfer_time = toc - tic

    timing = Timing()

    ### Generate RHS
    print('Generate RHS')
    tic = time.time()
    if param.GPU == 0:
        F = generateRHS(field_array, surf_array, param, kernel, timing, ind0)
    elif param.GPU == 1:
        F = generateRHS_gpu(field_array, surf_array, param, kernel, timing,
                            ind0)
    toc = time.time()
    rhs_time = toc - tic

    setup_time = toc - TIC
    print('List time          : {}s'.format(list_time))
    print('Data transfer time : {}s'.format(transfer_time))
    print('RHS generation time: {}s'.format(rhs_time))
    print('-'*30)
    print('Total setup time   : {}s'.format(setup_time))

    tic = time.time()

    ### Solve
    print('Solve')
    phi = numpy.zeros(param.Neq)
    phi, iteration = gmres_mgs(surf_array, field_array, phi, F, param, ind0,
                               timing, kernel)
    toc = time.time()
    results_dict['iterations'] = iteration
    solve_time = toc - tic
    print('Solve time        : {}s'.format(solve_time))
    phifname = '{:%Y-%m-%d-%H%M%S}-phi.txt'.format(datetime.now())
    results_dict['solve_time'] = solve_time
    numpy.savetxt(os.path.join(output_dir, phifname), phi)

    # Put result phi in corresponding surfaces
    s_start = 0
    for surf in surf_array:
        s_start = surf.fill_phi(phi, s_start)

    # Calculate solvation energy
    print('Calculate Esolv')
    tic = time.time()
    E_solv = calculate_solvation_energy(surf_array, field_array, param, kernel)
    toc = time.time()
    print('Time Esolv: {}s'.format(toc - tic))
    ii = -1
    for i, f in enumerate(field_array):
        if f.pot == 1:
            parent_type = surf_array[f.parent[0]].surf_type
            if parent_type != 'dirichlet_surface' and parent_type != 'neumann_surface':
                ii += 1
                print('Region {}: Esolv = {} kcal/mol = {} kJ/mol'.format(i,
                                                                          E_solv[ii],
                                                                          E_solv[ii] * 4.184))

    # Calculate surface energy
    print('\nCalculate Esurf')
    tic = time.time()
    E_surf = calculate_surface_energy(surf_array, field_array, param, kernel)
    toc = time.time()
    ii = -1
    for f in param.E_field:
        parent_type = surf_array[field_array[f].parent[0]].surf_type
        if parent_type == 'dirichlet_surface' or parent_type == 'neumann_surface':
            ii += 1
            print('Region {}: Esurf = {} kcal/mol = {} kJ/mol'.format(
                f, E_surf[ii], E_surf[ii] * 4.184))
    print('Time Esurf: {}s'.format(toc - tic))

    ### Calculate Coulombic interaction
    print('\nCalculate Ecoul')
    tic = time.time()
    i = -1
    E_coul = []
    for f in field_array:
        i += 1
        if f.coulomb == 1:
            print('Calculate Coulomb energy for region {}'.format(i))
            E_coul.append(coulomb_energy(f, param))
            print('Region {}: Ecoul = {} kcal/mol = {} kJ/mol'.format(
                i, E_coul[-1], E_coul[-1] * 4.184))
    toc = time.time()
    print('Time Ecoul: {}s'.format(toc - tic))

    ### Output summary
    print('\n'+'-'*30)
    print('Totals:')
    print('Esolv = {} kcal/mol'.format(sum(E_solv)))
    print('Esurf = {} kcal/mol'.format(sum(E_surf)))
    print('Ecoul = {} kcal/mol'.format(sum(E_coul)))
    print('\nTime = {} s'.format(toc - TIC))
    results_dict['total_time'] = (toc - TIC)
    results_dict['E_solv_kcal'] = sum(E_solv)
    results_dict['E_solv_kJ'] = sum(E_solv) * 4.184
    results_dict['E_surf_kcal'] = sum(E_surf)
    results_dict['E_surf_kJ'] = sum(E_surf) * 4.184
    results_dict['E_coul_kcal'] = sum(E_coul)
    results_dict['E_coul_kJ'] = sum(E_coul) * 4.184

    output_pickle = outputfname.split('-')
    output_pickle.pop(-1)
    output_pickle.append('resultspickle')
    output_pickle = '-'.join(output_pickle)
    with open(os.path.join(output_dir, output_pickle), 'wb') as f:
        pickle.dump(results_dict, f, 2)

    #reset stdout so regression tests, etc, don't get logged into the output
    #file that they themselves are trying to read
    sys.stdout = sys.__stdout__

    if return_results_dict:
        return results_dict

    if return_output_fname and log_output:
        return outputfname

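Example 39 formats datetime.now() directly inside str.format to build timestamped output file names. A minimal sketch of that pattern:

from datetime import datetime

# The format spec inside the braces is applied to the datetime object itself,
# producing e.g. '2016-05-04-153045-output.log'.
outputfname = '{:%Y-%m-%d-%H%M%S}-output.log'.format(datetime.now())
print(outputfname)
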
Example 40

Project: pystan
Source File: model.py
View license
    def sampling(self, data=None, pars=None, chains=4, iter=2000,
                 warmup=None, thin=1, seed=None, init='random',
                 sample_file=None, diagnostic_file=None, verbose=False,
                 algorithm=None, control=None, n_jobs=-1, **kwargs):
        """Draw samples from the model.

        Parameters
        ----------
        data : dict
            A Python dictionary providing the data for the model. Variables
            for Stan are stored in the dictionary as expected. Variable
            names are the keys and the values are their associated values.
            Stan only accepts certain kinds of values; see Notes.

        pars : list of string, optional
            A list of strings indicating parameters of interest. By default
            all parameters specified in the model will be stored.

        chains : int, optional
            Positive integer specifying number of chains. 4 by default.

        iter : int, 2000 by default
            Positive integer specifying how many iterations for each chain
            including warmup.

        warmup : int, iter//2 by default
            Positive integer specifying number of warmup (aka burn-in) iterations.
            As `warmup` also specifies the number of iterations used for step-size
            adaptation, warmup samples should not be used for inference.

        thin : int, 1 by default
            Positive integer specifying the period for saving samples.

        seed : int or np.random.RandomState, optional
            The seed, a positive integer for random number generation. Only
            one seed is needed when multiple chains are used, as the other
            chain's seeds are generated from the first chain's to prevent
            dependency among random number streams. By default, seed is
            ``random.randint(0, MAX_UINT)``.

        algorithm : {"NUTS", "HMC", "Fixed_param"}, optional
            One of algorithms that are implemented in Stan such as the No-U-Turn
            sampler (NUTS, Hoffman and Gelman 2011), static HMC, or ``Fixed_param``.

        init : {0, '0', 'random', function returning dict, list of dict}, optional
            Specifies how initial parameter values are chosen: 0 or '0'
            initializes all to be zero on the unconstrained support; 'random'
            generates random initial values; list of size equal to the number
            of chains (`chains`), where the list contains a dict with initial
            parameter values; function returning a dict with initial parameter
            values. The function may take an optional argument `chain_id`.

        sample_file : string, optional
            File name specifying where samples for *all* parameters and other
            saved quantities will be written. If not provided, no samples
            will be written. If the folder given is not writable, a temporary
            directory will be used. When there are multiple chains, an underscore
            and chain number are appended to the file name. By default do not
            write samples to file.

        verbose : boolean, False by default
            Indicates whether intermediate output should be piped to the
            console. This output may be useful for debugging.

        control : dict, optional
            A dictionary of parameters to control the sampler's behavior. Default
            values are used if control is not specified.  The following are
            adaptation parameters for sampling algorithms.

            These are parameters used in Stan with similar names:

            - `adapt_engaged` : bool, default True
            - `adapt_gamma` : float, positive, default 0.05
            - `adapt_delta` : float, between 0 and 1, default 0.8
            - `adapt_kappa` : float, default 0.75
            - `adapt_t0`    : float, positive, default 10

            In addition, the algorithm HMC (called 'static HMC' in Stan) and NUTS
            share the following parameters:

            - `stepsize`: float, positive
            - `stepsize_jitter`: float, between 0 and 1
            - `metric` : str, {"unit_e", "diag_e", "dense_e"}

            In addition, depending on which algorithm is used, different parameters
            can be set as in Stan for sampling. For the algorithm HMC we can set

            - `int_time`: float, positive

            For algorithm NUTS, we can set

            - `max_treedepth` : int, positive

        n_jobs : int, optional
            Sample in parallel. If -1 all CPUs are used. If 1, no parallel
            computing code is used at all, which is useful for debugging.

        Returns
        -------
        fit : StanFit4Model
            Instance containing the fitted results.

        Other parameters
        ----------------

        chain_id : int or iterable of int, optional
            `chain_id` can be a vector to specify the chain_id for all chains or
            an integer. For the former case, they should be unique. For the latter,
            the sequence of integers starting from the given `chain_id` are used
            for all chains.

        init_r : float, optional
            `init_r` is only valid if `init` == "random". In this case, the initial
            values are simulated from [-`init_r`, `init_r`] rather than using the
            default interval (see the manual of Stan).

        test_grad: bool, optional
            If `test_grad` is ``True``, Stan will not do any sampling. Instead,
            the gradient calculation is tested and printed out and the fitted
            StanFit4Model object is in test gradient mode.  By default, it is
            ``False``.

        append_samples : bool, optional

        refresh : int, optional
            Argument `refresh` can be used to control how to indicate the progress
            during sampling (i.e. show the progress every `refresh` iterations).
            By default, `refresh` is `max(iter/10, 1)`.

        Examples
        --------
        >>> from pystan import StanModel
        >>> m = StanModel(model_code='parameters {real y;} model {y ~ normal(0,1);}')
        >>> m.sampling(iter=100)

        """
        # NOTE: in this function, iter masks iter() the python function.
        # If this ever turns out to be a problem just add:
        # iter_ = iter
        # del iter  # now builtins.iter is available
        if diagnostic_file is not None:
            raise NotImplementedError("diagnostic_file not supported yet")
        if data is None:
            data = {}
        if warmup is None:
            warmup = int(iter // 2)
        algorithms = ("NUTS", "HMC", "Fixed_param")  # , "Metropolis")
        algorithm = "NUTS" if algorithm is None else algorithm
        if algorithm not in algorithms:
            raise ValueError("Algorithm must be one of {}".format(algorithms))

        fit = self.fit_class(data)

        m_pars = fit._get_param_names()
        p_dims = fit._get_param_dims()

        if isinstance(pars, string_types):
            pars = [pars]
        if pars is not None and len(pars) > 0:
            # Implementation note: this does not set the params_oi for the
            # instances of stan_fit which actually make the calls to
            # call_sampler. This is because we need separate instances of
            # stan_fit in each thread/process. So update_param_oi needs to
            # be called in every stan_fit instance.
            fit._update_param_oi(pars)
            if not all(p in m_pars for p in pars):
                pars = np.asarray(pars)
                unmatched = pars[np.invert(np.in1d(pars, m_pars))]
                msg = "No parameter(s): {}; sampling not done."
                raise ValueError(msg.format(', '.join(unmatched)))
        else:
            pars = m_pars

        if chains < 1:
            raise ValueError("The number of chains is less than one; sampling"
                             "not done.")

        # check that arguments in kwargs are valid
        valid_args = {"chain_id", "init_r", "test_grad", "append_samples", "refresh", "control"}
        for arg in kwargs:
            if arg not in valid_args:
                raise ValueError("Parameter `{}` is not recognized.".format(arg))

        args_list = pystan.misc._config_argss(chains=chains, iter=iter,
                                              warmup=warmup, thin=thin,
                                              init=init, seed=seed, sample_file=sample_file,
                                              diagnostic_file=diagnostic_file,
                                              algorithm=algorithm,
                                              control=control, **kwargs)

        # number of samples saved after thinning
        warmup2 = 1 + (warmup - 1) // thin
        n_kept = 1 + (iter - warmup - 1) // thin
        n_save = n_kept + warmup2

        if n_jobs is None:
            n_jobs = -1

        # disable multiprocessing if we only have a single chain
        if chains == 1:
            n_jobs = 1

        assert len(args_list) == chains
        call_sampler_args = izip(itertools.repeat(data), args_list, itertools.repeat(pars))
        call_sampler_star = self.module._call_sampler_star
        ret_and_samples = _map_parallel(call_sampler_star, call_sampler_args, n_jobs)
        samples = [smpl for _, smpl in ret_and_samples]

        # _organize_inits strips out lp__ (RStan does it in this method)
        inits_used = pystan.misc._organize_inits([s['inits'] for s in samples], m_pars, p_dims)

        random_state = np.random.RandomState(args_list[0]['seed'])
        perm_lst = [random_state.permutation(int(n_kept)) for _ in range(chains)]
        fnames_oi = fit._get_param_fnames_oi()
        n_flatnames = len(fnames_oi)
        fit.sim = {'samples': samples,
                   # rstan has this; name clashes with 'chains' in samples[0]['chains']
                   'chains': len(samples),
                   'iter': iter,
                   'warmup': warmup,
                   'thin': thin,
                   'n_save': [n_save] * chains,
                   'warmup2': [warmup2] * chains,
                   'permutation': perm_lst,
                   'pars_oi': fit._get_param_names_oi(),
                   'dims_oi': fit._get_param_dims_oi(),
                   'fnames_oi': fnames_oi,
                   'n_flatnames': n_flatnames}
        fit.model_name = self.model_name
        fit.model_pars = m_pars
        fit.par_dims = p_dims
        fit.mode = 0 if not kwargs.get('test_grad') else 1
        fit.inits = inits_used
        fit.stan_args = args_list
        fit.stanmodel = self
        fit.date = datetime.datetime.now()
        return fit

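Example 40 stamps the returned fit object with datetime.datetime.now() so the completion time travels with the results. A minimal sketch, using a hypothetical result class:

import datetime

class FitResult(object):
    """Hypothetical stand-in for the object returned by sampling()."""

fit = FitResult()
fit.date = datetime.datetime.now()  # record when sampling finished
print(fit.date)
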
Example 41

Project: timestring
Source File: Date.py
View license
    def __init__(self, date, offset=None, start_of_week=None, tz=None, verbose=False):
        if isinstance(date, Date):
            self.date = copy(date.date)
            return

        # The original request
        self._original = date
        if tz:
            tz = pytz.timezone(str(tz))

        if date == 'infinity':
            self.date = 'infinity'

        elif date == 'now':
            self.date = datetime.now()

        elif type(date) in (str, unicode) and re.match(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+-\d{2}", date):
            self.date = datetime.strptime(date[:-3], "%Y-%m-%d %H:%M:%S.%f") - timedelta(hours=int(date[-3:]))

        else:
            # Determine starting date.
            if type(date) in (str, unicode):
                """The date is a string and needs to be converted into a <dict> for processesing
                """
                _date = date.lower()
                res = TIMESTRING_RE.search(_date.strip())
                if res:
                    date = res.groupdict()
                    if verbose:
                        print("Matches:\n", ''.join(["\t%s: %s\n" % (k, v) for k, v in date.items() if v]))
                else:
                    raise TimestringInvalid('Invalid date string >> %s' % date)

                date = dict((k, v if type(v) is str else v) for k, v in date.items() if v)
                #print(_date, dict(map(lambda a: (a, date.get(a)), filter(lambda a: date.get(a), date))))

            if isinstance(date, dict):
                # Initial date.
                new_date = datetime(*time.localtime()[:3])
                if tz and tz.zone != "UTC":
                    #
                    # The purpose here is to adjust what day it is based on the timezone
                    #
                    ts = datetime.now()
                    # Daylight saving time starts on the second Sunday in March and reverts to standard time on the first Sunday in November
                    # Monday is 0 and Sunday is 6.
                    # 14 days - dst_start.weekday()
                    dst_start = datetime(ts.year, 3, 1, 2, 0, 0) + timedelta(13 - datetime(ts.year, 3, 1).weekday())
                    dst_end = datetime(ts.year, 11, 1, 2, 0, 0) + timedelta(6 - datetime(ts.year, 11, 1).weekday())

                    ts = ts + tz.utcoffset(new_date, is_dst=(dst_start < ts < dst_end))
                    new_date = datetime(ts.year, ts.month, ts.day)

                if date.get('unixtime'):
                    new_date = datetime.fromtimestamp(int(date.get('unixtime')))

                # !number of (days|...) (ago)?
                elif date.get('num') and (date.get('delta') or date.get('delta_2')):
                    if date.get('num', '').find('couple') > -1:
                        i = 2 * int(1 if date.get('ago', True) or date.get('ref') == 'last' else -1)
                    else:
                        i = int(text2num(date.get('num', 'one'))) * int(1 if date.get('ago') or (date.get('ref', '') or '') == 'last' else -1)

                    delta = (date.get('delta') or date.get('delta_2')).lower()
                    if delta.startswith('y'):
                        try:
                            new_date = new_date.replace(year=(new_date.year - i))
                        # day is out of range for month
                        except ValueError:
                            new_date = new_date - timedelta(days=(365*i))
                    elif delta.startswith('month'):
                        try:
                            new_date = new_date.replace(month=(new_date.month - i))
                        # day is out of range for month
                        except ValueError:
                            new_date = new_date - timedelta(days=(30*i))

                    elif delta.startswith('q'):
                        '''
                        This section is not working...
                        Most likely need a generator that will take me to the right quarter.
                        '''
                        q1, q2, q3, q4 = datetime(new_date.year, 1, 1), datetime(new_date.year, 4, 1), datetime(new_date.year, 7, 1), datetime(new_date.year, 10, 1)
                        if q1 <= new_date < q2:
                            # We are in Q1
                            if i == -1:
                                new_date = datetime(new_date.year-1, 10, 1)
                            else:
                                new_date = q2
                        elif q2 <= new_date < q3:
                            # We are in Q2
                            pass
                        elif q3 <= new_date < q4:
                            # We are in Q3
                            pass
                        else:
                            # We are in Q4
                            pass
                        new_date = new_date - timedelta(days=(91*i))

                    elif delta.startswith('w'):
                        new_date = new_date - timedelta(days=(i * 7))

                    else:
                        new_date = new_date - timedelta(**{('days' if delta.startswith('d') else 'hours' if delta.startswith('h') else 'minutes' if delta.startswith('m') else 'seconds'): i})

                # !dow
                if [date.get(key) for key in ('day', 'day_2', 'day_3') if date.get(key)]:
                    dow = max([date.get(key) for key in ('day', 'day_2', 'day_3') if date.get(key)])
                    iso = dict(monday=1, tuesday=2, wednesday=3, thursday=4, friday=5, saturday=6, sunday=7, mon=1, tue=2, tues=2, wed=3, wedn=3, thu=4, thur=4, fri=5, sat=6, sun=7).get(dow)
                    if iso:
                        # determine which direction
                        if date.get('ref') not in ('this', 'next'):
                            days = iso - new_date.isoweekday() - (7 if iso >= new_date.isoweekday() else 0)
                        else:
                            days = iso - new_date.isoweekday() + (7 if iso < new_date.isoweekday() else 0)

                        new_date = new_date + timedelta(days=days)

                    elif dow == 'yesterday':
                        new_date = new_date - timedelta(days=1)
                    elif dow == 'tomorrow':
                        new_date = new_date + timedelta(days=1)

                # !year
                year = [int(CLEAN_NUMBER.sub('', date[key])) for key in ('year', 'year_2', 'year_3', 'year_4', 'year_5', 'year_6') if date.get(key)]
                if year:
                    year = max(year)
                    if len(str(year)) != 4:
                        year += 2000 if year <= 40 else 1900
                    new_date = new_date.replace(year=year)

                # !month
                month = [date.get(key) for key in ('month', 'month_1', 'month_2', 'month_3', 'month_4') if date.get(key)]
                if month:
                    new_date = new_date.replace(day=1)
                    new_date = new_date.replace(month=int(max(month)) if re.match('^\d+$', max(month)) else dict(january=1, february=2, march=3, april=4, june=6, july=7, august=8, september=9, october=10, november=11, december=12, jan=1, feb=2, mar=3, apr=4, may=5, jun=6, jul=7, aug=8, sep=9, sept=9, oct=10, nov=11, dec=12).get(max(month),  new_date.month))

                # !day
                day = [date.get(key) for key in ('date', 'date_2', 'date_3') if date.get(key)]
                if day:
                    new_date = new_date.replace(day=int(max(day)))

                # !daytime
                if date.get('daytime'):
                    if date['daytime'].find('this time') >= 1:
                        new_date = new_date.replace(hour=datetime(*time.localtime()[:5]).hour,
                                                    minute=datetime(*time.localtime()[:5]).minute)
                    else:
                        new_date = new_date.replace(hour=dict(morning=9, noon=12, afternoon=15, evening=18, night=21, nighttime=21, midnight=24).get(date.get('daytime'), 12))
                    # No offset because the hour was set.
                    offset = False

                # !hour
                hour = [date.get(key) for key in ('hour', 'hour_2', 'hour_3') if date.get(key)]
                if hour:
                    new_date = new_date.replace(hour=int(max(hour)))
                    am = [date.get(key) for key in ('am', 'am_1') if date.get(key)]
                    if am and max(am) in ('p', 'pm'):
                        h = int(max(hour))
                        if h < 12:
                            new_date = new_date.replace(hour=h+12)
                    # No offset because the hour was set.
                    offset = False

                    #minute
                    minute = [date.get(key) for key in ('minute', 'minute_2') if date.get(key)]
                    if minute:
                        new_date = new_date.replace(minute=int(max(minute)))

                    #second
                    seconds = date.get('seconds', 0)
                    if seconds:
                        new_date = new_date.replace(second=int(seconds))

                self.date = new_date

            elif type(date) in (int, long, float) and re.match('^\d{10}$', str(date)):
                self.date = datetime.fromtimestamp(int(date))

            elif isinstance(date, datetime):
                self.date = date

            elif date is None:
                self.date = datetime.now()

            else:
                # Set to the current date Y, M, D, H0, M0, S0
                self.date = datetime(*time.localtime()[:3])

            if tz:
                self.date = self.date.replace(tzinfo=tz)

            # end if type(date) is types.DictType: and self.date.hour == 0:
            if offset and isinstance(offset, dict):
                self.date = self.date.replace(**offset)
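
A minimal sketch of the datetime.now dispatch this constructor performs: both the literal string 'now' and a None argument fall back to the current local time. resolve is a hypothetical helper, not part of timestring:

from datetime import datetime

def resolve(date):
    # Mirrors the branches above: 'now' and None both mean "current local time".
    if date == 'now' or date is None:
        return datetime.now()
    if isinstance(date, datetime):
        return date
    raise ValueError('unsupported date input: %r' % (date,))

print(resolve('now'))   # current local time
print(resolve(None))    # also current local time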

Example 42

Project: read_FEC
Source File: regen_overview.py
View license
    def handle(self, *args, **options):
        
        for cycle in ACTIVE_CYCLES:
            
            cycle_details = cycle_calendar[int(cycle)]
            CYCLE_START = cycle_details['start']
            CYCLE_END = cycle_details['end']
        
            update_time = datetime.now()
        
            print "Running outside spending summaries"
            summary_obj = {}
        
            all_independent_expenditures = SkedE.objects.filter(superceded_by_amendment=False, effective_date__gte=CYCLE_START, effective_date__lte=CYCLE_END)
            summary_obj['ie_sum'] = all_independent_expenditures.aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
        
            print "\tpositive vs negative"
            # positive vs negative
            summary_obj['positive'] = all_independent_expenditures.filter(support_oppose_checked='S').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
            summary_obj['negative'] = all_independent_expenditures.filter(support_oppose_checked='O').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']

            print "\ttarget party breakdown"
            # breakdown by target party
            summary_obj['pro_dem'] = all_independent_expenditures.filter(support_oppose_checked='S', candidate_party_checked='D').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
            summary_obj['pro_rep'] = all_independent_expenditures.filter(support_oppose_checked='S', candidate_party_checked='R').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
            summary_obj['anti_dem'] = all_independent_expenditures.filter(support_oppose_checked='O', candidate_party_checked='D').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
            summary_obj['anti_rep'] = all_independent_expenditures.filter(support_oppose_checked='O', candidate_party_checked='R').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
        
            print "\toffice breakdown"
            # breakdown by office
            summary_obj['senate'] = all_independent_expenditures.filter(candidate_office_checked='S').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
            summary_obj['house'] = all_independent_expenditures.filter(candidate_office_checked='H').aggregate(total_expenditures=Sum('expenditure_amount'))['total_expenditures']
       
        
            all_outside_spenders = Committee_Overlay.nulls_last_objects.filter(total_indy_expenditures__gt=0, cycle=str(cycle))
            print "\toutside spending types"
            # breakdown by types:
            summary_obj['party_committees'] = all_outside_spenders.filter(ctype__in=('X', 'Y', 'Z')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            # includes hybrids
            summary_obj['super_pacs'] = all_outside_spenders.filter(ctype__in=('O', 'U', 'V', 'W')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            # non-committees:
            summary_obj['non_committees'] = all_outside_spenders.filter(ctype__in=('I')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            summary_obj['oth_committees'] = all_outside_spenders.filter(ctype__in=('N', 'Q')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            summary_obj['house_committees'] = all_outside_spenders.filter(ctype__in=('H')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            summary_obj['senate_committees'] = all_outside_spenders.filter(ctype__in=('S')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
        
        
        
            print "\toutside spending parties"
            # breakdown by parties 
            summary_obj['dem_affil'] = all_outside_spenders.filter(political_orientation='D').aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            summary_obj['rep_affil'] = all_outside_spenders.filter(political_orientation='R').aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
            summary_obj['no_affil'] = all_outside_spenders.exclude(political_orientation__in=('R', 'D')).aggregate(total_expenditures=Sum('total_indy_expenditures'))['total_expenditures']
        
        
        
            ## write the outside spending overview

            other_year = None
            if cycle == '2016':
                other_year = '2014'
            elif cycle == '2014':
                other_year = '2016'
            cycle_list = [cycle_fake(cycle, "/overview/outside-money/%s/" % cycle), cycle_fake(other_year, "/overview/outside-money/%s/" % other_year)]


            page_title = "Independent Expenditures, %s Cycle" % cycle    
            c = Context({"update_time": update_time, "sums": summary_obj, "page_title":page_title, "cycle_list":cycle_list, "cycle_start":CYCLE_START, "cycle_end":CYCLE_END})
            this_template = get_template('generated_pages/overview_outside_money.html')
            result = this_template.render(c)
            thisurl = "/overview/outside-money/%s/" % cycle
            
            thisflatpage, created = FlatPage.objects.get_or_create(url=thisurl)            
            thisflatpage.title = page_title
            thisflatpage.content = result
            thisflatpage.template_name = "flatpages/cycle_enabled_base.html" # Default for admin flatpages too
            if not thisflatpage.sites.all():
                thisflatpage.sites.add(this_site)
                
            thisflatpage.save()
            
            
            ### end outside spending part. 

            
            ## deal with the inside spending
        
            # assumes fake committees have been removed; disregard joint fundraisers which disburse their proceeds to their recipients
            
            print "Running main overview summaries" 
            
            all_webk = WebK.objects.filter(cycle=cycle).exclude(com_des='J')
            summary_types = [
                {'name':'Super PACs', 'code':'UOVW', 'outside_spending': summary_obj['super_pacs']},
                {'name':'Party Committees', 'code':'XYZ', 'outside_spending': summary_obj['party_committees']},
                {'name':'House Candidate Committees', 'code':'H', 'outside_spending': summary_obj['house_committees']},
                {'name':'Senate Candidate Committees', 'code':'S', 'outside_spending': summary_obj['senate_committees']},
                {'name':'Other PACs', 'code':'NQ', 'outside_spending': summary_obj['oth_committees']}
            ]
            for s in summary_types:
                code_list = [i for i in s['code']]
                sums = all_webk.filter(com_typ__in=code_list).aggregate(tot_rec=Sum('tot_rec'), tot_dis=Sum('tot_dis'), par_com_con=Sum('par_com_con'), oth_com_con=Sum('oth_com_con'), ind_ite_con=Sum('ind_ite_con'), ind_uni_con=Sum('ind_uni_con'), fed_can_com_con=Sum('fed_can_com_con'), tot_ope_exp=Sum('tot_ope_exp'), ope_exp=Sum('ope_exp'))
                s['tot_dis'] = sums['tot_dis'] or 0
                s['tot_rec'] = sums['tot_rec'] or 0
                s['oth_com_con'] = (sums['oth_com_con'] or 0) + (sums['par_com_con'] or 0)
                s['ind_ite_con']= sums['ind_ite_con'] or 0
                s['ind_uni_con'] = sums['ind_uni_con'] or 0
                s['fed_can_com_con'] = sums['fed_can_com_con'] or 0
            
                # operating expenses are recorded differently for candidate pacs
                if s['code'] in ['H', 'S']:
                    s['tot_ope_exp'] = sums['ope_exp'] or 0
                else: 
                    s['tot_ope_exp'] = sums['tot_ope_exp'] or 0
        
        
            ## reuse this stuff as is in noncommittees
            all_noncommittees = Committee_Overlay.objects.filter(ctype__in=['I'], cycle=cycle).exclude(designation='J')       
            sums = all_noncommittees.aggregate(tot_ie=Sum('total_indy_expenditures'))
            ##
            dark_money_total_ies = sums['tot_ie']
            summary_types.append({'name':'Dark Money', 'code':'I', 'outside_spending': sums['tot_ie'], 'tot_dis':0, 'tot_rec':0, 'oth_com_con':0, 'ind_ite_con':0, 'ind_uni_con':0, 'fed_can_com_con':0, 'tot_ope_exp':0})
        
            print "\toverview main sums: %s" % summary_obj
            print "\toverview inside money: %s" % summary_types


            other_year = None
            if cycle == '2016':
                other_year = '2014'
            elif cycle == '2014':
                other_year = '2016'
            cycle_list = [cycle_fake(cycle, "/overview/%s/" % cycle), cycle_fake(other_year, "/overview/%s/" % other_year)]



            page_title = "Cycle Overview, %s Cycle" % cycle    
            c = Context({"update_time": update_time, "sums": summary_obj, "inside_money": summary_types, "page_title":page_title, "cycle_list":cycle_list, "cycle_start":CYCLE_START, "cycle_end":CYCLE_END})
            
            this_template = get_template('generated_pages/overview_main.html')
            result = this_template.render(c)
            
            thisurl = "/overview/%s/" % cycle
            
            thisflatpage, created = FlatPage.objects.get_or_create(url=thisurl)            
            thisflatpage.title = page_title
            thisflatpage.content = result
            thisflatpage.template_name = "flatpages/cycle_enabled_base.html" # Default for admin flatpages too
            if not thisflatpage.sites.all():
                thisflatpage.sites.add(this_site)

                
            thisflatpage.save()
        
        
            # now superpacs
            print "Now running superpac summaries... "
            
            sp_summary_types = [
                {'name':'Super PACs', 'code':'UO'},
                {'name':'Hybrid Super PACs', 'code':'VW'},
                {'name': 'All Super PACs', 'code':'UOVW'}
            ]
            all_superpacs = Committee_Overlay.objects.filter(cycle=cycle,ctype__in=['U', 'O', 'V', 'W']).exclude(designation='J')
        
            for s in sp_summary_types:
                code_list = [i for i in s['code']]
                sums = all_superpacs.filter(ctype__in=code_list).aggregate(tot_ie=Sum('total_indy_expenditures'), tot_rec=Sum('total_receipts'), coh=Sum('cash_on_hand'))
                s['tot_ie'] = sums['tot_ie'] or 0
                s['tot_rec'] = sums['tot_rec'] or 0
                s['coh'] = sums['coh'] or 0
        
        
            top_superpacs = all_superpacs.order_by('-total_indy_expenditures')[:20]
            
            page_title = "Top Super PACs by Independent Expenditures, %s Cycle" % (cycle)
            
            
            other_year = None
            if cycle == '2016':
                other_year = '2014'
            elif cycle == '2014':
                other_year = '2016'
            cycle_list = [cycle_fake(cycle, "/overview/super-pacs/%s/" % cycle), cycle_fake(other_year, "/overview/super-pacs/%s/" % other_year)]
            
            c = Context({"update_time": update_time, "sums": sp_summary_types, "top_superpacs": top_superpacs, "page_title":page_title, "cycle_list":cycle_list, "cycle_start":CYCLE_START, "cycle_end":CYCLE_END})
            this_template = get_template('generated_pages/overview_superpac.html')
            result = this_template.render(c)
        
            thisurl = "/overview/super-pacs/%s/" % cycle
            
            thisflatpage, created = FlatPage.objects.get_or_create(url=thisurl)            
            thisflatpage.title = page_title
            thisflatpage.content = result
            thisflatpage.template_name = "flatpages/cycle_enabled_base.html" # Default for admin flatpages too
            if not thisflatpage.sites.all():
                thisflatpage.sites.add(this_site)

                
            thisflatpage.save()
                
        
            print "Regenerating dark money pages"
            # now dark money groups -- the sums were calculated previously 


            top_noncommittees = all_noncommittees.order_by('-total_indy_expenditures')[:10]
        
            page_title = "Top Dark Money groups by Independent Expenditures, %s Cycle" % (cycle)
            
            other_year = None
            if cycle == '2016':
                other_year = '2014'
            elif cycle == '2014':
                other_year = '2016'
            cycle_list = [cycle_fake(cycle, "/overview/dark-money/%s/" % cycle), cycle_fake(other_year, "/overview/dark-money/%s/" % other_year)]
            
            
            c = Context({"update_time": update_time, "dark_money_total_ies": dark_money_total_ies, "top_darkmoneyers": top_noncommittees, "page_title":page_title, "cycle_list":cycle_list, "cycle_start":CYCLE_START, "cycle_end":CYCLE_END})
            this_template = get_template('generated_pages/overview_dark_money.html')
            result = this_template.render(c)
            
            thisurl = "/overview/dark-money/%s/" % cycle

            thisflatpage, created = FlatPage.objects.get_or_create(url=thisurl)            
            thisflatpage.title = page_title
            thisflatpage.content = result
            thisflatpage.template_name = "flatpages/cycle_enabled_base.html" # Default for admin flatpages too
            if not thisflatpage.sites.all():
                thisflatpage.sites.add(this_site)


            thisflatpage.save()
            
            
            
            ## do connected pacs now.
        
            all_webk = WebK.objects.filter(cycle=cycle).exclude(com_des='J')

            connected_org_types = [
                {'name':'Corporation', 'code':'C'},
                {'name':'Labor organization', 'code':'L'},
                {'name':'Member Organization', 'code':'M'},
                {'name':'Cooperative', 'code':'V'},
                {'name':'Trade Association', 'code':'T'},
                {'name':'Corporation without capital stock', 'code':'W'}
            ]

            for j in connected_org_types:
                committees = Committee.objects.filter(cmte_tp__in=['N', 'Q'], cycle=str(cycle),org_tp=j['code'])
                committee_id_list = [i.cmte_id for i in committees]

                sums = all_webk.filter(com_id__in=committee_id_list).aggregate(tot_rec=Sum('tot_rec'), tot_dis=Sum('tot_dis'), par_com_con=Sum('par_com_con'), oth_com_con=Sum('oth_com_con'), ind_ite_con=Sum('ind_ite_con'), ind_uni_con=Sum('ind_uni_con'), fed_can_com_con=Sum('fed_can_com_con'), tot_ope_exp=Sum('tot_ope_exp'), ope_exp=Sum('ope_exp'))
                j['tot_dis'] = sums['tot_dis'] or 0
                j['tot_rec'] = sums['tot_rec'] or 0
                j['oth_com_con'] = (sums['oth_com_con'] or 0) + (sums['par_com_con'] or 0)
                j['ind_ite_con']= sums['ind_ite_con'] or 0
                j['ind_uni_con'] = sums['ind_uni_con'] or 0
                j['fed_can_com_con'] = sums['fed_can_com_con'] or 0
                j['tot_ope_exp'] = sums['tot_ope_exp'] or 0
         
            print "building connected pac with inside money set to %s" % (connected_org_types)
            
            page_title = "Cycle Overview, %s Cycle -- Connected pacs" % cycle
            other_year = None
            if cycle == '2016':
                other_year = '2014'
            elif cycle == '2014':
                other_year = '2016'
            cycle_list = [cycle_fake(cycle, "/overview/connected/%s/" % cycle), cycle_fake(other_year, "/overview/connected/%s/" % other_year)]
            
            
            c = Context({"update_time": update_time, "inside_money": connected_org_types, "page_title":page_title, "cycle_list":cycle_list, "cycle_start":CYCLE_START, "cycle_end":CYCLE_END})
            this_template = get_template('generated_pages/overview_connected.html')
            result = this_template.render(c)
            thisurl = "/overview/connected/%s/" % cycle
            
            thisflatpage, created = FlatPage.objects.get_or_create(url=thisurl)            
            thisflatpage.title = page_title
            thisflatpage.content = result
            thisflatpage.template_name = "flatpages/cycle_enabled_base.html" # Default for admin flatpages too
            if not thisflatpage.sites.all():
                thisflatpage.sites.add(this_site)


            thisflatpage.save()
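
A minimal sketch of how datetime.now() is used in this command: one update_time is captured at the top of each cycle and passed into the Context of every page generated for that cycle, so all of them report the same refresh timestamp. render_overview_pages and the lambda below are hypothetical stand-ins for the Django template rendering above:

from datetime import datetime

def render_overview_pages(cycle, page_builders):
    # One timestamp per cycle, shared by every generated page.
    update_time = datetime.now()
    return [build(cycle, update_time) for build in page_builders]

pages = render_overview_pages(
    '2016',
    [lambda cycle, ts: 'Overview %s (updated %s)' % (cycle, ts.strftime('%d-%b-%y %I:%M %p'))])
for page in pages:
    print(page)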

Example 43

View license
def add_to_search(options, args):
    import settings

    es = rawes.Elastic(getattr(settings, "ES_HOST", 'thrift://localhost:9500'), timeout=60.0)
    index = getattr(es, settings.ES_INDEX)

    now = datetime.datetime.now()

    querysets = {}
    builders = {}
    metadata = {}

    PER_REQUEST = 200

    ### Dockets ###

    query = {'scraped': 'yes'}
    if options.agency:
        query['agency'] = options.agency
    if options.docket:
        query['_id'] = options.docket
    if not options.process_all:
        query['in_search_index'] = False

    querysets['docket'] = Docket.objects(__raw__=query)

    def build_docket(docket):
        print 'preparing docket', docket.id

        # build initial ES document
        es_doc = {
            'title': docket.title,
            'agency': docket.agency,
            'identifiers': [docket.id]
        }

        # add identifiers
        if docket.rin and docket.rin != "Not Assigned":
            es_doc['identifiers'].append(docket.rin)

        return es_doc

    def get_docket_metadata(docket):
        return {'_index': settings.ES_INDEX, '_type': 'docket', '_id': docket.id}

    builders['docket'] = build_docket
    metadata['docket'] = get_docket_metadata

    ### Documents ###

    query = {'deleted': False, 'scraped': 'yes', '$nor': [{'views.extracted': 'no'},{'attachments.views.extracted':'no'}]}
    if options.agency:
        query['agency'] = options.agency
    if options.docket:
        query['docket_id'] = options.docket
    if not options.process_all:
        query['in_search_index'] = False

    querysets['document'] = Doc.objects(__raw__=query)

    def build_document(doc):
        print 'preparing document', doc.id
        if doc.renamed:
            print 'preparing', doc.id
            doc.in_search_index = True
            doc.save()
            return None
        
        # build initial ES document
        es_doc = {
            'docket_id': doc.docket_id if doc.docket_id else doc.id.rsplit('-', 1)[0],
            'comment_on': doc.comment_on.get('document_id', None) if doc.comment_on else None,
            'title': doc.title,
            'agency': doc.agency,
            'posted_date': doc.details['Date_Posted'].replace(tzinfo=pytz.UTC) if 'Date_Posted' in doc.details else None,
            'document_type': doc.type,
            'submitter_organization': doc.details.get('Organization_Name', None),
            'submitter_name': ' '.join(filter(bool, [doc.details.get('First_Name', None), doc.details.get('Middle_Initial', None), doc.details.get('Last_Name', None)])),
            'submitter_entities': doc.submitter_entities,
            'files': [],
            'analyses': [],
            'identifiers': [doc.id]
        }

        # add views (max of 5 to avoid pathological cases)
        for view in doc.views[:5]:
            if not view.content:
                continue
            es_doc['files'].append({
                "title": None,
                "abstract": None,
                "object_id": doc.object_id,
                "file_type": view.type,
                "view_type": "document_view",
                "text": view.as_text()[:100000],
                "entities": view.entities
            })

        # add attachments (max of 10 to avoid pathological cases)
        for attachment in doc.attachments[:10]:
            for view in attachment.views[:5]:
                if not view.content:
                    continue
                es_doc['files'].append({
                    "title": attachment.title,
                    "abstract": attachment.abstract,
                    "object_id": attachment.object_id,
                    "file_type": view.type,
                    "view_type": "attachment_view",
                    "text": view.as_text()[:100000],
                    "entities": view.entities
                })

        # add identifiers
        if doc.rin and doc.rin != "Not Assigned":
            es_doc['identifiers'].append(doc.rin)

        if doc.details.get('Federal_Register_Number', None):
            es_doc['identifiers'].append(doc.details['Federal_Register_Number'])

        if doc.details.get('FR_Citation', None):
            es_doc['identifiers'].append(doc.details['FR_Citation'].replace(' ', ''))

        return es_doc

    def get_document_metadata(doc):
        return {'_index': settings.ES_INDEX, '_type': 'document', '_id': doc.id, '_parent': doc.docket_id if doc.docket_id else doc.id.rsplit('-', 1)[0]}

    builders['document'] = build_document
    metadata['document'] = get_document_metadata

    ### Actually do everything ###
    def flush(queue, ids, collection):
        # no need to do anything if there aren't any docs to add
        if not ids:
            return
        
        # save current queue to ES
        try:
            es_status = es._bulk.post(data="\n".join(queue))
            print 'saved %s to ES' % ", ".join(ids)
        except rawes.elastic_exception.ElasticException:
            # sometimes the bulk save fails for some reason; fall back to traditional iterative save if so
            print 'falling back to iterative save...'
            # iterate over the queue pair-wise
            for command, record in itertools.izip(*[iter(queue)]*2):
                meta = json.loads(command)['index']
                params = {'parent': meta['_parent']} if '_parent' in meta else {}

                es_index = getattr(es, meta['_index'])
                es_type = getattr(es_index, meta['_type'])

                es_status = es_type[meta['_id']].put(data=record, params=params)
                print 'saved %s to ES as %s' % (meta['_id'], es_status['_id'])
        
        # update mongo docs
        collection.update({'_id': {'$in': ids}}, {'$set': {'in_search_index': True}}, multi=True, safe=True)

        print "saved %s back to mongo" % ", ".join(ids)
    
    counts = {'docket': 0, 'document': 0}
    for datatype in ('docket', 'document'):
        queue = []
        ids = []
        max_length = PER_REQUEST * 2
        for item in querysets[datatype]:
            record = builders[datatype](item)
            meta = metadata[datatype](item)

            if record:
                if not item.suppression.get('replaced_by', None):
                    queue.append(json.dumps({'index':meta}))
                    queue.append(json.dumps(record, default=es.json_encoder))
                ids.append(item.id)

            if len(queue) >= max_length:
                flush(queue, ids, querysets[datatype]._collection)
                counts[datatype] += len(ids)
                queue = []
                ids = []
        flush(queue, ids, querysets[datatype]._collection)
        counts[datatype] += len(ids)

    print "Done adding things to search: %s docket entries and %s document entries." % (counts['docket'], counts['document'])
    return counts
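
The excerpt only captures now = datetime.datetime.now() before queuing work; as an added illustration (not in the original), a second reading can report how long the batched flush loop took. run_batch and its flush argument are hypothetical:

import datetime

def run_batch(items, flush, batch_size=200):
    started = datetime.datetime.now()
    queue = []
    for item in items:
        queue.append(item)
        if len(queue) >= batch_size:
            flush(queue)
            queue = []
    flush(queue)  # final, possibly empty, batch
    elapsed = datetime.datetime.now() - started
    print('processed %d items in %.2fs' % (len(items), elapsed.total_seconds()))

run_batch(list(range(450)), lambda batch: None, batch_size=200)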

Example 44

Project: aurproxy
Source File: manager.py
View license
  def test_source_manager(self):
    # Validation Functions
    def val_len(manager, source_endpoint_groups, sources, source_cb_scopes,
                overflow_endpoint_groups, overflow_sources,
                overflow_source_cb_scopes, overflow_threshold,
                weight_adj_start):
      '''
      Validate that the expected number of endpoints are returned.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      self.assertEqual(len(manager.endpoints), len(eps) + len(oeps))

    def val_eps(manager, source_endpoint_groups, sources, source_cb_scopes,
                overflow_endpoint_groups, overflow_sources,
                overflow_source_cb_scopes, overflow_threshold,
                weight_adj_start):
      '''
      Validate that the expected endpoints are returned.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      for ep in eps:
        self.assertIn(ep, manager.endpoints)
      for oep in oeps:
        self.assertIn(oep, manager.endpoints)

    def val_weights_all_healthy(manager, source_endpoint_groups, sources,
                            source_cb_scopes, overflow_endpoint_groups,
                            overflow_sources, overflow_source_cb_scopes,
                            overflow_threshold, weight_adj_start):
      '''
      Validate that when the service is healthy, all endpoints are present and
      weighted correctly.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      for ep in manager.endpoints:
        if ep in eps:
          self.assertEqual(ep.weight, SIGNIFICANCE)
        elif ep in oeps:
          self.assertEqual(ep.weight, 0)
        else:
          raise Exception('Unknown endpoint.')

    def val_weights_overflow_m_src(manager, source_endpoint_groups, sources,
                                   source_cb_scopes, overflow_endpoint_groups,
                                   overflow_sources, overflow_source_cb_scopes,
                                   overflow_threshold, weight_adj_start):
      '''
      Validate that when passing the overflow threshold, all endpoints are
      present and weighted correctly.
      '''
      eps = list(itertools.chain(*source_endpoint_groups))
      oeps = list(itertools.chain(*overflow_endpoint_groups))
      min_healthy = int(len(eps) * float(overflow_threshold) / float(100))
      min_unhealthy = len(eps) - min_healthy

      # Set share to 0 for enough endpoints to reach or almost reach the
      # unhealthy threshold
      for i in range(min_unhealthy):
        ith_ep = source_endpoint_groups[0][i]
        sh_calcs = manager._share_calcs[sources[0]]
        sh_calcs[ith_ep]._share_adjusters[0].set_share(0.0)

      # Overflow shouldn't be on yet.
      for ep in manager.endpoints:
        if ep in oeps:
          self.assertEqual(ep.weight, 0)

      # Regular endpoints serving
      num_reg_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in eps])
      if weight_adj_start == now:
        # Not all regular endpoints should be serving
        self.assertEqual(num_reg_serving, min_healthy)
      else:
        # Regular endpoints should be serving because weight adjustment hasn't started.
        self.assertEqual(num_reg_serving, len(eps))

      # Overflow endpoints serving
      num_o_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in oeps])
      # No overflow endpoints should be serving
      self.assertEqual(num_o_serving, 0)

      # Turn off one more regular endpoint
      sh_calcs = manager._share_calcs[sources[0]]
      sh_calc = sh_calcs[source_endpoint_groups[0][min_unhealthy+1]]
      sh_calc._share_adjusters[0].set_share(0.0)

      # Regular endpoints serving
      num_reg_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in eps])
      if weight_adj_start == now:
        # Not all regular endpoints should be serving
        self.assertEqual(num_reg_serving, min_healthy-1)
        sum_overflow_weight = 0
        for ep in manager.endpoints:
          if ep in oeps:
            sum_overflow_weight += ep.weight
        self.assertGreater(sum_overflow_weight, 0)
      else:
        # Regular endpoints should be serving because weight adjustment hasn't started.
        self.assertEqual(num_reg_serving, len(eps))

      # Overflow endpoints serving
      num_o_serving = len([ep for ep in manager.endpoints
                             if ep.weight > 0 and ep in oeps])
      if weight_adj_start == now:
        # Overflow endpoints should be serving
        self.assertGreater(num_o_serving, 0)
      else:
        self.assertEqual(num_o_serving, 0)

    def val_shares_m_src(manager, source_endpoint_groups, sources,
                         source_cb_scopes, overflow_endpoint_groups,
                         overflow_sources, overflow_source_cb_scopes,
                         overflow_threshold, weight_adj_start):
      '''
      Validate that when multiple share adjusters are applied, all endpoints
      are present and weighted correctly.
      '''
      if len(sources[0].share_adjuster_factories) < 2:
        raise Exception('Validator must be run on source with at least 2 '
                        'share adjuster factories registered.')
      if len(sources[0].endpoints) == 0:
        raise Exception('Validator must be run on source with at least one '
                        'endpoint.')
      eps = list(itertools.chain(*source_endpoint_groups))
      # Share calculator for one endpoint
      sh_calc = manager._share_calcs[sources[0]][source_endpoint_groups[0][0]]
      # Set share to 0.5 for 2 sibling adjusters - .5 * .5 -> expect .25
      sh_calc._share_adjusters[0].set_share(0.5)
      sh_calc._share_adjusters[1].set_share(0.5)
      if weight_adj_start == now:
        sorted_eps = sorted(manager.endpoints, key=lambda x: x.weight)
        lowest = sorted_eps[0]
        rest = sorted_eps[1:]
        for ep in rest:
          self.assertTrue(float(lowest.weight)/float(ep.weight) == 0.25)
      else:
        num_reg_serving = len([ep for ep in manager.endpoints
                              if ep.weight > 0 and ep in eps])
        self.assertEqual(num_reg_serving, len(eps))

    # Group standard validation functions
    val_fns = [
      val_len,
      val_eps,
      val_weights_all_healthy
    ]
    # Overflow validation functions
    o_val_fns = [
      val_weights_overflow_m_src
    ]
    # Share validation functions
    share_val_fns = [
      val_shares_m_src
    ]

    # Parameter group parts
    # Source endpoints
    s_1_eps = [SourceEndpoint('127.0.0.1', i) for i in range(8000, 8005)]
    s_2_eps = [SourceEndpoint('127.0.0.1', i) for i in range(9000, 9005)]
    # Overflow source endpoints
    os_1_eps = [SourceEndpoint('127.0.0.1', i) for i in range(10000, 10005)]

    # Share adjuster factory groups
    tst_adjuster = 'tellapart.aurproxytest.share.adjuster.TstShareAdjuster'
    tst_sh_adj_fact = load_klass_factory(tst_adjuster)

    # Source Builders
    s_src_no_sh_adj = [TstSourceBuilder(s_1_eps, [])]
    s_src_m_sh_adj = [TstSourceBuilder(s_1_eps, [tst_sh_adj_fact,
                                                 tst_sh_adj_fact])]
    m_src_no_sh_adj = [TstSourceBuilder(s_1_eps, []),
                       TstSourceBuilder(s_2_eps, [])]
    no_osrc_no_sh_adj = []
    s_osrc_no_sh_adj = [TstSourceBuilder(os_1_eps, [])]
    m_src_s_sh_adj = [TstSourceBuilder(s_1_eps, [tst_sh_adj_fact]),
                      TstSourceBuilder(s_2_eps, [])]

    # Start times
    now = datetime.now()
    future = now + timedelta(days=1)

    # Overflow Threshold Percentages
    thr_none = None
    thr_80 = 80

    # Parameter groups
    #  source_builders
    #  overflow_source_builders
    #  overflow_threshold_pct,
    #  weight_adjustment_start time,
    #  validation_fns
    pgroups = [
      (s_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, now, val_fns),
      (s_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, future, val_fns),
      (s_src_m_sh_adj, no_osrc_no_sh_adj, thr_none, now,
       val_fns+share_val_fns),
      (s_src_m_sh_adj, no_osrc_no_sh_adj, thr_none, future,
       val_fns+share_val_fns),
      (m_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, now, val_fns),
      (m_src_no_sh_adj, no_osrc_no_sh_adj, thr_none, future, val_fns),
      (m_src_s_sh_adj, s_osrc_no_sh_adj, thr_80, now, val_fns+o_val_fns),
      (m_src_s_sh_adj, s_osrc_no_sh_adj, thr_80, future, val_fns+o_val_fns),
    ]

    # Helper to build source and related validation items.
    def build_sources(builders):
      srcs, cb_scopes, ep_groups = [], [], []
      for builder in builders:
        src, cb_scope, eps = builder.build()
        srcs.append(src)
        cb_scopes.append(cb_scope)
        ep_groups.append(eps)
      return srcs, cb_scopes, ep_groups

    # Run validators against parameter groups
    for src_builders, o_src_builders, o_thresh, w_adj_start, v_fns in pgroups:
      for validation_fn in v_fns:
        manager_cb_scope = SourceManagerCallbackScope()
        srcs, src_cb_scopes, src_ep_groups = build_sources(src_builders)
        o_srcs, o_src_cbs, o_src_ep_groups = build_sources(o_src_builders)

        signal_update_fn = manager_cb_scope.signal_update_fn
        manager = SourceGroupManager(sources=srcs,
                                     overflow_threshold_pct=o_thresh,
                                     overflow_sources=o_srcs,
                                     signal_update_fn=signal_update_fn)
        manager.start(weight_adjustment_start=w_adj_start)
        validation_fn(manager, src_ep_groups, srcs, src_cb_scopes,
                      o_src_ep_groups, o_srcs, o_src_cbs, o_thresh,
                      w_adj_start)
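
A minimal sketch of how datetime.now() parameterizes these tests: a weight-adjustment start of now means adjustment is already in effect, while now + timedelta(days=1) defers it. adjustment_active is a hypothetical helper, not part of aurproxy:

from datetime import datetime, timedelta

def adjustment_active(weight_adj_start, current=None):
    # Adjustment applies once its start time has passed.
    if current is None:
        current = datetime.now()
    return weight_adj_start <= current

now = datetime.now()
future = now + timedelta(days=1)
print(adjustment_active(now))     # True  - adjustment already started
print(adjustment_active(future))  # False - adjustment deferred by a day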

Example 45

View license
    def handle(self, *args, **options):
        verbosity = 1
        if 'verbosity' in options:
            verbosity = options['verbosity']
        # first test if we have notices set up
        from tendenci.apps.corporate_memberships.models import Notice
        if not Notice.objects.filter(status=True,
                                     status_detail='active'
                                    ).exclude(
                                    notice_time='attimeof'
                                    ).exists():
            if verbosity > 1:
                print('No notices set up...exiting...')
            # no active notices to process. stop here
            return

        from tendenci.apps.corporate_memberships.models import (
            CorpMembership, CorpMembershipApp,
            NoticeLog,
            NoticeLogRecord)
        from tendenci.apps.notifications import models as notification
        from tendenci.apps.base.utils import fieldify
        from tendenci.apps.site_settings.utils import get_setting

        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        site_contact_name = get_setting('site', 'global', 'sitecontactname')
        site_contact_email = get_setting('site', 'global', 'sitecontactemail')
        site_url = get_setting('site', 'global', 'siteurl')

        email_context = {
            'sender':get_setting('site', 'global', 'siteemailnoreplyaddress'),
            'sender_display':site_display_name,
            'reply_to':site_contact_email}

        now = datetime.now()
        nowstr = time.strftime("%d-%b-%y %I:%M %p", now.timetuple())

        def email_admins_recap(notices, total_sent):
            """Send admins recap after the notices were processed.
            """
            recap_recipient = get_admin_emails()
            if recap_recipient:
                template_name = "corporate_memberships/notices/email_recap.html"
                try:
                    recap_email_content = render_to_string(
                        template_name,
                        {'notices': notices,
                         'total_sent': total_sent,
                         'site_url': site_url,
                         'site_display_name': site_display_name,
                         'site_contact_name': site_contact_name,
                         'site_contact_email': site_contact_email})
                    recap_subject = '%s Corporate Membership Notices Distributed' % (
                                                    site_display_name)
                    email_context.update({
                        'subject':recap_subject,
                        'content': recap_email_content,
                        'content_type':"html"})

                    notification.send_emails(recap_recipient, 'corp_memb_notice_email',
                                             email_context)
                except TemplateDoesNotExist:
                    pass

        def email_script_errors(err_msg):
            """Send error message to us if any.
            """
            script_recipient = get_script_support_emails()
            if script_recipient:
                email_context.update({
                    'subject':'Error Processing Corporate Membership Notices on %s' % (
                                                            site_url),
                    'content':'%s \n\nTime Submitted: %s\n' % (err_msg, nowstr),
                    'content_type':"text"})

                notification.send_emails(script_recipient, 'corp_memb_notice_email',
                                         email_context)

        def get_script_support_emails():
            admins = getattr(settings, 'ADMINS', None)
            if admins:
                recipients_list = [admin[1] for admin in admins]
                return recipients_list
            return None

        def get_admin_emails():
            admin_emails = get_setting('module', 'corporate_memberships',
                                       'corporatemembershiprecipients').strip()
            if admin_emails:
                admin_emails = admin_emails.split(',')
            if not admin_emails:
                admin_emails = (get_setting('site', 'global',
                                            'admincontactemail'
                                            ).strip()).split(',')
            return admin_emails

        def process_notice(notice):
            notice.members_sent = []
            num_sent = 0
            if notice.notice_time == 'before':
                start_dt = now + timedelta(days=notice.num_days)
            else:
                start_dt = now - timedelta(days=notice.num_days)

            if notice.notice_type == 'disapprove':
                status_detail_list = ['inactive']
            else:
                status_detail_list = ['active', 'expired']

            memberships = CorpMembership.objects.filter(
                                    status=True,
                                    status_detail__in=status_detail_list
                                    )
            if notice.notice_type in ['approve_join', 'disapprove_join',
                                      'approve_renewal', 'disapprove_renewal']:
                filters = {'approved_denied_dt__year': start_dt.year,
                           'approved_denied_dt__month': start_dt.month,
                           'approved_denied_dt__day': start_dt.day,
                           'renewal': False,
                           'approved': True
                           }
                if notice.notice_type in ['approve_renewal',
                                          'disapprove_renewal']:
                    filters.update({'renewal': True})
                if notice.notice_type in ['disapprove_join',
                                          'disapprove_renewal']:
                    filters.update({'approved': False})

                memberships = memberships.filter(**filters)
            else:  # 'expire'
                memberships = memberships.filter(
                    expiration_dt__year=start_dt.year,
                    expiration_dt__month=start_dt.month,
                    expiration_dt__day=start_dt.day)

            # filter by membership type
            if notice.corporate_membership_type:
                memberships = memberships.filter(
                                corporate_membership_type=notice.corporate_membership_type)

            memberships_count = memberships.count()

            if memberships_count > 0:
                email_context.update({'content_type':notice.content_type})

                global_context = {'site_display_name': site_display_name,
                                  'site_contact_name': site_contact_name,
                                  'site_contact_email': site_contact_email,
                                  'time_submitted': nowstr,
                                  }

                # log notice sent
                notice_log = NoticeLog(notice=notice,
                                       num_sent=0)
                notice_log.save()
                notice.log = notice_log
                notice.err = ''

                for membership in memberships:
                    try:
                        num_sent += email_member(notice, membership, global_context)
                        if memberships_count <= 50:
                            notice.members_sent.append(membership)

                        # log record
                        notice_log_record = NoticeLogRecord(
                            notice_log=notice_log,
                            corp_membership=membership)
                        notice_log_record.save()
                    except:
                        # catch the exception and email
                        notice.err += traceback.format_exc()
                        print traceback.format_exc()

                if num_sent > 0:
                    notice_log.num_sent = num_sent
                    notice_log.save()

            return num_sent

        def email_member(notice, membership, global_context):
            corp_profile = membership.corp_profile
            representatives = corp_profile.reps.filter(Q(is_dues_rep=True)|(Q(is_member_rep=True)))
            sent = 0

            corp_app = CorpMembershipApp.objects.current_app()
            authentication_info = render_to_string(
                'notification/corp_memb_notice_email/auth_info.html',
                {'corp_membership': membership,
                 'corp_app': corp_app})
            individuals_join_url = '%s%s' % (site_url,
                                             reverse('membership_default.corp_pre_add',
                                                     args=[membership.id]))
            if membership.expiration_dt:
                expire_dt = time.strftime("%d-%b-%y %I:%M %p",
                                          membership.expiration_dt.timetuple())
            else:
                expire_dt = ''

            if membership.payment_method:
                payment_method = membership.payment_method.human_name
            else:
                payment_method = ''

            if membership.renewal:
                renewed_individuals_list = render_to_string(
                    'notification/corp_memb_notice_email/renew_list.html',
                    {'corp_membership': membership})
                total_individuals_renewed = membership.indivmembershiprenewentry_set.count()
            else:
                renewed_individuals_list = ''
                total_individuals_renewed = ''

            if membership.invoice:
                invoice_link = '%s%s' % (site_url,
                                         membership.invoice.get_absolute_url())
            else:
                invoice_link = ''

            global_context.update({
                'name': corp_profile.name,
                'email': corp_profile.email,
                'expire_dt': expire_dt,
                'payment_method': payment_method,
                'renewed_individuals_list': renewed_individuals_list,
                'total_individuals_renewed': total_individuals_renewed,
                'view_link': "%s%s" % (site_url, membership.get_absolute_url()),
                'renew_link': "%s%s" % (site_url, membership.get_renewal_url()),
                'invoice_link': invoice_link,
                'authentication_info': authentication_info,
                'individuals_join_url': individuals_join_url,
            })

            for recipient in representatives:
                body = notice.email_content
                context = membership.get_field_items()
                context['membership'] = membership
                context.update(global_context)

                context.update({
                    'rep_first_name': recipient.user.first_name,
                })

                body = fieldify(body)

                body = '%s <br /><br />%s' % (body, get_footer())

                context = Context(context)
                template = Template(body)
                body = template.render(context)

                email_recipient = recipient.user.email
                subject = notice.subject.replace('(name)',
                                            corp_profile.name)
                template = Template(subject)
                subject = template.render(context)

                email_context.update({
                    'subject':subject,
                    'content':body})

                if notice.sender:
                    email_context.update({
                        'sender':notice.sender,
                        'reply_to':notice.sender})
                if notice.sender_display:
                    email_context.update({'sender_display':notice.sender_display})

                notification.send_emails([email_recipient], 'corp_memb_notice_email',
                                         email_context)
                sent += 1
                if verbosity > 1:
                    print 'To ', email_recipient, subject
            return sent

        def get_footer():
            return """
                    This e-mail was generated by Tendenci&reg; Software -
                    a web based membership management software solution
                    www.tendenci.com developed by Schipul - The Web
                    Marketing Company
                    """

        exception_str = ""

        notices = Notice.objects.filter(status=True, status_detail='active'
                                    ).exclude(notice_time='attimeof')

        if notices:
            if verbosity > 1:
                print "Start sending out notices to members:"
            total_notices = 0
            total_sent = 0
            for notice in notices:
                total_notices += 1
                total_sent += process_notice(notice)
                if hasattr(notice, 'err'):
                    exception_str += notice.err

            if total_sent > 0:
                processed_notices = [notice for notice in notices if hasattr(
                                        notice, 'log'
                                        ) and notice.log.num_sent > 0]
                email_admins_recap(processed_notices, total_sent)

            # if there is any error, notify us
            if exception_str:
                email_script_errors(exception_str)

            if verbosity > 1:
                print 'Total notice processed: %d' % (total_notices)
                print 'Total email sent: %d' % (total_sent)
                print "Done"
        else:
            if verbosity > 1:
                print "No notices on the site."

Example 46

Project: tendenci
Source File: views.py
View license
@is_enabled('forms')
def form_detail(request, slug, template="forms/form_detail.html"):
    """
    Display a built form and handle submission.
    """
    published = Form.objects.published(for_user=request.user)
    form = get_object_or_404(published, slug=slug)

    if not has_view_perm(request.user,'forms.view_form',form):
        raise Http403

    # If form has a recurring payment, make sure the user is logged in
    if form.recurring_payment:
        [email_field] = form.fields.filter(field_type__iexact='EmailVerificationField')[:1] or [None]
        if request.user.is_anonymous() and not email_field:
            # anonymous user - if we don't have the email field, redirect to login
            response = redirect('auth_login')
            response['Location'] += '?next=%s' % form.get_absolute_url()
            return response
        if request.user.is_superuser and not email_field:
            messages.add_message(request, messages.WARNING,
                    'Please edit the form to include an email field ' + \
                    'as it is required for setting up a recurring ' + \
                    'payment for anonymous users.')

    form_for_form = FormForForm(form, request.user, request.POST or None, request.FILES or None)
    for field in form_for_form.fields:
        field_default = request.GET.get(field, None)
        if field_default:
            form_for_form.fields[field].initial = field_default

    if request.method == "POST":
        if form_for_form.is_valid():
            entry = form_for_form.save()
            entry.entry_path = request.POST.get("entry_path", "")
            if request.user.is_anonymous():
                if entry.get_email_address():
                    emailfield = entry.get_email_address()
                    firstnamefield = entry.get_first_name()
                    lastnamefield = entry.get_last_name()
                    phonefield = entry.get_phone_number()
                    password = ''
                    for i in range(0, 10):
                        password += random.choice(string.ascii_lowercase + string.ascii_uppercase)

                    user_list = User.objects.filter(email=emailfield).order_by('-last_login')
                    if user_list:
                        anonymous_creator = user_list[0]
                    else:
                        anonymous_creator = User(username=emailfield[:30], email=emailfield,
                                                 first_name=firstnamefield, last_name=lastnamefield)
                        anonymous_creator.set_password(password)
                        anonymous_creator.is_active = False
                        anonymous_creator.save()
                        anonymous_profile = Profile(user=anonymous_creator, owner=anonymous_creator,
                                                    creator=anonymous_creator, phone=phonefield)
                        anonymous_profile.save()
                    entry.creator = anonymous_creator
            else:
                entry.creator = request.user
            entry.save()
            entry.set_group_subscribers()

            # Email
            subject = generate_email_subject(form, entry)
            email_headers = {}  # content type specified below
            if form.email_from:
                email_headers.update({'Reply-To':form.email_from})

            # Email to submitter
            # fields aren't included in submitter body to prevent spam
            submitter_body = generate_submitter_email_body(entry, form_for_form)
            email_from = form.email_from or settings.DEFAULT_FROM_EMAIL
            email_to = form_for_form.email_to()
            is_spam = Email.is_blocked(email_to)
            if is_spam:
                # log the spam
                description = "Email \"{0}\" blocked because it is listed in email_blocks.".format(email_to)
                EventLog.objects.log(instance=form, description=description)
                
                if form.completion_url:
                    return HttpResponseRedirect(form.completion_url)
                return redirect("form_sent", form.slug)
                
            email = Email()
            email.subject = subject
            email.reply_to = form.email_from

            if email_to and form.send_email and form.email_text:
                # Send message to the person who submitted the form.
                email.recipient = email_to
                email.body = submitter_body
                email.send(fail_silently=True)

            # Email copies to admin
            admin_body = generate_admin_email_body(entry, form_for_form)
            email_from = email_to or email_from # Send from the email entered.
            email_headers = {}  # Reset the email_headers
            email_headers.update({'Reply-To':email_from})
            email_copies = [e.strip() for e in form.email_copies.split(',') if e.strip()]

            subject = subject.encode(errors='ignore')
            email_recipients = entry.get_function_email_recipients()
            # reply_to of admin emails goes to submitter
            email.reply_to = email_to

            if email_copies or email_recipients:
                # prepare attachments
                attachments = []
                try:
                    for f in form_for_form.files.values():
                        f.seek(0)
                        attachments.append((f.name, f.read()))
                except ValueError:
                    attachments = []
                    for field_entry in entry.fields.all():
                        if field_entry.field.field_type == 'FileField':
                            try:
                                f = default_storage.open(field_entry.value)
                            except IOError:
                                pass
                            else:
                                f.seek(0)
                                attachments.append((f.name.split('/')[-1], f.read()))

                # Send message to the email addresses listed in the copies
                if email_copies:
                    email.body = admin_body
                    email.recipient = email_copies
                    email.send(fail_silently=True, attachments=attachments)

                # Email copies to recipient list indicated in the form
                if email_recipients:
                    email.body = admin_body
                    email.recipient = email_recipients
                    email.send(fail_silently=True, attachments=attachments)

            # payment redirect
            if (form.custom_payment or form.recurring_payment) and entry.pricing:
                # get the pricing's price, custom or otherwise
                price = entry.pricing.price or form_for_form.cleaned_data.get('custom_price')

                if form.recurring_payment:
                    if request.user.is_anonymous():
                        rp_user = entry.creator
                    else:
                        rp_user = request.user
                    billing_start_dt = datetime.datetime.now()
                    trial_period_start_dt = None
                    trial_period_end_dt = None
                    if entry.pricing.has_trial_period:
                        trial_period_start_dt = datetime.datetime.now()
                        trial_period_end_dt = trial_period_start_dt + datetime.timedelta(1)
                        billing_start_dt = trial_period_end_dt
                    # Create recurring payment
                    rp = RecurringPayment(
                             user=rp_user,
                             description=form.title,
                             billing_period=entry.pricing.billing_period,
                             billing_start_dt=billing_start_dt,
                             num_days=entry.pricing.num_days,
                             due_sore=entry.pricing.due_sore,
                             payment_amount=price,
                             taxable=entry.pricing.taxable,
                             tax_rate=entry.pricing.tax_rate,
                             has_trial_period=entry.pricing.has_trial_period,
                             trial_period_start_dt=trial_period_start_dt,
                             trial_period_end_dt=trial_period_end_dt,
                             trial_amount=entry.pricing.trial_amount,
                             creator=rp_user,
                             creator_username=rp_user.username,
                             owner=rp_user,
                             owner_username=rp_user.username,
                         )
                    rp.save()
                    rp.add_customer_profile()

                    # redirect to recurring payments
                    messages.add_message(request, messages.SUCCESS, _('Successful transaction.'))
                    return redirect('recurring_payment.view_account', rp.id, rp.guid)
                else:
                    # create the invoice
                    invoice = make_invoice_for_entry(entry, custom_price=price)
                    # log an event for invoice add

                    EventLog.objects.log(instance=form)

                    # redirect to billing form
                    return redirect('form_entry_payment', invoice.id, invoice.guid)

            # default redirect
            if form.completion_url:
                return HttpResponseRedirect(form.completion_url)
            return redirect("form_sent", form.slug)

    # set form's template to forms/base.html if no template or template doesn't exist
    if not form.template or not template_exists(form.template):
        form.template = "forms/base.html"

    # NOTE: Temporarily use forms/base.html for the meantime
    form.template = "forms/base.html"

    context = {
        "form": form,
        "form_for_form": form_for_form,
        'form_template': form.template,
    }
    return render_to_response(template, context, RequestContext(request))
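
The datetime.datetime.now() calls above drive the recurring-payment schedule: billing normally starts at submission time, but when the pricing has a trial period the trial starts now, runs for one day, and billing begins when the trial ends. A minimal sketch of just that scheduling step (compute_billing_schedule is an illustrative helper, not part of tendenci):

import datetime

def compute_billing_schedule(has_trial_period, trial_days=1):
    # Billing starts immediately unless there is a trial period, in which
    # case billing starts when the trial ends (one day later by default).
    billing_start_dt = datetime.datetime.now()
    trial_period_start_dt = None
    trial_period_end_dt = None
    if has_trial_period:
        trial_period_start_dt = datetime.datetime.now()
        trial_period_end_dt = trial_period_start_dt + datetime.timedelta(days=trial_days)
        billing_start_dt = trial_period_end_dt
    return billing_start_dt, trial_period_start_dt, trial_period_end_dt

print(compute_billing_schedule(has_trial_period=True))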

Example 47

Project: tendenci
Source File: send_membership_notices.py
View license
    def handle(self, *args, **options):
        verbosity = 1
        if 'verbosity' in options:
            verbosity = options['verbosity']

        from django.conf import settings
        from tendenci.apps.memberships.models import (Notice,
                                                        MembershipDefault,
                                                        NoticeLog,
                                                        NoticeDefaultLogRecord)
        from tendenci.apps.base.utils import fieldify
        from tendenci.apps.notifications import models as notification
        from tendenci.apps.site_settings.utils import get_setting

        site_display_name = get_setting('site', 'global', 'sitedisplayname')
        site_contact_name = get_setting('site', 'global', 'sitecontactname')
        site_contact_email = get_setting('site', 'global', 'sitecontactemail')
        site_url = get_setting('site', 'global', 'siteurl')

        corp_replace_str = """
                            <br /><br />
                            <font color="#FF0000">
                            Organizational Members, please contact your company
                            Membership coordinator
                            to ensure that your membership is being renewed.
                            </font>
                            """

        email_context = {
            'sender':get_setting('site', 'global', 'siteemailnoreplyaddress'),
            'sender_display':site_display_name,
            'reply_to':site_contact_email}

        now = datetime.now()
        nowstr = time.strftime("%d-%b-%y %I:%M %p", now.timetuple())

        def email_admins_recap(notices, total_sent):
            """Send admins recap after the notices were processed.
            """
            recap_recipient = get_admin_emails()
            if recap_recipient:
                template_name = "memberships/notices/email_recap.html"
                try:
                    recap_email_content = render_to_string(
                               template_name,
                               {'notices': notices,
                              'total_sent': total_sent,
                              'site_url': site_url,
                              'site_display_name': site_display_name,
                              'site_contact_name': site_contact_name,
                              'site_contact_email': site_contact_email})
                    recap_subject = '%s Membership Notices Distributed' % (
                                                    site_display_name)
                    email_context.update({
                        'subject':recap_subject,
                        'content': recap_email_content,
                        'content_type':"html"})

                    notification.send_emails(recap_recipient, 'membership_notice_email',
                                             email_context)
                except TemplateDoesNotExist:
                    pass

        def email_script_errors(err_msg):
            """Send error message to us if any.
            """
            script_recipient = get_script_support_emails()
            if script_recipient:
                email_context.update({
                    'subject':'Error Processing Membership Notices on %s' % (
                                                            site_url),
                    'content':'%s \n\nTime Submitted: %s\n' % (err_msg, nowstr),
                    'content_type':"text"})

                notification.send_emails(script_recipient, 'membership_notice_email',
                                         email_context)

        def get_script_support_emails():
            admins = getattr(settings, 'ADMINS', None)
            if admins:
                recipients_list = [admin[1] for admin in admins]
                return recipients_list

            return None

        def get_admin_emails():
            admin_emails = get_setting('module', 'memberships',
                                       'membershiprecipients').strip()
            if admin_emails:
                admin_emails = admin_emails.split(',')
            if not admin_emails:
                admin_emails = (get_setting('site', 'global',
                                            'admincontactemail'
                                            ).strip()).split(',')
            return admin_emails

        def process_notice(notice):
            notice.members_sent = []
            num_sent = 0
            if notice.notice_time == 'before':
                start_dt = now + timedelta(days=notice.num_days)
            else:
                start_dt = now - timedelta(days=notice.num_days)

            if notice.notice_type == 'disapprove':
                status_detail_list = ['disapproved']
            else:
                status_detail_list = ['active', 'expired']
            memberships = MembershipDefault.objects.filter(
                                    status=True,
                                    status_detail__in=status_detail_list
                                    )
            if notice.notice_type == 'join':
                memberships = memberships.filter(
                                    join_dt__year=start_dt.year,
                                    join_dt__month=start_dt.month,
                                    join_dt__day=start_dt.day,
                                    renewal=False)
            elif notice.notice_type == 'renewal':
                memberships = memberships.filter(
                                    renew_dt__year=start_dt.year,
                                    renew_dt__month=start_dt.month,
                                    renew_dt__day=start_dt.day,
                                    renewal=True)
            elif notice.notice_type == 'approve':
                memberships = memberships.filter(
                                    application_approved_denied_dt__year=start_dt.year,
                                    application_approved_denied_dt__month=start_dt.month,
                                    application_approved_denied_dt__day=start_dt.day,
                                    application_approved=True)
            elif notice.notice_type == 'disapprove':
                memberships = memberships.filter(
                                    application_approved_denied_dt__year=start_dt.year,
                                    application_approved_denied_dt__month=start_dt.month,
                                    application_approved_denied_dt__day=start_dt.day,
                                    application_approved=False)
            else:  # 'expire'
                memberships = memberships.filter(
                                    expire_dt__year=start_dt.year,
                                    expire_dt__month=start_dt.month,
                                    expire_dt__day=start_dt.day)
                if get_setting('module', 'memberships', 'renewalreminderexcludecorpmembers'):
                    # exclude corp members
                    memberships = memberships.exclude(corporate_membership_id__gt=0)

            # filter by membership type
            if notice.membership_type:
                memberships = memberships.filter(
                                membership_type=notice.membership_type)

            memberships_count = memberships.count()

            if memberships_count > 0:
                email_context.update({'content_type':notice.content_type})

                # password
                passwd_str = """
                        If you've forgotten your password or need to reset
                        the auto-generated one, click <a href="%s%s">here</a>
                        and follow the instructions on the page to
                        reset your password.
                        """ % (site_url, reverse('auth_password_reset'))

                global_context = {'site_display_name': site_display_name,
                                  'site_contact_name': site_contact_name,
                                  'site_contact_email': site_contact_email,
                                  'time_submitted': nowstr,
                                  'sitedisplayname': site_display_name,
                                  'sitecontactname': site_contact_name,
                                  'sitecontactemail': site_contact_email,
                                  'timesubmitted': nowstr,
                                  'password': passwd_str
                                  }

                # log notice sent
                notice_log = NoticeLog(notice=notice,
                                       num_sent=0)
                notice_log.save()
                notice.log = notice_log
                notice.err = ''

                for membership in memberships:
                    try:
                        email_member(notice, membership, global_context)
                        if memberships_count <= 50:
                            notice.members_sent.append(membership)
                        num_sent += 1

                        # log record
                        notice_log_record = NoticeDefaultLogRecord(
                                                notice_log=notice_log,
                                                membership=membership)
                        notice_log_record.save()
                    except:
                        # catch the exception and email
                        notice.err += traceback.format_exc()
                        print traceback.format_exc()

                if num_sent > 0:
                    notice_log.num_sent = num_sent
                    notice_log.save()

            return num_sent

        def email_member(notice, membership, global_context):
            user = membership.user

            body = notice.email_content
            context = membership.get_field_items()
            context['membership'] = membership
            context.update(global_context)

            # corporate member corp_replace_str
            if membership.corporate_membership_id:
                context['corporate_membership_notice'] = corp_replace_str

            if membership.expire_dt:
                context.update({
                    'expire_dt': time.strftime(
                    "%d-%b-%y %I:%M %p",
                    membership.expire_dt.timetuple()),
                })

            if membership.payment_method:
                payment_method_name = membership.payment_method.human_name
            else:
                payment_method_name = ''

            context.update({
                'member_number': membership.member_number,
                'payment_method': payment_method_name,
                'referer_url': '%s%s?next=%s' % (site_url, reverse('auth_login'), membership.referer_url),
                'membership_link': '%s%s' % (site_url, membership.get_absolute_url()),
                'renew_link': '%s%s' % (site_url, membership.get_absolute_url()),
                'mymembershipslink': '%s%s' % (site_url, membership.get_absolute_url()),
                'membershiplink': '%s%s' % (site_url, membership.get_absolute_url()),
                'renewlink': '%s%s' % (site_url, membership.get_absolute_url())
            })

            body = fieldify(body)

            body = '%s <br /><br />%s' % (body, get_footer())

            context = Context(context)
            template = Template(body)
            body = template.render(context)

            email_recipient = user.email
            subject = notice.subject.replace('(name)',
                                        user.get_full_name())
            template = Template(subject)
            subject = template.render(context)

            email_context.update({
                'subject':subject,
                'content':body})
            if notice.sender:
                email_context.update({
                    #'sender':notice.sender,
                    'reply_to':notice.sender})
            if notice.sender_display:
                email_context.update({'sender_display':notice.sender_display})

            notification.send_emails([email_recipient], 'membership_notice_email',
                                     email_context)
            if verbosity > 1:
                print 'To ', email_recipient, subject

        def get_footer():
            return """
                    This e-mail was generated by Tendenci&reg; Software -
                    a web based membership management software solution
                    www.tendenci.com developed by Schipul - The Web
                    Marketing Company
                    """

        exception_str = ""

        notices = Notice.objects.filter(status=True, status_detail='active'
                                    ).exclude(notice_time='attimeof')

        if notices:
            if verbosity > 1:
                print "Start sending out notices to members:"
            total_notices = 0
            total_sent = 0
            for notice in notices:
                total_notices += 1
                total_sent += process_notice(notice)
                if hasattr(notice, 'err'):
                    exception_str += notice.err

            if total_sent > 0:
                processed_notices = [notice for notice in notices if hasattr(
                                        notice, 'log'
                                        ) and notice.log.num_sent > 0]
                email_admins_recap(processed_notices, total_sent)

            # if there is any error, notify us
            if exception_str:
                email_script_errors(exception_str)

            if verbosity > 1:
                print 'Total notice processed: %d' % (total_notices)
                print 'Total email sent: %d' % (total_sent)
                print "Done"
        else:
            if verbosity > 1:
                print "No notices on the site."

Example 48

Project: tendenci
Source File: utils.py
View license
def api_rp_setup(data):
    """Create a recurrring payment account. Accepted format: json

    Input fields:
        email - required
        description - required
        amount - required
        cp_id - customer profile id, required
        pp_id - customer payment profile id, required
        billing_cycle_start_dt - required
        billing_cycle_end_dt - required
        response_str - required
        login_name
        login_password
        url
        first_name
        last_name


        billing_period - optional, default to 'month'
        billing_frequency - optional, default to 1
        billing_start_dt - optional, default to today
        num_days - optional, default to 0
        has_trial_period - optional, default to False
        trial_period_start_dt - optional, default to today
        trial_period_end_dt - optional, default to today
        trial_amount - optional, default to 0

    Output:
        rp_id - a recurring payment id
        rp_url - url to rp
        username
        result_code
    """
    from decimal import Decimal
    from tendenci.apps.base.utils import validate_email
    import dateutil.parser as dparser
    from tendenci.apps.imports.utils import get_unique_username

    email = data.get('email', '')
    description = data.get('description', '')
    url = data.get('url')
    payment_amount = data.get('amount', '')
    taxable = data.get('taxable', 0)
    if taxable in ('True', 'true', '1', 1):
        taxable = 1
    else:
        taxable = 0
    try:
        tax_rate = Decimal(data.get('tax_rate', 0))
        if tax_rate > 1: tax_rate = 0
    except:
        tax_rate = 0
    tax_exempt = data.get('tax_exempt', 0)
    if tax_exempt in ('True', 'true', '1', 1):
        tax_exempt = 1
    else:
        tax_exempt = 0
    try:
        payment_amount = Decimal(payment_amount)
    except:
        payment_amount = 0
    cp_id = data.get('cp_id')
    pp_id = data.get('pp_id')
    billing_cycle_start_dt = data.get('billing_cycle_start_dt')
    if billing_cycle_start_dt:
        billing_cycle_start_dt = dparser.parse(billing_cycle_start_dt)
    billing_cycle_end_dt = data.get('billing_cycle_end_dt')
    if billing_cycle_end_dt:
        billing_cycle_end_dt = dparser.parse(billing_cycle_end_dt)

    direct_response_str = data.get('response_str')

    if not all([validate_email(email),
                description,
                payment_amount>0,
                cp_id,
                pp_id,
                billing_cycle_start_dt,
                billing_cycle_end_dt,
                direct_response_str]
               ):
        return False, {}

    # 1) get or create user
    username = data.get('login_name')

    # check if user already exists based on email and username
    users = User.objects.filter(email=email, username=username)
    if users:
        u = users[0]
    else:
        # create user account
        u = User()
        u.email=email
        u.username = username
        if not u.username:
            u.username = email.split('@')[0]
        u.username = get_unique_username(u)
        raw_password = data.get('login_password')
        if not raw_password:
            raw_password = User.objects.make_random_password(length=8)
        u.set_password(raw_password)
        u.first_name = data.get('first_name', '')
        u.last_name = data.get('last_name', '')
        u.is_staff = False
        u.is_superuser = False
        u.save()

        profile = Profile.objects.create(
           user=u,
           creator=u,
           creator_username=u.username,
           owner=u,
           owner_username=u.username,
           email=u.email
        )

    # 2) create a recurring payment account
    rp = RecurringPayment()
    rp.user = u
    rp.description = description
    rp.url = url
    rp.payment_amount = payment_amount
    rp.taxable = taxable
    rp.tax_rate = tax_rate
    rp.tax_exempt = tax_exempt
    rp.customer_profile_id = cp_id
    rp.billing_start_dt = billing_cycle_start_dt

    has_trial_period = data.get('has_trial_period')
    trial_period_start_dt = data.get('trial_period_start_dt')
    trial_period_end_dt = data.get('trial_period_end_dt')
    if has_trial_period in ['True', '1',  True, 1] and all([trial_period_start_dt,
                                                            trial_period_end_dt]):
        rp.has_trial_period = True
        rp.trial_period_start_dt = dparser.parse(trial_period_start_dt)
        rp.trial_period_end_dt = dparser.parse(trial_period_end_dt)
    else:
        rp.has_trial_period = False

    rp.status_detail = 'active'
    rp.save()

    # 3) create a payment profile account
    payment_profile_exists = PaymentProfile.objects.filter(
                                        customer_profile_id=cp_id,
                                        payment_profile_id=pp_id
                                        ).exists()
    if not payment_profile_exists:
        PaymentProfile.objects.create(
                        customer_profile_id=cp_id,
                        payment_profile_id=pp_id,
                        owner=u,
                        owner_username=u.username
                        )

    # 4) create rp invoice
    billing_cycle = {'start': billing_cycle_start_dt,
                     'end': billing_cycle_end_dt}
    rp_invoice = rp.create_invoice(billing_cycle, billing_cycle_start_dt)
    rp_invoice.invoice.tender(rp.user)

    # 5) create rp transaction
    now = datetime.now()
    payment = Payment()
    payment.payments_pop_by_invoice_user(rp.user,
                                         rp_invoice.invoice,
                                         rp_invoice.invoice.guid)
    payment_transaction = PaymentTransaction(
                                    recurring_payment=rp,
                                    recurring_payment_invoice=rp_invoice,
                                    payment_profile_id=pp_id,
                                    trans_type='auth_capture',
                                    amount=rp_invoice.invoice.total,
                                    status=True)
    payment = payment_update_from_response(payment, direct_response_str)
    payment.mark_as_paid()
    payment.save()
    rp_invoice.invoice.make_payment(rp.user, Decimal(payment.amount))
    rp_invoice.invoice.save()


    rp_invoice.payment_received_dt = now
    rp_invoice.save()
    rp.last_payment_received_dt = now
    rp.num_billing_cycle_completed += 1
    rp.save()

    payment_transaction.payment = payment
    payment_transaction.result_code = data.get('result_code')
    payment_transaction.message_code = data.get('message_code')
    payment_transaction.message_text = data.get('message_text')

    payment_transaction.save()

    site_url = get_setting('site', 'global', 'siteurl')


    return True, {'rp_id': rp.id,
                  'rp_url': '%s%s' %  (site_url,
                                reverse('recurring_payment.view_account', args=[rp.id])),
                  'username': rp.user.username}
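
Per the docstring, api_rp_setup() takes a dict (typically parsed from JSON) carrying the member's email, a description and amount, the gateway customer/payment profile ids, the billing cycle bounds and the gateway response string, and it returns a (success, data) pair with the new recurring payment's id and URL. A hedged example call; every value below is invented purely for illustration:

# Hypothetical input; field names follow the docstring above, values are made up.
payload = {
    'email': 'member@example.com',
    'description': 'Monthly support plan',
    'amount': '25.00',
    'cp_id': '10000001',                     # customer profile id
    'pp_id': '20000001',                     # customer payment profile id
    'billing_cycle_start_dt': '2016-01-01',
    'billing_cycle_end_dt': '2016-01-31',
    'response_str': '1,1,1,This transaction has been approved.',  # gateway response
    'login_name': 'member1',
    'first_name': 'Pat',
    'last_name': 'Smith',
}

success, result = api_rp_setup(payload)
if success:
    print(result)  # contains rp_id, rp_url and username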

Example 49

View license
def train(target, dataset, cluster_spec):
  """Train Inception on a dataset for a number of steps."""
  # Number of workers and parameter servers are inferred from the workers and ps
  # hosts string.
  num_workers = len(cluster_spec.as_dict()['worker'])
  num_parameter_servers = len(cluster_spec.as_dict()['ps'])
  # If no value is given, num_replicas_to_aggregate defaults to the number of
  # workers.
  if FLAGS.num_replicas_to_aggregate == -1:
    num_replicas_to_aggregate = num_workers
  else:
    num_replicas_to_aggregate = FLAGS.num_replicas_to_aggregate

  # Both should be greater than 0 in a distributed training.
  assert num_workers > 0 and num_parameter_servers > 0, (' num_workers and '
                                                         'num_parameter_servers'
                                                         ' must be > 0.')

  # Choose worker 0 as the chief. Note that any worker could be the chief
  # but there should be only one chief.
  is_chief = (FLAGS.task_id == 0)

  # Ops are assigned to worker by default.
  with tf.device('/job:worker/task:%d' % FLAGS.task_id):
    # Variables and its related init/assign ops are assigned to ps.
    with slim.scopes.arg_scope(
        [slim.variables.variable, slim.variables.global_step],
        device=slim.variables.VariableDeviceChooser(num_parameter_servers)):
      # Create a variable to count the number of train() calls. This equals the
      # number of updates applied to the variables.
      global_step = slim.variables.global_step()

      # Calculate the learning rate schedule.
      num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                               FLAGS.batch_size)
      # Decay steps need to be divided by the number of replicas to aggregate.
      decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay /
                        num_replicas_to_aggregate)

      # Decay the learning rate exponentially based on the number of steps.
      lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                      global_step,
                                      decay_steps,
                                      FLAGS.learning_rate_decay_factor,
                                      staircase=True)
      # Add a summary to track the learning rate.
      tf.scalar_summary('learning_rate', lr)

      # Create an optimizer that performs gradient descent.
      opt = tf.train.RMSPropOptimizer(lr,
                                      RMSPROP_DECAY,
                                      momentum=RMSPROP_MOMENTUM,
                                      epsilon=RMSPROP_EPSILON)

      images, labels = image_processing.distorted_inputs(
          dataset,
          batch_size=FLAGS.batch_size,
          num_preprocess_threads=FLAGS.num_preprocess_threads)

      # Number of classes in the Dataset label set plus 1.
      # Label 0 is reserved for an (unused) background class.
      num_classes = dataset.num_classes() + 1
      logits = inception.inference(images, num_classes, for_training=True)
      # Add classification loss.
      inception.loss(logits, labels)

      # Gather all of the losses including regularization losses.
      losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
      losses += tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

      total_loss = tf.add_n(losses, name='total_loss')

      if is_chief:
        # Compute the moving average of all individual losses and the
        # total loss.
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        loss_averages_op = loss_averages.apply(losses + [total_loss])

        # Attach a scalar summary to all individual losses and the total loss;
        # do the same for the averaged version of the losses.
        for l in losses + [total_loss]:
          loss_name = l.op.name
          # Name each loss as '(raw)' and name the moving average version of the
          # loss as the original loss name.
          tf.scalar_summary(loss_name + ' (raw)', l)
          tf.scalar_summary(loss_name, loss_averages.average(l))

        # Add dependency to compute loss_averages.
        with tf.control_dependencies([loss_averages_op]):
          total_loss = tf.identity(total_loss)

      # Track the moving averages of all trainable variables.
      # Note that we maintain a 'double-average' of the BatchNormalization
      # global statistics.
      # This is not needed when the number of replicas is small but important
      # for synchronous distributed training with tens of workers/replicas.
      exp_moving_averager = tf.train.ExponentialMovingAverage(
          inception.MOVING_AVERAGE_DECAY, global_step)

      variables_to_average = (
          tf.trainable_variables() + tf.moving_average_variables())

      # Add histograms for model variables.
      for var in variables_to_average:
        tf.histogram_summary(var.op.name, var)

      # Create synchronous replica optimizer.
      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=num_replicas_to_aggregate,
          replica_id=FLAGS.task_id,
          total_num_replicas=num_workers,
          variable_averages=exp_moving_averager,
          variables_to_average=variables_to_average)

      batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)
      assert batchnorm_updates, 'Batchnorm updates are missing'
      batchnorm_updates_op = tf.group(*batchnorm_updates)
      # Add dependency to compute batchnorm_updates.
      with tf.control_dependencies([batchnorm_updates_op]):
        total_loss = tf.identity(total_loss)

      # Compute gradients with respect to the loss.
      grads = opt.compute_gradients(total_loss)

      # Add histograms for gradients.
      for grad, var in grads:
        if grad is not None:
          tf.histogram_summary(var.op.name + '/gradients', grad)

      apply_gradients_op = opt.apply_gradients(grads, global_step=global_step)

      with tf.control_dependencies([apply_gradients_op]):
        train_op = tf.identity(total_loss, name='train_op')

      # Get chief queue_runners, init_tokens and clean_up_op, which is used to
      # synchronize replicas.
      # More details can be found in sync_replicas_optimizer.
      chief_queue_runners = [opt.get_chief_queue_runner()]
      init_tokens_op = opt.get_init_tokens_op()
      clean_up_op = opt.get_clean_up_op()

      # Create a saver.
      saver = tf.train.Saver()

      # Build the summary operation based on the TF collection of Summaries.
      summary_op = tf.merge_all_summaries()

      # Build an initialization operation to run below.
      init_op = tf.initialize_all_variables()

      # We run the summaries in the same thread as the training operations by
      # passing in None for summary_op to avoid a summary_thread being started.
      # Running summaries and training operations in parallel could run out of
      # GPU memory.
      sv = tf.train.Supervisor(is_chief=is_chief,
                               logdir=FLAGS.train_dir,
                               init_op=init_op,
                               summary_op=None,
                               global_step=global_step,
                               saver=saver,
                               save_model_secs=FLAGS.save_interval_secs)

      tf.logging.info('%s Supervisor' % datetime.now())

      sess_config = tf.ConfigProto(
          allow_soft_placement=True,
          log_device_placement=FLAGS.log_device_placement)

      # Get a session.
      sess = sv.prepare_or_wait_for_session(target, config=sess_config)

      # Start the queue runners.
      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
      sv.start_queue_runners(sess, queue_runners)
      tf.logging.info('Started %d queues for processing input data.',
                      len(queue_runners))

      if is_chief:
        sv.start_queue_runners(sess, chief_queue_runners)
        sess.run(init_tokens_op)

      # Train, checking for Nans. Concurrently run the summary operation at a
      # specified interval. Note that the summary_op and train_op never run
      # simultaneously in order to prevent running out of GPU memory.
      next_summary_time = time.time() + FLAGS.save_summaries_secs
      while not sv.should_stop():
        try:
          start_time = time.time()
          loss_value, step = sess.run([train_op, global_step])
          assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
          if step > FLAGS.max_steps:
            break
          duration = time.time() - start_time

          if step % 30 == 0:
            examples_per_sec = FLAGS.batch_size / float(duration)
            format_str = ('Worker %d: %s: step %d, loss = %.2f '
                          '(%.1f examples/sec; %.3f sec/batch)')
            tf.logging.info(format_str %
                            (FLAGS.task_id, datetime.now(), step, loss_value,
                             examples_per_sec, duration))

          # Determine if the summary_op should be run on the chief worker.
          if is_chief and next_summary_time < time.time():
            tf.logging.info('Running Summary operation on the chief.')
            summary_str = sess.run(summary_op)
            sv.summary_computed(sess, summary_str)
            tf.logging.info('Finished running Summary operation.')

            # Determine the next time for running the summary.
            next_summary_time += FLAGS.save_summaries_secs
        except:
          if is_chief:
            tf.logging.info('About to execute sync_clean_up_op!')
            sess.run(clean_up_op)
          raise

      # Stop the supervisor.  This also waits for service threads to finish.
      sv.stop()

      # Save after the training ends.
      if is_chief:
        saver.save(sess,
                   os.path.join(FLAGS.train_dir, 'model.ckpt'),
                   global_step=global_step)
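
In the training loop above, datetime.now() only timestamps the periodic log lines; the actual scheduling of summary runs is driven by time.time() and a rolling next_summary_time deadline. A standalone sketch of that scheduling pattern (run_step and run_summary are stand-ins for the sess.run calls):

import time
from datetime import datetime

SAVE_SUMMARIES_SECS = 180  # illustrative stand-in for FLAGS.save_summaries_secs

def training_loop(max_steps, run_step, run_summary):
    next_summary_time = time.time() + SAVE_SUMMARIES_SECS
    for step in range(max_steps):
        loss_value = run_step()
        if step % 30 == 0:
            print('%s: step %d, loss = %.2f' % (datetime.now(), step, loss_value))
        if next_summary_time < time.time():
            run_summary()
            next_summary_time += SAVE_SUMMARIES_SECS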

Example 50

Project: models
Source File: inception_train.py
View license
def train(dataset):
  """Train on dataset for a number of steps."""
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Create a variable to count the number of train() calls. This equals the
    # number of batches processed * FLAGS.num_gpus.
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0), trainable=False)

    # Calculate the learning rate schedule.
    num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                             FLAGS.batch_size)
    decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                    global_step,
                                    decay_steps,
                                    FLAGS.learning_rate_decay_factor,
                                    staircase=True)

    # Create an optimizer that performs gradient descent.
    opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
                                    momentum=RMSPROP_MOMENTUM,
                                    epsilon=RMSPROP_EPSILON)

    # Get images and labels for ImageNet and split the batch across GPUs.
    assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
        'Batch size must be divisible by number of GPUs')
    split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)

    # Override the number of preprocessing threads to account for the increased
    # number of GPU towers.
    num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
    images, labels = image_processing.distorted_inputs(
        dataset,
        num_preprocess_threads=num_preprocess_threads)

    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1
    
    # Split the batch of images and labels for towers.
    images_splits = tf.split(0, FLAGS.num_gpus, images)
    labels_splits = tf.split(0, FLAGS.num_gpus, labels)

    # Calculate the gradients for each model tower.
    tower_grads = []
    for i in xrange(FLAGS.num_gpus):
      with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
          # Force all Variables to reside on the CPU.
          with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
            # Calculate the loss for one tower of the ImageNet model. This
            # function constructs the entire ImageNet model but shares the
            # variables across all towers.
            loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
                               scope)

          # Reuse variables for the next tower.
          tf.get_variable_scope().reuse_variables()

          # Retain the summaries from the final tower.
          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

          # Retain the Batch Normalization updates operations only from the
          # final tower. Ideally, we should grab the updates from all towers
          # but these stats accumulate extremely fast so we can ignore the
          # other stats from the other towers without significant detriment.
          batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
                                                scope)

          # Calculate the gradients for the batch of data on this ImageNet
          # tower.
          grads = opt.compute_gradients(loss)

          # Keep track of the gradients across all towers.
          tower_grads.append(grads)

    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    grads = _average_gradients(tower_grads)

    # Add summaries for the input processing and global_step.
    summaries.extend(input_summaries)

    # Add a summary to track the learning rate.
    summaries.append(tf.scalar_summary('learning_rate', lr))

    # Add histograms for gradients.
    for grad, var in grads:
      if grad is not None:
        summaries.append(
            tf.histogram_summary(var.op.name + '/gradients', grad))

    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(tf.histogram_summary(var.op.name, var))

    # Track the moving averages of all trainable variables.
    # Note that we maintain a "double-average" of the BatchNormalization
    # global statistics. This is more complicated than need be but we employ
    # this for backward-compatibility with our previous models.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY, global_step)

    # Another possibility is to use tf.slim.get_variables().
    variables_to_average = (tf.trainable_variables() +
                            tf.moving_average_variables())
    variables_averages_op = variable_averages.apply(variables_to_average)

    # Group all updates to into a single train op.
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batchnorm_updates_op)

    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())

    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)

    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()

    # Start running operations on the Graph. allow_soft_placement must be set to
    # True to build towers on GPU, as some of the ops do not have GPU
    # implementations.
    sess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)

    if FLAGS.pretrained_model_checkpoint_path:
      assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
      variables_to_restore = tf.get_collection(
          slim.variables.VARIABLES_TO_RESTORE)
      restorer = tf.train.Saver(variables_to_restore)
      restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
      print('%s: Pre-trained model restored from %s' %
            (datetime.now(), FLAGS.pretrained_model_checkpoint_path))

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.train.SummaryWriter(
        FLAGS.train_dir,
        graph_def=sess.graph.as_graph_def(add_shapes=True))

    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step % 10 == 0:
        examples_per_sec = FLAGS.batch_size / float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step, loss_value,
                            examples_per_sec, duration))

      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)

      # Save the model checkpoint periodically.
      if step % 5000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
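
As in the loop above, throughput is simply the batch size divided by the wall-clock duration of one step, with datetime.now() used only to timestamp the log line. A standalone sketch of that measurement (batch_size and the sleeping step function are placeholders for FLAGS.batch_size and sess.run):

import time
from datetime import datetime

def timed_step(batch_size, step_fn):
    # Run one step and report (examples_per_sec, sec_per_batch).
    start_time = time.time()
    step_fn()                      # stands in for sess.run([train_op, loss])
    duration = time.time() - start_time
    return batch_size / float(duration), duration

examples_per_sec, sec_per_batch = timed_step(32, lambda: time.sleep(0.05))
print('%s: %.1f examples/sec; %.3f sec/batch'
      % (datetime.now(), examples_per_sec, sec_per_batch))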