sqlalchemy.update.where.values

Here are examples of the Python API sqlalchemy.update.where.values taken from open source projects.
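
Before the project examples, here is a minimal, self-contained sketch of the chained update().where().values() call with SQLAlchemy Core. The users table and its columns are hypothetical and exist only to illustrate the pattern.

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, update)

# Hypothetical table used only to illustrate the pattern.
metadata = MetaData()
users = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(50)),
)

engine = create_engine('sqlite:///:memory:')
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(users.insert(), [{'name': 'alice'}, {'name': 'bob'}])
    # UPDATE users SET name='alicia' WHERE users.name = 'alice'
    conn.execute(
        update(users)
        .where(users.c.name == 'alice')
        .values(name='alicia'))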

2 Examples

Example 1

Project: metrique Source File: sqlalchemy.py
    def upsert(self, objects, autosnap=None, batch_size=None, table=None):
        objects = objects.values() if isinstance(objects, Mapping) else objects
        is_array(objects, 'objects must be a list')
        table = self.get_table(table)
        if autosnap is None:
            # assume autosnap:True if all objects have _end:None
            # otherwise, false (all objects have _end:non-null or
            # a mix of both)
            autosnap = all(o['_end'] is None for o in objects)
            logger.warn('AUTOSNAP auto-set to: %s' % autosnap)

        # TODO remove the use of _id and _hash
        _ids = sorted(set([o['_id'] for o in objects]))
        oids = sorted(set([o['_oid'] for o in objects]))
        session = self.session_new()
        try:
            if autosnap:
                # Snapshot - relevant only for cubes whose objects are
                # always pushed with _end:None ('current value').
                # If we already have an object with the same _oid but a
                # different _hash, we know we have a NEW object state for
                # the given _oid. In that case, we update the existing
                # object by setting its _end to the current object's
                # _start and then add the current object as-is; in other
                # words, rotate out the previous version by giving it an
                # _end and insert the new version as current with _end:None.
                existing = session.query(table).\
                    filter(table.c._oid.in_(oids)).\
                    filter(table.c._end.is_(None)).all()
                existing = {o._oid: o for o in existing}
                inserts = [o for o in objects if o['_oid'] not in existing]
                snap_k = len(inserts)
                dup_k = 0
                objects = [o for o in objects if o['_oid'] in existing]
                for o in objects:
                    oe = existing[o['_oid']]
                    if oe._hash != o['_hash']:
                        new_id = '%s:%s' % (oe._oid, oe._start)
                        session.execute(
                            update(table).where(table.c.id == oe.id).
                            values(_end=o['_start'], _id=new_id))
                        _ids.append(new_id)
                        inserts.append(o)
                        snap_k += 1
                    else:
                        dup_k += 1
                logger.debug('%s existing objects snapshotted' % snap_k)
                logger.debug('%s duplicates not re-saved' % dup_k)
                objects = inserts
            else:
                # History import: delete all existing versions for the
                # given _oids, then insert all the new historical
                # versions below.
                # NOTE: THIS EXPECTS THAT THE CURRENT BATCH CONTAINS
                # ALL HISTORICAL VERSIONS OF A GIVEN _oid!
                session.query(table).filter(table.c._oid.in_(oids)).\
                    delete(synchronize_session=False)

            # insert new versions
            session.flush()
            if objects:
                session.execute(table.insert(), objects)
            session.commit()
        except Exception as e:
            logger.error('Session Error: %s' % e)
            session.rollback()
            raise

        return sorted(map(unicode, _ids))
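
The key call in the snapshot branch above is the update().where().values() statement that rotates out the previous version of an object. Pulled out on its own, and assuming the same (hypothetical) table layout with id, _oid, _start, _end, _id and _hash columns, the pattern looks roughly like this:

from sqlalchemy import update

def rotate_version(session, table, existing_row, new_obj):
    # Close out the current row: its _end becomes the new object's _start,
    # and its _id is re-keyed so the incoming row can become the
    # 'current' version (_end:None).
    new_id = '%s:%s' % (existing_row._oid, existing_row._start)
    session.execute(
        update(table)
        .where(table.c.id == existing_row.id)
        .values(_end=new_obj['_start'], _id=new_id))
    return new_id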

Example 2

Project: glance Source File: 037_add_changes_to_satisfy_models.py
def upgrade(migrate_engine):
    meta = sqlalchemy.MetaData()
    meta.bind = migrate_engine

    if migrate_engine.name not in ['mysql', 'postgresql']:
        return

    image_properties = Table('image_properties', meta, autoload=True)
    image_members = Table('image_members', meta, autoload=True)
    images = Table('images', meta, autoload=True)

    # We have to ensure that we don't have NULL values since we are going
    # to set nullable=False
    migrate_engine.execute(
        update(image_members)
        .where(image_members.c.status == sql.expression.null())
        .values(status='pending'))

    migrate_engine.execute(
        update(images)
        .where(images.c.protected == sql.expression.null())
        .values(protected=sql.expression.false()))

    image_members.c.status.alter(nullable=False, server_default='pending')
    images.c.protected.alter(
        nullable=False, server_default=sql.expression.false())

    if migrate_engine.name == 'postgresql':
        Index('ix_image_properties_image_id_name',
              image_properties.c.image_id,
              image_properties.c.name).drop()

        # The name of this constraint differs between PostgreSQL versions.
        # Since there is only one unique constraint on this table, we can
        # look it up in the following way.
        name = migrate_engine.execute(
            """SELECT conname
               FROM pg_constraint
               WHERE conrelid =
                   (SELECT oid
                    FROM pg_class
                    WHERE relname LIKE 'image_properties')
                  AND contype = 'u';""").scalar()

        constraint = UniqueConstraint(image_properties.c.image_id,
                                      image_properties.c.name,
                                      name='%s' % name)
        migrate_engine.execute(DropConstraint(constraint))

        constraint = UniqueConstraint(image_properties.c.image_id,
                                      image_properties.c.name,
                                      name='ix_image_properties_image_id_name')
        migrate_engine.execute(AddConstraint(constraint))

        images.c.id.alter(server_default=None)
    if migrate_engine.name == 'mysql':
        constraint = UniqueConstraint(image_properties.c.image_id,
                                      image_properties.c.name,
                                      name='image_id')
        migrate_engine.execute(DropConstraint(constraint))
        image_locations = Table('image_locations', meta, autoload=True)
        if len(image_locations.foreign_keys) == 0:
            migrate_engine.execute(AddConstraint(ForeignKeyConstraint(
                [image_locations.c.image_id], [images.c.id])))
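
Both update().where().values() calls in this migration follow the same backfill pattern: rewrite NULLs to a sensible default before the column is altered to nullable=False. A minimal sketch of that pattern, using a hypothetical tasks table instead of Glance's schema:

from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, update)
from sqlalchemy.sql import expression

engine = create_engine('sqlite:///:memory:')
metadata = MetaData()
tasks = Table(
    'tasks', metadata,
    Column('id', Integer, primary_key=True),
    Column('status', String(16)),  # still nullable at this point
)
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(tasks.insert(), [{'status': None}, {'status': 'done'}])
    # Backfill NULL statuses so a NOT NULL constraint can be applied later.
    conn.execute(
        update(tasks)
        .where(tasks.c.status == expression.null())
        .values(status='pending'))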