system.osi.run_command

Here are the examples of the python api system.osi.run_command taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

85 Examples (page 7)

Example 51

Project: rockstor-core Source File: btrfs.py
def pool_usage(mnt_pt):
    """Return (total, inuse, free) for the btrfs pool mounted at mnt_pt.

    All three values are in KiB, derived by parsing the byte-based output
    of 'btrfs fi usage -b <mnt_pt>'.  For raid5/6 pools (detected via the
    RAID56 warning on stderr) usable space is estimated manually because
    'fi usage' does not account for parity on those profiles.
    """
    # @todo: remove temporary raid5/6 custom logic once fi usage
    # supports raid5/6.
    cmd = [BTRFS, 'fi', 'usage', '-b', mnt_pt]
    total = 0
    inuse = 0
    free = 0
    data_ratio = 1
    raid56 = False
    parity = 1  # number of parity devices: 1 for RAID5, 2 for RAID6
    disks = set()
    out, err, rc = run_command(cmd)
    # The RAID56 warning is emitted on stderr, not stdout.
    for e in err:
        e = e.strip()
        if (re.match('WARNING: RAID56', e) is not None):
            raid56 = True

    for o in out:
        o = o.strip()
        if (raid56 is True and re.match('/dev/', o) is not None):
            # Collect member devices so we can estimate per-disk capacity.
            disks.add(o.split()[0])
        elif (raid56 is True and re.match('Data,RAID', o) is not None):
            # 'Data,RAID6' => two parity devices.
            if (o[5:10] == 'RAID6'):
                parity = 2
        elif (re.match('Device size:', o) is not None):
            # bytes -> KiB (integer division under Python 2).
            total = int(o.split()[2]) / 1024
        elif (re.match('Used:', o) is not None):
            inuse = int(o.split()[1]) / 1024
        elif (re.match('Free ', o) is not None):
            # Parsed but unconditionally recomputed as total - inuse below.
            free = int(o.split()[2]) / 1024
        elif (re.match('Data ratio:', o) is not None):
            data_ratio = float(o.split()[2])
            # Clamp to avoid division blow-up on a degenerate ratio.
            if (data_ratio < 0.01):
                data_ratio = 0.01
    if (raid56 is True):
        # Usable capacity = (disks - parity) * size-per-disk.
        num_disks = len(disks)
        if (num_disks > 0):
            per_disk = total / num_disks
            total = (num_disks - parity) * per_disk
    else:
        # Scale raw device figures down by the replication (data) ratio.
        total = total / data_ratio
        inuse = inuse / data_ratio
    free = total - inuse
    return (total, inuse, free)

Example 52

Project: rockstor-core Source File: btrfs.py
Function: scrub_status
def scrub_status(pool):
    """Return a dict describing the scrub state of the given pool.

    Parses 'btrfs scrub status -R <mnt_pt>'.  The dict always contains
    'status' ('unknown', 'running' or 'finished'); when finished it also
    carries 'duration' (seconds) plus the raw per-counter statistics lines,
    with data_bytes_scrubbed converted to 'kb_scrubbed'.
    """
    stats = {'status': 'unknown', }
    mnt_pt = mount_root(pool)
    out, err, rc = run_command([BTRFS, 'scrub', 'status', '-R', mnt_pt])
    if (len(out) > 1):
        if (re.search('running', out[1]) is not None):
            stats['status'] = 'running'
        elif (re.search('finished', out[1]) is not None):
            stats['status'] = 'finished'
            # Duration is the trailing H:M:S token on the status line.
            dfields = out[1].split()[-1].split(':')
            stats['duration'] = ((int(dfields[0]) * 60 * 60) +
                                 (int(dfields[1]) * 60) + int(dfields[2]))
        else:
            # Neither running nor finished: report status as unknown.
            return stats
    else:
        return stats
    # Remaining lines (excluding the trailing one) are 'key: value' counters.
    for l in out[2:-1]:
        fields = l.strip().split(': ')
        if (fields[0] == 'data_bytes_scrubbed'):
            # bytes -> KiB (integer division under Python 2).
            stats['kb_scrubbed'] = int(fields[1]) / 1024
        else:
            stats[fields[0]] = int(fields[1])
    return stats

Example 53

Project: rockstor-core Source File: btrfs.py
@task()
def start_balance(mnt_pt, force=False, convert=None):
    """Kick off a btrfs balance on the filesystem mounted at mnt_pt.

    :param mnt_pt: mount point of the pool to balance.
    :param force: if True, pass -f (e.g. to reduce metadata redundancy).
    :param convert: target raid profile; when given, both data and metadata
        are converted via -dconvert/-mconvert.
    """
    # Use the shared BTRFS constant for consistency with the rest of the
    # module (was a hard-coded 'btrfs' literal).
    cmd = [BTRFS, 'balance', 'start', mnt_pt]
    # TODO: Confirm -f is doing what is intended, man states for reducing
    # TODO: metadata from say raid1 to single.
    # With no filters we also get a warning that block some balances due to
    # expected long execution time, in this case "--full-balance" is required.
    # N.B. currently force in Web-UI does not mean force here.
    if (force):
        cmd.insert(3, '-f')
    if (convert is not None):
        cmd.insert(3, '-dconvert=%s' % convert)
        cmd.insert(3, '-mconvert=%s' % convert)
    else:
        # As we are running with no convert filters a warning and 10 second
        # countdown with ^C prompt will result unless we use "--full-balance".
        # This warning is now present in the Web-UI "Start a new balance"
        # button tooltip.
        cmd.insert(3, '--full-balance')
    run_command(cmd)

Example 54

Project: rockstor-core Source File: btrfs.py
Function: balance_status
def balance_status(pool):
    """
    Wrapper around btrfs balance status pool_mount_point to extract info about
    the current status of a balance.
    :param pool: pool object to query
    :return: dictionary containing parsed info about the balance status,
    ie indexed by 'status' and 'percent_done'.
    """
    stats = {'status': 'unknown', }
    mnt_pt = mount_root(pool)
    # throw=False: a non-zero rc (e.g. no balance running) is expected.
    out, err, rc = run_command([BTRFS, 'balance', 'status', mnt_pt],
                               throw=False)
    if (len(out) > 0):
        if (re.match('Balance', out[0]) is not None):
            if (re.search('cancel requested', out[0]) is not None):
                stats['status'] = 'cancelling'
            elif (re.search('pause requested', out[0]) is not None):
                stats['status'] = 'pausing'
            elif (re.search('paused', out[0]) is not None):
                stats['status'] = 'paused'
            else:
                stats['status'] = 'running'
            # make sure we have a second line before parsing it.
            if ((len(out) > 1 and
                    re.search('chunks balanced', out[1]) is not None)):
                # e.g. '... 30% left' -> strip the trailing '%'.
                percent_left = out[1].split()[-2][:-1]
                try:
                    percent_left = int(percent_left)
                    stats['percent_done'] = 100 - percent_left
                except ValueError:
                    # Only a malformed percentage should be ignored; a bare
                    # except here used to mask unrelated failures.
                    pass
        elif (re.match('No balance', out[0]) is not None):
            stats['status'] = 'finished'
            stats['percent_done'] = 100
    return stats

Example 55

Project: rockstor-core Source File: btrfs.py
Function: device_scan
def device_scan():
    """Ask btrfs to scan all block devices for filesystems.

    Returns the (out, err, rc) tuple from run_command.
    """
    cmd = [BTRFS, 'device', 'scan']
    return run_command(cmd)

Example 56

Project: rockstor-core Source File: btrfs.py
def btrfs_uuid(disk):
    """return uuid of a btrfs filesystem"""
    device = '/dev/disk/by-id/%s' % disk
    out, err, rc = run_command([BTRFS, 'filesystem', 'show', device])
    # First output line ends '... uuid: <uuid>'; the uuid is token 4.
    return out[0].split()[3]

Example 57

Project: rockstor-core Source File: btrfs.py
def set_property(mnt_pt, name, val, mount=True):
    """Set a btrfs property on mnt_pt.

    When mount is True the property is only set if mnt_pt is currently
    mounted; otherwise it is set unconditionally.  Returns run_command's
    result, or None when the mount check fails.
    """
    if (mount is True and not is_mounted(mnt_pt)):
        return None
    return run_command([BTRFS, 'property', 'set', mnt_pt, name, val])

Example 58

Project: rockstor-core Source File: btrfs.py
def get_snap(subvol_path, oldest=False, num_retain=None, regex=None):
    """Return a snapshot name for the Share at subvol_path, or None.

    By default the newest snapshot (highest subvol id) is returned.  With
    oldest=True the oldest snapshot is returned, but only when more than
    num_retain snapshots exist.  regex, when given, filters snapshots by
    name.
    """
    if (not os.path.isdir(subvol_path)): return None
    share_name = subvol_path.split('/')[-1]
    cmd = [BTRFS, 'subvol', 'list', '-o', subvol_path]
    o, e, rc = run_command(cmd)
    snaps = {}
    for l in o:
        fields = l.split()
        if (len(fields) > 0):
            # Last token is a path like <pool>/<share>/<snap>.
            snap_fields = fields[-1].split('/')
            if (len(snap_fields) != 3 or
                snap_fields[1] != share_name):
                #not the Share we are interested in.
                continue
            if (regex is not None and re.search(regex, snap_fields[2]) is None):
                #regex not in the name
                continue
            # Key by numeric subvol id so sorting gives creation order.
            snaps[int(fields[1])] = snap_fields[2]
    snap_ids = sorted(snaps.keys())
    if (oldest):
        # NOTE(review): if num_retain is None this relies on Python 2's
        # int > None comparison (always True) -- confirm callers always
        # pass num_retain together with oldest=True.
        if(len(snap_ids) > num_retain):
            return snaps[snap_ids[0]]
    elif (len(snap_ids) > 0):
        return snaps[snap_ids[-1]]
    return None

Example 59

Project: rockstor-core Source File: bootstrap.py
Function: main
def main():
    """Bootstrap sequence: btrfs device scan, API bootstrap with retries,
    then best-effort qgroup cleanup and limit maxout scripts."""

    try:
        device_scan()
    except Exception, e:
        # Without a successful device scan nothing else can work.
        print ('BTRFS device scan failed due to an exception. This indicates '
               'a serious problem. Aborting. Exception: %s' % e.__str__())
        sys.exit(1)
    print('BTRFS device scan complete')

    #if the appliance is not setup, there's nothing more to do beyond
    #device scan
    setup = Setup.objects.first()
    if (setup is None or setup.setup_user is False):
        print('Appliance is not yet setup.')
        return

    num_attempts = 0
    while True:
        try:
            aw = APIWrapper()
            aw.api_call('network')
            aw.api_call('commands/bootstrap', calltype='post')
            break
        except Exception, e:
            #Retry on every exception, primarily because of django-oauth related
            #code behaving unpredictably while setting tokens. Retrying is a
            #decent workaround for now(11302015).
            if (num_attempts > 15):
                print('Max attempts(15) reached. Connection errors persist. '
                      'Failed to bootstrap. Error: %s' % e.__str__())
                sys.exit(1)
            print('Exception occured while bootstrapping. This could be because '
                  'rockstor.service is still starting up. will wait 2 seconds '
                  'and try again. Exception: %s' % e.__str__())
            time.sleep(2)
            num_attempts += 1
    print('Bootstrapping complete')

    # Both cleanup scripts below are best effort: failures are reported but
    # never abort the bootstrap.
    try:
        print('Running qgroup cleanup. %s' % QGROUP_CLEAN)
        run_command([QGROUP_CLEAN])
    except Exception, e:
        print('Exception while running %s: %s' % (QGROUP_CLEAN, e.__str__()))

    try:
        print('Running qgroup limit maxout. %s' % QGROUP_MAXOUT_LIMIT)
        run_command([QGROUP_MAXOUT_LIMIT])
    except Exception, e:
        print('Exception while running %s: %s' % (QGROUP_MAXOUT_LIMIT, e.__str__()))

Example 60

Project: rockstor-core Source File: flash_optimize.py
def is_flash(disk):
    """Heuristically decide whether disk is a (usb) flash drive.

    Combines udevadm bus/driver info with the block-layer scheduler setting.
    """
    flash = False
    o, e, rc = run_command(['udevadm', 'info', '--path=/sys/block/%s' % disk])
    for l in o:
        if (re.search('ID_BUS=', l) is not None):
            if (l.strip().split()[1].split('=')[1] != 'usb'):
                logging.debug('drive(%s) is not on usb bus. info: %s' % (disk, l))
                # NOTE(review): 'flash and False' is just 'flash = False';
                # presumably meant to veto a usb classification.
                flash = flash and False
        if (re.search('ID_USB_DRIVER=usb-storage', l) is not None):
            logging.debug('usb-storage driver confirmed for %s' % disk)
            # 'flash or True' is just 'flash = True'.
            flash = flash or True
    logging.info('usb flash drive validation from udevadm: %s' % flash)
    #/sys/block/disk/queue/rotational is not reliable, but if
    #[deadline] is in /sys/block/disk/queue/scheduler, it's fair to assume flash
    logging.debug('Checking if scheduler is set to [deadline] for %s' % disk)
    with open('/sys/block/%s/queue/scheduler' % disk) as sfo:
        for l in sfo.readlines():
            if (re.search('\[deadline\]', l) is not None):
                logging.debug('scheduler: %s' % l)
                # NOTE(review): 'flash and True' leaves flash unchanged, as
                # does 'flash or False' below -- this loop never alters the
                # result. Looks like intended and/or combining with the
                # scheduler match was lost; confirm intent before changing.
                flash = flash and True
            else:
                flash = flash or False
                logging.debug('scheduler is not flash friendly. info: %s' % l)
    logging.info('flashiness of the drive(%s): %s' % (disk, flash))
    return flash

Example 61

Project: rockstor-core Source File: flash_optimize.py
def update_sysctl():
    """Ensure flash-friendly vm tuneups are present in SYSCTL_CONF.

    Rewrites the file via a temp copy (appending only the tuneups not
    already present), atomically moves it into place and reloads settings
    with 'sysctl -p'.
    """
    import os  # local import: only needed to close the mkstemp fd
    logging.debug('updating %s' % SYSCTL_CONF)
    tuneups = ['vm.swappiness = 1',
               'vm.vfs_cache_pressure = 50',
               'vm.dirty_writeback_centisecs = 12000',
               'vm.dirty_expire_centisecs = 12000',
               'vm.dirty_ratio = 20',
               'vm.dirty_background_ratio = 1',]
    fd, npath = mkstemp()
    # mkstemp returns a raw OS-level fd; close it immediately to avoid a
    # descriptor leak -- the temp file is re-opened by path below.
    os.close(fd)
    with open(SYSCTL_CONF) as sfo, open(npath, 'w') as tfo:
        for line in sfo.readlines():
            tfo.write(line)
            if (line.strip() in tuneups):
                # Already present: don't append it again at the end.
                tuneups.remove(line.strip())
        for t in tuneups:
            tfo.write('%s\n' % t)
    move(npath, SYSCTL_CONF)
    logging.info('moved %s to %s' % (npath, SYSCTL_CONF))
    o, e, rc = run_command(['/usr/sbin/sysctl', '-p'])
    logging.info('Successfully updated sysctl')
    logging.debug('sysctl -p out: %s err: %s' % (o, e))

Example 62

Project: rockstor-core Source File: flash_optimize.py
def main():
    """Apply flash-drive optimizations to the root disk when appropriate.

    Sets up fstrim if TRIM is supported; if the drive looks like flash,
    tunes sysctl, mounts /tmp as tmpfs and remounts root filesystems with
    noatime.  Pass -x for debug logging.
    """
    loglevel = logging.INFO
    if (len(sys.argv) > 1 and sys.argv[1] == '-x'):
        loglevel = logging.DEBUG
    logging.basicConfig(format='%(asctime)s: %(message)s', level=loglevel)
    rd = root_disk()
    logging.debug('Root drive is %s' % rd)
    do_more = False
    if (trim_support(rd) is True):
        do_more = True
        logging.info('TRIM support is available for %s' % rd)
        fstrim_systemd()
        logging.debug('Finished setting up fstrim timer')
    do_more = do_more or is_flash(rd)

    if (do_more):
        update_sysctl()
        logging.info('updated sysctl')
        # enable tmpfs on /tmp
        tmpmnt = 'tmp.mount'
        systemctl(tmpmnt, 'enable')
        logging.info('enabled %s' % tmpmnt)
        systemctl(tmpmnt, 'start')
        logging.info('started %s' % tmpmnt)

        #mount stuff with noatime
        #add noatime to /, /home and /boot in /etc/fstab
        update_fstab()
        logging.info('updated fstab')
        for fs in ROOT_FS:
            run_command(['mount', fs, '-o', 'remount'])
            logging.info('remounted %s' % fs)

Example 63

Project: rockstor-core Source File: initrock.py
def delete_old_kernels(logging, num_retain=5):
    """Remove old kernel-ml (and centos kernel) rpms, keeping num_retain.

    The currently running kernel and the supported default kernel are never
    removed.  centos kernels are deleted first; then enough of the oldest
    kernel-ml packages to bring the total down to num_retain.
    """
    #Don't keep more than num_retain kernels
    o, e, rc = run_command([RPM, '-q', 'kernel-ml'])
    ml_kernels = o[:-1] #last entry is an empty string.
    # Lexicographic sort: oldest versions first.
    ml_kernels = sorted(ml_kernels)
    #centos kernels, may or may not be installed.
    centos_kernels = []
    o, e, rc = run_command([RPM, '-q', 'kernel'], throw=False)
    if (rc == 0): centos_kernels = o[:-1]

    #Don't delete current running kernel
    #Don't delete current default kernel
    running_kernel = os.uname()[2]
    default_kernel = settings.SUPPORTED_KERNEL_VERSION
    deleted = 0
    for k in centos_kernels:
        kv = k.split('kernel-')[1]
        if (kv != running_kernel and
            kv != default_kernel):
            run_command([YUM, 'remove', '-y', k])
            deleted += 1
            logging.info('Deleted old Kernel: %s' % k)
    # Delete the oldest ml kernels until total kernels == num_retain,
    # accounting for the centos kernels removed above.
    for i in range(len(centos_kernels) + len(ml_kernels) - deleted - num_retain):
        kv = ml_kernels[i].split('kernel-ml-')[1]
        if (kv != running_kernel and
            kv != default_kernel):
            run_command([YUM, 'remove', '-y', ml_kernels[i]])
            logging.info('Deleted old Kernel: %s' % ml_kernels[i])

Example 64

Project: rockstor-core Source File: initrock.py
def init_update_issue():
    """Write /etc/issue with the appliance's web-ui address, return the ip.

    The ip is taken from the configured management interface when set,
    otherwise from the default route's interface via route/ifconfig.
    Returns None (and writes a 'not yet configured' message) when no ip
    could be determined.
    """
    from smart_manager.models import Service
    from storageadmin.models import NetworkConnection
    ipaddr = None
    so = Service.objects.get(name='rockstor')
    if (so.config is not None):
        config = json.loads(so.config)
        try:
            ipaddr = NetworkConnection.objects.get(name=config['network_interface']).ipaddr
        except NetworkConnection.DoesNotExist:
            # Configured interface no longer exists; fall through to
            # auto-detection below.
            pass
    if (ipaddr is None):
        default_if = None
        some_if = None

        o, e, c = run_command(['/usr/sbin/route'])
        for i in o:
            if (len(i.strip()) == 0):
                continue
            if (re.match('default', i) is not None):
                # Interface name is the last column of the route line.
                default_if = i.split()[-1]
            else:
                some_if = i.split()[-1]
        if (default_if is None):
            # No default route: fall back to any interface seen.
            default_if = some_if
        if (default_if is not None):
            o2, e, c = run_command(['/usr/sbin/ifconfig', default_if])
            for i2 in o2:
                if (re.match('inet ', i2.strip()) is not None):
                    ipaddr = i2.split()[1]

    with open('/etc/issue', 'w') as ifo:
        if (ipaddr is None):
            ifo.write('The system does not yet have an ip address.\n')
            ifo.write('Rockstor cannot be configured using the web interface '
                        'without this.\n\n')
            ifo.write('Press Enter to receive updated network status\n')
            ifo.write('If this message persists please login as root and configure '
                      'your network using nmtui, then reboot.\n')
        else:
            ifo.write('\nRockstor is successfully installed.\n\n')
            ifo.write('You can access the web-ui by pointing your browser to '
                      'https://%s\n\n' % ipaddr)
    return ipaddr

Example 65

Project: rockstor-core Source File: initrock.py
def set_def_kernel(logger, version=settings.SUPPORTED_KERNEL_VERSION):
    supported_kernel_path = ('/boot/vmlinuz-%s' % version)
    if (not os.path.isfile(supported_kernel_path)):
        return logger.error('Supported kernel(%s) does not exist' %
                            supported_kernel_path)
    try:
        o, e, rc = run_command([GRUBBY, '--default-kernel'])
        if (o[0] == supported_kernel_path):
            return logging.info('Supported kernel(%s) is already the default' %
                                supported_kernel_path)
    except Exception, e:
        return logger.error('Exception while listing the default kernel: %s'
                            % e.__str__())

    try:
        run_command([GRUBBY, '--set-default=%s' % supported_kernel_path])
        return logger.info('Default kernel set to %s' % supported_kernel_path)
    except Exception, e:
        return logger.error('Exception while setting kernel(%s) as default: %s' %
                            (version, e.__str__()))

Example 66

Project: rockstor-core Source File: initrock.py
def main():
    """One-time (and per-boot) rockstor initialization.

    In order: default-kernel setup, old-kernel cleanup, self-signed cert
    generation, flash optimization, timezone/sshd updates, first-run
    database initialization and migrations (guarded by STAMP), firewalld
    shutdown, nginx update, python downgrade, /etc/issue update with the
    appliance ip (retried), and finally service enablement.  Statement
    order matters throughout; pass -x for debug logging.
    """
    loglevel = logging.INFO
    if (len(sys.argv) > 1 and sys.argv[1] == '-x'):
        loglevel = logging.DEBUG
    logging.basicConfig(format='%(asctime)s: %(message)s', level=loglevel)
    set_def_kernel(logging)
    try:
        delete_old_kernels(logging)
    except Exception, e:
        logging.debug('Exception while deleting old kernels. Soft error. Moving on.')
        logging.exception(e)

    # Regenerate certs from scratch if either half of the pair is missing.
    cert_loc = '%s/certs/' % BASE_DIR
    if (os.path.isdir(cert_loc)):
        if (not os.path.isfile('%s/rockstor.cert' % cert_loc) or
            not os.path.isfile('%s/rockstor.key' % cert_loc)):
            shutil.rmtree(cert_loc)

    if (not os.path.isdir(cert_loc)):
        os.mkdir(cert_loc)
        dn = ("/C=US/ST=Rockstor user's state/L=Rockstor user's "
              "city/O=Rockstor user/OU=Rockstor dept/CN=rockstor.user")
        logging.info('Creating openssl cert...')
        run_command([OPENSSL, 'req', '-nodes', '-newkey', 'rsa:2048',
                     '-keyout', '%s/first.key' % cert_loc, '-out',
                     '%s/rockstor.csr' % cert_loc, '-subj', dn])
        logging.debug('openssl cert created')
        logging.info('Creating rockstor key...')
        run_command([OPENSSL, 'rsa', '-in', '%s/first.key' % cert_loc, '-out',
                     '%s/rockstor.key' % cert_loc])
        logging.debug('rockstor key created')
        logging.info('Singing cert with rockstor key...')
        # Self-sign the csr for 10 years.
        run_command([OPENSSL, 'x509', '-in', '%s/rockstor.csr' % cert_loc,
                     '-out', '%s/rockstor.cert' % cert_loc, '-req', '-signkey',
                     '%s/rockstor.key' % cert_loc, '-days', '3650'])
        logging.debug('cert signed.')
        logging.info('restarting nginx...')
        run_command([SUPERCTL, 'restart', 'nginx'])

    cleanup_rclocal(logging)
    logging.info('Checking for flash and Running flash optimizations if appropriate.')
    run_command([FLASH_OPTIMIZE, '-x'], throw=False)
    tz_updated = False
    try:
        logging.info('Updating the timezone from the system')
        tz_updated = update_tz(logging)
    except Exception, e:
        logging.error('Exception while updating timezone: %s' % e.__str__())
        logging.exception(e)

    try:
        logging.info('Updating sshd_config')
        bootstrap_sshd_config(logging)
    except Exception, e:
        logging.error('Exception while updating sshd_config: %s' % e.__str__())

    # STAMP marks that first-run database setup has already happened.
    if (not os.path.isfile(STAMP)):
        logging.info('Please be patient. This script could take a few minutes')
        shutil.copyfile('%s/conf/django-hack' % BASE_DIR,
                        '%s/django' % BASE_BIN)
        run_command([SYSCTL, 'enable', 'postgresql'])
        logging.debug('Progresql enabled')
        # Wipe any stale cluster before initdb.
        shutil.rmtree('/var/lib/pgsql/data')
        logging.info('initializing Postgresql...')
        run_command(['/usr/bin/postgresql-setup', 'initdb'])
        logging.info('Done.')
        run_command([SYSCTL, 'restart', 'postgresql'])
        run_command([SYSCTL, 'status', 'postgresql'])
        logging.debug('Postgresql restarted')
        logging.info('Creating app databases...')
        run_command(['su', '-', 'postgres', '-c', '/usr/bin/createdb smartdb'])
        logging.debug('smartdb created')
        run_command(['su', '-', 'postgres', '-c',
                     '/usr/bin/createdb storageadmin'])
        logging.debug('storageadmin created')
        logging.info('Done')
        logging.info('Initializing app databases...')
        run_command(['su', '-', 'postgres', '-c', "psql -c \"CREATE ROLE rocky WITH SUPERUSER LOGIN PASSWORD 'rocky'\""])
        logging.debug('rocky ROLE created')
        run_command(['su', '-', 'postgres', '-c', "psql storageadmin -f %s/conf/storageadmin.sql.in" % BASE_DIR])
        logging.debug('storageadmin app database loaded')
        run_command(['su', '-', 'postgres', '-c', "psql smartdb -f %s/conf/smartdb.sql.in" % BASE_DIR])
        logging.debug('smartdb app database loaded')
        run_command(['su', '-', 'postgres', '-c', "psql storageadmin -c \"select setval('south_migrationhistory_id_seq', (select max(id) from south_migrationhistory))\""])
        logging.debug('storageadmin migration history copied')
        run_command(['su', '-', 'postgres', '-c', "psql smartdb -c \"select setval('south_migrationhistory_id_seq', (select max(id) from south_migrationhistory))\""])
        logging.debug('smartdb migration history copied')
        logging.info('Done')
        run_command(['cp', '-f', '%s/conf/postgresql.conf' % BASE_DIR,
                     '/var/lib/pgsql/data/'])
        logging.debug('postgresql.conf copied')
        run_command(['cp', '-f', '%s/conf/pg_hba.conf' % BASE_DIR,
                     '/var/lib/pgsql/data/'])
        logging.debug('pg_hba.conf copied')
        run_command([SYSCTL, 'restart', 'postgresql'])
        logging.info('Postgresql restarted')
        logging.info('Running app database migrations...')
        run_command([DJANGO, 'migrate', 'oauth2_provider', '--database=default',
                     '--noinput'])
        run_command([DJANGO, 'migrate', 'storageadmin', '--database=default',
                     '--noinput'])
        logging.debug('storageadmin migrated')
        run_command([DJANGO, 'migrate', 'django_ztask', '--database=default',
                     '--noinput'])
        logging.debug('django_ztask migrated')
        run_command([DJANGO, 'migrate', 'smart_manager',
                     '--database=smart_manager', '--noinput'])
        logging.debug('smart manager migrated')
        logging.info('Done')
        logging.info('Running prepdb...')
        run_command([PREP_DB, ])
        logging.info('Done')
        run_command(['touch', STAMP])
        require_postgres(logging)
        logging.info('Done')
    else:
        logging.info('Running prepdb...')
        run_command([PREP_DB, ])


    logging.info('stopping firewalld...')
    run_command([SYSCTL, 'stop', 'firewalld'])
    run_command([SYSCTL, 'disable', 'firewalld'])
    logging.info('firewalld stopped and disabled')
    update_nginx(logging)
    try:
        #downgrading python is a stopgap until it's fixed in upstream.
        downgrade_python(logging)
    except Exception, e:
        logging.error('Exception while downgrading python: %s' % e.__str__())
        logging.exception(e)

    shutil.copyfile('/etc/issue', '/etc/issue.rockstor')
    # Retry /etc/issue update until an ip is available (up to ~60s).
    for i in range(30):
        try:
            if (init_update_issue() is not None):
                # init_update_issue() didn't cause an exception and did return
                # an ip so we break out of the multi try loop as we are done.
                break
            else:
                # execute except block with message so we can try again.
                raise Exception('default interface IP not yet configured')
        except Exception, e:
            # only executed if there is an actual exception with
            # init_update_issue() or if it returns None so we can try again
            # regardless as in both instances we may succeed on another try.
            logging.debug('Exception occurred while running update_issue: %s. '
                         'Trying again after 2 seconds.' % e.__str__())
            if (i > 28):
                logging.error('Waited too long and tried too many times. '
                              'Quiting.')
                raise e
            time.sleep(2)

    enable_rockstor_service(logging)
    enable_bootstrap_service(logging)

Example 67

Project: rockstor-core Source File: qgroup_clean.py
Function: main
def main():
    """Delete orphaned qgroups (0/<id> without a matching subvolume) on all
    pools.  Pools without quotas enabled are skipped; per-pool errors are
    reported and do not stop processing of other pools."""
    for p in Pool.objects.all():
        try:
            print('Processing pool(%s)' % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, 'subvol', 'list', mnt_pt])
            subvol_ids = []
            for l in o:
                if (re.match('ID ', l) is not None):
                    subvol_ids.append(l.split()[1])

            # throw=False: non-zero rc means quotas are disabled.
            o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt], throw=False)
            if (rc != 0):
                print('Quotas not enabled on pool(%s). Skipping it.' % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if (re.match('0/', l) is not None):
                    q = l.split()[0].split('/')[1]
                    # 0/5 is the top-level subvolume; never touch it.
                    if (q == '5'):
                        continue
                    qgroup_ids.append(l.split()[0].split('/')[1])

            for q in qgroup_ids:
                if (q not in subvol_ids):
                    print('qgroup %s not in use. deleting' % q)
                    run_command([BTRFS, 'qgroup', 'destroy', '0/%s' % q, mnt_pt])
                else:
                    print('qgroup %s is in use. Moving on.' % q)
            print('Finished processing pool(%s)' % p.name)
        except Exception, e:
            print ('Exception while qgroup-cleanup of Pool(%s): %s' %
                   (p.name, e.__str__()))

Example 68

Project: rockstor-core Source File: qgroup_maxout_limit.py
Function: main
def main():
    for p in Pool.objects.all():
        try:
            print('Processing pool(%s)' % p.name)
            mnt_pt = mount_root(p)
            o, e, rc = run_command([BTRFS, 'qgroup', 'show', '-p', mnt_pt],
                                   throw=False)
            if (rc != 0):
                print('Quotas not enabled on pool(%s). Skipping it.' % p.name)
                continue

            qgroup_ids = []
            for l in o:
                if (re.match('qgroupid', l) is not None or
                    re.match('-------', l) is not None):
                    continue
                cols = l.strip().split()
                if (len(cols) != 4):
                    print('Ignoring unexcepted line(%s).' % l)
                    continue
                if (cols[3] == '---'):
                    print('No parent qgroup for %s' % l)
                    continue
                qgroup_ids.append(cols[3])

            for q in qgroup_ids:
                print('relaxing the limit on qgroup %s' % q)
                run_command([BTRFS, 'qgroup', 'limit', 'none', q, mnt_pt])

            print('Finished processing pool(%s)' % p.name)
        except Exception, e:
            print ('Exception while qgroup-maxout of Pool(%s): %s' %
                   (p.name, e.__str__()))

Example 69

Project: rockstor-core Source File: qgroup_test.py
def fill_up_share(pname, sname, chunk=(1024 * 1024 * 2)):
    """Write files into the Share until its disk quota is exceeded.

    Files are named file-<n>, continuing from the highest existing index.
    Each file is 100 chunks of 'chunk' bytes; usage is printed after every
    file (preceded by a sync so qgroup accounting catches up).
    """
    so = Share.objects.get(name=sname)
    rusage, eusage = share_usage(so.pool, so.qgroup)
    print ('Writing to Share(%s) until quota is exceeded.' % sname)
    print ('Share(%s) Size: %d Usage: %d' % (sname, so.size, rusage))
    spath = '/mnt2/%s/%s' % (pname, sname)
    # Continue numbering after the highest existing file-<n> index.
    file_indices = sorted([int(f.split('-')[1]) for f in os.listdir(spath)], reverse=True)
    counter = 0
    if (len(file_indices) > 0):
        counter = file_indices[0] + 1
    quota_exceeded = False
    while (not quota_exceeded):
        fname = '%s/file-%d' % (spath, counter)
        # NOTE(review): name is misleading -- this holds 'chunk' bytes
        # (2 MiB by default), not 1 MiB.
        one_mb = 's' * chunk
        try:
            with open(fname, 'w') as ofo:
                for i in range(100):
                    ofo.write(one_mb)
        except IOError, e:
            if (re.search('Disk quota exceeded', e.__str__()) is not None):
                print (e.__str__())
                quota_exceeded = True
            else:
                # Any other IO failure is a real error; propagate it.
                raise e

        run_command(['/usr/bin/sync'])
        rusage, eusage = share_usage(so.pool, so.qgroup)
        print ('Share(%s) Size: %d Usage: %d' % (sname, so.size, rusage))
        counter += 1

Example 70

Project: rockstor-core Source File: rockon_delete.py
def delete_rockon():
    """Remove a Rock-on's containers/images (best effort) and its db metadata.

    Takes the Rock-on name from sys.argv[1]; exits with a usage message when
    the argument is missing or the Rock-on is unknown.
    """
    if (len(sys.argv) < 2):
        sys.exit('Delete metadata, containers and images of a Rock-on\n\tUsage: %s <rockon name>' % sys.argv[0])
    name = sys.argv[1]

    try:
        rockon = RockOn.objects.get(name=name)
    except RockOn.DoesNotExist:
        sys.exit('Rock-On(%s) does not exist' % name)

    containers = DContainer.objects.filter(rockon=rockon).order_by('-launch_order')
    for container in containers:
        # Best effort: metadata removal must proceed even if docker cleanup
        # of containers and images fails, so never throw here.
        for docker_args in (['stop', container.name],
                            ['rm', container.name],
                            ['rmi', container.dimage.name]):
            run_command([DOCKER] + docker_args, throw=False)

    rockon.delete()
    print('Rock-On(%s) metadata in the db is deleted' % name)

Example 71

Project: rockstor-core Source File: receiver.py
    def run(self):
        """Receive a btrfs send stream from a remote sender over ZeroMQ.

        Connects a DEALER socket to the local replication broker, ensures the
        destination share/snapshot layout exists, then pipes incoming stream
        messages into a long-running 'btrfs receive' subprocess until the
        sender signals completion or a terminal error. self.msg is updated
        before each phase so _clean_exit_handler can report where a failure
        occurred.
        """
        logger.debug('Id: %s. Starting a new Receiver for meta: %s' % (self.identity, self.meta))
        self.msg = ('Top level exception in receiver')
        latest_snap = None
        with self._clean_exit_handler():
            # Broker connection: identity-tagged DEALER with a small HWM so
            # the sender is flow-controlled by our explicit 'send-more' acks.
            self.law = APIWrapper()
            self.poll = zmq.Poller()
            self.dealer = self.ctx.socket(zmq.DEALER)
            self.dealer.setsockopt_string(zmq.IDENTITY, u'%s' % self.identity)
            self.dealer.set_hwm(10)
            self.dealer.connect('ipc://%s' % settings.REPLICATION.get('ipc_socket'))
            self.poll.register(self.dealer, zmq.POLLIN)

            self.ack = True
            self.msg = ('Failed to get the sender ip for appliance: %s' % self.sender_id)
            self.sender_ip = Appliance.objects.get(uuid=self.sender_id).ip

            # Full replication: create the share and its replica metadata.
            # Incremental: look up the existing metadata and the latest local
            # snapshot, which is sent back so the sender can produce a diff.
            if (not self.incremental):
                self.msg = ('Failed to verify/create share: %s.' % self.sname)
                self.create_share(self.sname, self.dest_pool)

                self.msg = ('Failed to create the replica metadata object '
                            'for share: %s.' % self.sname)
                data = {'share': self.sname,
                        'appliance': self.sender_ip,
                        'src_share': self.src_share, }
                self.rid = self.create_rshare(data)
            else:
                self.msg = ('Failed to retreive the replica metadata object for '
                            'share: %s.' % self.sname)
                rso = ReplicaShare.objects.get(share=self.sname)
                self.rid = rso.id
                # Find and send the current snapshot to the sender. This will
                # be used as the start by btrfs-send diff.
                self.msg = ('Failed to verify latest replication snapshot on the system.')
                latest_snap = self._latest_snap(rso)

            self.msg = ('Failed to create receive trail for rid: %d' % self.rid)
            data = {'snap_name': self.snap_name, }
            self.rtid = self.create_receive_trail(self.rid, data)

            # Rotate: delete the share and promote the oldest replication
            # snapshot to take its place, so the share always reflects the
            # oldest retained state.
            self.msg = ('Failed to promote the oldest Snapshot to Share.')
            oldest_snap = get_oldest_snap(self.snap_dir, self.num_retain_snaps, regex='_replication_')
            if (oldest_snap is not None):
                snap_path = ('%s/%s' % (self.snap_dir, oldest_snap))
                share_path = ('%s%s/%s' %
                              (settings.MNT_PT, self.dest_pool,
                               self.sname))
                pool = Pool.objects.get(name=self.dest_pool)
                remove_share(pool, self.sname, '-1/-1')
                # snapshot must be writable before it can serve as the share.
                set_property(snap_path, 'ro', 'false',
                             mount=False)
                run_command(['/usr/bin/rm', '-rf', share_path],
                            throw=False)
                shutil.move(snap_path, share_path)
                self.delete_snapshot(self.sname, oldest_snap)

            self.msg = ('Failed to prune old Snapshots')
            self._delete_old_snaps(self.sname, self.snap_dir, self.num_retain_snaps + 1)

            self.msg = ('Failed to validate the source share(%s) on sender(uuid: %s '
                        ') Did the ip of the sender change?' %
                        (self.src_share, self.sender_id))
            self.validate_src_share(self.sender_id, self.src_share)

            # Ensure the parent subvolume and snapshot directory exist before
            # starting btrfs receive into them.
            sub_vol = ('%s%s/%s' % (settings.MNT_PT, self.dest_pool, self.sname))
            if (not is_subvol(sub_vol)):
                self.msg = ('Failed to create parent subvolume %s' % sub_vol)
                run_command([BTRFS, 'subvolume', 'create', sub_vol])

            self.msg = ('Failed to create snapshot directory: %s' % self.snap_dir)
            run_command(['/usr/bin/mkdir', '-p', self.snap_dir])
            snap_fp = ('%s/%s' % (self.snap_dir, self.snap_name))

            # If the snapshot already exists, presumably from the previous attempt and
            # the sender tries to send the same, reply back with snap_exists and do not
            # start the btrfs-receive
            if (is_subvol(snap_fp)):
                logger.debug('Id: %s. Snapshot to be sent(%s) already exists. Not '
                             'starting a new receive process' % (self.identity, snap_fp))
                self._send_recv('snap-exists')
                self._sys_exit(0)

            # Long-running receive process; stream chunks are written to its
            # stdin as they arrive from the broker.
            cmd = [BTRFS, 'receive', self.snap_dir]
            self.msg = ('Failed to start the low level btrfs receive command(%s)'
                        '. Aborting.' % cmd)
            self.rp = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)

            self.msg = ('Failed to send receiver-ready')
            rcommand, rmsg = self._send_recv('receiver-ready', latest_snap or '')
            if (rcommand is None):
                logger.error('Id: %s. No response from the broker for '
                             'receiver-ready command. Aborting.' % self.identity)
                self._sys_exit(3)

            term_commands = ('btrfs-send-init-error', 'btrfs-send-unexpected-termination-error',
                             'btrfs-send-nonzero-termination-error',)
            num_tries = 10
            poll_interval = 6000 # 6 seconds
            num_msgs = 0
            t0 = time.time()
            while (True):
                socks = dict(self.poll.poll(poll_interval))
                if (socks.get(self.dealer) == zmq.POLLIN):
                    # reset: each message gets up to num_tries x poll_interval
                    # milliseconds (60 seconds) before we give up.
                    num_tries = 10
                    command, message = self.dealer.recv_multipart()
                    if (command == 'btrfs-send-stream-finished'):
                        # this command concludes fsdata transfer. After this, btrfs-recev
                        # process should be terminated(.communicate).
                        if (self.rp.poll() is None):
                            self.msg = ('Failed to terminate btrfs-recv command')
                            out, err = self.rp.communicate()
                            out = out.split('\n')
                            err = err.split('\n')
                            logger.debug('Id: %s. Terminated btrfs-recv. cmd = %s '
                                         'out = %s err: %s rc: %s' %
                                         (self.identity, cmd, out, err, self.rp.returncode))
                        if (self.rp.returncode != 0):
                            self.msg = ('btrfs-recv exited with unexpected exitcode(%s). ' % self.rp.returncode)
                            raise Exception(self.msg)
                        # success: acknowledge, refresh db state, close trail.
                        self._send_recv('btrfs-recv-finished')
                        self.refresh_share_state()
                        self.refresh_snapshot_state()

                        self.msg = ('Failed to update receive trail for rtid: %d' % self.rtid)
                        self.update_receive_trail(self.rtid, {'status': 'succeeded',})
                        dsize, drate = self.size_report(self.total_bytes_received, t0)
                        logger.debug('Id: %s. Receive complete. Total data '
                                     'transferred: %s. Rate: %s/sec.' %
                                     (self.identity, dsize, drate))
                        self._sys_exit(0)

                    if (command in term_commands):
                        self.msg = ('Terminal command(%s) received from the sender. Aborting.' % command)
                        raise Exception(self.msg)

                    # Regular data chunk: feed it to btrfs receive and ack so
                    # the sender keeps streaming.
                    if (self.rp.poll() is None):
                        self.rp.stdin.write(message)
                        self.rp.stdin.flush()
                        # @todo: implement advanced credit request system.
                        self.dealer.send_multipart([b'send-more', ''])
                        num_msgs += 1
                        self.total_bytes_received += len(message)
                        # periodic progress log every 1000 messages.
                        if (num_msgs == 1000):
                            num_msgs = 0
                            dsize, drate = self.size_report(self.total_bytes_received, t0)
                            logger.debug('Id: %s. Receiver alive. Data '
                                         'transferred: %s. Rate: %s/sec.' %
                                         (self.identity, dsize, drate))
                    else:
                        # btrfs receive exited mid-stream: capture its output,
                        # record the failure on the trail and abort.
                        out, err = self.rp.communicate()
                        out = out.split('\n')
                        err = err.split('\n')
                        logger.error('Id: %s. btrfs-recv died unexpectedly. cmd: %s out: %s. err: %s' %
                                     (self.identity, cmd, out, err))
                        msg = ('Low level system error from btrfs receive '
                               'command. cmd: %s out: %s err: %s for rtid: %s'
                               % (cmd, out, err, self.rtid))
                        data = {'status': 'failed',
                                'error': msg, }
                        self.msg = ('Failed to update receive trail for rtid: %d.' % self.rtid)
                        self.update_receive_trail(self.rtid, data)
                        self.msg = msg
                        raise Exception(self.msg)
                else:
                    # poll timeout with no broker traffic: count down and
                    # terminate after num_tries consecutive silent intervals.
                    num_tries -= 1
                    msg = ('No response received from the broker. '
                           'remaining tries: %d' % num_tries)
                    logger.error('Id: %s. %s' % (self.identity, msg))
                    if (num_tries == 0):
                        self.msg = ('%s. Terminating the receiver.' % msg)
                        raise Exception(self.msg)

Example 72

Project: rockstor-core Source File: active_directory.py
    @transaction.atomic
    def post(self, request, command):
        """Configure, start or stop the Active Directory service.

        command == 'config': validate and persist the AD settings (domain
        resolution, realm discovery, idmap range sanity).
        command == 'start': run authconfig for winbind, join the domain and
        restart samba services.
        command == 'stop': leave the domain and restore plain samba config.
        Any other command returns an empty Response without action.
        """
        with self._handle_exception(request):
            method = 'winbind'
            service = Service.objects.get(name='active-directory')
            if (command == 'config'):
                config = request.data.get('config')
                self._validate_config(config, request)

                #1. Name resolution check
                self._resolve_check(config.get('domain'), request)

                #2. realm discover check?
                #@todo: phase our realm and just use net?
                domain = config.get('domain')
                try:
                    cmd = ['realm', 'discover', '--name-only', domain]
                    o, e, rc = run_command(cmd)
                except Exception, e:
                    e_msg = ('Failed to discover the given(%s) AD domain. '
                             'Error: %s' % (domain, e.__str__()))
                    handle_exception(Exception(e_msg), request)

                # idmap range: expect 'low - high' (three whitespace-separated
                # fields); empty input falls back to the default range.
                default_range = '10000 - 999999'
                idmap_range = config.get('idmap_range', '10000 - 999999')
                idmap_range = idmap_range.strip()
                if (len(idmap_range) > 0):
                    rfields = idmap_range.split()
                    if (len(rfields) != 3):
                        raise Exception('Invalid idmap range. valid format is '
                                        'two integers separated by a -. eg: '
                                        '10000 - 999999')
                    try:
                        rlow = int(rfields[0].strip())
                        rhigh = int(rfields[2].strip())
                    except Exception, e:
                        raise Exception('Invalid idmap range. Numbers in the '
                                        'range must be valid integers. '
                                        'Error: %s.' % e.__str__())
                    if (rlow >= rhigh):
                        raise Exception('Invalid idmap range. Numbers in the '
                                        'range must go from low to high. eg: '
                                        '10000 - 999999')
                else:
                    config['idmap_range'] = default_range

                self._save_config(service, config)

            elif (command == 'start'):
                config = self._config(service, request)
                smbo = Service.objects.get(name='smb')
                smb_config = self._get_config(smbo)
                domain = config.get('domain')
                #1. make sure ntpd is running, or else, don't start.
                self._ntp_check(request)
                #2. Name resolution check?
                self._resolve_check(config.get('domain'), request)

                if (method == 'winbind'):
                    # Build the authconfig invocation piecewise; each group of
                    # flags targets one subsystem (nss/pam/smb/krb5/winbind).
                    cmd = ['/usr/sbin/authconfig', ]
                    #nss
                    cmd += ['--enablewinbind', '--enablewins',]
                    #pam
                    cmd += ['--enablewinbindauth',]
                    #smb
                    cmd += ['--smbsecurity', 'ads', '--smbrealm', domain.upper(),]
                    #kerberos
                    cmd += ['--krb5realm=%s' % domain.upper(),]
                    #winbind
                    cmd += ['--enablewinbindoffline', '--enablewinbindkrb5',
                            '--winbindtemplateshell=/bin/sh',]
                    #general
                    cmd += ['--update', '--enablelocauthorize',]
                    run_command(cmd)
                config['workgroup'] = self._domain_workgroup(domain, method=method)
                self._save_config(service, config)
                update_global_config(smb_config, config)
                self._join_domain(config, method=method)
                # NOTE(review): method is hard-coded to 'winbind' above, so
                # this sssd branch appears unreachable here — confirm intent.
                if (method == 'sssd' and config.get('enumerate') is True):
                    self._update_sssd(domain)

                if (method == 'winbind'):
                    systemctl('winbind', 'enable')
                    systemctl('winbind', 'start')
                systemctl('smb', 'restart')
                systemctl('nmb', 'restart')

            elif (command == 'stop'):
                config = self._config(service, request)
                try:
                    self._leave_domain(config, method=method)
                    smbo = Service.objects.get(name='smb')
                    smb_config = self._get_config(smbo)
                    update_global_config(smb_config)
                    systemctl('smb', 'restart')
                    systemctl('nmb', 'restart')
                except Exception, e:
                    e_msg = ('Failed to leave AD domain(%s). Error: %s' %
                             (config.get('domain'), e.__str__()))
                    handle_exception(Exception(e_msg), request)

            return Response()

Example 73

Project: rockstor-core Source File: ntp_service.py
Function: post
    @transaction.atomic
    def post(self, request, command):
        """
        Execute a command on the ntpd service.

        command == 'config': save the comma-separated time servers, stop
        ntpd, validate the servers with ntpdate, rewrite ntp.conf and
        restart ntpd. Any other command is passed straight to the service
        switch (e.g. 'start'/'stop').
        """
        service = Service.objects.get(name='ntpd')
        if (command == 'config'):
            # Each phase sets its own e_msg so _handle_exception reports the
            # step that failed.
            e_msg = ('Invalid input for time servers. It must be '
                     'comma separated string of hostnames or IPs.')
            with self._handle_exception(request, e_msg):
                config = request.data['config']
                servers = [s.strip() for s in config['server'].split(',')]

            e_msg = ('Error while saving configuration(%s) to the '
                     'database' % config)
            with self._handle_exception(request, e_msg):
                self._save_config(service, config)

            e_msg = ('Error while validating time servers(%s). Check your '
                     'input and try again.' % config['server'])
            with self._handle_exception(request, e_msg):
                # ntpd must be stopped while ntpdate probes the servers.
                self._switch_ntpd('stop')
                cmd = [settings.COMMANDS['ntpdate']] + servers
                out, err, rc = run_command(cmd)
                # NOTE(review): run_command is used with throw=False elsewhere
                # in this codebase, suggesting it raises on non-zero rc by
                # default — if so this rc check is unreachable; confirm.
                if (rc != 0):
                    e_msg = ('Unable to sync time with given servers(%s)' %
                             config['server'])
                    handle_exception(Exception(e_msg), request)

            e_msg = ('Error while saving time server(%s) configuration. Try'
                     ' again' % servers)
            with self._handle_exception(request, e_msg):
                self._update_ntp_conf(servers)
                self._switch_ntpd('start')
        else:
            e_msg = ('Failed to %s NTP service due to system error.' %
                     command)
            with self._handle_exception(request, e_msg):
                self._switch_ntpd(command)
        return Response()

Example 74

Project: rockstor-core Source File: config_backup.py
Function: md5sum
    @staticmethod
    def _md5sum(fp):
        """Return the md5 hex digest of the file at fp via /usr/bin/md5sum."""
        out = run_command(['/usr/bin/md5sum', fp])[0]
        # md5sum prints '<digest>  <filename>'; keep just the digest token
        # from the first output line.
        first_line = out[0]
        return first_line.split()[0]

Example 75

Project: rockstor-core Source File: config_backup.py
Function: post
    @transaction.atomic
    def post(self, request):
        """Dump selected storageadmin/smart_manager models to a gzipped json
        file and record it in the db as a ConfigBackup entry."""
        models = {'storageadmin':
                  ['user', 'group', 'sambashare', 'netatalkshare', 'nfsexport',
                   'nfsexportgroup', 'advancednfsexport', ],
                  'smart_manager':
                  ['service', ], }
        # flatten to the 'app.model' labels dumpdata expects.
        model_list = ['%s.%s' % (app, model)
                      for app in models for model in models[app]]
        logger.debug('model list = %s' % model_list)
        with self._handle_exception(request):
            filename = ('backup-%s.json' %
                        datetime.now().strftime('%Y-%m-%d-%H%M%S'))
            if (not os.path.isdir(self.cb_dir)):
                os.mkdir(self.cb_dir)
            fp = os.path.join(self.cb_dir, filename)
            # Dump the default database first, then smart_manager, into the
            # same file separated by a newline.
            with open(fp, 'w') as dfo:
                call_command('dumpdata', *model_list, stdout=dfo)
                dfo.write('\n')
                call_command('dumpdata', database='smart_manager',
                             *model_list, stdout=dfo)
            # gzip replaces the plain file with filename.gz in place.
            run_command(['/usr/bin/gzip', fp])
            gz_name = ('%s.gz' % filename)
            fp = os.path.join(self.cb_dir, gz_name)
            cbo = ConfigBackup(filename=gz_name, md5sum=self._md5sum(fp),
                               size=os.stat(fp).st_size)
            cbo.save()
            return Response(ConfigBackupSerializer(cbo).data)

Example 76

Project: rockstor-core Source File: rockon_discourse.py
def discourse_install(rockon):
    """Install the Discourse rock-on via discourse_docker's launcher.

    Clones the discourse_docker repo if needed, rewrites the sample
    standalone.yml with the user's custom config (port, SMTP, hostname,
    memory tuning, data paths), then bootstraps and starts the app with
    the repo's launcher script.
    """
    #1. install git
    git = '/usr/bin/git'
    if (not os.path.isfile(git)):
        install_pkg('git')

    #2. prep Discourse.yml
    repo = discourse_repo(rockon)
    if (not os.path.isdir(repo)):
        run_command([git, 'clone',
                     'https://github.com/discourse/discourse_docker.git',
                     repo])

    co = DContainer.objects.get(rockon=rockon)
    po = DPort.objects.get(container=co)
    cc_map = {}
    for cco in DCustomConfig.objects.filter(rockon=rockon):
        cc_map[cco.key] = cco.val
    # give Discourse's db 25% of total RAM, expressed in MB.
    mem = int((psutil.virtual_memory().total / (1024 * 1024)) * .25)

    fd, npath = mkstemp()
    # fix: close the raw OS fd returned by mkstemp (it was previously
    # leaked); the file is reopened by path just below.
    os.close(fd)
    src_yml = '%s/samples/standalone.yml' % repo
    dst_yml = '%s/containers/%s.yml' % (repo, rockon.name.lower())
    with open(src_yml) as sfo, open(npath, 'w') as tfo:
        for line in sfo.readlines():
            if (re.match('  - "80:80"', line) is not None):
                tfo.write('  - "%d:80"\n' % po.hostp)
            elif (re.match('  #db_shared_buffers:', line) is not None):
                tfo.write('  db_shared_buffers: "%dMB"\n' % mem)
            elif (re.match('  #UNICORN_WORKERS:', line) is not None):
                tfo.write('  UNICORN_WORKERS: 3\n')
            elif (re.match('  DISCOURSE_DEVELOPER_EMAILS:', line) is not None):
                tfo.write("  DISCOURSE_DEVELOPER_EMAILS: '%s'\n" % cc_map['admin-email'])
            elif (re.match('  DISCOURSE_HOSTNAME:', line) is not None):
                tfo.write("  DISCOURSE_HOSTNAME: '%s'\n" % cc_map['hostname'])
            elif (re.match('  DISCOURSE_SMTP_ADDRESS:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_ADDRESS: %s\n' % cc_map['smtp-address'])
            elif (re.match('  #DISCOURSE_SMTP_PORT:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_PORT: %s\n' % cc_map['smtp-port'])
            elif (re.match('  #DISCOURSE_SMTP_USER_NAME:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_USER_NAME: %s\n' % cc_map['smtp-username'])
            elif (re.match('  #DISCOURSE_SMTP_PASSWORD:', line) is not None):
                tfo.write('  DISCOURSE_SMTP_PASSWORD: %s\n' % cc_map['smtp-password'])
            # fix: test the more specific .../log/var-log path BEFORE its
            # '/var/discourse/shared/standalone' prefix; re.match anchors at
            # the start of the line, so the shorter prefix pattern would
            # otherwise always win and leave this branch unreachable.
            elif (re.match('      host: /var/discourse/shared/standalone/log/var-log', line) is not None):
                tfo.write('      host: %s/shared/standalone/log/var-log\n' % repo)
            elif (re.match('      host: /var/discourse/shared/standalone', line) is not None):
                # NOTE(review): writes 'shares/standalone' while the log
                # branch writes 'shared/...' — possibly an upstream typo;
                # preserved as-is pending confirmation.
                tfo.write('      host: %s/shares/standalone\n' % repo)
            else:
                tfo.write(line)
    move(npath, dst_yml)

    #3. bootstrap: launcher bootstrap app
    run_command(['%s/launcher' % repo, 'bootstrap', rockon.name.lower()])

    #4. start: launcher start app
    run_command(['%s/launcher' % repo, 'start', rockon.name.lower()])

Example 77

Project: rockstor-core Source File: rockon_discourse.py
def discourse_uninstall(rockon):
    """Destroy the Discourse app (when its repo exists) and remove the repo.

    Returns the (out, err, rc) tuple of the final rm command.
    """
    repo = discourse_repo(rockon)
    if (os.path.isdir(repo)):
        launcher = '%s/launcher' % repo
        run_command([launcher, 'destroy', rockon.name.lower()])
    return run_command(['/usr/bin/rm', '-rf', repo])

Example 78

Project: rockstor-core Source File: rockon_discourse.py
def discourse_stop(rockon):
    """Stop the Discourse app via its repo's launcher script."""
    launcher = '%s/launcher' % discourse_repo(rockon)
    return run_command([launcher, 'stop', rockon.name.lower()])

Example 79

Project: rockstor-core Source File: rockon_discourse.py
def discourse_start(rockon):
    """Start the Discourse app via its repo's launcher script."""
    launcher = '%s/launcher' % discourse_repo(rockon)
    return run_command([launcher, 'start', rockon.name.lower()])

Example 80

Project: rockstor-core Source File: rockon_helpers.py
Function: rm_container
def rm_container(name):
    """Best-effort stop and removal of the named docker container.

    Never raises on docker failure; logs the outcome of the final (rm)
    command and returns the logger call's result (None).
    """
    for action in ('stop', 'rm'):
        o, e, rc = run_command([DOCKER, action, name], throw=False)
    return logger.debug('Attempted to remove a container(%s). out: %s '
                        'err: %s rc: %s.' % (name, o, e, rc))

Example 81

Project: rockstor-core Source File: rockon_helpers.py
def openvpn_install(rockon):
    """Launch the OpenVPN rock-on in three docker steps: a data volume
    container, a one-shot config generation run, then the server itself."""
    # 1. data volume container (launch_order=1)
    vol_container = DContainer.objects.get(rockon=rockon, launch_order=1)
    cmd = list(DCMD) + ['--name', vol_container.name, ]
    cmd.extend(container_ops(vol_container))
    cmd.append(vol_container.dimage.name)
    run_command(cmd)
    # 2. one-shot run of ovpn_genconfig to seed the volume container's data;
    # the custom config value supplies the public endpoint for udp://.
    cco = DCustomConfig.objects.get(rockon=rockon)
    server_container = DContainer.objects.get(rockon=rockon, launch_order=2)
    cmd = list(DCMD) + ['--rm', ]
    cmd.extend(container_ops(server_container))
    cmd.extend([server_container.dimage.name, 'ovpn_genconfig', '-u',
                'udp://%s' % cco.val, ])
    run_command(cmd)
    # 3. the long-running OpenVPN server container with its port mappings.
    cmd = list(DCMD2) + ['--name', server_container.name, ]
    cmd.extend(container_ops(server_container))
    cmd.extend(port_ops(server_container))
    cmd.append(server_container.dimage.name)
    run_command(cmd)

Example 82

Project: rockstor-core Source File: rockon_helpers.py
def owncloud_install(rockon):
    """Launch the ownCloud rock-on's containers (postgres then owncloud).

    Containers are started in launch_order; the postgres container gets its
    db volume permissions fixed and credentials injected, the owncloud
    container gets TLS cert mounts and db credentials. After starting
    postgres, waits (up to ~300s) until it accepts psql connections.
    """
    for c in DContainer.objects.filter(rockon=rockon).order_by('launch_order'):
        # remove any stale container of the same name before relaunching.
        rm_container(c.name)
        cmd = list(DCMD2) + ['--name', c.name, ]
        db_user = DCustomConfig.objects.get(rockon=rockon, key='db_user').val
        db_pw = DCustomConfig.objects.get(rockon=rockon, key='db_pw').val
        if (c.dimage.name == 'postgres'):
            #change permissions on the db volume to 700
            vo = DVolume.objects.get(container=c)
            share_mnt = ('%s%s' % (settings.MNT_PT, vo.share.name))
            run_command(['/usr/bin/chmod', '700', share_mnt])
            cmd.extend(['-e', 'POSTGRES_USER=%s' % db_user, '-e',
                        'POSTGRES_PASSWORD=%s' % db_pw])
        cmd.extend(port_ops(c))
        # wire up --link flags for inter-container links ending at this one.
        for lo in DContainerLink.objects.filter(destination=c):
            cmd.extend(['--link', '%s:%s' % (lo.source.name, lo.name)])
        cmd.extend(vol_ops(c))
        if (c.name == 'owncloud'):
            # mount Rockstor's TLS key/cert into the container and enable
            # HTTPS; pass the db credentials through the environment.
            cmd.extend(['-v', '%s/rockstor.key:/etc/ssl/private/owncloud.key' % settings.CERTDIR,
                        '-v', '%s/rockstor.cert:/etc/ssl/certs/owncloud.crt' % settings.CERTDIR,
                        '-e', 'HTTPS_ENABLED=true'])
            cmd.extend(['-e', 'DB_USER=%s' % db_user, '-e', 'DB_PASS=%s' % db_pw,])
        cmd.append(c.dimage.name)
        logger.debug('docker cmd = %s' % cmd)
        run_command(cmd)
        if (c.dimage.name == 'postgres'):
            #make sure postgres is setup
            # poll with psql \l once per second until it succeeds or ~300
            # seconds have elapsed.
            cur_wait = 0;
            while (True):
                o, e, rc = run_command([DOCKER, 'exec', c.name, 'psql', '-U',
                                        'postgres', '-c', "\l"], throw=False)
                if (rc == 0):
                    break
                if (cur_wait > 300):
                    logger.error('Waited too long(300 seconds) for '
                                 'postgres to initialize for owncloud. giving up.')
                    break
                time.sleep(1)
                cur_wait += 1

Example 83

Project: rockstor-core Source File: rockon_utils.py
Function: container_status
def container_status(name):
    """Return a docker container's state as a descriptive string.

    Parses 'docker inspect' output of the container's .State map and returns
    'started', 'stopped', 'exitcode: N error: ...' for a non-zero exit, or
    'unknown_error' when inspection/parsing fails.

    :param name: container name passed to docker inspect
    """
    state = 'unknown_error'
    try:
        o, e, rc = run_command([DOCKER, 'inspect', '-f',
                                '{{range $key, $value := .State}}{{$key}}:{{$value}},{{ end }}', name])
        state_d = {}
        for i in o[0].split(','):
            fields = i.split(':')
            if (len(fields) >= 2):
                # values may themselves contain ':' (e.g. timestamps), so
                # rejoin everything after the first separator.
                state_d[fields[0]] = ':'.join(fields[1:])
        if ('Running' in state_d):
            if (state_d['Running'] == 'true'):
                state = 'started'
            else:
                state = 'stopped'
                if ('Error' in state_d and 'ExitCode' in state_d):
                    exitcode = int(state_d['ExitCode'])
                    if (exitcode != 0):
                        state = 'exitcode: %d error: %s' % (exitcode, state_d['Error'])
    except Exception as e:
        logger.exception(e)
    # fix: the original returned from a finally block, which silently
    # discarded any exception raised inside the except handler; a plain
    # return after try/except keeps the intended fallback without that
    # hazard (the early return inside try was redundant for the same
    # reason and is dropped).
    return state

Example 84

Project: rockstor-core Source File: tls_certificate.py
Function: post
    @transaction.atomic
    def post(self, request):
        """Install a TLS certificate/key pair for the web UI.

        Persists the pair as the single TLSCertificate row, verifies that the
        private key and certificate share the same RSA modulus via openssl,
        then moves them into place and restarts nginx.
        """
        with self._handle_exception(request):
            name = request.data.get('name')
            cert = request.data.get('cert')
            key = request.data.get('key')
            # keep exactly one certificate row: drop all others, then
            # create-or-update the named one.
            TLSCertificate.objects.filter().exclude(name=name).delete()
            co, created = TLSCertificate.objects.get_or_create(name=name, defaults={'certificate': cert, 'key': key})
            if (not created):
                co.certificate = cert
                co.key = key
                co.save()
            # NOTE(review): both mkstemp fds are bound to the same 'fo' name
            # and neither is ever closed — looks like an fd leak; the files
            # are reopened by path below. Confirm and fix separately.
            fo, kpath = mkstemp()
            fo, cpath = mkstemp()
            with open(kpath, 'w') as kfo, open(cpath, 'w') as cfo:
                kfo.write(key)
                cfo.write(cert)
            # modulus of the private key; failure means the key is invalid.
            try:
                o, e, rc = run_command([OPENSSL, 'rsa', '-noout', '-modulus',
                                        '-in', kpath])
            except Exception, e:
                logger.exception(e)
                e_msg = ('RSA key modulus could not be verified for the given '
                         'Private Key. Correct your input and try again')
                handle_exception(Exception(e_msg), request)
            # modulus of the certificate, for comparison against the key's.
            try:
                o2, e, rc = run_command([OPENSSL, 'x509', '-noout',
                                         '-modulus', '-in', cpath])
            except Exception, e:
                logger.exception(e)
                e_msg = ('RSA key modulus could not be verified for the given '
                         'Certificate. Correct your input and try again')
                handle_exception(Exception(e_msg), request)
            # matching moduli prove the cert was issued for this key.
            if (o[0] != o2[0]):
                e_msg = ('Given Certificate and the Private Key do not match. '
                         'Correct your input and try again')
                handle_exception(Exception(e_msg), request)
            move(cpath, '%s/rockstor.cert' % settings.CERTDIR)
            move(kpath, '%s/rockstor.key' % settings.CERTDIR)
            superctl('nginx', 'restart')
            return Response(TLSCertificateSerializer(co).data)

Example 85

Project: rockstor-core Source File: nut.py
def configure_nut(config):
    """
    Apply the given NUT configuration to the system.

    Fills in any defaults the front end omitted, pre-processes a deep copy
    of the config into per-file option groups, writes each group out to its
    config file, and then fixes file permissions and ownership so the nut
    user can read them. Finally establishes Rockstor defaults for upssched.

    :param config: sanitized config from the input form
    :return: None
    """
    # Assert defaults for any options the front end failed to supply.
    establish_config_defaults(config)

    # Pre-processing mutates the config (re-writing indexes, surrounding
    # passwords and descriptions in ""), so hand it a deep copy to keep the
    # front end's stored 'memory' of the config intact.
    nut_config_by_file = pre_process_nut_config(deepcopy(config))

    # Write each file's options in order. These per-file writes are
    # independent of one another ("pleasingly parallel"), so they could be
    # executed on their own threads if ever needed.
    for conf_file, conf_options in nut_config_by_file.items():
        update_config_in(conf_file, conf_options, REMARK_OUT,
                         settings.NUT_HEADER)
        # Our file editing mechanism creates a temp file as root and copies
        # it over, leaving root-only (rw -- --) permissions. Nut mostly
        # drops root privileges on startup and runs as the nut user, so
        # every file we edit must be readable by nut: upsmon.conf and
        # upssched.conf (installed by nut-client), plus ups.conf which
        # upsdrvctl, any drivers and upsd all consult. A CentOS install
        # ships all nut config files as 640, so restore that mode here.
        run_command([CHMOD, '640', conf_file])
        # N.B. as of around Dec 2015 an os update changed behaviour such
        # that root.root became the owner group of freshly written files,
        # which takes effect on instantiating a fresh config and leaves the
        # nut user without read access (a non-working nut subsystem).
        # Fixed by chown-ing root.nut on all files we edit as a matter of
        # course.
        run_command([CHOWN, 'root.nut', conf_file])
    config_upssched()
See More Examples - Go to Next Page
Page 1 Page 2 Selected