Here are examples of the Python API six.moves.map, taken from open source projects.
178 Examples
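Before the individual examples, a minimal sketch of what six.moves.map does: it resolves to itertools.imap on Python 2 and to the built-in map on Python 3, so in both cases it returns a lazy iterator rather than a list. Wrap it in list() whenever the result needs indexing, len(), or more than one pass.

from six.moves import map  # itertools.imap on Python 2, builtin map on Python 3

squares = map(lambda x: x * x, range(5))
print(list(squares))  # [0, 1, 4, 9, 16]
print(list(squares))  # [] -- the iterator is already exhausted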
Example 151
Project: pymatgen Source File: plotter.py
def get_pourbaix_plot_colorfill_by_element(self, limits=None, title="",
                                           label_domains=True, element=None):
    """
    Color domains by element.
    """
    from matplotlib.patches import Polygon

    entry_dict_of_multientries = collections.defaultdict(list)
    plt = get_publication_quality_plot(16)
    optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
                    '#FF8080', '#DCDCDC', '#800000', '#FF8000']
    optim_font_color = ['#FFFFA0', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
                        '#007F7F', '#232323', '#7FFFFF', '#007FFF']
    hatch = ['/', '\\', '|', '-', '+', 'o', '*']
    (stable, unstable) = self.pourbaix_plot_data(limits)
    num_of_overlaps = {key: 0 for key in stable.keys()}
    for entry in stable:
        if isinstance(entry, MultiEntry):
            for e in entry.entrylist:
                if element in e.composition.elements:
                    entry_dict_of_multientries[e.name].append(entry)
                    num_of_overlaps[entry] += 1
        else:
            entry_dict_of_multientries[entry.name].append(entry)
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])

    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    from pymatgen import Composition, Element
    from pymatgen.core.ion import Ion

    def len_elts(entry):
        # Number of elements other than H and O in the entry's composition.
        if "(s)" in entry:
            comp = Composition(entry[:-3])
        else:
            comp = Ion.from_formula(entry)
        return len([el for el in comp.elements
                    if el not in [Element("H"), Element("O")]])

    # sorted() works on both Python 2 and 3 (dict.keys() is a view on 3).
    sorted_entry = sorted(entry_dict_of_multientries.keys(), key=len_elts)
    i = -1
    # 'A'..'Z'; wrapped in list() so the labels can be indexed below.
    label_chr = list(map(chr, range(65, 91)))
    for entry in sorted_entry:
        color_indx = 0
        x_coord = 0.0
        y_coord = 0.0
        npts = 0
        i += 1
        for e in entry_dict_of_multientries[entry]:
            hc = 0
            fc = 0
            bc = 0
            xy = self.domain_vertices(e)
            c = self.get_center(stable[e])
            x_coord += c[0]
            y_coord += c[1]
            npts += 1
            color_indx = i
            if "(s)" in entry:
                comp = Composition(entry[:-3])
            else:
                comp = Ion.from_formula(entry)
            if len([el for el in comp.elements
                    if el not in [Element("H"), Element("O")]]) == 1:
                # Wrap around when there are more entries than colors.
                color_indx %= len(optim_colors)
                patch = Polygon(xy, facecolor=optim_colors[color_indx],
                                closed=True, lw=3.0, fill=True)
                bc = optim_colors[color_indx]
            else:
                color_indx %= len(hatch)
                patch = Polygon(xy, hatch=hatch[color_indx],
                                closed=True, lw=3.0, fill=False)
                hc = hatch[color_indx]
            ax.add_patch(patch)
        xy_center = (x_coord / npts, y_coord / npts)
        if label_domains:
            color_indx %= len(optim_colors)
            fc = optim_font_color[color_indx]
            if bc and not hc:
                bbox = dict(boxstyle="round", fc=fc)
            if hc and not bc:
                bc = 'k'
                fc = 'w'
                bbox = dict(boxstyle="round", hatch=hc, fill=False)
            if bc and hc:
                bbox = dict(boxstyle="round", hatch=hc, fc=fc)
            # bbox.set_path_effects([PathEffects.withSimplePatchShadow()])
            plt.annotate(latexify_ion(latexify(entry)), xy_center,
                         color=bc, fontsize=30, bbox=bbox)
            # plt.annotate(label_chr[i], xy_center,
            #              color=bc, fontsize=30, bbox=bbox)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
Example 152
Project: pymatgen Source File: bader_caller.py
def __init__(self, chgcar_filename, potcar_filename=None):
    """
    Initializes the Bader caller.

    Args:
        chgcar_filename: The filename of the CHGCAR.
        potcar_filename: Optional: the filename of the corresponding
            POTCAR file. Used for calculating the charge transfer. If
            None, the get_charge_transfer method will raise a ValueError.
    """
    self.chgcar = Chgcar.from_file(chgcar_filename)
    self.potcar = Potcar.from_file(potcar_filename) \
        if potcar_filename is not None else None
    self.natoms = self.chgcar.poscar.natoms
    chgcarpath = os.path.abspath(chgcar_filename)

    with ScratchDir(".") as temp_dir:
        shutil.copy(chgcarpath, os.path.join(temp_dir, "CHGCAR"))

        rs = subprocess.Popen(["bader", "CHGCAR"],
                              stdout=subprocess.PIPE,
                              stdin=subprocess.PIPE, close_fds=True)
        rs.communicate()
        data = []
        with open("ACF.dat") as f:
            raw = f.readlines()
            headers = [s.lower() for s in raw.pop(0).split()]
            raw.pop(0)
            while True:
                l = raw.pop(0).strip()
                if l.startswith("-"):
                    break
                vals = map(float, l.split()[1:])
                data.append(dict(zip(headers[1:], vals)))
            for l in raw:
                toks = l.strip().split(":")
                if toks[0] == "VACUUM CHARGE":
                    self.vacuum_charge = float(toks[1])
                elif toks[0] == "VACUUM VOLUME":
                    self.vacuum_volume = float(toks[1])
                elif toks[0] == "NUMBER OF ELECTRONS":
                    self.nelectrons = float(toks[1])
        self.data = data
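The vals = map(float, ...) line above stays lazy on Python 3, which is safe here only because dict(zip(...)) consumes the iterator immediately. A standalone sketch of the same header/row pattern, with hypothetical column names:

from six.moves import map, zip

headers = ['#', 'x', 'y', 'charge']    # hypothetical ACF.dat-style columns
row = '1  0.00  0.25  8.0'
vals = map(float, row.split()[1:])     # lazy iterator on Python 3
record = dict(zip(headers[1:], vals))  # zip drains the map iterator here
print(record)  # {'x': 0.0, 'y': 0.25, 'charge': 8.0}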
Example 153
def _magic_parser(stream, magic):
    """
    Parse the section with the SCF cycle.

    Returns:
        dict where the keys are the names of the columns and
        the values are lists of numbers. None if no section was found.

    .. warning::
        The parser is very fragile and should be replaced by YAML.
    """
    # Example (SCF cycle; a similar format is used for phonons):
    #
    #  iter   Etot(hartree)     deltaE(h)  residm     vres2
    #  ETOT 1 -8.8604027880849  -8.860E+00 2.458E-02  3.748E+00
    #  At SCF step 5 vres2 = 3.53E-08 < tolvrs= 1.00E-06 =>converged.
    in_doc, fields = 0, None

    for line in stream:
        line = line.strip()

        if line.startswith(magic):
            keys = line.split()
            fields = collections.OrderedDict((k, []) for k in keys)

        if fields is not None:
            #print(line)
            in_doc += 1
            if in_doc == 1:
                continue

            # End of the section.
            if not line:
                break

            tokens = list(map(float, line.split()[1:]))
            assert len(tokens) == len(keys)
            for l, v in zip(fields.values(), tokens):
                l.append(v)

    # Convert values to numpy arrays.
    if fields:
        return collections.OrderedDict([(k, np.array(v))
                                        for k, v in fields.items()])
    else:
        return None
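Unlike the previous example, this parser wraps map in list() because it checks the length of the result right away; on Python 3 a bare map object has no len(). A tiny demonstration:

from six.moves import map

# len() on a bare map object raises TypeError on Python 3,
# so the parser materializes it with list() before checking lengths.
tokens = list(map(float, '1 -8.86 0.02'.split()))
assert len(tokens) == 3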
Example 154
Project: pymatgen Source File: wrappers.py
def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):
    """
    Merge GKK files, return the absolute path of the new database.

    Args:
        gswfk_file: Ground-state WFK filename.
        dfpt_files: List of 1WF files to merge.
        gkk_files: List of GKK files to merge.
        out_gkk: Name of the output GKK file.
        binascii: Integer flag. 0 --> binary output, 1 --> ascii formatted output.
    """
    raise NotImplementedError("This method should be tested")
    #out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)

    # We work with absolute paths.
    gswfk_file = os.path.abspath(gswfk_file)
    dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
    gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]

    print("Will merge %d 1WF files, %d GKK files in output %s" %
          (len(dfpt_files), len(gkk_files), out_gkk))

    if self.verbose:
        for i, f in enumerate(dfpt_files): print(" [%d] 1WF %s" % (i, f))
        for i, f in enumerate(gkk_files): print(" [%d] GKK %s" % (i, f))

    self.stdin_fname, self.stdout_fname, self.stderr_fname = \
        map(os.path.join, 3 * [workdir], ["mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr"])

    inp = cStringIO()
    inp.write(out_gkk + "\n")        # Name of the output file
    inp.write(str(binascii) + "\n")  # Integer flag: 0 --> binary output, 1 --> ascii formatted output
    inp.write(gswfk_file + "\n")     # Name of the ground-state wavefunction file WF

    #dims = len(dfpt_files, gkk_files, ?)
    # FIXME: dims is never computed, so the line below raises NameError;
    # this is consistent with the NotImplementedError guard at the top.
    dims = " ".join([str(d) for d in dims])
    inp.write(dims + "\n")  # Number of 1WF files, of GKK files, and of 1WF files in all the GKK files

    # Names of the 1WF files...
    for fname in dfpt_files:
        inp.write(fname + "\n")

    # Names of the GKK files...
    for fname in gkk_files:
        inp.write(fname + "\n")

    self.stdin_data = [s for s in inp.getvalue()]
    with open(self.stdin_fname, "w") as fh:
        fh.writelines(self.stdin_data)

    self.execute(workdir)

    return out_gkk
Example 155
Project: pymatgen Source File: wrappers.py
def merge(self, workdir, ddb_files, out_ddb, description, delete_source_ddbs=True):
    """Merge DDB files, return the absolute path of the new database in workdir."""
    # We work with absolute paths.
    ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
    if not os.path.isabs(out_ddb):
        out_ddb = os.path.join(os.path.abspath(workdir), os.path.basename(out_ddb))

    if self.verbose:
        print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
        for i, f in enumerate(ddb_files):
            print(" [%d] %s" % (i, f))

    # Handle the case of a single file since mrgddb uses 1 to denote GS files!
    if len(ddb_files) == 1:
        with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
            for line in inh:
                out.write(line)
        return out_ddb

    self.stdin_fname, self.stdout_fname, self.stderr_fname = \
        map(os.path.join, 3 * [os.path.abspath(workdir)],
            ["mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr"])

    inp = cStringIO()
    inp.write(out_ddb + "\n")              # Name of the output file.
    inp.write(str(description) + "\n")     # Description.
    inp.write(str(len(ddb_files)) + "\n")  # Number of input DDBs.

    # Names of the DDB files.
    for fname in ddb_files:
        inp.write(fname + "\n")

    self.stdin_data = [s for s in inp.getvalue()]
    with open(self.stdin_fname, "wt") as fh:
        fh.writelines(self.stdin_data)

    retcode = self.execute(workdir, exec_args=['--nostrict'])
    if retcode == 0 and delete_source_ddbs:
        # Remove ddb files.
        for f in ddb_files:
            try:
                os.remove(f)
            except IOError:
                pass

    return out_ddb
Example 156
Project: pymatgen Source File: wrappers.py
def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
    """
    Merge POT files containing 1st order DFPT potential,
    return the absolute path of the new database in workdir.
    """
    # We work with absolute paths.
    pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
    if not os.path.isabs(out_dvdb):
        out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))

    if self.verbose:
        print("Will merge %d files into output DVDB %s" % (len(pot_files), out_dvdb))
        for i, f in enumerate(pot_files):
            print(" [%d] %s" % (i, f))

    # Handle the case of a single file since mrgddb uses 1 to denote GS files!
    if len(pot_files) == 1:
        with open(pot_files[0], "r") as inh, open(out_dvdb, "w") as out:
            for line in inh:
                out.write(line)
        return out_dvdb

    self.stdin_fname, self.stdout_fname, self.stderr_fname = \
        map(os.path.join, 3 * [workdir],
            ["mrgdvdb.stdin", "mrgdvdb.stdout", "mrgdvdb.stderr"])

    inp = cStringIO()
    inp.write(out_dvdb + "\n")             # Name of the output file.
    inp.write(str(len(pot_files)) + "\n")  # Number of input POT files.

    # Names of the POT files.
    for fname in pot_files:
        inp.write(fname + "\n")

    self.stdin_data = [s for s in inp.getvalue()]
    with open(self.stdin_fname, "wt") as fh:
        fh.writelines(self.stdin_data)

    retcode = self.execute(workdir)
    if retcode == 0 and delete_source:
        # Remove pot files.
        for f in pot_files:
            try:
                os.remove(f)
            except IOError:
                pass

    return out_dvdb
Example 157
Project: fastqp Source File: plots.py
def qualmap(qualities, filename, fig_kw):
    fig = plt.figure(**fig_kw)
    ax = fig.add_subplot(111)
    values = map(Counter, tuple(qualities.values()))
    counts = Counter()
    for value in values:
        counts = counts + value
    max_qual = max(tuple(counts.keys()))
    max_pos = max(tuple(qualities.keys()))
    heat_map = np.zeros((max_qual, max_pos))
    for p in range(max_pos):
        for q in range(max_qual):
            try:
                heat_map[q][p] = qualities[p+1][q+1]
            except KeyError:
                pass
    imax = ax.imshow(np.array(heat_map), cmap=viridis_cm, origin='lower',
                     interpolation='none', aspect='auto')
    ax.axhline(y=10, linestyle=':', color='gray')
    ax.axhline(y=20, linestyle=':', color='gray')
    ax.axhline(y=30, linestyle=':', color='gray')
    cbar = fig.colorbar(imax, orientation='horizontal', shrink=0.5)
    cbar_labels = [item.get_text() for item in cbar.ax.get_xticklabels()]
    cbar.ax.set_xticklabels(cbar_labels, rotation=45)
    cbar.ax.set_title('')
    ax.set_title('Quality score heatmap')
    ax.set_xlabel('Cycle')
    ax.set_ylabel('Sum of Phred qualities')
    add_figure_to_archive(fig, filename, 'quality_score_heatmap.png')
Example 158
Project: neural-network-animation Source File: afm.py
def _parse_char_metrics(fh):
    """
    Return a character metric dictionary. Keys are the ASCII num of
    the character, values are a (*wx*, *name*, *bbox*) tuple, where
    *wx* is the character width, *name* is the postscript language
    name, and *bbox* is a (*llx*, *lly*, *urx*, *ury*) tuple.

    This function is incomplete per the standard, but thus far parses
    all the sample afm files tried.
    """
    ascii_d = {}
    name_d = {}
    while 1:
        line = fh.readline()
        if not line:
            break
        line = line.rstrip()
        if line.startswith(b'EndCharMetrics'):
            return ascii_d, name_d
        vals = line.split(b';')[:4]
        if len(vals) != 4:
            raise RuntimeError('Bad char metrics line: %s' % line)
        num = _to_int(vals[0].split()[1])
        wx = _to_float(vals[1].split()[1])
        name = vals[2].split()[1]
        name = name.decode('ascii')
        bbox = _to_list_of_floats(vals[3][2:])
        bbox = list(map(int, bbox))
        # Workaround: If the character name is 'Euro', give it the
        # corresponding character code, according to WinAnsiEncoding (see PDF
        # Reference).
        if name == 'Euro':
            num = 128
        if num != -1:
            ascii_d[num] = (wx, name, bbox)
        name_d[name] = (wx, bbox)
    raise RuntimeError('Bad parse')
Example 159
Project: neural-network-animation Source File: _subplots.py
def __init__(self, fig, *args, **kwargs):
    """
    *fig* is a :class:`matplotlib.figure.Figure` instance.

    *args* is the tuple (*numRows*, *numCols*, *plotNum*), where
    the array of subplots in the figure has dimensions *numRows*,
    *numCols*, and where *plotNum* is the number of the subplot
    being created. *plotNum* starts at 1 in the upper left
    corner and increases to the right.

    If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
    decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
    """
    self.figure = fig

    if len(args) == 1:
        if isinstance(args[0], SubplotSpec):
            self._subplotspec = args[0]
        else:
            try:
                s = str(int(args[0]))
                rows, cols, num = list(map(int, s))
            except ValueError:
                raise ValueError(
                    'Single argument to subplot must be a 3-digit '
                    'integer')
            self._subplotspec = GridSpec(rows, cols)[num - 1]
            # num - 1 for converting from MATLAB to python indexing
    elif len(args) == 3:
        rows, cols, num = args
        rows = int(rows)
        cols = int(cols)
        if isinstance(num, tuple) and len(num) == 2:
            num = [int(n) for n in num]
            self._subplotspec = GridSpec(rows, cols)[num[0] - 1:num[1]]
        else:
            if num < 0 or num > rows*cols:
                raise ValueError(
                    "num must be 0 <= num <= {maxn}, not {num}".format(
                        maxn=rows*cols, num=num))
            if num == 0:
                warnings.warn("The use of 0 (which ends up being the "
                              "_last_ sub-plot) is deprecated in 1.4 "
                              "and will raise an error in 1.5",
                              mplDeprecation)
            self._subplotspec = GridSpec(rows, cols)[int(num) - 1]
            # num - 1 for converting from MATLAB to python indexing
    else:
        raise ValueError('Illegal argument(s) to subplot: %s' % (args,))

    self.update_params()

    # _axes_class is set in the subplot_class_factory
    self._axes_class.__init__(self, fig, self.figbox, **kwargs)
Example 160
Project: neural-network-animation Source File: test_dates.py
def test_auto_date_locator():
    def _create_auto_date_locator(date1, date2):
        locator = mdates.AutoDateLocator()
        locator.create_dummy_axis()
        locator.set_view_interval(mdates.date2num(date1),
                                  mdates.date2num(date2))
        return locator

    d1 = datetime.datetime(1990, 1, 1)
    results = ([datetime.timedelta(weeks=52 * 200),
                ['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',
                 '2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',
                 '2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',
                 '2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',
                 '2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']
                ],
               [datetime.timedelta(weeks=52),
                ['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',
                 '1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',
                 '1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',
                 '1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',
                 '1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',
                 '1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']
                ],
               [datetime.timedelta(days=140),
                ['1990-01-06 00:00:00+00:00', '1990-01-27 00:00:00+00:00',
                 '1990-02-17 00:00:00+00:00', '1990-03-10 00:00:00+00:00',
                 '1990-03-31 00:00:00+00:00', '1990-04-21 00:00:00+00:00',
                 '1990-05-12 00:00:00+00:00']
                ],
               [datetime.timedelta(days=40),
                ['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',
                 '1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',
                 '1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']
                ],
               [datetime.timedelta(hours=40),
                ['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',
                 '1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',
                 '1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',
                 '1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',
                 '1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',
                 '1990-01-02 16:00:00+00:00']
                ],
               [datetime.timedelta(minutes=20),
                ['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',
                 '1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',
                 '1990-01-01 00:20:00+00:00']
                ],
               [datetime.timedelta(seconds=40),
                ['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',
                 '1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',
                 '1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',
                 '1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',
                 '1990-01-01 00:00:40+00:00']
                ],
               [datetime.timedelta(microseconds=1500),
                ['1989-12-31 23:59:59.999507+00:00',
                 '1990-01-01 00:00:00+00:00',
                 '1990-01-01 00:00:00.000502+00:00',
                 '1990-01-01 00:00:00.001005+00:00',
                 '1990-01-01 00:00:00.001508+00:00']
                ],
               )

    for t_delta, expected in results:
        d2 = d1 + t_delta
        locator = _create_auto_date_locator(d1, d2)
        assert_equal(list(map(str, mdates.num2date(locator()))),
                     expected)
Example 161
Project: chainer-faster-rcnn Source File: test_anchor_target_layer.py
def test_generate_anchors(self):
    anchor_target_layer = AnchorTargetLayer()
    ret = np.array([[-83., -39., 100., 56.],
                    [-175., -87., 192., 104.],
                    [-359., -183., 376., 200.],
                    [-55., -55., 72., 72.],
                    [-119., -119., 136., 136.],
                    [-247., -247., 264., 264.],
                    [-35., -79., 52., 96.],
                    [-79., -167., 96., 184.],
                    [-167., -343., 184., 360.]]) - 1
    self.assertEqual(anchor_target_layer.anchors.shape, ret.shape)
    np.testing.assert_array_equal(anchor_target_layer.anchors, ret)

    ret = self.anchor_target_layer.anchors
    min_x = ret[:, 0].min()
    min_y = ret[:, 1].min()
    max_x = ret[:, 2].max()
    max_y = ret[:, 3].max()
    canvas = np.zeros(
        (int(abs(min_y) + max_y) + 1,
         int(abs(min_x) + max_x) + 1), dtype=np.uint8)
    ret[:, 0] -= min_x
    ret[:, 2] -= min_x
    ret[:, 1] -= min_y
    ret[:, 3] -= min_y
    for anchor in ret:
        anchor = list(six.moves.map(int, anchor))
        cv.rectangle(
            canvas, (anchor[0], anchor[1]), (anchor[2], anchor[3]), 255)
    cv.imwrite('tests/anchors.png', canvas)
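Both this test and the next coerce float anchor coordinates to int before handing them to OpenCV, which expects integer pixel coordinates; the list() wrapper is required because the result is indexed afterwards. In isolation:

from six.moves import map

anchor = [-83.5, -39.0, 100.2, 56.9]  # hypothetical box corners
anchor = list(map(int, anchor))       # list() so anchor[0] etc. work
print((anchor[0], anchor[1]), (anchor[2], anchor[3]))
# (-83, -39) (100, 56)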
Example 162
Project: chainer-faster-rcnn Source File: test_anchor_target_layer.py
def test_generate_shifts(self):
    for i in range(len(self.shifts)):
        self.assertEqual(self.shifts[i][0], self.shifts[i][2])
        self.assertEqual(self.shifts[i][1], self.shifts[i][3])
    i = 0
    for y in range(self.height):
        for x in range(self.width):
            xx = x * self.feat_stride
            yy = y * self.feat_stride
            self.assertEqual(len(self.shifts[i]), 4)
            self.assertEqual(self.shifts[i][0], xx)
            self.assertEqual(self.shifts[i][1], yy)
            self.assertEqual(self.shifts[i][2], xx)
            self.assertEqual(self.shifts[i][3], yy)
            i += 1
    self.assertEqual(i, len(self.shifts))

    min_x = self.shifts[:, 0].min()
    min_y = self.shifts[:, 1].min()
    max_x = self.shifts[:, 2].max()
    max_y = self.shifts[:, 3].max()
    canvas = np.zeros(
        (int(abs(min_y) + max_y) + 1,
         int(abs(min_x) + max_x) + 1), dtype=np.uint8)
    shifts = self.shifts.copy()
    shifts[:, 0] -= min_x
    shifts[:, 2] -= min_x
    shifts[:, 1] -= min_y
    shifts[:, 3] -= min_y
    for anchor in shifts:
        anchor = list(six.moves.map(int, anchor))
        cv.circle(canvas, (anchor[0], anchor[1]), 1, 255, -1)
    cv.imwrite('tests/shifts.png', canvas)
Example 163
Project: fjord Source File: test_six.py
def test_map():
    from six.moves import map
    assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
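six.advance_iterator is six's portable next(); on any modern interpreter it is simply the built-in next, so the assertion above pulls the first element (0 + 1) from the lazy map. An equivalent sketch:

import six
from six.moves import map

it = map(lambda x: x + 1, range(2))
assert six.advance_iterator(it) == 1  # first element
assert next(it) == 2                  # same thing with the builtin next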
Example 164
Project: python-armet Source File: resources.py
def segment_query(seg, attributes):
    # Get the attribute in question.
    attribute = attributes[seg.path[0]]

    # Replace the initial path segment with the expanded
    # attribute path.
    seg.path[0:1] = attribute.path.split('.')

    # Booleans should use `exact` rather than `iexact`.
    if attribute.type is bool:
        op = '__exact'
    else:
        op = OPERATOR_MAP[seg.operator]

    # Build the path from the segment.
    path = '__'.join(seg.path) + op

    # Construct a Q-object from the segment.
    return reduce(operator.or_,
                  map(lambda x: Q((path, x)),
                      map(attribute.try_clean, seg.values)))
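The reduce/map pair above ORs together one Q object per filter value. The fold itself is easier to see with plain sets, where operator.or_ is union (a minimal standalone sketch, no Django required):

import operator
from functools import reduce
from six.moves import map

values = [1, 2, 3]
combined = reduce(operator.or_, map(lambda x: {x}, values))
print(combined)  # {1, 2, 3} -- pairwise | applied across the mapped items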
Example 165
Project: python-armet Source File: resources.py
def build_segment(model, segment, attr, clean):
    # Get the associated column for the initial path.
    path = segment.path.pop(0)
    col = getattr(model, path)

    # Resolve the inner-most path segment.
    if segment.path:
        if col.impl.accepts_scalar_loader:
            return col.has(build_segment(
                col.property.mapper.class_, segment, attr, clean))
        else:
            try:
                return col.any(build_segment(
                    col.property.mapper.class_, deepcopy(segment),
                    attr, clean))
            except InvalidRequestError:
                return col.has(build_segment(
                    col.property.mapper.class_, deepcopy(segment),
                    attr, clean))

    # Determine the operator.
    op = OPERATOR_MAP[segment.operator]

    # Apply the operator to the values and return the expression.
    qs = reduce(operator.or_,
                map(partial(op, col),
                    map(lambda x: clean(attr.try_clean(x)), segment.values)))

    # Apply the negation.
    if segment.negated:
        qs = ~qs

    # Return our query object.
    return qs
Example 166
def __str__(self):
    return '(' + ' and '.join(six.moves.map(str, self.rule_targets)) + ')'
Example 167
def __str__(self):
    return '(' + ' or '.join(six.moves.map(str, self.rule_targets)) + ')'
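Examples 166 and 167 show the most common six.moves.map idiom: stringifying a mixed sequence before str.join. join accepts any iterable, so the lazy map object needs no list() wrapper:

from six.moves import map

targets = ['build', 42, None]  # hypothetical rule targets
print('(' + ' and '.join(map(str, targets)) + ')')
# (build and 42 and None)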
Example 168
Project: designate Source File: __init__.py
def create_zone(self, context, zone):
    # Get a new session
    sess = self.get_session()

    try:
        sess.begin()

        def _parse_master(master):
            return '%s:%d' % (master.host, master.port)
        masters = six.moves.map(_parse_master, self.masters)

        domain_values = {
            'designate_id': zone['id'],
            'name': zone['name'].rstrip('.'),
            'master': ','.join(masters),
            'type': 'SLAVE',
            'account': context.tenant
        }

        self._create(sess, tables.domains, domain_values)
    except DBDuplicateEntry:
        LOG.debug('Successful create of %s in pdns, zone already exists'
                  % zone['name'])
        # If create fails because the zone exists, don't reraise
        pass
    except Exception:
        with excutils.save_and_reraise_exception():
            sess.rollback()
    else:
        sess.commit()

    self.mdns_api.notify_zone_changed(
        context, zone, self.host, self.port, self.timeout,
        self.retry_interval, self.max_retries, self.delay)
Example 169
@classmethod
def get_history(cls, transaction, nodes_ids=None, statuses=None,
                tasks_names=None, include_summary=False):
    """Get deployment tasks history.

    :param transaction: task SQLAlchemy object
    :type transaction: models.Task
    :param nodes_ids: filter by node IDs
    :type nodes_ids: list[int]|None
    :param statuses: filter by statuses
    :type statuses: list[basestring]|None
    :param tasks_names: filter by deployment graph task names
    :type tasks_names: list[basestring]|None
    :param include_summary: bool flag to include summary
    :type include_summary: bool
    :returns: tasks history
    :rtype: list[dict]
    """
    nodes_ids = nodes_ids and frozenset(nodes_ids)
    statuses = statuses and frozenset(statuses)
    tasks_names = tasks_names and frozenset(tasks_names)

    task_parameters_by_name = {}
    visited_tasks = set()
    tasks_snapshot = Transaction.get_tasks_snapshot(transaction)
    history = []

    if tasks_snapshot:
        # make a copy for each task to avoid modification
        for task in six.moves.map(dict, tasks_snapshot):
            # remove ambiguous id field
            task.pop('id', None)
            task_parameters_by_name[task['task_name']] = task
    else:
        logger.warning('No tasks snapshot is defined in given '
                       'transaction, probably it is a legacy '
                       '(Fuel<10.0) or malformed.')

    query = None
    if include_summary:
        query = cls.options(query, undefer('summary'))
    history_records = cls.filter_by(query, task_id=transaction.id)

    if tasks_names:
        history_records = cls.filter_by_list(
            history_records, 'deployment_graph_task_name', tasks_names
        )
    if nodes_ids:
        history_records = cls.filter_by_list(
            history_records, 'node_id', nodes_ids
        )
    if statuses and HISTORY_TASK_STATUSES.skipped not in statuses:
        history_records = cls.filter_by_list(
            history_records, 'status', statuses
        )

    for history_record in history_records:
        task_name = history_record.deployment_graph_task_name
        visited_tasks.add(task_name)

        # the visited tasks should be calculated; that is the
        # reason why the query filter cannot be used here
        if statuses and history_record.status not in statuses:
            continue

        fields = list(DeploymentHistorySerializer.fields)
        if include_summary:
            fields.append('summary')
        record = cls.single.to_dict(history_record, fields=fields)
        history.append(record)

        # remove ambiguous field
        record['task_name'] = record.pop('deployment_graph_task_name')

        if task_parameters_by_name:
            try:
                record.update(task_parameters_by_name[task_name])
            except KeyError:
                logger.warning(
                    'Definition of "{0}" task is not found'
                    .format(task_name)
                )

    # calculate absent tasks respecting the filters
    if (not nodes_ids and (
            not statuses or HISTORY_TASK_STATUSES.skipped in statuses)):
        for task_name in task_parameters_by_name:
            if tasks_names and task_name not in tasks_names:
                continue
            if task_name in visited_tasks:
                continue

            history.append(dict(
                task_parameters_by_name[task_name],
                task_name=task_name,
                node_id='-',
                status=HISTORY_TASK_STATUSES.skipped,
                time_start=None,
                time_end=None,
            ))
    return history
Example 170
Project: fuel-web Source File: manager.py
@classmethod
def get_volumes_metadata(cls, cluster):
    """Get volumes metadata for cluster from all plugins which enabled it.

    :param cluster: A cluster instance
    :type cluster: Cluster model
    :return: dict -- Object with merged volumes data from plugins
    """
    volumes_metadata = {
        'volumes': [],
        'volumes_roles_mapping': {},
        'rule_to_pick_boot_disk': [],
    }
    release_volumes = cluster.release.volumes_metadata.get('volumes', [])
    release_volumes_ids = [v['id'] for v in release_volumes]
    processed_volumes = {}

    enabled_plugins = ClusterPlugin.get_enabled(cluster.id)
    for plugin_adapter in map(wrap_plugin, enabled_plugins):
        metadata = plugin_adapter.volumes_metadata

        for volume in metadata.get('volumes', []):
            volume_id = volume['id']
            if volume_id in release_volumes_ids:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with release '
                    'by introducing the same volume with '
                    'id "{1}"'.format(plugin_adapter.full_name, volume_id)
                )
            elif volume_id in processed_volumes:
                raise errors.AlreadyExists(
                    'Plugin {0} is overlapping with plugin {1} '
                    'by introducing the same volume with '
                    'id "{2}"'.format(
                        plugin_adapter.full_name,
                        processed_volumes[volume_id],
                        volume_id
                    )
                )

            processed_volumes[volume_id] = plugin_adapter.full_name

        volumes_metadata.get('volumes_roles_mapping', {}).update(
            metadata.get('volumes_roles_mapping', {}))
        volumes_metadata.get('volumes', []).extend(
            metadata.get('volumes', []))
        volumes_metadata.get('rule_to_pick_boot_disk', []).extend(
            metadata.get('rule_to_pick_boot_disk', []))

    return volumes_metadata
Example 171
Project: fuel-web Source File: test_orchestrator_serializer_90.py
@mock.patch.object(
    plugins.adapters.PluginAdapterBase, 'repo_files',
    mock.MagicMock(return_value=True)
)
def test_plugins_in_serialized(self):
    releases = [
        {'repository_path': 'repositories/ubuntu',
         'version': 'mitaka-9.0', 'os': 'ubuntu',
         'mode': ['ha', 'multinode'],
         'deployment_scripts_path': 'deployment_scripts/'}
    ]
    plugin1 = self.env.create_plugin(
        cluster=self.cluster_db,
        name='plugin_1',
        attributes_metadata={'attributes': {'name': 'plugin_1'}},
        package_version='4.0.0',
        releases=releases
    )
    plugin2 = self.env.create_plugin(
        cluster=self.cluster_db,
        name='plugin_2',
        attributes_metadata={'attributes': {'name': 'plugin_2'}},
        package_version='4.0.0',
        releases=releases
    )
    self.env.create_plugin(
        cluster=self.cluster_db,
        enabled=False,
        name='plugin_3',
        attributes_metadata={'attributes': {'name': 'plugin_3'}},
        package_version='4.0.0',
        releases=releases
    )
    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['compute']
    )
    plugins_data = [
        {
            'name': p.name,
            'scripts': [{
                'remote_url': p.master_scripts_path(self.cluster_db),
                'local_path': p.slaves_scripts_path
            }],
            'repositories': [{
                'type': 'deb',
                'name': p.full_name,
                'uri': p.repo_url(self.cluster_db),
                'suite': '/',
                'section': '',
                'priority': 1100
            }]
        }
        for p in six.moves.map(plugins.wrap_plugin, [plugin1, plugin2])
    ]
    objects.Cluster.prepare_for_deployment(self.cluster_db)
    serialized = self.serializer.serialize(
        self.cluster_db, self.cluster_db.nodes)
    self.assertIn('plugins', serialized['common'])
    self.datadiff(plugins_data, serialized['common']['plugins'],
                  compare_sorted=True)
Example 172
def _process_nodes(self, node_list):
    node_itr = six.moves.map(self._process_node, node_list)
    return itertools.chain.from_iterable(node_itr)
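_process_nodes maps a function that itself returns an iterable, then flattens the resulting iterable-of-iterables lazily with itertools.chain.from_iterable. A self-contained sketch with a hypothetical per-node expansion:

import itertools
from six.moves import map

def process_node(node):  # hypothetical: one node expands to two results
    return [node, node * 10]

flat = itertools.chain.from_iterable(map(process_node, [1, 2, 3]))
print(list(flat))  # [1, 10, 2, 20, 3, 30]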
Example 173
Project: heat Source File: plugin_manager.py
def __init__(self, *extra_packages):
    """Initialise the Heat Engine plugin package, and any others.

    The heat.engine.plugins package is always created, if it does not
    exist, from the plugin directories specified in the config file, and
    searched for modules. In addition, any extra packages specified are
    also searched for modules. e.g.

    >>> PluginManager('heat.engine.resources')

    will load all modules in the heat.engine.resources package as well as
    any user-supplied plugin modules.
    """
    def packages():
        for package_name in extra_packages:
            yield sys.modules[package_name]

        cfg.CONF.import_opt('plugin_dirs', 'heat.common.config')
        yield plugin_loader.create_subpackage(cfg.CONF.plugin_dirs,
                                              'heat.engine')

    def modules():
        pkg_modules = six.moves.map(plugin_loader.load_modules,
                                    packages())
        return itertools.chain.from_iterable(pkg_modules)

    self.modules = list(modules())
Example 174
Project: heat Source File: plugin_manager.py
def map_to_modules(self, function):
    """Iterate over the results of calling a function on every module."""
    return six.moves.map(function, self.modules)
Example 175
Project: yaql Source File: strings.py
@specs.parameter('sequence', yaqltypes.Iterable())
@specs.parameter('separator', yaqltypes.String())
@specs.inject('str_delegate', yaqltypes.Delegate('str'))
@specs.method
def join(sequence, separator, str_delegate):
    """:yaql:join

    Returns a string with sequence elements joined by the separator.

    :signature: sequence.join(separator)
    :receiverArg sequence: chain of values to be joined
    :argType sequence: sequence of strings
    :arg separator: value to be placed between joined pairs
    :argType separator: string
    :returnType: string

    .. code::

        yaql> ["abc", "de", "f"].join("")
        "abcdef"
        yaql> ["abc", "de", "f"].join("|")
        "abc|de|f"
    """
    return separator.join(six.moves.map(str_delegate, sequence))
Example 176
def __init__(self, sdk, values):
    self.count = values.get('count')
    self.items = list(map(lambda x: Archive(sdk, x), values.get('items', [])))
Example 177
def attrs(self):
    return {
        'count': self.count,
        'items': map(Archive.attrs, self.items)
    }
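Note the asymmetry with Example 176, which wraps map in list(): here 'items' is a one-shot iterator on Python 3, so serializing the returned dict twice would see an empty sequence the second time. The hazard in miniature:

from six.moves import map

data = {'items': map(str, [1, 2, 3])}
print(list(data['items']))  # ['1', '2', '3']
print(list(data['items']))  # [] -- exhausted; wrap in list() if reused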
Example 178
Project: vdsm Source File: misc.py
def namedtuple2dict(nt):
    return dict(map(lambda f: (f, getattr(nt, f)), nt._fields))
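Assuming the vdsm helper above is in scope, a quick usage check: each field name is paired with its attribute value, and dict() consumes the lazy map directly.

import collections

Point = collections.namedtuple('Point', ['x', 'y'])
print(namedtuple2dict(Point(1, 2)))  # {'x': 1, 'y': 2}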