Comment unused code

This commit is contained in:
gmatteo 2019-10-20 01:17:23 +02:00
parent a3be17f57f
commit 1f8d8270a5
26 changed files with 238 additions and 551 deletions

View File

@@ -8,7 +8,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
python-version: [3.8]
python-version: [3.6]
steps:
- uses: actions/checkout@v1

View File

@@ -32,9 +32,9 @@ jobs:
# Replace dep1 dep2 ... with your dependencies
conda create -q -n test-environment python=${{ matrix.python-version }}
source activate test-environment
conda install abinit -c abinit
abinit -v
abinit -b
#conda install abinit -c abinit
#abinit -v
#abinit -b
- name: Install Python dependencies
run: |
python -m pip install --upgrade pip

View File

@@ -536,7 +536,7 @@ class AbinitInputParser(object):
This function is not recursive, hence expressions like sqrt(1/2) are not supported
"""
import math # flake8: noqa
import math # noqa: F401
import re
re_sqrt = re.compile(r"[+|-]?sqrt\((.+)\)")
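Note on this change (and on the similar noqa replacements later in this commit): a trailing "# flake8: noqa" is not a per-line suppression, it tells flake8 to skip the entire file, whereas "# noqa: F401" silences only the unused-import warning on that single line. A minimal sketch of the intended usage:

# Keep the import but silence only "F401 imported but unused" on this line:
import math  # noqa: F401
# By contrast, a bare "# flake8: noqa" anywhere in a file makes flake8 skip
# the whole file, which is rarely what a single unused import calls for.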

View File

@@ -211,7 +211,8 @@ def build_bench_main_parser():
parser.add_argument("--min-eff", default=None, type=float, help="Minimum parallel efficiency accepted. Default None.")
parser.add_argument('--paw', default=False, action="store_true", help="Run PAW calculation if available")
parser.add_argument("-a", '--abivalidate', default=False, action="store_true", help="Call Abinit to validate input files and return")
parser.add_argument("-a", '--abivalidate', default=False, action="store_true",
help="Call Abinit to validate input files and return")
parser.add_argument("-i", '--info', default=False, action="store_true", help="Show benchmark info and exit")
parser.add_argument("-r", "--remove", default=False, action="store_true", help="Remove old flow workdir")

View File

@@ -19,7 +19,7 @@ def make_inputs(paw=False):
# N=9
# Supercell and atoms
xcart= np.fromstring("""
xcart = np.fromstring("""
0.0000000000E+00 0.0000000000E+00 -4.2633349730E+00
3.7794522658E+00 3.7794522658E+00 -3.2803418097E+00
0.0000000000E+00 3.7794522658E+00 -3.6627278067E+00
@@ -55,7 +55,7 @@ def make_inputs(paw=False):
structure = abilab.Structure.from_abivars(
#acell="4.0 4.0 28.0 Angstrom",
acell=abilab.ArrayWithUnit([4.0, 4.0, 28], "ang").to("bohr"),
rprim=np.eye(3),
rprim=np.eye(3),
typat=[int(i) for i in "3 3 2 3 3 2 1 3 3 3 2 1 3 3 3 2 1 3 3 3 2 1 3 3 3 2 3 3 2".split()],
znucl=[56, 22, 8],
xcart=xcart,
@@ -135,7 +135,7 @@ def main(options):
if options.info:
# print doc string and exit.
print(__doc__)
return
return
return build_flow(options)
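For readers less familiar with the idiom used above: the whitespace-separated coordinate block is parsed into an (natom, 3) array and handed to Structure.from_abivars together with the cell parameters converted from Angstrom to Bohr. A minimal, self-contained sketch with illustrative values (not the ones from this benchmark):

import numpy as np
import abipy.abilab as abilab

# Parse a whitespace-separated block of Cartesian coordinates into an (natom, 3) array.
# Note that np.fromstring with sep is deprecated in recent NumPy releases.
xcart = np.fromstring("""
0.0 0.0 0.0
1.9 1.9 1.9
""", sep=" ").reshape((-1, 3))

structure = abilab.Structure.from_abivars(
    acell=abilab.ArrayWithUnit([4.0, 4.0, 4.0], "ang").to("bohr"),  # cell lengths in Bohr
    rprim=np.eye(3),     # dimensionless primitive vectors, scaled by acell
    typat=[1, 1],        # one type index per atom
    znucl=[14],          # atomic number of each type (Si as a placeholder)
    xcart=xcart,         # Cartesian positions, Bohr being the Abinit default unit
)
print(structure)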

View File

@@ -42,7 +42,7 @@ def make_input(): # pragma: no cover
2.9570301511E+00 5.5992672027E-02 -1.3560839453E-01""", sep=" ").reshape((-1,3))
structure = abilab.Structure.from_abivars(
acell = abilab.ArrayWithUnit([10, 5, 5], "ang").to("bohr"),
acell=abilab.ArrayWithUnit([10, 5, 5], "ang").to("bohr"),
rprim=np.eye(3),
typat=[1, 3, 3, 2, 3, 3, 3, 3], # Type of atoms (H2O + NH3 + H)
znucl=[8.0, 7.0, 1.0],
@@ -136,8 +136,7 @@ def build_flow(options): # pragma: no cover
# print("wfoptalg:", wfoptalg, "done with MPI_PROCS:", mpi_procs, "and:", d)
# inp = template.new_with_vars(d)
# work.register_scf_task(inp, manager=manager)
#flow.register_work(work)
#flow.register_work(work)
return flow.allocate()

View File

@@ -44,7 +44,6 @@ def make_inputs(paw=False):
0.5, 0.5, 0.5]
)
multi[0].set_vars(
kptopt=1, # Automatic generation of k points with symmetries.
tolvrs=1.0e-6,

View File

@@ -82,7 +82,7 @@ def make_flow_ephinp(options):
# q-path for phonons and phonon linewidths.
ph_ndivsm=20,
ph_nqpath=3,
ph_qpath= [
ph_qpath=[
0 , 0 , 0,
0.5, 0 , 0,
0.5, 0.5, 0,],
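As a reminder of how this block is read: ph_qpath is passed as a flat list that Abinit interprets as ph_nqpath vertices of the q-path, three reduced coordinates per vertex. A small sanity check one could run with the values above:

import numpy as np

ph_nqpath = 3
ph_qpath = [0, 0, 0,
            0.5, 0, 0,
            0.5, 0.5, 0]
qverts = np.reshape(ph_qpath, (-1, 3))   # one row per q-path vertex
assert len(qverts) == ph_nqpath          # keep ph_nqpath consistent with the list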

View File

@@ -194,7 +194,7 @@ def build_flow(options):
pconfs = [
dict(npkpt=1, npband=13, npfft=10), # 130
dict(npkpt=1, npband=26, npfft=10), # 260
dict(npkpt=1, npband=65, npfft=8 ), # 520
dict(npkpt=1, npband=65, npfft=8), # 520
dict(npkpt=1, npband=65, npfft=16), # 1040
]

View File

@@ -61,6 +61,7 @@ global_vars = dict(
#iomode=3
)
def make_inputs(options):
structure = abilab.Structure.from_abivars(unit_cell)

View File

@@ -1,6 +1,9 @@
#!/usr/bin/env python
"""Analyze the parallel efficiency of the MPI-FFT algorithsm in in the GS part.
Use paral_kgb=1 and fftalg_list = [312, 402, 401]"""
"""
Analyze the parallel efficiency of the MPI-FFT algorithms in the GS part.
Use paral_kgb=1 and fftalg_list = [312, 402, 401]
"""
import sys
import abipy.abilab as abilab
import abipy.flowtk as flowtk
@@ -25,7 +28,7 @@ def make_input(paw=False):
ecut = 24
inp.set_vars(
ecut=ecut,
pawecutdg=ecut*2 if paw else None,
pawecutdg=ecut * 2 if paw else None,
paral_kgb=1,
nsppol=1,
nband=28,
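Two details worth spelling out: pawecutdg sets the cutoff of the fine PAW double grid and must be at least ecut (these benchmarks use twice ecut), and the "ecut * 2 if paw else None" idiom assumes that a variable left at None is simply not written to the Abinit input, so the norm-conserving run omits pawecutdg altogether. The same filtering pattern in plain Python, as a sketch of that assumption:

ecut = 24
paw = False

abivars = dict(
    ecut=ecut,
    pawecutdg=ecut * 2 if paw else None,   # only meaningful for PAW pseudopotentials
    paral_kgb=1,
    nsppol=1,
)
# Drop entries left at None before writing them to an input file.
abivars = {k: v for k, v in abivars.items() if v is not None}
print(abivars)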

View File

@@ -19,8 +19,7 @@ def make_input(paw=False):
Build and return an input file for GS calculations with paral_kgb=1
"""
pseudos = abidata.pseudos("14si.pspnc", "8o.pspnc")
#if not paw else
#abidata.pseudos("Si.GGA_PBE-JTH-paw.xml")
#if not paw else abidata.pseudos("Si.GGA_PBE-JTH-paw.xml")
structure = abidata.structure_from_ucell("SiO2-alpha")
inp = abilab.AbinitInput(structure, pseudos)

View File

@@ -341,7 +341,7 @@ def build_flow(options):
# Processor distribution.
pconfs = [
dict(npkpt=2, npband=8 , npfft=8 ), # 128
dict(npkpt=2, npband=8 , npfft=8), # 128
dict(npkpt=2, npband=8 , npfft=16), # 256
dict(npkpt=2, npband=16, npfft=16), # 512
dict(npkpt=2, npband=16, npfft=32), # 1024

View File

@@ -194,8 +194,8 @@ def build_flow(options):
# Processor distribution.
pconfs = [
dict(npkpt=2, npband=8 , npfft=8 ), # 128 processors
dict(npkpt=2, npband=16, npfft=8 ), # 256 processors
dict(npkpt=2, npband=8 , npfft=8), # 128 processors
dict(npkpt=2, npband=16, npfft=8), # 256 processors
dict(npkpt=2, npband=16, npfft=16), # 512 processors
dict(npkpt=2, npband=16, npfft=32), # 1024 processors
]

View File

@@ -33,7 +33,7 @@ def make_inputs(paw=False):
gs, nscf, scr, sigma = multi.split_datasets()
# This grid is the most economical, but does not contain the Gamma point.
ngkpt=[5, 5, 5]
ngkpt = [5, 5, 5]
gs_kmesh = dict(
ngkpt=ngkpt,
shiftk=[0.5, 0.5, 0.5,

View File

@@ -30,7 +30,6 @@ def make_inputs(paw=False):
paral_kgb=0,
)
multi.set_kmesh(
ngkpt=[6,6,6],
shiftk=[0.0, 0.0, 0.0],

View File

@@ -26,7 +26,6 @@ def make_inputs(paw=False):
timopt=-1,
)
multi.set_kmesh(ngkpt=[4,4,3], shiftk=[0.0, 0.0, 0.0])
gs, nscf, scr, sigma = multi.split_datasets()

View File

@@ -111,30 +111,30 @@ class ElectronInterpolator(metaclass=abc.ABCMeta):
print("Trying to access valence band index with occtype:", self.occtype)
return int(self.nelect // 2 - 1)
def set_fermie(self, fermie, kmesh, is_shift=None):
"""
Change the Fermi level. Use the IDOS computed on the k-grid specified by
`kmesh` and `is_shift` to recompute the total number of electrons.
#def set_fermie(self, fermie, kmesh, is_shift=None):
# """
# Change the Fermi level. Use the IDOS computed on the k-grid specified by
# `kmesh` and `is_shift` to recompute the total number of electrons.
Args:
fermie: New Fermi level in eV
kmesh: Three integers with the number of divisions along the reciprocal primitive axes.
is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
# Args:
# fermie: New Fermi level in eV
# kmesh: Three integers with the number of divisions along the reciprocal primitive axes.
# is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
# the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
Return:
New value of `self.nelect`.
"""
# Compute DOS
edos = self._get_cached_edos(kmesh, is_shift)
#if edos is None:
# edos = self.get_edos(kmesh, is_shift=is_shift, method="gaussian", step=0.1, width=0.2, wmesh=None)
# self._cache_edos(kmesh, is_shift, edos)
# Return:
# New value of `self.nelect`.
# """
# # Compute DOS
# edos = self._get_cached_edos(kmesh, is_shift)
# #if edos is None:
# # edos = self.get_edos(kmesh, is_shift=is_shift, method="gaussian", step=0.1, width=0.2, wmesh=None)
# # self._cache_edos(kmesh, is_shift, edos)
self.fermie = fermie
# Find number of electrons from new chemical potential from nelect.
self.nelect = idos.spline(fermie)
return self.nelect
# self.fermie = fermie
# # Find number of electrons from new chemical potential from nelect.
# self.nelect = idos.spline(fermie)
# return self.nelect
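For the record, the idea behind the now-disabled set_fermie is to recompute the electron count by evaluating the integrated DOS at the new Fermi level; note that the old body also referenced an undefined idos. A generic sketch of that single step using scipy rather than the abipy objects:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

# Toy integrated DOS on an energy mesh (eV); in practice it would come from get_edos().
wmesh = np.linspace(-10.0, 10.0, 2001)
idos_values = np.clip(0.4 * (wmesh + 10.0), 0.0, 8.0)   # monotonically increasing IDOS

idos = InterpolatedUnivariateSpline(wmesh, idos_values)
new_fermie = 0.5                                         # new Fermi level in eV
nelect = float(idos(new_fermie))                         # electron count at the new Fermi level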
#def set_nelect(self, nelect, kmesh, is_shift=None):
# """
@@ -222,67 +222,67 @@ class ElectronInterpolator(metaclass=abc.ABCMeta):
return dict2namedtuple(mesh=wmesh, values=values, integral=integral)
#return ElectronDos(wmesh, values, integral, is_shift, method, step, width)
def get_jdos_q0(self, kmesh, is_shift=None, method="gaussian", step=0.1, width=0.2, wmesh=None):
r"""
Compute the joint density of states at q==0
#def get_jdos_q0(self, kmesh, is_shift=None, method="gaussian", step=0.1, width=0.2, wmesh=None):
# r"""
# Compute the joint density of states at q==0
:math:`\sum_{kbv} f_{vk} (1 - f_{ck}) \delta(\omega - E_{ck} + E_{vk})`
# :math:`\sum_{kbv} f_{vk} (1 - f_{ck}) \delta(\omega - E_{ck} + E_{vk})`
Args:
kmesh: Three integers with the number of divisions along the reciprocal primitive axes.
is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
method: String defining the method.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
wmesh: Frequency mesh to use. If None, the mesh is computed automatically from the eigenvalues.
# Args:
# kmesh: Three integers with the number of divisions along the reciprocal primitive axes.
# is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
# the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
# method: String defining the method.
# step: Energy step (eV) of the linear mesh.
# width: Standard deviation (eV) of the gaussian.
# wmesh: Frequency mesh to use. If None, the mesh is computed automatically from the eigenvalues.
Returns:
"""
k = self.get_sampling(kmesh, is_shift)
# Returns:
# """
# k = self.get_sampling(kmesh, is_shift)
# Interpolate eigenvalues in the IBZ.
eigens = self._get_cached_eigens(kmesh, is_shift, "ibz")
if eigens is None:
eigens = self.interp_kpts(k.ibz).eigens
self._cache_eigens(kmesh, is_shift, eigens, "ibz")
# # Interpolate eigenvalues in the IBZ.
# eigens = self._get_cached_eigens(kmesh, is_shift, "ibz")
# if eigens is None:
# eigens = self.interp_kpts(k.ibz).eigens
# self._cache_eigens(kmesh, is_shift, eigens, "ibz")
wmesh, step = self._get_w2mesh_step(eigens, wmesh, step)
nw = len(wmesh)
values = np.zeros((self.nsppol, nw))
# wmesh, step = self._get_w2mesh_step(eigens, wmesh, step)
# nw = len(wmesh)
# values = np.zeros((self.nsppol, nw))
if self.occtype == "insulator":
if method == "gaussian":
for spin in range(self.nsppol):
for ik, wtk in enumerate(k.weights):
for icb in range(self.val_ib + 1, self.nband):
ec = eigens[spin, ik, icb]
for icv in range(self.val_ib):
ev = eigens[spin, ik, icv]
values[spin] += wtk * gaussian(wmesh, width, center=ec-ev)
else:
raise ValueError("Method %s is not supported" % method)
# if self.occtype == "insulator":
# if method == "gaussian":
# for spin in range(self.nsppol):
# for ik, wtk in enumerate(k.weights):
# for icb in range(self.val_ib + 1, self.nband):
# ec = eigens[spin, ik, icb]
# for icv in range(self.val_ib):
# ev = eigens[spin, ik, icv]
# values[spin] += wtk * gaussian(wmesh, width, center=ec-ev)
# else:
# raise ValueError("Method %s is not supported" % method)
else:
#occfacts = self.
if method == "gaussian":
for spin in range(self.nsppol):
for ik, wtk in enumerate(k.weights):
for icb in conduction:
ec = eigens[spin, ik, icb]
fc = 1.0 - occfacts[spin, ik, icb]
for icv in valence:
ev = eigens[spin, ik, icv]
fv = occfacts[spin, ik, icv]
fact = wtk * fv * fc
values[spin] += fact * gaussian(wmesh, width, center=ec-ev)
else:
raise ValueError("Method %s is not supported" % method)
# else:
# raise NotImplementedError("not insulator")
# #if method == "gaussian":
# # for spin in range(self.nsppol):
# # for ik, wtk in enumerate(k.weights):
# # for icb in conduction:
# # ec = eigens[spin, ik, icb]
# # fc = 1.0 - occfacts[spin, ik, icb]
# # for icv in valence:
# # ev = eigens[spin, ik, icv]
# # fv = occfacts[spin, ik, icv]
# # fact = wtk * fv * fc
# # values[spin] += fact * gaussian(wmesh, width, center=ec-ev)
# #else:
# # raise ValueError("Method %s is not supported" % method)
if self.nsppol == 1: values *= 2.0
integral = scipy.integrate.cumtrapz(values, x=wmesh, initial=0.0)
# if self.nsppol == 1: values *= 2.0
# integral = scipy.integrate.cumtrapz(values, x=wmesh, initial=0.0)
return dict2namedtuple(mesh=wmesh, values=values, integral=integral)
# return dict2namedtuple(mesh=wmesh, values=values, integral=integral)
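The commented-out get_jdos_q0 accumulates the joint DOS at q = 0 by broadening every valence-to-conduction transition with a gaussian, following the formula quoted in the docstring. A stripped-down, self-contained sketch of that accumulation for the insulating case, with toy dimensions and random eigenvalues instead of the abipy objects:

import numpy as np

def gaussian(x, width, center=0.0):
    """Normalized gaussian with standard deviation width, centered at center."""
    return np.exp(-0.5 * ((x - center) / width) ** 2) / (width * np.sqrt(2.0 * np.pi))

nsppol, nkpt, nband, val_ib = 1, 4, 6, 2                # val_ib: index of the highest valence band
eigens = np.sort(np.random.rand(nsppol, nkpt, nband) * 10.0, axis=-1)
weights = np.full(nkpt, 1.0 / nkpt)                     # k-point weights, normalized to one
wmesh = np.linspace(0.0, 12.0, 600)                     # transition-energy mesh (eV)
width = 0.2                                             # gaussian broadening (eV)

values = np.zeros((nsppol, len(wmesh)))
for spin in range(nsppol):
    for ik, wtk in enumerate(weights):
        for icb in range(val_ib + 1, nband):            # conduction bands
            ec = eigens[spin, ik, icb]
            for icv in range(val_ib + 1):               # valence bands 0 .. val_ib
                ev = eigens[spin, ik, icv]
                values[spin] += wtk * gaussian(wmesh, width, center=ec - ev)
if nsppol == 1:
    values *= 2.0                                       # spin degeneracy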
#def get_jdos_qpts(self, qpoints, kmesh, is_shift=None, method="gaussian", step=0.1, width=0.2, wmesh=None):
# qpoints = np.reshape(qpoints, (-1, 3))
@@ -307,44 +307,44 @@ class ElectronInterpolator(metaclass=abc.ABCMeta):
# jdos_sqw *= 1. / k.nbz
# return jdos_sqw
def get_nesting_at_e0(self, qpoints, kmesh, e0, width=0.2, is_shift=None):
"""
Compute the nesting factor with gaussian broadening for an arbitrary list of q-points.
#def get_nesting_at_e0(self, qpoints, kmesh, e0, width=0.2, is_shift=None):
# """
# Compute the nesting factor with gaussian broadening for an arbitrary list of q-points.
Args:
qpoints: List of q-points in reduced coordinates.
kmesh: Three integers with the number of divisions along the reciprocal primitive axes.
e0: Energy level in eV.
width: Standard deviation (eV) of the gaussian.
is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
# Args:
# qpoints: List of q-points in reduced coordinates.
# kmesh: Three integers with the number of divisions along the reciprocal primitive axes.
# e0: Energy level in eV.
# width: Standard deviation (eV) of the gaussian.
# is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
# the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
Returns:
numpy array of shape [self.nsppol, len(qpoints)]
"""
qpoints = np.reshape(qpoints, (-1, 3))
k = self.get_sampling(kmesh, is_shift)
# Returns:
# numpy array of shape [self.nsppol, len(qpoints)]
# """
# qpoints = np.reshape(qpoints, (-1, 3))
# k = self.get_sampling(kmesh, is_shift)
# Interpolate eigenvalues in the full BZ.
eigens_kbz = self._get_cached_eigens(kmesh, is_shift, "bz")
if eigens is None:
eigens_kbz = self.interp_kpts(k.bz).eigens
self._cache_eigens(kmesh, is_shift, eigens_kbz, "bz")
# # Interpolate eigenvalues in the full BZ.
# eigens_kbz = self._get_cached_eigens(kmesh, is_shift, "bz")
# if eigens_kbz is None:
# eigens_kbz = self.interp_kpts(k.bz).eigens
# self._cache_eigens(kmesh, is_shift, eigens_kbz, "bz")
eigens_kbz = eigens_kbz - e0
g_skb = gaussian(eigens_kbz, width)
# eigens_kbz = eigens_kbz - e0
# g_skb = gaussian(eigens_kbz, width)
# TODO: One could reduce the sum to IBZ(q) with appropriate weight.
nest_sq = np.zeros((self.nsppol, len(qpoints)))
for iq, qpt in enumerate(qpoints):
kpq_bz = kbz + qpt
eigens_kqbz = self.interp_kpts(kpq_bz).eigens - e0
g_skqb = gaussian(eigens_kqbz, width)
vals = g_skb * g_skqb
nest_sq[:, iq] = vals.sum(axis=(1, 2))
# # TODO: One could reduce the sum to IBZ(q) with appropriate weight.
# nest_sq = np.zeros((self.nsppol, len(qpoints)))
# for iq, qpt in enumerate(qpoints):
# kpq_bz = kbz + qpt
# eigens_kqbz = self.interp_kpts(kpq_bz).eigens - e0
# g_skqb = gaussian(eigens_kqbz, width)
# vals = g_skb * g_skqb
# nest_sq[:, iq] = vals.sum(axis=(1, 2))
nest_sq *= 1. / k.nbz
return nest_sq
# nest_sq *= 1. / k.nbz
# return nest_sq
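Likewise, the disabled get_nesting_at_e0 evaluates a gaussian-broadened nesting factor, roughly N(q) = (1/N_k) times the sum over k-points and bands of g(e_k - e0) * g(e_{k+q} - e0). A self-contained sketch in which a toy band model stands in for the SKW interpolation (interp_kpts in the real code):

import numpy as np

def gaussian(x, width):
    return np.exp(-0.5 * (x / width) ** 2) / (width * np.sqrt(2.0 * np.pi))

def toy_bands(kpts):
    """Stand-in for interp_kpts: eigenvalues of shape (nsppol, nkpt, nband)."""
    kpts = np.reshape(kpts, (-1, 3))
    base = np.cos(2.0 * np.pi * kpts.sum(axis=1))
    return np.stack([base, base + 1.0, base + 2.0], axis=-1)[None, :, :]

kbz = np.random.rand(64, 3)                              # toy BZ sampling in reduced coordinates
qpoints = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])   # q-points where the nesting is evaluated
e0, width = 0.0, 0.2                                     # energy level and broadening (eV)

g_skb = gaussian(toy_bands(kbz) - e0, width)
nest_sq = np.zeros((g_skb.shape[0], len(qpoints)))
for iq, qpt in enumerate(qpoints):
    g_skqb = gaussian(toy_bands(kbz + qpt) - e0, width)
    nest_sq[:, iq] = (g_skb * g_skqb).sum(axis=(1, 2))
nest_sq /= len(kbz)                                      # normalize by the number of k-points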
def _get_wmesh_step(self, eigens, wmesh, step):
if wmesh is not None:
@@ -467,112 +467,112 @@ class ElectronInterpolator(metaclass=abc.ABCMeta):
return fig
@add_fig_kwargs
def plot_jdosq0_vs_kmeshes(self, kmeshes, is_shift=None, method="gaussian", step=0.1, width=0.2,
ax=None, fontsize=12, **kwargs):
"""
Plot (interpolated) Joint DOSes at q=0 computed with different meshes.
#@add_fig_kwargs
#def plot_jdosq0_vs_kmeshes(self, kmeshes, is_shift=None, method="gaussian", step=0.1, width=0.2,
# ax=None, fontsize=12, **kwargs):
# """
# Plot (interpolated) Joint DOSes at q=0 computed with different meshes.
Args:
kmeshes: List of kmeshes. Each item is given by three integers with the number of
divisions along the reciprocal primitive axes.
is_shift: three integers (spglib API). If None, the kmesh is shifted along
the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
ax: |matplotlib-Axes| or None if a new figure should be created.
fontsize: Legend and title fontsize.
# Args:
# kmeshes: List of kmeshes. Each item is given by three integers with the number of
# divisions along the reciprocal primitive axes.
# is_shift: three integers (spglib API). If None, the kmesh is shifted along
# the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
# method: String defining the method for the computation of the DOS.
# step: Energy step (eV) of the linear mesh.
# width: Standard deviation (eV) of the gaussian.
# ax: |matplotlib-Axes| or None if a new figure should be created.
# fontsize: Legend and title fontsize.
Returns: |matplotlib-Figure|
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
kmeshes = np.reshape(np.asarray(kmeshes, dtype=np.int), (-1, 3))
for kmesh in kmeshes:
jdos = self.get_jdos_q0(kmesh, is_shift=is_shift, method=method, step=step, width=width)
for spin in range(self.nsppol):
spin_sign = +1 if spin == 0 else -1
ax.plot(jdos.mesh, jdos.values[spin] * spin_sign, label=str(kmesh) if spin == 0 else None)
# Returns: |matplotlib-Figure|
# """
# ax, fig, plt = get_ax_fig_plt(ax=ax)
# kmeshes = np.reshape(np.asarray(kmeshes, dtype=np.int), (-1, 3))
# for kmesh in kmeshes:
# jdos = self.get_jdos_q0(kmesh, is_shift=is_shift, method=method, step=step, width=width)
# for spin in range(self.nsppol):
# spin_sign = +1 if spin == 0 else -1
# ax.plot(jdos.mesh, jdos.values[spin] * spin_sign, label=str(kmesh) if spin == 0 else None)
ax.grid(True)
ax.set_xlabel("Energy (eV)")
ax.set_ylabel('JDOS (states/eV)')
ax.legend(loc="best", fontsize=fontsize, shadow=True)
# ax.grid(True)
# ax.set_xlabel("Energy (eV)")
# ax.set_ylabel('JDOS (states/eV)')
# ax.legend(loc="best", fontsize=fontsize, shadow=True)
return fig
# return fig
@add_fig_kwargs
def plot_nesting_vs_widths(self, widths, kmesh, e0=None, qvertices_names=None,
line_density=20, is_shift=None, ax=None, fontsize=12, **kwargs):
"""
Plot (interpolated) nesting factor computed with different broadening.
#@add_fig_kwargs
#def plot_nesting_vs_widths(self, widths, kmesh, e0=None, qvertices_names=None,
# line_density=20, is_shift=None, ax=None, fontsize=12, **kwargs):
# """
# Plot (interpolated) nesting factor computed with different broadening.
Args:
widths: List of standard deviations (eV) of the gaussian.
kmeshes: List of kmeshes. Each item is given by three integers with the number of
divisions along the reciprocal primitive axes.
e0: Energy level in eV.
qvertices_names
line_density:
is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
ax: |matplotlib-Axes| or None if a new figure should be created.
fontsize: Legend and title fontsize.
# Args:
# widths: List of standard deviations (eV) of the gaussian.
# kmeshes: List of kmeshes. Each item is given by three integers with the number of
# divisions along the reciprocal primitive axes.
# e0: Energy level in eV.
# qvertices_names
# line_density:
# is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
# the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
# ax: |matplotlib-Axes| or None if a new figure should be created.
# fontsize: Legend and title fontsize.
Returns: |matplotlib-Figure|
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
qpoints = self._get_kpts_kticks_klabels(ax, qvertices_names, line_density)
# Returns: |matplotlib-Figure|
# """
# ax, fig, plt = get_ax_fig_plt(ax=ax)
# qpoints = self._get_kpts_kticks_klabels(ax, qvertices_names, line_density)
e0 = self.interpolated_fermie if e0 is None else e0
for width in np.asarray(widths):
nest_sq = self.get_nesting_at_e0(qpoints, kmesh, e0, width=width, is_shift=is_shift)
for spin in range(self.nsppol):
spin_sign = +1 if spin == 0 else -1
ax.plot(nest_sq[spin] * spin_sign, label=str(kmesh) if spin == 0 else None)
# e0 = self.interpolated_fermie if e0 is None else e0
# for width in np.asarray(widths):
# nest_sq = self.get_nesting_at_e0(qpoints, kmesh, e0, width=width, is_shift=is_shift)
# for spin in range(self.nsppol):
# spin_sign = +1 if spin == 0 else -1
# ax.plot(nest_sq[spin] * spin_sign, label=str(kmesh) if spin == 0 else None)
ax.grid(True)
ax.set_ylabel('Nesting factor')
ax.legend(loc="best", fontsize=fontsize, shadow=True)
# ax.grid(True)
# ax.set_ylabel('Nesting factor')
# ax.legend(loc="best", fontsize=fontsize, shadow=True)
return fig
# return fig
@add_fig_kwargs
def plot_nesting_vs_kmeshes(self, width, kmeshes, e0=None, qvertices_names=None, line_density=20,
is_shift=None, ax=None, fontsize=12, **kwargs):
"""
Plot (interpolated) nesting factor computed with different k-meshes.
#@add_fig_kwargs
#def plot_nesting_vs_kmeshes(self, width, kmeshes, e0=None, qvertices_names=None, line_density=20,
# is_shift=None, ax=None, fontsize=12, **kwargs):
# """
# Plot (interpolated) nesting factor computed with different k-meshes.
Args:
width: Gaussian broadening (eV).
kmeshes: List of kmeshes. Each item is given by three integers with the number of
divisions along the reciprocal primitive axes.
e0: Energy level in eV.
qvertices_names
line_density:
is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
ax: |matplotlib-Axes| or None if a new figure should be created.
fontsize: legend and title fontsize.
# Args:
# width: Gaussian broadening (eV).
# kmeshes: List of kmeshes. Each item is given by three integers with the number of
# divisions along the reciprocal primitive axes.
# e0: Energy level in eV.
# qvertices_names
# line_density:
# is_shift: three integers (spglib API). When is_shift is not None, the kmesh is shifted along
# the axis in half of adjacent mesh points irrespective of the mesh numbers. None means unshifted mesh.
# ax: |matplotlib-Axes| or None if a new figure should be created.
# fontsize: legend and title fontsize.
Returns: |matplotlib-Figure|
"""
ax, fig, plt = get_ax_fig_plt(ax=ax)
qpoints = self._get_kpts_kticks_klabels(ax, qvertices_names, line_density)
# Returns: |matplotlib-Figure|
# """
# ax, fig, plt = get_ax_fig_plt(ax=ax)
# qpoints = self._get_kpts_kticks_klabels(ax, qvertices_names, line_density)
kmeshes = np.reshape(np.asarray(kmeshes, dtype=np.int), (-1, 3))
e0 = self.interpolated_fermie if e0 is None else e0
for kmesh in kmeshes:
nest_sq = self.get_nesting_at_e0(qpoints, kmesh, e0, width=width, is_shift=is_shift)
for spin in range(self.nsppol):
spin_sign = +1 if spin == 0 else -1
ax.plot(nest_sq[spin] * spin_sign, label=str(kmesh) if spin == 0 else None)
# kmeshes = np.reshape(np.asarray(kmeshes, dtype=np.int), (-1, 3))
# e0 = self.interpolated_fermie if e0 is None else e0
# for kmesh in kmeshes:
# nest_sq = self.get_nesting_at_e0(qpoints, kmesh, e0, width=width, is_shift=is_shift)
# for spin in range(self.nsppol):
# spin_sign = +1 if spin == 0 else -1
# ax.plot(nest_sq[spin] * spin_sign, label=str(kmesh) if spin == 0 else None)
ax.grid(True)
ax.set_ylabel('Nesting factor')
ax.legend(loc="best", fontsize=fontsize, shadow=True)
# ax.grid(True)
# ax.set_ylabel('Nesting factor')
# ax.legend(loc="best", fontsize=fontsize, shadow=True)
return fig
# return fig
#@add_fig_kwargs
#def plot_group_velocites(self, vertices_names=None, line_density=20, ax=None, **kwargs):

View File

@@ -62,7 +62,7 @@ class TestSkwInterpolator(AbipyTest):
# Test interpolation routines (high-level API).
edos = skw.get_edos(kmesh, is_shift=None, method="gaussian", step=0.1, width=0.2, wmesh=None)
jdos = skw.get_jdos_q0(kmesh, is_shift=None, method="gaussian", step=0.1, width=0.2, wmesh=None)
#jdos = skw.get_jdos_q0(kmesh, is_shift=None, method="gaussian", step=0.1, width=0.2, wmesh=None)
#nest = skw.get_nesting_at_e0(qpoints, kmesh, e0, width=0.2, is_shift=None)
# Test pickle
@@ -76,7 +76,7 @@ class TestSkwInterpolator(AbipyTest):
widths = [0.2, 0.3]
is_shift = None
assert skw.plot_dos_vs_kmeshes(kmeshes, is_shift=is_shift, show=False)
assert skw.plot_jdosq0_vs_kmeshes(kmeshes, is_shift=is_shift, show=False)
#assert skw.plot_jdosq0_vs_kmeshes(kmeshes, is_shift=is_shift, show=False)
#assert skw.plot_nesting_vs_widths(widths=widths, kmesh=[4, 4, 4], e0=None, qvertices_names=None,
# line_density=5, is_shift=is_shift, show=False)

View File

@@ -17,6 +17,7 @@ import abipy.flowtk as flowtk
def build_flow(options):
# Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
if not options.workdir:
__file__ = os.path.join(os.getcwd(), "run_eph_al.py")
options.workdir = os.path.basename(__file__).replace(".py", "").replace("run_", "flow_")
# Preparatory run for E-PH calculations.
@@ -43,20 +44,17 @@ def build_flow(options):
gs_inp = abilab.AbinitInput(structure, pseudos)
gs_inp.set_vars(
istwfk="*1",
ecut=8.0,
nband=5,
occopt=7, # include metallic occupation function with a small smearing
tsmear=0.04,
tolvrs=1e-7,
#timeopt=-1,
)
# The kpoint grid is minimalistic to keep the calculation manageable.
gs_inp.set_kmesh(
ngkpt=[8, 8, 8],
shiftk=[0.0, 0.0, 0.0],
#kptopt=3,
)
# Phonon calculation with 4x4x4
@@ -76,10 +74,10 @@ def build_flow(options):
# Build input file for E-PH run.
eph_inp = gs_inp.new_with_vars(
optdriver=7,
ddb_ngqpt=ddb_ngqpt, # q-mesh used to produce the DDB file (must be consistent with DDB data)
eph_intmeth=2, # Tetra method
eph_fsewin="0.8 eV", # Energy window around Ef
eph_mustar=0.12, # mustar parameter
ddb_ngqpt=ddb_ngqpt, # q-mesh used to produce the DDB file (must be consistent with DDB data)
eph_intmeth=2, # Tetra method
eph_fsewin="0.8 eV", # Energy window around Ef
eph_mustar=0.12, # mustar parameter
eph_ngqpt_fine=eph_ngqpt_fine, # Interpolate DFPT potentials if != ddb_ngqpt
)
@@ -98,11 +96,12 @@ def build_flow(options):
# This block generates the thumbnails in the Abipy gallery.
# You can safely REMOVE this part if you are using this script for production runs.
if os.getenv("GENERATE_SPHINX_GALLERY", False):
if os.getenv("READTHEDOCS", False):
__name__ = None
import tempfile
options = flowtk.build_flow_main_parser().parse_args(["-w", tempfile.mkdtemp()])
build_flow(options).plot_networkx(tight_layout=True)
build_flow(options).graphviz_imshow()
#build_flow(options).plot_networkx(tight_layout=True)
@flowtk.flow_main

View File

@@ -2382,7 +2382,7 @@ class Task(Node, metaclass=abc.ABCMeta):
self.history.info("Removed files: %s" % paths)
return paths
def setup(self): # flake8: noqa
def setup(self): # noqa: E731
"""Base class does not provide any hook."""
#@check_spectator

View File

@@ -6,9 +6,9 @@ import numpy as np
import pandas as pd
from pymatgen.core.tensors import Tensor, SquareTensor
from pymatgen.analysis.elasticity.elastic import ElasticTensor # flake8: noqa
from pymatgen.analysis.elasticity.elastic import ElasticTensor # noqa: F401
from pymatgen.analysis.elasticity.stress import Stress as pmg_Stress
from pymatgen.analysis.piezo import PiezoTensor # flake8: noqa
from pymatgen.analysis.piezo import PiezoTensor # noqa: F401
from abipy.iotools import ETSF_Reader

View File

@@ -1,114 +0,0 @@
#!/bin/bash
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import copy
from matplotlib.colors import LogNorm
class Hexc(object):
def __init__(self, nsppol, nkpt, nval, ncond, hsize, ham, kpt, vcks2t,lomo, homo,lumo,humo,diag):
self.nsppol = nsppol
self.nkpt = nkpt
self.nval = nval
self.ncond = ncond
self.hsize = hsize
self.ham = ham
self.vcks2t = vcks2t
self.kpt = kpt
self.lomo = lomo
self.lumo = lumo
self.homo = homo
self.humo = humo
self.diag = diag
@classmethod
def from_file(cls,filename):
root = nc.Dataset(filename, 'r')
nsppol = len(root.dimensions['number_of_spins'])
nkpt = len(root.dimensions['number_of_kpoints'])
nval = len(root.dimensions['max_number_of_valence_bands'])
ncond = len(root.dimensions['max_number_of_conduction_bands'])
hsize = len(root.dimensions['total_number_of_transitions'])
kpt = root.variables['reduced_coordinates_of_kpoints'][:,:] # Nkpt, 3
ham = root.variables['hamiltonian'][:,:,:] # Itp, It, [0,1]
ham_c = np.vectorize(complex)(ham[:,:,0],ham[:,:,1])
vcks2t = root.variables['vcks2t'][:,:,:,:] # S, K, C, V
lomo = root.variables['lomo'][:]
homo = root.variables['homo'][:]
lumo = root.variables['lumo'][:]
humo = root.variables['humo'][:]
diag = root.variables['diagonal'][:,:]
diag_c = np.vectorize(complex)(diag[:,0],diag[:,1])
return cls(nsppol, nkpt, nval, ncond, hsize, ham_c, kpt, vcks2t, lomo, homo, lumo, humo, diag_c)
def plot(self,ic=None,iv=None,icp=None,ivp=None,list_ikpt=None, title=None):
fullmat = None
chk_bands = (ic is not None and iv is not None and icp is not None and ivp is not None)
if not chk_bands:
fullmat = self.ham
else:
fullmat = np.zeros((self.nkpt,self.nkpt),dtype=np.complex)
for is1 in np.arange(self.nsppol):
for is2 in np.arange(self.nsppol):
for ik in np.arange(self.nkpt):
for ikp in np.arange(self.nkpt):
it = self.vcks2t[is1,ik,ic-self.lumo[is1],iv-self.lomo[is1]]-1
itp = self.vcks2t[is2,ikp,icp-self.lumo[is2],ivp-self.lomo[is2]]-1
fullmat[ik,ikp] = self.ham[it,itp] # Fortran -> python
mymat = None
if list_ikpt is None:
mymat = fullmat
else:
nkpt2 = len(list_ikpt)
mymat = np.zeros((nkpt2,nkpt2),dtype=np.complex)
if chk_bands:
for ik1 in np.arange(nkpt2):
for ik2 in np.arange(nkpt2):
mymat[ik1,ik2] = fullmat[list_ikpt[ik1],list_ikpt[ik2]]
else:
mymat = np.zeros((self.nval*self.ncond*nkpt2,self.nval*self.ncond*nkpt2))
it2 = 0
for is1 in np.arange(self.nsppol):
for ik in np.arange(nkpt2):
for ic in np.arange(self.lumo,self.humo+1):
for iv in np.arange(self.lomo,self.homo+1):
itp2 = 0
for is2 in np.arange(self.nsppol):
for ikp in np.arange(nkpt2):
for icp in np.arange(self.lumo,self.humo+1):
for ivp in np.arange(self.lomo,self.homo+1):
it = self.vcks2t[is1,list_ikpt[ik],ic-self.lumo[is1],iv-self.lomo[is1]]-1
itp = self.vcks2t[is2,list_ikpt[ikp],icp-self.lumo[is2],ivp-self.lomo[is2]]-1
mymat[it2,itp2] = self.ham[it,itp] # Fortran -> python
itp2 += 1
it2 += 1
fig = plt.figure()
imgplot = plt.imshow(np.abs(mymat)*self.nkpt,norm=LogNorm(vmin=1e-5,vmax=10),interpolation='none')
if title is not None:
plt.title(title)
plt.colorbar()
def get_filtered_kpt(self,xval=None,yval=None,zval=None):
list_ikpt = []
for ikpt in np.arange(self.nkpt):
if ( (xval is None or np.abs(self.kpt[ikpt,0]-xval) < 1e-8)
and (yval is None or np.abs(self.kpt[ikpt,1]-yval) < 1e-8)
and (zval is None or np.abs(self.kpt[ikpt,2]-zval) < 1e-8)):
list_ikpt.append(ikpt)
return list_ikpt

View File

@@ -1,51 +0,0 @@
#!/bin/bash
import netCDF4 as nc
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import copy
from hexc import Hexc
if __name__ == '__main__':
ham2 = Hexc.from_file('t31o_DS3_HEXC.nc')
ham3 = Hexc.from_file('t31o_DS5_HEXC.nc')
ham4 = Hexc.from_file('t32o_DS1_HEXC_I.nc')
ham5 = Hexc.from_file('t33o_DS1_HEXC_I.nc')
ham6 = Hexc.from_file('t34o_DS1_HEXC_I.nc')
list_ikpt2 = None
list_ikpt3 = None
list_ikpt4 = None
list_ikpt5 = None
list_ikpt6 = None
#yval = 0.06
#zval = 0.065
#list_ikpt2 = ham2.get_filtered_kpt(yval=yval,zval=zval)
#list_ikpt3 = ham3.get_filtered_kpt(yval=yval,zval=zval)
#list_ikpt4 = ham4.get_filtered_kpt(yval=yval,zval=zval)
#list_ikpt5 = ham5.get_filtered_kpt(yval=yval,zval=zval)
#list_ikpt6 = ham6.get_filtered_kpt(yval=yval,zval=zval)
ham2.ham[:,:] = ham2.ham[:,:] - np.diag(ham2.diag[:])
ham3.ham[:,:] = ham3.ham[:,:] - np.diag(ham3.diag[:])
icval = 4
ivval = 1
icpval = 4
ivpval = 1
ham2.plot(ic=icval,iv=ivval,icp=icpval,ivp=ivpval,list_ikpt=list_ikpt2,title='Coarse')
ham3.plot(ic=icval,iv=ivval,icp=icpval,ivp=ivpval,list_ikpt=list_ikpt3,title='Dense')
ham4.plot(ic=icval,iv=ivval,icp=icpval,ivp=ivpval,list_ikpt=list_ikpt4,title='M1')
ham5.plot(ic=icval,iv=ivval,icp=icpval,ivp=ivpval,list_ikpt=list_ikpt5,title='M2')
ham6.plot(ic=icval,iv=ivval,icp=icpval,ivp=ivpval,list_ikpt=list_ikpt6,title='M3')
plt.show()

View File

@@ -1,147 +0,0 @@
"""
This module installs a hook for the built-in open so that one can detect if there are
files that have not been closed. To install the hook, import the module in the script
and call install. Example
import open_hook
# Install the hook
open_hook.install()
# Show open files.
open_hook.print_open_files()
# Remove the hook
open_hook.remove()
Initial version taken from
http://stackoverflow.com/questions/2023608/check-what-files-are-open-in-python
"""
from __future__ import print_function, division, unicode_literals
import sys
try:
import __builtin__
except ImportError:
# Py3k
import builtins as __builtin__
# Save the builtin version (do not change!)
_builtin_file = __builtin__.file
_builtin_open = __builtin__.open
# Global variables used to control the logging volume and the log file.
_VERBOSE = 1
_LOGFILE = sys.stdout
def set_options(**kwargs):
"""Set the value of verbose and logfile."""
verbose = kwargs.get("verbose", None)
if verbose is not None:
_VERBOSE = verbose
logfile = kwargs.get("logfile", None)
if logfile is not None:
_LOGFILE = logfile
# Set of files that have been opened in the python code.
_openfiles = set()
def print_open_files(file=sys.stdout):
"""Print the list of the files that are still open."""
print("### %d OPEN FILES: [%s]" % (len(_openfiles), ", ".join(f._x for f in _openfiles)), file=file)
def num_openfiles():
"""Number of files in use."""
return len(_openfiles)
def clear_openfiles():
"""Reinitialize the set of open files."""
_openfiles = set()
class _newfile(_builtin_file):
def __init__(self, *args, **kwargs):
self._x = args[0]
if _VERBOSE:
print("### OPENING %s ###" % str(self._x), file=_LOGFILE)
_builtin_file.__init__(self, *args, **kwargs)
_openfiles.add(self)
def close(self):
if _VERBOSE:
print("### CLOSING %s ###" % str(self._x), file=_LOGFILE)
_builtin_file.close(self)
try:
_openfiles.remove(self)
except KeyError:
print("File %s is not in openfiles set" % self)
def _newopen(*args, **kwargs):
"""Replacement for python open."""
return _newfile(*args, **kwargs)
def install():
"""Install the hook."""
__builtin__.file = _newfile
__builtin__.open = _newopen
def remove():
"""Remove the hook."""
__builtin__.file = _builtin_file = __builtin__.file
__builtin__.open = _builtin_open = __builtin__.open
def test_open_hook():
"""Unit tests for open_hook"""
import unittest
import tempfile
py3k = False
try:
import __builtin__
except ImportError:
py3k = True
import builtins as __builtin__
@unittest.skipIf(py3k, "OpenHook not compatible with py3k")
class OpenHookTest(unittest.TestCase):
"""Test open_hook module."""
def setUp(self):
from abipy.tools import open_hook
self.open_hook = open_hook
self.open_hook.install()
def test_open(self):
"""Test if open_hook detects open files."""
self.assertEqual(self.open_hook.num_openfiles(), 0)
_, fname = tempfile.mkstemp()
fh = open(fname)
self.open_hook.print_open_files()
self.assertEqual(self.open_hook.num_openfiles(), 1)
def tearDown(self):
self.open_hook.remove()
# Here we should have the __builtin__ version
self.assertTrue(file is __builtin__.file)
self.assertTrue(open is __builtin__.open)
if __name__ == "__main__":
import unittest
unittest.main()

View File

@@ -13,20 +13,20 @@ universal=1
[nosetests]
verbosity=0
with-doctest=1
exclude-dir=abipy/gui
exclude-dir=abipy/gui
[pycodestyle]
count = True
max-line-length = 130
statistics = True
ignore = E116,E121,E122,E123,E124,E126,E128,E129,E131,E133,E201,E203,E226,E231,E241,E242,E261,E262,E265,E266,E306,E401,E402,E704,W503,W504,W505,E701,E702,E731,E741,W605
exclude= .git,__pycache__,abipy/gui,abipy/abio/abivar_database,dev_scripts,docs,.ipynb_checkpoints,abipy/examples/flows/develop/,test_*.py,abipy/benchmarks,abipy/data/refs
ignore = E114,E116,E121,E122,E123,E124,E126,E127,E128,E129,E131,E133,E201,E203,E226,E231,E241,E242,E261,E262,E265,E266,E306,E401,E402,E704,W503,W504,W505,E701,E702,E731,E741,W605
exclude= .git,__pycache__,abipy/gui,abipy/abio/abivar_database,dev_scripts,docs,.ipynb_checkpoints,abipy/examples/flows/develop/,test_*.py,abipy/data/refs
[flake8]
# max-complexity = 10
max-line-length = 130
exclude= .git,__pycache__,abipy/gui,abipy/abio/abivar_database,dev_scripts,docs,.ipynb_checkpoints,abipy/examples/flows/develop/,test_*.py,abipy/benchmarks,abipy/data/refs,abilab.py
extend_ignore = E116,E121,E122,E123,E124,E126,E128,E129,E131,E133,E201,E203,E226,E231,E241,E242,E261,E262,E265,E266,E306,E401,E402,E704,W503,W504,W505,E701,E702,E731,E741,W605,F841
extend_ignore = E114,E116,E121,E122,E123,E124,E126,E127,E128,E129,E131,E133,E201,E203,E226,E231,E241,E242,E261,E262,E265,E266,E306,E401,E402,E704,W503,W504,W505,E701,E702,E731,E741,W605,F841
[pydocstyle]
ignore = D105,D2,D4