Add GWR scripts

This commit is contained in:
gmatteo 2023-08-05 16:52:08 +02:00
parent 5b5c47737c
commit 8b7caf7e2a
29 changed files with 551 additions and 235 deletions

View File

@ -12,7 +12,6 @@ class ChangeEnumStr:
class RUNL(ChangeEnumStr, IntEnum):
"""
Values of optdriver corresponding to the different run-levels. See defs_basis.F90
"""
@ -47,7 +46,9 @@ class WFK_TASK(ChangeEnumStr, Enum):
class GWR_TASK(ChangeEnumStr, Enum): # StrEnum added in 3.11
"""String """
"""
String flags defining the task to be performed in the GWR code.
"""
HDIAGO = "HDIAGO"
HDIAGO_FULL = "HDIAGO_FULL"
CC4S = "CC4S"
@ -59,3 +60,4 @@ class GWR_TASK(ChangeEnumStr, Enum): # StrEnum added in 3.11
G0EW = "G0EW"
RPA_ENERGY = "RPA_ENERGY"
GAMMA_GW = "GAMMA_GW"
CHI0 = "CHI0"
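A minimal usage sketch (inp and scf_input are hypothetical AbinitInput objects; make_gwr_qprange_input is extended later in this commit): the flag is set either directly with set_vars or through the factory method.

from abipy.abio.inputs import RUNL, GWR_TASK

# inp / scf_input: hypothetical AbinitInput objects prepared elsewhere.
inp.set_vars(optdriver=RUNL.GWR, gwr_task=GWR_TASK.RPA_ENERGY)

# or build a full GWR input from a GS-SCF input (see make_gwr_qprange_input below):
gwr_input = scf_input.make_gwr_qprange_input(gwr_ntau=6, nband=8, ecuteps=4,
                                             gwr_task=GWR_TASK.CHI0)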

View File

@ -598,7 +598,7 @@ class AbinitInput(AbiAbstractInput, MSONable, Has_Structure):
if self.spell_check and not is_abivar(key):
raise self.Error("""
Cannot find variable `%s` in internal database. If you believe this is not a typo, use:
Cannot find variable `%s` in the internal database. If you believe this is not a typo, use:
input.set_spell_check(False)
@ -969,7 +969,7 @@ with the Abinit version you are using. Please contact the AbiPy developers.""" %
def set_cutoffs_for_accuracy(self, accuracy: str) -> dict:
"""
Set the value of ecut and pawcutdg (if PAW) using the hints reported in the pseudos.
Set the value of ecut and pawecutdg (if PAW) using the hints reported in the pseudos.
Raises ``AbinitInputError`` if pseudos do not provide hints.
In the case of PAW, pawecutdg is either taken from the hints or computed from the recommended
value of ecut using a scaling factor that depends on ``accuracy``.
@ -998,18 +998,46 @@ with the Abinit version you are using. Please contact the AbiPy developers.""" %
if has_paw: d.update(pawecutdg=pawecutdg)
return self.set_vars(**d)
#def set_auto_scf_nband(self, nsppol: int, nspinor: int, nspden: int,
# occopt: int, tsmear: float) -> dict:
# self._check_nsppol_nspinor(nsppol, nspinor)
def set_scf_nband_semicond(self, nsppol=1, nspinor=1, nspden=1, charge=0.0, spinat=None) -> dict:
"""
Set electronic parameters, smearing options and compute ``nband`` for a GS-SCF run,
assuming a semiconductor. See set_scf_nband.
Return dict with variables.
"""
return self.set_scf_nband(nsppol=nsppol, nspinor=nspinor, nspden=nspden,
occopt=1, tsmear=0.0, charge=charge, spinat=spinat)
# scf_electrons = aobj.Electrons(spin_mode=spin_mode, smearing=smearing, algorithm=scf_algorithm,
# charge=charge, nband=scf_nband, fband=None)
# _find_scf_nband(structure, pseudos, electrons, spinat=None)
# num_valence_electrons = self.structure.num_valence_electrons(pseudos)
# nband = num_valence_electrons // nsppol
# _, rest = divmod(nband, 4)
# nband += rest
# return self.set_vars(nsppol=nsppol, nspinor=nspinor, nsppden=nspden, nband=nband)
def set_scf_nband(self, nsppol: int, nspinor: int, nspden: int,
occopt: int, tsmear: float, charge: float, spinat) -> dict:
"""
Set electronic parameters, smearing options and compute ``nband`` for a GS-SCF run.
Return dict with variables.
Args:
nsppol: Number of spins.
nspinor: Number of spinor components.
nspden: Number of spin density components.
occopt: Occupation option.
tsmear: Electronic smearing.
charge: Extra charge.
spinat: If None and nsppol == 2, spinat is automatically computed.
"""
self._check_nsppol_nspinor(nsppol, nspinor)
spin_mode = aobj.SpinMode(mode="test", nsppol=nsppol, nspinor=nspinor, nspden=nspden)
smearing = aobj.Smearing(occopt, tsmear)
scf_electrons = aobj.Electrons(spin_mode=spin_mode, smearing=smearing, algorithm=None,
charge=charge, nband=None, fband=None)
if nsppol == 2 and spinat is None:
self.set_autospinat()
spinat = self["spinat"]
from abipy.abio.factories import _find_scf_nband
nband = _find_scf_nband(self.structure, self.pseudos, scf_electrons, spinat=spinat)
return self.set_vars(nsppol=nsppol, nspinor=nspinor, nspden=nspden,
occopt=occopt, tsmear=tsmear, charge=charge,
nband=nband, spinat=spinat)
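A short usage sketch, mirroring the unit test added below in this commit:

import abipy.data as abidata
from abipy.abio.inputs import AbinitInput

inp = AbinitInput(structure=abidata.cif_file("si.cif"), pseudos=abidata.pseudos("14si.pspnc"))
d = inp.set_scf_nband(nsppol=1, nspinor=1, nspden=1,
                      occopt=1, tsmear=0.0, charge=0.0, spinat=None)
assert d["nband"] == 8  # value taken from test_abinit_input below.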
def set_kmesh(self, ngkpt, shiftk, kptopt: int = 1) -> dict:
"""
@ -2419,16 +2447,21 @@ with the Abinit version you are using. Please contact the AbiPy developers.""" %
# return new
def make_gwr_qprange_input(self, gwr_ntau, nband, ecuteps, gw_qprange=0, **kwargs) -> AbinitInput:
def make_gwr_qprange_input(self, gwr_ntau, nband, ecuteps, gw_qprange=0, gwr_task=GWR_TASK.G0W0,
**kwargs) -> AbinitInput:
"""
Build and return an input file to compute QP corrections with the GWR code.
Args:
gw_qprange = 0 to Compute the QP corrections only for the fundamental and the direct gap.
gwr_ntau: Number of minimax points.
nband: Number of bands in the Green's function.
ecuteps: Cutoff energy for chi0.
gw_qprange: 0 to compute the QP corrections only for the fundamental and the direct gap.
gwr_task: String defining the GWR task.
"""
new = self.new_with_vars(
optdriver=RUNL.GWR,
gwr_task=GWR_TASK.G0W0,
gwr_task=gwr_task,
gwr_ntau=gwr_ntau,
nband=nband,
ecuteps=ecuteps,

View File

@ -15,7 +15,7 @@ import numpy as np
import pandas as pd
from collections import OrderedDict, deque
from typing import Callable, Union
from typing import Callable, Union, Any
from functools import wraps
from monty.string import is_string, list_strings
from monty.termcolor import cprint
@ -928,11 +928,8 @@ Expecting callable or attribute name or key in abifile.params""" % (type(hue), s
Example:
robot.plot_convergence("energy")
robot.plot_convergence("energy", sortby="nkpt")
robot.plot_convergence("pressure", sortby="nkpt", hue="tsmear")
robot.plot_convergence("pressure", sortby="nkpt", hue="tsmear", abs_conv=1e-3)
"""
if "marker" not in kwargs:

View File

@ -278,6 +278,16 @@ class TestAbinitInput(AbipyTest):
assert inp.pseudos_abivars["pseudos"] == f"{pseudo.filepath}"
# Test set_scf_nband.
d = inp.set_scf_nband(nsppol=1, nspinor=1, nspden=1,
occopt=1, tsmear=0.0, charge=0.0, spinat=None)
assert d["nband"] == 8
# Test set_scf_nband_semicond.
d = inp.set_scf_nband_semicond()
assert d["nband"] == 8
def test_new_with_structure(self):
"""Testing new_with_structure."""
si2_inp = AbinitInput(structure=abidata.cif_file("si.cif"), pseudos=abidata.pseudos("14si.pspnc"),

View File

@ -2636,14 +2636,26 @@ to build an appropriate supercell from partial occupancies or alternatively use
class StructDiff:
"""
Print difference between structures.
Print difference among structures.
"""
def __init__(self, labels: list[str], structures):
"""
Args:
labels: Labels associated to structures
structures: List of structures or objects that can be converted to structures.
"""
self.labels = labels
self.structs = [Structure.as_structure(s) for s in structures]
# Consistency check.
if len(self.labels) != len(self.structs):
raise ValueError("len(self.labels) != len(self.structs)")
if len(set(self.labels)) != len(self.labels):
raise ValueError(f"Found duplicated entries in: {self.labels}")
natom = len(self.structs[0])
if any(len(s) != natom for s in self.structs):
raise ValueError(f"structures have different number of atoms!")
def del_label(self, label: str) -> None:
"""Remove entry associated to label."""
@ -2652,8 +2664,6 @@ class StructDiff:
del self.labels[il]
del self.structs[il]
#def summarize(self, file=sys.stdout) -> None:
def get_lattice_dataframe(self) -> pd.DataFrame:
"""
Build dataframe with lattice parameters.
@ -2681,6 +2691,7 @@ class StructDiff:
# shift_frac = site2.frac_coords - site1.frac_coords
d_list = []
natom = len(self.structs[0])
for isite in range(natom):
for label, structure in zip(self.labels, self.structs):
site = structure[isite]
@ -2696,7 +2707,13 @@ class StructDiff:
def tabulate(self, only_lattice=False, allow_rigid_shift=True, with_cart_coords=False, file=sys.stdout) -> None:
"""
Tabulate differences among structures.
Tabulate lattice parameters and atomic positions.
Args:
only_lattice: True to compare only the lattice parameters.
allow_rigid_shift: If True, allow for a rigid shift of the atomic positions when comparing structures.
with_cart_coords: True to include Cartesian coordinates in the output.
file: Output stream.
"""
# Compare lattices.
df = self.get_lattice_dataframe()
@ -2712,3 +2729,7 @@ class StructDiff:
print("\nAtomic sites (Ang units):", file=file)
print(df.to_string(), file=file)
print("", file=file)
#def diff(self, ref_label, with_cart_coords=False, file=sys.stdout) -> None:
# df = self.get_lattice_dataframe()
# df = self.get_sites_dataframe(with_cart_coords=with_cart_coords)
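A hedged usage sketch (assuming StructDiff lives in abipy.core.structure as in this diff; the expanded cell is made up purely for illustration):

import abipy.data as abidata
from abipy.core.structure import Structure, StructDiff

s1 = Structure.from_file(abidata.cif_file("si.cif"))
s2 = s1.copy()
s2.scale_lattice(s2.volume * 1.02)  # slightly expanded cell, just for illustration.
diff = StructDiff(["INITIAL", "EXPANDED"], [s1, s2])
diff.tabulate()                     # prints lattice parameters and atomic positions.
df = diff.get_lattice_dataframe()   # or work directly with the pandas DataFrame.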

View File

@ -0,0 +1,37 @@
"""
Structures used in the VASP GWR paper.
See Table I of https://journals.aps.org/prb/pdf/10.1103/PhysRevB.75.235102
"""
from __future__ import annotations
from abipy.core.structure import Structure
# Chemical formula to (crystal_type, species, a_ang)
_SYMB_TO_DATA = {
"Si": ("zinc-blende", ("Si", "Si"), 5.430),
"GaAs": ("zinc-blende", ("Ga", "As"), 5.648),
"SiC": ("zinc-blende", ("Si", "C"), 4.350),
"ZnO": ("zinc-blende", ("Zn", "O"), 4.580),
"C": ("zinc-blende", ("C", "C"), 3.567),
"BN": ("zinc-blende", ("B", "N"), 3.615),
"MgO": ("rock-salt", ("Mg", "O"), 4.213),
"LiF": ("rock-salt", ("Li", "F"), 4.010),
}
def get_gwr_structure(symbol: str) -> Structure:
"""Return Structure from symbol"""
stype, species, a = _SYMB_TO_DATA[symbol]
if stype == "zinc-blende":
return Structure.zincblende(a, species)
if stype == "rock-salt":
return Structure.rocksalt(a, species)
raise ValueError(f"Invalid {stype=}")
if __name__ == "__main__":
import numpy as np
for symbol in _SYMB_TO_DATA:
structure = get_gwr_structure(symbol)
print("formula:", structure.formula, "a:", structure.lattice.abc[0] * np.sqrt(2))

View File

@ -19,10 +19,10 @@ from abipy.core.mixins import AbinitNcFile, Has_Structure, Has_ElectronBands, No
from abipy.iotools import ETSF_Reader
from abipy.tools import duck
from abipy.tools.typing import Figure, KptSelect
from abipy.tools.plotting import (ArrayPlotter, add_fig_kwargs, get_ax_fig_plt, get_axarray_fig_plt, Marker,
from abipy.tools.plotting import (add_fig_kwargs, get_ax_fig_plt, get_axarray_fig_plt, Marker,
set_axlims, set_ax_xylabels, set_visible, rotate_ticklabels, set_grid_legend, hspan_ax_line, Exposer)
from abipy.electrons.ebands import ElectronBands, RobotWithEbands
from abipy.electrons.gw import SelfEnergy, QPState, QPList, SigresFile, SigresRobot
from abipy.electrons.gw import SelfEnergy, QPState, QPList #, SigresFile, SigresRobot
from abipy.abio.robots import Robot
__all__ = [
@ -1303,7 +1303,6 @@ class TchimVsSus:
((0, 0, 0), (1, 0, 0)),
((1, 0, 0), (0, 1, 0)),
]
qpoint_list = [
[0, 0, 0],
[0.5, 0.5, 0],
@ -1312,7 +1311,6 @@ class TchimVsSus:
with TchimVsSus("runo_DS3_TCHIM.nc", "AW_CD/runo_DS3_SUS.nc") as o:
o.expose_qpoints_gpairs(qpoint_list, gpairs, exposer="mpl")
"""
def __init__(self, tchim_filepath: str, sus_filepath: str):
"""
Args:
@ -1331,7 +1329,7 @@ class TchimVsSus:
Activated at the end of the with statement. It automatically closes the files.
"""
self.sus_file.close()
#self.sigres.close()
self.tchi_reader.close()
@add_fig_kwargs
def plot_qpoint_gpairs(self, qpoint, gpairs,
@ -1367,8 +1365,7 @@ class TchimVsSus:
def _find_g(gg, gvec):
for ig, g_sus in enumerate(gvec):
if all(gg == g_sus):
return ig
if all(gg == g_sus): return ig
else:
raise ValueError(f"Cannot find g-vector: {gg}")
@ -1456,6 +1453,10 @@ class TchimVsSus:
"""
Plot the Fourier components of the polarizability for a list of q-points and,
for each q-point, a list of (g, g') pairs.
Args:
qpoint_list: List of q-points to consider.
gpairs: List of (g, g') pairs.
exposer: "mpl" for matplotlib, "panel" for web interface.
"""
with Exposer.as_exposer(exposer) as e:
for qpoint in qpoint_list:

View File

@ -0,0 +1,73 @@
#!/usr/bin/env python
r"""
GWR vs Adler-Wiser polarizability
=================================
This script computes the irreducible polarizability with both GWR and the quartic-scaling algorithm
(Adler-Wiser equation), using the same minimax mesh on the imaginary axis so
that one can compare the results.
"""
import os
import sys
import abipy.data as data
import abipy.abilab as abilab
from abipy import flowtk
def build_flow(options):
# Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
if not options.workdir:
options.workdir = os.path.basename(sys.argv[0]).replace(".py", "").replace("run_","flow_")
from abipy.data.gwr_structures import get_gwr_structure
symbol = "Si"
structure = get_gwr_structure(symbol)
from abipy.flowtk.psrepos import get_repo_from_name
pseudos = get_repo_from_name("ONCVPSP-PBE-SR-PDv0.4").get_pseudos("stringent")
scf_input = abilab.AbinitInput(structure=structure, pseudos=pseudos)
scf_input.set_cutoffs_for_accuracy("normal")
ecut = scf_input["ecut"]
scf_input.set_scf_nband_semicond()
# Global variables.
scf_input.set_vars(
tolvrs=1e-8,
paral_kgb=1,
)
scf_input.set_kmesh(
ngkpt=[2, 2, 2],
#ngkpt=[8, 8, 8],
shiftk=[0.0, 0.0, 0.0], # IMPORTANT: k-grid for GWR must be Gamma-centered.
)
# Get max number of PWs.
dims, _ = scf_input.abiget_dims_spginfo()
mpw = dims["mpw"]
#print(f"{mpw=}")
flow = flowtk.Flow(workdir=options.workdir)
# GS-SCF run to get the DEN, followed by direct diago to obtain green_nband bands.
from abipy.flowtk.gwr_works import DirectDiagoWork, GWRChiCompareWork
green_nband = -1  # -1 means full diagonalization.
diago_work = DirectDiagoWork.from_scf_input(scf_input, green_nband)
flow.register_work(diago_work)
work = GWRChiCompareWork.from_scf_input(scf_input, gwr_ntau=6, nband=int(mpw*0.2), ecuteps=4,
den_node=diago_work[0], wfk_node=diago_work[1])
flow.register_work(work)
return flow
@flowtk.flow_main
def main(options):
return build_flow(options)
if __name__ == "__main__":
sys.exit(main())

View File

@ -0,0 +1,91 @@
#!/usr/bin/env python
r"""
GWR flow with convergence studies
=================================
This script shows how to compute the G0W0 corrections in silicon.
More specifically, we build a flow to analyze the convergence of the QP corrections
wrt the number of bands in the self-energy. More complicated convergence studies
can be implemented on the basis of this example.
"""
import os
import sys
import abipy.data as data
import abipy.abilab as abilab
from abipy import flowtk
def build_flow(options):
from abipy.data.gwr_structures import get_gwr_structure
symbol = "Si"
structure = get_gwr_structure(symbol)
# Working directory (default is the name of the script with '.py' removed and "run_" replaced by "flow_")
if not options.workdir:
options.workdir = os.path.basename(sys.argv[0]).replace(".py", "").replace("run_","flow_")
from abipy.flowtk.psrepos import get_repo_from_name
pseudos = get_repo_from_name("ONCVPSP-PBE-SR-PDv0.4").get_pseudos("stringent")
scf_input = abilab.AbinitInput(structure=structure, pseudos=pseudos)
scf_input.set_cutoffs_for_accuracy("normal")
ecut = scf_input["ecut"]
scf_input.set_scf_nband_semicond()
# Global variables.
scf_input.set_vars(
tolvrs=1e-8,
paral_kgb=1,
)
scf_input.set_kmesh(
ngkpt=[2, 2, 2],
#ngkpt=[8, 8, 8],
shiftk=[0.0, 0.0, 0.0], # IMPORTANT: k-grid for GWR must be Gamma-centered.
)
# Get max number of PWs.
dims, _ = scf_input.abiget_dims_spginfo()
mpw = dims["mpw"]
flow = flowtk.Flow(workdir=options.workdir)
# GS-SCF run to get the DEN, followed by direct diago to obtain green_nband bands.
from abipy.flowtk.gwr_works import DirectDiagoWork, GWRSigmaConvWork
green_nband = -1  # -1 means full diagonalization.
diago_work = DirectDiagoWork.from_scf_input(scf_input, green_nband)
flow.register_work(diago_work)
# Build template for GWR.
ecuteps = 12
gwr_template = scf_input.make_gwr_qprange_input(gwr_ntau=6, nband=int(mpw * 0.9), ecuteps=ecuteps)
gwr_ntau_list = list(range(6, 34, 2))
gwr_ntau_list = [6, 8]
# 1) Change the value of one variable:
varname_values = ("gwr_ntau", gwr_ntau_list)
# or take the Cartesian product of two or more variables with e.g.:
#
varname_values = [
("gwr_ntau", gwr_ntau_list),
("userra", [0.0, 1e-6), # Compute QP corrections with/without regterm.
#("ecuteps", [4, 6]),
]
gwr_work = GWRSigmaConvWork.from_varname_values(
varname_values, gwr_template, den_node=diago_work[0], wfk_node=diago_work[1])
flow.register_work(gwr_work)
return flow
@flowtk.flow_main
def main(options):
return build_flow(options)
if __name__ == "__main__":
sys.exit(main())

View File

@ -22,32 +22,33 @@ def build_flow(options):
if not options.workdir:
options.workdir = os.path.basename(sys.argv[0]).replace(".py", "").replace("run_","flow_")
#from abipy.flowtk.psrepos import get_repo_from_name
#pseudos = get_repo_from_name("ONCVPSP-PBE-SR-PDv0.4").get_pseudos("stringent")
scf_input = abilab.AbinitInput(structure=data.cif_file("si.cif"),
pseudos=data.pseudos("14si.pspnc"))
# Global variables. gw_kmesh is used in all datasets except DATASET 1.
# Global variables.
scf_input.set_vars(
ecut=6,
tolvrs=1e-6,
tolvrs=1e-8,
nband=4,
paral_kgb=1,
)
# IMPORTANT: k-grid for GWR must be Gamma-centered.
scf_input.set_kmesh(
ngkpt=[2, 2, 2],
shiftk=[0.0, 0.0, 0.0],
shiftk=[0.0, 0.0, 0.0], # IMPORTANT: k-grid for GWR must be Gamma-centered.
)
flow = flowtk.Flow(workdir=options.workdir)
# GS-SCF to get the DEN, followed by direct diago to obtain green_nband bands.
from abipy.flowtk.gwr_works import DirectDiagoWork
green_nband = -1
# GS-SCF run to get the DEN, followed by direct diago to obtain green_nband bands.
from abipy.flowtk.gwr_works import DirectDiagoWork, GWRSigmaConvWork
green_nband = -1 # -1 this means full diago
diago_work = DirectDiagoWork.from_scf_input(scf_input, green_nband)
flow.register_work(diago_work)
# Build template for GWR
# Build template for GWR.
gwr_template = scf_input.make_gwr_qprange_input(gwr_ntau=6, nband=8, ecuteps=4)
# Two possibilities:
@ -56,16 +57,14 @@ def build_flow(options):
varname_values = ("nband", [8, 12, 14])
# or take the Cartesian product of two or more variables with e.g.:
#
#varname_values = [
# ("gwr_ntau", [6, 8]),
# ("ecuteps", [2, 4]),
#]
from abipy.flowtk.gwr_works import GwrSigmaConvWork
gwr_work = GWRSigmaConvWork.from_varname_values(
varname_values, gwr_template, den_node=diago_work[0], wfk_node=diago_work[1])
flow.register_work(gwr_work)
return flow
@ -86,69 +85,4 @@ def main(options):
if __name__ == "__main__":
sys.exit(main())
############################################################################
#
# Run the script with:
#
# run_si_gwr.py -s
#
# The last three tasks (``w0_t3``, ``w0_t4``, ``w0_t5``) are the SigmaTask who have produced
# a netcdf file with the GW results with different number of bands.
# We can check this with the command:
#
# abirun.py flow_si_g0w0/ listext SIGRES
#
# .. code-block:: bash
#
# Found 3 files with extension `SIGRES` produced by the flow
# File Size [Mb] Node_ID Node Class
# ---------------------------------------- ----------- --------- ------------
# flow_si_g0w0/w0/t3/outdata/out_SIGRES.nc 0.05 241325 SigmaTask
# flow_si_g0w0/w0/t4/outdata/out_SIGRES.nc 0.08 241326 SigmaTask
# flow_si_g0w0/w0/t5/outdata/out_SIGRES.nc 0.13 241327 SigmaTask
#
# Let's use the SIGRES robot to collect and analyze the results:
#
# abirun.py flow_si_g0w0/ robot SIGRES
#
# and then, inside the ipython terminal, type:
#
# .. code-block:: ipython
#
# In [1]: df = robot.get_dataframe()
# In [2]: df
# Out[2]:
# nsppol qpgap ecutwfn \
# flow_si_g0w0/w0/t3/outdata/out_SIGRES.nc 1 3.627960 5.914381651684836
# flow_si_g0w0/w0/t4/outdata/out_SIGRES.nc 1 3.531781 5.914381651684836
# flow_si_g0w0/w0/t5/outdata/out_SIGRES.nc 1 3.512285 5.914381651684836
#
# ecuteps \
# flow_si_g0w0/w0/t3/outdata/out_SIGRES.nc 3.6964885323070074
# flow_si_g0w0/w0/t4/outdata/out_SIGRES.nc 3.6964885323070074
# flow_si_g0w0/w0/t5/outdata/out_SIGRES.nc 3.6964885323070074
#
# ecutsigx scr_nband \
# flow_si_g0w0/w0/t3/outdata/out_SIGRES.nc 5.914381651684846 25
# flow_si_g0w0/w0/t4/outdata/out_SIGRES.nc 5.914381651684846 25
# flow_si_g0w0/w0/t5/outdata/out_SIGRES.nc 5.914381651684846 25
#
# sigma_nband gwcalctyp scissor_ene \
# flow_si_g0w0/w0/t3/outdata/out_SIGRES.nc 10 0 0.0
# flow_si_g0w0/w0/t4/outdata/out_SIGRES.nc 20 0 0.0
# flow_si_g0w0/w0/t5/outdata/out_SIGRES.nc 30 0 0.0
#
# nkibz
# flow_si_g0w0/w0/t3/outdata/out_SIGRES.nc 6
# flow_si_g0w0/w0/t4/outdata/out_SIGRES.nc 6
# flow_si_g0w0/w0/t5/outdata/out_SIGRES.nc 6
#
# In [3]: %matplotlib
# In [4]: df.plot("sigma_nband", "qpgap", marker="o")
#
# .. image:: https://github.com/abinit/abipy_assets/blob/master/run_si_g0w0.png?raw=true
# :alt: QP results in Si plotted vs the KS energy e0.
#
sys.exit(main())

View File

@ -1,15 +1,18 @@
# coding: utf-8
"""
Works and Flows for GWR calculations (GW with supercells).
NB: An Abinit build with Scalapack is required to run GWR.
"""
from __future__ import annotations
#import numpy as np
import os
from abipy.abio.inputs import AbinitInput, RUNL, GWR_TASK
from abipy.electrons.gwr import GwrRobot
from abipy.tools.iotools import make_executable
from .nodes import Node
from .tasks import TaskManager # ScfTask, NscfTask,
from .tasks import TaskManager
from .works import Work
@ -19,8 +22,6 @@ class DirectDiagoWork(Work):
using the density produced by a GS-SCF run and produces a WFK file with
empty states in the outdir of the second task.
NB: An Abinit build with Scalapack is required to run GWR.
.. rubric:: Inheritance Diagram
.. inheritance-diagram:: DirectDiagoWork
"""
@ -29,8 +30,10 @@ class DirectDiagoWork(Work):
def from_scf_input(cls, scf_input: AbinitInput, green_nband: int,
manager: TaskManager=None) -> DirectDiagoWork:
"""
Build object from an input representing a GS-SCF calculation.
Args:
scf_input: Input for the GS-SCF calculations.
scf_input: Input for the GS-SCF calculation.
green_nband: Number of bands to compute in the direct diagonalization.
A negative value activates full diagonalization with nband equal to
the number of PWs.
@ -45,24 +48,25 @@ class DirectDiagoWork(Work):
return work
class _BaseGwrWork(Work):
class _BaseGWRWork(Work):
@classmethod
def from_varname_values(cls, varname_values: tuple, gwr_template: AbinitInput,
den_node: Node, wfk_node: Node,
manager: TaskManager = None):
"""
Generate the work by changing the values of selected variables in a template for GWR calculations.
Args:
varname_values:
gwr_template:
den_node:
wfk_node:
manager:
varname_values: Tuple or List of tuples. See examples below.
gwr_template: GWR input to be used as template to generate other inputs.
den_node: The node that produced the DEN file.
wfk_node: The node that produced the WFK file.
manager: Abipy Task Manager.
Example:
varname_values = ("nband", [8, 12, 14])
Work.from_varname_values(varname_values, gwr_template, den_node, wfk_node)
or:
@ -81,14 +85,14 @@ class _BaseGwrWork(Work):
return work
class GwrSigmaConvWork(_BaseGwrWork):
class GWRSigmaConvWork(_BaseGWRWork):
"""
This work performs multiple QP calculations with the GWR code
and produces `csv` files in its `outdata` directory
with the QP results obtained with the different parameters.
.. rubric:: Inheritance Diagram
.. inheritance-diagram:: GwrSigmaConvWork
.. inheritance-diagram:: GWRSigmaConvWork
"""
def on_all_ok(self):
@ -98,22 +102,117 @@ class GwrSigmaConvWork(_BaseGwrWork):
gwr_files = self.get_all_outdata_files_with_ext("_GWR.nc")
with GwrRobot.from_files(gwr_files) as gwr_robot:
dirgaps_df = gwr_robot.get_dirgaps_dataframe()
dirgaps_df.to_excel(self.outdir.path_in("dirgaps.xlsx"))
dirgaps_df.to_csv(self.outdir.path_in("dirgaps.csv"))
qpdata_df = gwr_robot.get_dataframe()
qpdata_df.to_excel(self.outdir.path_in("qpdata.xlsx"))
qpdata_df.to_csv(self.outdir.path_in("qpdata.csv"))
with gwr_robot.get_pyscript(self.outdir.path_in("gwr_robot.py")) as script:
script.add_text("""
#robot.plot_selfenergy_conv(spin=0, kpoint=?, band=?, axis="wreal", sortby=None, hue=None)
#robot.plot_qpgaps_convergence(self, qp_kpoints="all", qp_type="qpz0", sortby=None, hue=None)
""")
return super().on_all_ok()
#class GwrRpaEneConvWork(_BaseGwrWork):
class GWRChiCompareWork(_BaseGWRWork):
"""
This work computes the irreducible polarizability along the imaginary axis
with both GWR and the quartic-scaling algorithm, using the same minimax mesh
so that one can compare the two quantities.
.. rubric:: Inheritance Diagram
.. inheritance-diagram:: GWRChiCompareWork
"""
@classmethod
def from_scf_input(cls, scf_input: AbinitInput, gwr_ntau, nband, ecuteps,
den_node: Node, wfk_node: Node,
gwr_kwargs=None, scr_kwargs=None,
manager: TaskManager = None):
"""
Build the Work from an input representing a GS-SCF calculation.
Args:
scf_input: input for GS run.
gwr_ntau: Number of points in minimax mesh.
nband: Number of bands to build G and chi.
ecuteps: Cutoff energy for chi.
den_node: The node that produced the DEN file.
wfk_node: The node that produced the WFK file.
gwr_kwargs: Extra kwargs used to build the GWR input.
scr_kwargs: Extra kwargs used to build the SCR input.
manager: Abipy Task Manager.
"""
gwr_input = scf_input.make_gwr_qprange_input(gwr_ntau=gwr_ntau, nband=nband,
ecuteps=ecuteps, gwr_task=GWR_TASK.CHI0)
gwr_input.set_vars(prtsuscep=1, iomode=3)
if gwr_kwargs is not None: gwr_input.set_vars(**gwr_kwargs)
chi_input = scf_input.new_with_vars(optdriver=3,
gwcalctyp=1, # Analytic continuation.
nfreqim=gwr_ntau,
ecuteps=ecuteps,
nband=nband,
prtsuscep=1,
userie=4242, # Magic number to use minimax mesh in the SCR driver.
)
if scr_kwargs is not None: chi_input.set_vars(**scr_kwargs)
work = cls(manager=manager)
work.register_scr_task(chi_input, deps={wfk_node: "WFK"})
work.register_gwr_task(gwr_input, deps={den_node: "DEN", wfk_node: "WFK"})
return work
def on_all_ok(self):
"""
Write python script to compare chi0 matrix elements.
"""
sus_path = self[0].outdir.path_in("out_SUS.nc")
tchi_path = self[1].outdir.path_in("out_TCHI.nc")
py_text = f"""
#!/usr/bin/env python
from abipy.electrons.gwr import TchimVsSus
o = TchimVsSus("{tchi_path}", "{sus_path}")
gpairs = [
((0, 0, 0), (0, 0, 0)),
((0, 0, 0), (1, 0, 0)),
((0, 0, 0), (2, 0, 0)),
((1, 0, 0), (1, 0, 0)),
((1, 0, 0), (0, 1, 0)),
#((0, 1, 0), (1, 0, 0)), # transpose of previous entry
#((2, 0, 0), (2, 0, 0)),
#((2, 0, 0), (-1, -1, 0)),
#((1, 0, 0), (-1, -1, 0)),
#((2, 1, 0), (-1, -1, 1)),
#((2, 1, -2), (-1, -1, 2)),
]
qpoint_list = [
[0, 0, 0],
[0.5, 0, 0],
]
import seaborn as sns
sns.set(context="paper", style='darkgrid', palette='deep',
font='sans-serif', font_scale=0.8, color_codes=False, rc=None)
o.expose_qpoints_gpairs(qpoint_list, gpairs, exposer="panel")
"""
py_path = os.path.join(self.workdir, "plot.py")
with open(py_path, "wt") as fh:
fh.write(py_text)
make_executable(py_path)
return super().on_all_ok()
#class GwrRpaEneConvWork(_BaseGWRWork):
# """
# This work computes the RPA energy for different number
# of points in the minimax mesh.
@ -125,6 +224,6 @@ class GwrSigmaConvWork(_BaseGwrWork):
# gwr_files = self.get_all_outdata_files_with_ext("_GWR.nc")
# with GwrRobot.from_files(gwr_files) as robot:
# df = robot.get_rpaene_dataframe(with_params=True)
# df.to_excel(self.outdir.path_in("rpaene.xlsx"))
# df.to_csv(self.outdir.path_in("rpaene.csv"))
#
# return super().on_all_ok()

View File

@ -1,10 +1,10 @@
"""
This module provides an API to deal with pseudopotential repositories.
A repo is essentially a collection of versioned pseudopotentials files installed within the same root directory.
A repo is a collection of versioned pseudopotentials files installed within the same root directory.
A repo has a unique name that encodes the XC functional, relativity type, the kind of pseudopotential and the version.
The default root is ~/.abinit/pseudos although it is possible to change it via the ABIPY_PSREPOS_ROOT env variable
The default root is $HOME/.abinit/pseudos although it is possible to change it via the ABIPY_PSREPOS_ROOT env variable
Note that all pseudos in a repo share the same XC functional, the type (NC, PAW) and the
Note that all pseudos in a repo share the same XC functional, the type (e.g. NC or PAW) and the
treatment of relativistic corrections although one might have multiple pseudos for the same element.
Due to this ambiguity, a repo cannot be directly used for running calculations in an automatic fashion
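A typical access pattern (taken verbatim from the GWR flow scripts added in this commit):

from abipy.flowtk.psrepos import get_repo_from_name
pseudos = get_repo_from_name("ONCVPSP-PBE-SR-PDv0.4").get_pseudos("stringent")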
@ -178,7 +178,7 @@ class PseudosRepo(abc.ABC):
version: str, url: str):
"""
Args:
ps_generator: Name of the pseudopotential generator
ps_generator: Name of the pseudopotential generator.
xc_name: XC functional.
relativity_type: SR for scalar-relativistic or FR for fully relativistic.
project_name: Name of the project associated to this repository.
@ -297,11 +297,14 @@ class PseudosRepo(abc.ABC):
class OncvpspRepo(PseudosRepo):
"""
A repo containing ONCVPSP pseudopotentials.
"""
@classmethod
def from_github(cls, xc_name: str, relativity_type: str, version: str) -> OncvpspRepo:
"""
Build a OncvpsRepo assuming a github repository.
Build an OncvpspRepo from a github repository.
"""
ps_generator, project_name = "ONCVPSP", "PD"
@ -335,6 +338,9 @@ class OncvpspRepo(PseudosRepo):
]
def validate_checksums(self, verbose: int) -> None:
"""
Compare checksums given in the djson file with the ones computed from the files after the download.
"""
print(f"\nValidating md5 checksums of {repr(self)}...")
djson_paths = [os.path.join(self.dirpath, jfile) for jfile in ("standard.djson", "stringent.djson")]
@ -394,6 +400,9 @@ class OncvpspRepo(PseudosRepo):
class JthRepo(PseudosRepo):
"""
A Repo containing JTH PAW pseudos.
"""
@classmethod
def from_abinit_website(cls, xc_name: str, relativity_type: str, version: str) -> JthRepo:
@ -466,7 +475,15 @@ def repo_from_name(repo_name: str) -> PseudosRepo:
def tabulate_repos(repos: list[PseudosRepo], exclude: Optional[list[str]] = None,
with_citations: bool = False, verbose: int = 0) -> str:
"""
Return string with info on a list of PseudosRepo.
Args:
repos: List of PseudosRepo objects.
exclude: List of keys to exclude.
with_citations: True if citations should be reported.
verbose: Verbosity level.
"""
bool2color = {True: "green", False: "red"}
rows = []

View File

@ -1,5 +1,5 @@
"""
Classes to perform ASE calculations with machine-learned potentials.
Objects to perform ASE calculations with machine-learned potentials.
"""
from __future__ import annotations
@ -77,18 +77,6 @@ def abisanitize_atoms(atoms: Atoms, **kwargs) -> Atoms:
return to_ase_atoms(get_atoms(new_structure), calc=atoms.calc)
#def get_structure(obj: Any) -> Structure:
# """Return abipy Structure from object."""
# if isinstance(obj, str):
# return Structure.from_file(obj)
# if isinstance(obj, PmgStructure):
# return obj
# if isinstance(obj, Atoms):
# return AseAtomsAdaptor.get_structure(obj, cls=Structure)
#
# raise TypeError(f"Don't know how to construct Atoms object from {type(obj)}")
def fix_atoms(atoms: Atoms, fix_inds: list[int], fix_symbols: list[str], verbose: int) -> None:
"""
Fix atoms by indices and by symbols.
@ -161,38 +149,6 @@ def print_atoms(atoms: Atoms, title=None, cart_forces=None, stream=sys.stdout) -
pf("\t", frac_coords, cart_forces[ia])
#def scompare_two_atoms(label1: str, atoms1: Atoms,
# label2: str, atoms2: Atoms) -> str:
# """Compare two atoms object, return string."""
# lines = []; app = lines.append
#
# d1 = dict(zip(_CELLPAR_KEYS, atoms1.cell.cellpar()))
# d1["label"] = label1
# d2 = dict(zip(_CELLPAR_KEYS, atoms2.cell.cellpar()))
# d2["label"] = label2
#
# cell_df = pd.DataFrame([d1, d2])
# app(str(cell_df))
#
# if len(atoms1) != len(atoms2):
# app("Atoms instances have different number of atoms!")
# return "\n".join(lines)
#
# # (natom, 3) arrays
# natom = len(atoms1)
# pos1, pos2 = atoms1.get_positions(), atoms2.get_positions()
# data = {}
# for i, key in enumerate(("x", "y", "z")):
# data[key] = np.vstack((pos1[:,i], pos2[:,i])).ravel('F')
#
# data["symbols"] = np.vstack((atoms1.symbols, atoms2.symbols)).ravel('F')
# data["atoms"] = np.vstack((0 * np.ones(natom, dtype=int), np.ones(natom, dtype=int))).ravel('F')
# pos_df = pd.DataFrame(data)
# app(pos_df.to_string())
#
# return "\n".join(lines)
def diff_two_structures(label1, structure1, label2, structure2, fmt, file=sys.stdout):
"""
Diff two structures using format `fmt` and print results to `file`.
@ -204,6 +160,34 @@ def diff_two_structures(label1, structure1, label2, structure2, fmt, file=sys.st
for l1, l2 in zip(lines1, lines2):
print(l1.ljust(pad), " | ", l2, file=file)
import enum
class _StrEnum(str, enum.Enum):
def __new__(cls, *args):
for arg in args:
if not isinstance(arg, (str, enum.auto)):
raise TypeError(
"Values of StrEnums must be strings: {} is a {}".format(
repr(arg), type(arg)
)
)
return super().__new__(cls, *args)
def __str__(self):
return self.value
# The first argument to this function is documented to be the name of the
# enum member, not `self`:
# https://docs.python.org/3.6/library/enum.html#using-automatic-values
def _generate_next_value_(name, *_):
return name
class RX_MODE(_StrEnum): # StrEnum added in 3.11
no = "no"
ions = "ions"
cell = "cell"
@dataclass
class AseResults:
@ -336,7 +320,7 @@ def dataframe_from_results_list(index: list, results_list: list[AseResults],
return df
def ase_optimizer_cls(s: str | None) -> Type | list[str]:
def ase_optimizer_cls(s: str | Optimizer) -> Type | list[str]:
"""
Return an ASE Optimizer subclass from string `s`.
If s == "__all__", return list with all Optimizer subclasses supported by ASE.
@ -359,14 +343,14 @@ def ase_optimizer_cls(s: str | None) -> Type | list[str]:
return getattr(optimize, s)
def relax_atoms(atoms: Atoms, relax_cell: bool, optimizer: str, fmax: float, pressure: float,
def relax_atoms(atoms: Atoms, relax_mode: str, optimizer: str, fmax: float, pressure: float,
verbose: int, steps: int = 500, opt_kwargs=None, traj_path=None, calculator=None):
"""
Relax atoms using an ASE calculator and ASE algorithms.
Args:
atoms: ASE atoms.
relax_cell: True to relax the lattice cell.
relax_mode: "ions" to relax the atomic positions only, "cell" to relax both ions and cell (see RX_MODE).
optimizer: name of the ASE optimizer class to use
fmax: total force tolerance for relaxation convergence.
Here fmax is a sum of force and stress forces. Defaults to 0.1.
@ -377,10 +361,12 @@ def relax_atoms(atoms: Atoms, relax_cell: bool, optimizer: str, fmax: float, pre
traj_path:
calculator:
"""
#from ase import optimize
from ase.constraints import ExpCellFilter
from ase.io import read
if relax_mode == RX_MODE.no:
raise ValueError(f"Invalid {relax_mode:}")
opt_kwargs = opt_kwargs or {}
if traj_path is not None:
opt_kwargs["trajectory"] = str(traj_path)
@ -395,14 +381,14 @@ def relax_atoms(atoms: Atoms, relax_cell: bool, optimizer: str, fmax: float, pre
print(*args, file=stream, **kwargs)
with contextlib.redirect_stdout(stream):
pf(f"Relaxation parameters: fmax: {fmax}, relax_cell: {relax_cell}, steps: {steps}, optimizer: {optimizer}")
pf(f"Relaxation parameters: fmax: {fmax}, relax_mode: {relax_mode}, steps: {steps}, optimizer: {optimizer}")
if atoms.constraints:
pf(f"Number of constraints: {len(atoms.constraints)}")
for c in atoms.constraints:
pf("\t", c)
pf("")
dyn = opt_class(ExpCellFilter(atoms, scalar_pressure=pressure), **opt_kwargs) if relax_cell else \
dyn = opt_class(ExpCellFilter(atoms, scalar_pressure=pressure), **opt_kwargs) if relax_mode == RX_MODE.cell else \
opt_class(atoms, **opt_kwargs)
t_start = time.time()
@ -431,6 +417,8 @@ def silence_tensorflow() -> None:
class _MyCalculatorMixin:
"""
Add _delta_forces and _delta_stress attributes to an ASE calculator.
Extend `calculate` so that forces and stresses are corrected accordingly.
"""
def set_delta_forces(self, delta_forces):
"""F_Abinitio - F_ML"""
@ -452,35 +440,37 @@ class _MyCalculatorMixin:
properties: list | None = None,
system_changes: list | None = None,
):
"""
Perform calculation for an input Atoms.
"""
Perform calculation for an input Atoms.
Args:
atoms (ase.Atoms): ase Atoms object
properties (list): list of properties to calculate
system_changes (list): monitor which properties of atoms were
changed for new calculation. If not, the previous calculation
results will be loaded.
"""
super().calculate(atoms=atoms, properties=properties, system_changes=system_changes)
Args:
atoms (ase.Atoms): ase Atoms object
properties (list): list of properties to calculate
system_changes (list): monitor which properties of atoms were
changed for new calculation. If not, the previous calculation
results will be loaded.
"""
super().calculate(atoms=atoms, properties=properties, system_changes=system_changes)
forces = self.results["forces"]
delta_forces = self.get_delta_forces()
if delta_forces is not None:
forces += delta_forces
#print("Updating forces with delta_forces:\n", forces)
self.results.update(
forces=forces,
)
# Apply delta correction to forces.
forces = self.results["forces"]
delta_forces = self.get_delta_forces()
if delta_forces is not None:
forces += delta_forces
#print("Updating forces with delta_forces:\n", forces)
self.results.update(
forces=forces,
)
stress = self.results["stress"]
delta_stress = self.get_delta_stress()
if delta_stress is not None:
stress += delta_stress
#print("Updating stress with delta_stress:\n", stress)
self.results.update(
stress=stress,
)
# Apply delta correction to stress.
stress = self.results["stress"]
delta_stress = self.get_delta_stress()
if delta_stress is not None:
stress += delta_stress
#print("Updating stress with delta_stress:\n", stress)
self.results.update(
stress=stress,
)
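A hedged sketch of how the delta corrections are meant to be used (calc_builder, abi_forces, ml_forces, abi_stress, ml_stress and atoms are hypothetical objects; the pattern follows set_delta_forces_stress_from_abiml_nc below):

calc = calc_builder.get_calculator()           # ML calculator with the mixin applied.
calc.set_delta_forces(abi_forces - ml_forces)  # F_Abinitio - F_ML
calc.set_delta_stress(abi_stress - ml_stress)
atoms.calc = calc
forces = atoms.get_forces()                    # now includes the delta correction.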
class CalcBuilder:
@ -563,7 +553,11 @@ class _MlBase:
self.delta_forces = None
self.delta_stress = None
def set_delta_forces_stress_from_abiml_nc(self, filepath: str):
def set_delta_forces_stress_from_abiml_nc(self, filepath: str) -> None:
"""
Read ab-initio forces from a netcdf file produced by ABINIT and use
these values to set the delta corrections in the calculator.
"""
if os.path.basename(filepath) != "ABIML_RELAX_IN.nc": return
# Read structure, forces and stresses from the nc file produced by ABINIT.
@ -581,7 +575,8 @@ class _MlBase:
# Set delta forces/stresses so that the next invocation of get_calculator includes the deltas.
self.set_delta_forces_stress(abi_forces - ml_forces, abi_stress - ml_stress)
def set_delta_forces_stress(self, delta_forces, delta_stress):
def set_delta_forces_stress(self, delta_forces, delta_stress) -> None:
"""Set the value of the delta corrections."""
self.delta_forces = delta_forces
self.delta_stress = delta_stress
@ -594,11 +589,11 @@ class _MlBase:
calc = self.calc_builder.get_calculator()
if self.delta_forces is not None:
print("delta_forces:\n", self.delta_forces)
print("Setting delta_forces:\n", self.delta_forces)
calc.set_delta_forces(self.delta_forces)
if self.delta_stress is not None:
print("delta_stress:\n", self.delta_stress)
ml_calc.set_delta_stress(self.delta_stress)
print("Setting delta_stress:\n", self.delta_stress)
calc.set_delta_stress(self.delta_stress)
return calc
@ -761,7 +756,7 @@ class MlRelaxer(_MlBase):
print(f"Relaxing structure with relax mode: {self.relax_mode} ...")
relax_kws = dict(calculator=self.atoms.calc,
optimizer=self.optimizer,
relax_cell={"ions": False, "cell": True}[self.relax_mode],
relax_mode=self.relax_mode,
fmax=self.fmax,
pressure=self.pressure,
steps=self.steps,
@ -1009,10 +1004,10 @@ class MlNeb(_MlNebBase):
workdir = self.workdir
initial_atoms, final_atoms = self.initial_atoms, self.final_atoms
if self.relax_mode != "no":
if self.relax_mode != RX_MODE.no:
relax_kws = dict(calculator=self.get_calculator(),
optimizer=self.optimizer,
relax_cell={"ions": False, "cell": True}[self.relax_mode],
relax_mode=self.relax_mode,
fmax=self.fmax,
pressure=self.pressure,
verbose=self.verbose,
@ -1301,11 +1296,11 @@ class MlOrderer(_MlBase):
for group in groups:
print(group[0])
if self.relax_mode != "no":
if self.relax_mode != RX_MODE.no:
print(f"Relaxing structures with relax mode: {self.relax_mode}")
relax_kws = dict(calculator=self.get_calculator(),
optimizer=self.optimizer,
relax_cell={"ions": False, "cell": True}[self.relax_mode],
relax_mode=self.relax_mode,
fmax=self.fmax,
pressure=self.pressure,
return_trajectory=True,
@ -1409,7 +1404,7 @@ class MlPhonons(_MlBase):
print(f"Relaxing atoms with relax mode: {self.relax_mode}.")
relax_kws = dict(calculator=calculator,
optimizer=self.optimizer,
relax_cell={"ions": False, "cell": True}[self.relax_mode],
relax_mode=self.relax_mode,
fmax=self.fmax,
pressure=self.pressure,
steps=self.steps,

View File

@ -93,8 +93,8 @@ class RelaxationProfiler:
# TODO: Fix issue with ixc set by ASE.
self.relax_kwargs = dict(
#ecutsm=0.5, # Smoothing PW cutoff energy (mandatory for cell optimization)
#ionmov=2,
ionmov=22,
ionmov=2,
#ionmov=22,
#ionmov=28, # activate i-pi/socket mode
optcell=0 if self.relax_mode == "ions" else 2,
tolmxf=self.fmax * eV_Ha * Ang_Bohr,
@ -297,16 +297,18 @@ class RelaxationProfiler:
diff = StructDiff(["INITIAL", self.nn_name + "_RELAX", "ABINIT_RELAX", "ABI_ML"],
[self.initial_atoms, ml_opt.atoms, abi_relax.atoms, final_mlabi_relax.atoms])
diff.tabulate()
print(f"{ml_nsteps=}, {abiml_nsteps=}, {abi_relax.nsteps=}")
print(f"GS steps in ML mode {ml_nsteps=}")
print(f"GS steps in ABINIT mode {abi_relax.nsteps=}")
print(f"GS steps in ABI+ML mode {abiml_nsteps=}")
#forces_file.close(); stress_file.close()
# Write json file with output results.
with open(workdir / "data.json", "wt") as fh:
data = dict(
#xc=self.xc
#gs_kwargs=self.gs_kwargs,
#relax_kwargs=self.relax_kwargs,
xc=self.xc,
gs_kwargs=self.gs_kwargs,
relax_kwargs=self.relax_kwargs,
ml_nsteps=ml_nsteps,
abiml_nsteps=abiml_nsteps,
abi_nsteps=abi_relax.nsteps,
@ -315,10 +317,6 @@ class RelaxationProfiler:
abiml_relaxed_structure=Structure.as_structure(final_mlabi_relax.atoms),
)
json.dump(data, fh, indent=4, cls=MontyEncoder)
# Print JSON data
#print("")
#print(marquee("Results", mark="="))
#print(json.dumps(data, indent=4, cls=MontyEncoder), end="\n")
if __name__ == "__main__":

View File

@ -820,6 +820,13 @@ class Marker(namedtuple("Marker", "x y s")):
class Exposer:
"""
Base class for Exposer objects.
Example:
plot_kws = dict(show=False)
with Exposer.as_exposer("panel") as e:
e(obj.plot1(**plot_kws))
e(obj.plot2(**plot_kws))
"""
@classmethod

View File

@ -56,10 +56,11 @@ def add_workdir_verbose_opts(f):
@click.option("-n","--mpi-nprocs", default=2, type=int, show_default=True, help="Number of MPI processes to run ABINIT")
@click.option("-xc", default="PBE", show_default=True, type=click.Choice(["PBE", "PBEsol", "LDA"]),
help="XC functional.")
@click.option("-m","--mpi-runner", default="mpirun", type=str, show_default=True, help="String used to invoke the MPI runner. ")
@add_workdir_verbose_opts
def main(filepath, nn_name,
relax_mode, fmax, steps, optimizer,
kppa, rattle, scale_volume, mpi_nprocs, xc,
kppa, rattle, scale_volume, mpi_nprocs, xc, mpi_runner,
workdir, verbose,
):
@ -91,7 +92,7 @@ def main(filepath, nn_name,
atoms.rattle(stdev=abs(rattle), seed=42)
prof = RelaxationProfiler(atoms, pseudos, xc, kppa, relax_mode, fmax, mpi_nprocs, steps=steps,
verbose=verbose, optimizer=optimizer, nn_name=nn_name)
verbose=verbose, optimizer=optimizer, nn_name=nn_name, mpi_runner=mpi_runner)
prof.run(workdir=workdir)
return 0