Simple expressions parser re-introduced in code for atomic positions and
occupations. Added two very simple tests to check it. Not yet implemented
for NEB path selection.


git-svn-id: http://qeforge.qe-forge.org/svn/q-e/trunk/espresso@5060 c92efa57-630b-4861-b058-cf58834340f0
paulatto 2008-07-22 12:40:51 +00:00
parent f793e4265b
commit 31b7791b9a
6 changed files with 1030 additions and 488 deletions
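
The diff below replaces the list-directed READ of the ATOMIC_POSITIONS and OCCUPATIONS cards with field-by-field parsing: each card line is split with get_field from the parser module, and every numeric field is passed through eval_infix, so plain numbers and simple arithmetic expressions such as 1/4 or 2*(1/8) are accepted interchangeably. A side effect is that OCCUPATIONS no longer has to be written ten values per line. The following minimal sketch is an illustration only, not part of the diff; it assumes compilation against the QE Modules that provide kinds, parser and eval_infix, using only the interfaces visible in the hunks below:

! Illustrative sketch, not part of the commit: split one card line into
! fields and evaluate each numeric field as a simple infix expression,
! using get_field and field_count from the parser module and eval_infix
! as an external function, as in the diff below.
PROGRAM eval_card_line_sketch
   USE kinds,  ONLY : DP
   USE parser, ONLY : field_count, get_field
   IMPLICIT NONE
   REAL(DP), EXTERNAL :: eval_infix
   CHARACTER(len=256) :: input_line, field_str
   REAL(DP) :: pos(3)
   INTEGER  :: nfield, k, ierr
   !
   input_line = 'Si 1/4 2*(1/8) 1/(2/(1/2))'
   CALL field_count( nfield, input_line )        ! 4 whitespace-separated fields
   DO k = 1, 3
      CALL get_field( k+1, field_str, input_line )
      pos(k) = eval_infix( ierr, field_str )     ! each expression evaluates to 0.25
      IF ( ierr /= 0 ) STOP 'error parsing expression'
   END DO
   PRINT '(3F12.6)', pos
END PROGRAM eval_card_line_sketch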


@@ -16,7 +16,7 @@ MODULE read_cards_module
USE kinds, ONLY : DP
USE io_global, ONLY : stdout
USE constants, ONLY : angstrom_au
USE parser, ONLY : field_count, read_line
USE parser, ONLY : field_count, read_line, get_field
USE io_global, ONLY : ionode, ionode_id
!
USE input_parameters
@@ -422,6 +422,11 @@ MODULE read_cards_module
LOGICAL :: tend
LOGICAL, SAVE :: tread = .FALSE.
!
REAL(DP),EXTERNAL :: eval_infix
INTEGER :: ifield, ierr
REAL(DP) :: field_value
CHARACTER(len=256) :: field_str, error_msg
!
!
IF ( tread ) THEN
CALL errore( 'card_atomic_positions', 'two occurrences', 2 )
@@ -551,37 +556,42 @@ MODULE read_cards_module
CALL errore( 'read_cards', &
'ATOMIC_POSITIONS with sic, 8 columns required', 1 )
!
IF ( nfield == 4 ) THEN
!
READ(input_line,*) lb_pos, ( rd_pos(k,ia), k = 1, 3 )
!
ELSE IF ( nfield == 7 ) THEN
!
READ(input_line,*) lb_pos, rd_pos(1,ia), &
rd_pos(2,ia), &
rd_pos(3,ia), &
if_pos(1,ia), &
if_pos(2,ia), &
if_pos(3,ia)
!
ELSE IF ( nfield == 8 ) THEN
!
READ(input_line,*) lb_pos, rd_pos(1,ia), &
rd_pos(2,ia), &
rd_pos(3,ia), &
if_pos(1,ia), &
if_pos(2,ia), &
if_pos(3,ia), &
id_loc(ia)
!
ELSE
!
IF ( nfield /= 4 .and. nfield /= 7 .and. nfield /= 8) &
CALL errore( 'read_cards', 'wrong number of columns ' // &
& 'in ATOMIC_POSITIONS', ia )
!
END IF
! read atom symbol (field 1)
CALL get_field(1, lb_pos, input_line)
lb_pos = TRIM(lb_pos)
!
lb_pos = ADJUSTL( lb_pos )
error_msg = 'Error while parsing atomic position card.'
! read field 2 (atom X coordinate)
CALL get_field(2, field_str, input_line)
rd_pos(1,ia) = eval_infix(ierr, field_str )
CALL errore('card_atomic_positions', error_msg, ierr)
! read field 3 (atom Y coordinate)
CALL get_field(3, field_str, input_line)
rd_pos(2,ia) = eval_infix(ierr, field_str )
CALL errore('card_atomic_positions', error_msg, ierr)
! read field 4 (atom Z coordinate)
CALL get_field(4, field_str, input_line)
rd_pos(3,ia) = eval_infix(ierr, field_str )
CALL errore('card_atomic_positions', error_msg, ierr)
!
IF ( nfield >= 7 ) THEN
! read constraints (fields 5-7, if present)
CALL get_field(5, field_str, input_line)
read(field_str, *) if_pos(1,ia)
CALL get_field(6, field_str, input_line)
read(field_str, *) if_pos(2,ia)
CALL get_field(7, field_str, input_line)
read(field_str, *) if_pos(3,ia)
ENDIF
!
IF ( nfield == 8 ) THEN
CALL get_field(8, field_str, input_line)
read(field_str, *) id_loc(ia)
END IF
!
match_label: DO is = 1, ntyp
!
@@ -961,9 +971,11 @@ MODULE read_cards_module
!
IMPLICIT NONE
!
CHARACTER(LEN=256) :: input_line
CHARACTER(LEN=256) :: input_line, field_str
INTEGER :: is, nx10, i, j, nspin0
INTEGER :: nfield, nbnd_read, nf, ierr
LOGICAL, SAVE :: tread = .FALSE.
REAL(DP),EXTERNAL :: eval_infix
!
!
IF ( tread ) THEN
@@ -975,15 +987,20 @@ MODULE read_cards_module
ALLOCATE ( f_inp ( nbnd, nspin0 ) )
DO is = 1, nspin0
!
nx10 = 10 * INT( nbnd / 10 )
DO i = 1, nx10, 10
CALL read_line( input_line )
READ(input_line,*,err=100) ( f_inp(j,is), j = i, ( i + 9 ) )
END DO
IF ( MOD( nbnd, 10 ) > 0 ) THEN
CALL read_line( input_line )
READ(input_line,*,err=100) ( f_inp(j,is), j = ( nx10 + 1 ), nbnd)
END IF
nbnd_read = 0
DO WHILE ( nbnd_read < nbnd)
CALL read_line( input_line )
CALL field_count( nfield, input_line )
!
DO nf = 1,nfield
nbnd_read = nbnd_read+1
CALL get_field(nf, field_str, input_line)
!
f_inp(nbnd_read,is) = eval_infix(ierr, field_str )
CALL errore('card_occupations',&
'Error parsing occupation: '//TRIM(field_str), nbnd_read*ierr)
ENDDO
ENDDO
!
END DO
!
@@ -991,8 +1008,6 @@ MODULE read_cards_module
tread = .TRUE.
!
RETURN
100 call errore('card_occupations', 'Error while reading occupations! &
&Note: you cannot specify more than 10 cards per line!',1)
!
END SUBROUTINE card_occupations
!

File diff suppressed because it is too large

tests/eval_infix.in Normal file

@@ -0,0 +1,20 @@
&control
calculation = 'scf'
tstress=.true.
/
&system
ibrav=2, celldm(1) =10.20,
nat=2, ntyp=1,
ecutwfc=12.0
/
&electrons
/
ATOMIC_SPECIES
Si 28.086 Si.vbc.UPF
ATOMIC_POSITIONS
Si 1-1 0/2 (1+1)*0
Si 1/4 2*(1/8) 1/(2/(1/2))
K_POINTS
2
0.250000 0.250000 0.250000 1.00
0.250000 0.250000 0.750000 3.00

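Evaluated by hand, the two expression lines in the ATOMIC_POSITIONS card above give
Si: 1-1 = 0, 0/2 = 0, (1+1)*0 = 0 -> ( 0.000000 0.000000 0.000000 )
Si: 1/4 = 0.25, 2*(1/8) = 0.25, 1/(2/(1/2)) = 0.25 -> ( 0.250000 0.250000 0.250000 )
i.e. the tau( 1) and tau( 2) positions reported in tests/eval_infix.ref below.
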
tests/eval_infix.in2 Executable file

@@ -0,0 +1,24 @@
&control
calculation='scf',
/
&system
ibrav=1,
celldm(1)=10.0,
nat=1,
ntyp=1,
nbnd=6,
ecutwfc=25.0,
ecutrho=200.0,
occupations='from_input',
/
&electrons
mixing_beta=0.25,
/
ATOMIC_SPECIES
O 15.99994 O.pz-rrkjus.UPF
ATOMIC_POSITIONS
O 0.000000000 0.000000000 0.000000000
K_POINTS {gamma}
OCCUPATIONS
2 4/3 1+1/3 (1+2/2*3)/3 3*0 1-1

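Evaluated by hand (with the usual left-to-right precedence, so 2/2*3 = 3), the OCCUPATIONS line above gives
2 4/3 1+1/3 (1+2/2*3)/3 3*0 1-1 -> 2.0000 1.3333 1.3333 1.3333 0.0000 0.0000
matching the "Occupations read from input" values printed in tests/eval_infix.ref2 below.
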
tests/eval_infix.ref Normal file

@@ -0,0 +1,226 @@
Program PWSCF v.4.0 starts ...
Today is 22Jul2008 at 10:58:55
Parallel version (MPI)
Number of processors in use: 1
For Norm-Conserving or Ultrasoft (Vanderbilt) Pseudopotentials or PAW
Current dimensions of program pwscf are:
Max number of different atomic species (ntypx) = 10
Max number of k-points (npk) = 40000
Max angular momentum in pseudopotentials (lmaxx) = 3
Subspace diagonalization in iterative solution of the eigenvalue problem:
Too few procs for parallel algorithm
we need at least 4 procs per pool
a serial algorithm will be used
Planes per process (thick) : nr3 = 16 npp = 16 ncplane = 256
Proc/ planes cols G planes cols G columns G
Pool (dense grid) (smooth grid) (wavefct grid)
1 16 163 1459 16 163 1459 55 283
bravais-lattice index = 2
lattice parameter (a_0) = 10.2000 a.u.
unit-cell volume = 265.3020 (a.u.)^3
number of atoms/cell = 2
number of atomic types = 1
number of electrons = 8.00
number of Kohn-Sham states= 4
kinetic-energy cutoff = 12.0000 Ry
charge density cutoff = 48.0000 Ry
convergence threshold = 1.0E-06
mixing beta = 0.7000
number of iterations used = 8 plain mixing
Exchange-correlation = SLA PZ NOGX NOGC (1100)
celldm(1)= 10.200000 celldm(2)= 0.000000 celldm(3)= 0.000000
celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000
crystal axes: (cart. coord. in units of a_0)
a(1) = ( -0.500000 0.000000 0.500000 )
a(2) = ( 0.000000 0.500000 0.500000 )
a(3) = ( -0.500000 0.500000 0.000000 )
reciprocal axes: (cart. coord. in units 2 pi/a_0)
b(1) = ( -1.000000 -1.000000 1.000000 )
b(2) = ( 1.000000 1.000000 1.000000 )
b(3) = ( -1.000000 1.000000 -1.000000 )
PseudoPot. # 1 for Si read from file Si.vbc.UPF
Pseudo is Norm-conserving, Zval = 4.0
Generated by new atomic code, or converted to UPF format
Using radial grid of 431 points, 2 beta functions with:
l(1) = 0
l(2) = 1
atomic species valence mass pseudopotential
Si 4.00 28.08600 Si( 1.00)
48 Sym.Ops. (with inversion)
Cartesian axes
site n. atom positions (a_0 units)
1 Si tau( 1) = ( 0.0000000 0.0000000 0.0000000 )
2 Si tau( 2) = ( 0.2500000 0.2500000 0.2500000 )
number of k points= 2
cart. coord. in units 2pi/a_0
k( 1) = ( 0.2500000 0.2500000 0.2500000), wk = 0.5000000
k( 2) = ( 0.2500000 0.2500000 0.7500000), wk = 1.5000000
G cutoff = 126.4975 ( 1459 G-vectors) FFT grid: ( 16, 16, 16)
Largest allocated arrays est. size (Mb) dimensions
Kohn-Sham Wavefunctions 0.01 Mb ( 186, 4)
NL pseudopotentials 0.02 Mb ( 186, 8)
Each V/rho on FFT grid 0.06 Mb ( 4096)
Each G-vector array 0.01 Mb ( 1459)
G-vector shells 0.00 Mb ( 43)
Largest temporary arrays est. size (Mb) dimensions
Auxiliary wavefunctions 0.05 Mb ( 186, 16)
Each subspace H/S matrix 0.00 Mb ( 16, 16)
Each <psi_i|beta_j> matrix 0.00 Mb ( 8, 4)
Arrays for rho mixing 0.50 Mb ( 4096, 8)
Initial potential from superposition of free atoms
starting charge 7.99901, renormalised to 8.00000
Starting wfc are 8 atomic wfcs
total cpu time spent up to now is 0.12 secs
per-process dynamical memory: 3.2 Mb
Self-consistent Calculation
iteration # 1 ecut= 12.00 Ry beta=0.70
Davidson diagonalization with overlap
ethr = 1.00E-02, avg # of iterations = 2.0
Threshold (ethr) on eigenvalues was too large:
Diagonalizing with lowered threshold
Davidson diagonalization with overlap
ethr = 7.93E-04, avg # of iterations = 1.0
total cpu time spent up to now is 0.15 secs
total energy = -15.79103983 Ry
Harris-Foulkes estimate = -15.81239602 Ry
estimated scf accuracy < 0.06375741 Ry
iteration # 2 ecut= 12.00 Ry beta=0.70
Davidson diagonalization with overlap
ethr = 7.97E-04, avg # of iterations = 1.0
total cpu time spent up to now is 0.16 secs
total energy = -15.79409517 Ry
Harris-Foulkes estimate = -15.79442220 Ry
estimated scf accuracy < 0.00230261 Ry
iteration # 3 ecut= 12.00 Ry beta=0.70
Davidson diagonalization with overlap
ethr = 2.88E-05, avg # of iterations = 2.0
total cpu time spent up to now is 0.17 secs
total energy = -15.79447768 Ry
Harris-Foulkes estimate = -15.79450039 Ry
estimated scf accuracy < 0.00006345 Ry
iteration # 4 ecut= 12.00 Ry beta=0.70
Davidson diagonalization with overlap
ethr = 7.93E-07, avg # of iterations = 2.0
total cpu time spent up to now is 0.19 secs
total energy = -15.79449472 Ry
Harris-Foulkes estimate = -15.79449644 Ry
estimated scf accuracy < 0.00000455 Ry
iteration # 5 ecut= 12.00 Ry beta=0.70
Davidson diagonalization with overlap
ethr = 5.69E-08, avg # of iterations = 2.5
total cpu time spent up to now is 0.20 secs
End of self-consistent calculation
k = 0.2500 0.2500 0.2500 ( 180 PWs) bands (ev):
-4.8701 2.3792 5.5371 5.5371
k = 0.2500 0.2500 0.7500 ( 186 PWs) bands (ev):
-2.9165 -0.0653 2.6795 4.0355
! total energy = -15.79449556 Ry
Harris-Foulkes estimate = -15.79449558 Ry
estimated scf accuracy < 0.00000005 Ry
The total energy is the sum of the following terms:
one-electron contribution = 4.83378726 Ry
hartree contribution = 1.08428951 Ry
xc contribution = -4.81281375 Ry
ewald contribution = -16.89975858 Ry
convergence has been achieved in 5 iterations
entering subroutine stress ...
total stress (Ry/bohr**3) (kbar) P= -30.30
-0.00020597 0.00000000 0.00000000 -30.30 0.00 0.00
0.00000000 -0.00020597 0.00000000 0.00 -30.30 0.00
0.00000000 0.00000000 -0.00020597 0.00 0.00 -30.30
Writing output data file pwscf.save
PWSCF : 0.27s CPU time, 0.31s wall time
init_run : 0.07s CPU
electrons : 0.08s CPU
stress : 0.01s CPU
Called by init_run:
wfcinit : 0.00s CPU
potinit : 0.00s CPU
Called by electrons:
c_bands : 0.05s CPU ( 6 calls, 0.008 s avg)
sum_band : 0.02s CPU ( 6 calls, 0.003 s avg)
v_of_rho : 0.01s CPU ( 6 calls, 0.002 s avg)
mix_rho : 0.00s CPU ( 6 calls, 0.000 s avg)
Called by c_bands:
init_us_2 : 0.00s CPU ( 28 calls, 0.000 s avg)
cegterg : 0.05s CPU ( 12 calls, 0.004 s avg)
Called by *egterg:
h_psi : 0.04s CPU ( 35 calls, 0.001 s avg)
g_psi : 0.00s CPU ( 21 calls, 0.000 s avg)
cdiaghg : 0.00s CPU ( 31 calls, 0.000 s avg)
Called by h_psi:
add_vuspsi : 0.00s CPU ( 35 calls, 0.000 s avg)
General routines
calbec : 0.00s CPU ( 37 calls, 0.000 s avg)
cft3s : 0.03s CPU ( 354 calls, 0.000 s avg)
davcio : 0.00s CPU ( 40 calls, 0.000 s avg)
Parallel routines

tests/eval_infix.ref2 Normal file

@@ -0,0 +1,231 @@
Program PWSCF v.4.0 starts ...
Today is 22Jul2008 at 11:39:28
Parallel version (MPI)
Number of processors in use: 1
For Norm-Conserving or Ultrasoft (Vanderbilt) Pseudopotentials or PAW
Current dimensions of program pwscf are:
Max number of different atomic species (ntypx) = 10
Max number of k-points (npk) = 40000
Max angular momentum in pseudopotentials (lmaxx) = 3
6 6
2.00000000000000 1.33333333333333 1.33333333333333
1.33333333333333 0.000000000000000E+000 0.000000000000000E+000
gamma-point specific algorithms are used
Subspace diagonalization in iterative solution of the eigenvalue problem:
Too few procs for parallel algorithm
we need at least 4 procs per pool
a serial algorithm will be used
Planes per process (thick) : nr3 = 48 npp = 48 ncplane = 2304
Planes per process (smooth): nr3s= 32 npps= 32 ncplanes= 1024
Proc/ planes cols G planes cols G columns G
Pool (dense grid) (smooth grid) (wavefct grid)
1 48 1597 47833 32 793 16879 193 2103
bravais-lattice index = 1
lattice parameter (a_0) = 10.0000 a.u.
unit-cell volume = 1000.0000 (a.u.)^3
number of atoms/cell = 1
number of atomic types = 1
number of electrons = 6.00
number of Kohn-Sham states= 6
kinetic-energy cutoff = 25.0000 Ry
charge density cutoff = 200.0000 Ry
convergence threshold = 1.0E-06
mixing beta = 0.2500
number of iterations used = 8 plain mixing
Exchange-correlation = SLA PZ NOGX NOGC (1100)
celldm(1)= 10.000000 celldm(2)= 0.000000 celldm(3)= 0.000000
celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000
crystal axes: (cart. coord. in units of a_0)
a(1) = ( 1.000000 0.000000 0.000000 )
a(2) = ( 0.000000 1.000000 0.000000 )
a(3) = ( 0.000000 0.000000 1.000000 )
reciprocal axes: (cart. coord. in units 2 pi/a_0)
b(1) = ( 1.000000 0.000000 0.000000 )
b(2) = ( 0.000000 1.000000 0.000000 )
b(3) = ( 0.000000 0.000000 1.000000 )
PseudoPot. # 1 for O read from file O.pz-rrkjus.UPF
Pseudo is Ultrasoft, Zval = 6.0
Generated by new atomic code, or converted to UPF format
Using radial grid of 1269 points, 4 beta functions with:
l(1) = 0
l(2) = 0
l(3) = 1
l(4) = 1
Q(r) pseudized with 0 coefficients
atomic species valence mass pseudopotential
O 6.00 15.99994 O ( 1.00)
48 Sym.Ops. (with inversion)
Cartesian axes
site n. atom positions (a_0 units)
1 O tau( 1) = ( 0.0000000 0.0000000 0.0000000 )
number of k points= 1
cart. coord. in units 2pi/a_0
k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 2.0000000
G cutoff = 506.6059 ( 23917 G-vectors) FFT grid: ( 48, 48, 48)
G cutoff = 253.3030 ( 8440 G-vectors) smooth grid: ( 32, 32, 32)
Occupations read from input
2.0000 1.3333 1.3333 1.3333 0.0000 0.0000
Largest allocated arrays est. size (Mb) dimensions
Kohn-Sham Wavefunctions 0.10 Mb ( 1052, 6)
NL pseudopotentials 0.13 Mb ( 1052, 8)
Each V/rho on FFT grid 1.69 Mb ( 110592)
Each G-vector array 0.18 Mb ( 23917)
G-vector shells 0.00 Mb ( 424)
Largest temporary arrays est. size (Mb) dimensions
Auxiliary wavefunctions 0.19 Mb ( 1052, 24)
Each subspace H/S matrix 0.00 Mb ( 24, 24)
Each <psi_i|beta_j> matrix 0.00 Mb ( 8, 6)
Arrays for rho mixing 13.50 Mb ( 110592, 8)
Initial potential from superposition of free atoms
starting charge 6.00000, renormalised to 6.00000
negative rho (up, down): 0.101E-04 0.000E+00
Starting wfc are 4 atomic + 2 random wfc
total cpu time spent up to now is 1.56 secs
per-process dynamical memory: 19.2 Mb
Self-consistent Calculation
iteration # 1 ecut= 25.00 Ry beta=0.25
Davidson diagonalization with overlap
ethr = 1.00E-02, avg # of iterations = 6.0
Threshold (ethr) on eigenvalues was too large:
Diagonalizing with lowered threshold
Davidson diagonalization with overlap
ethr = 4.69E-06, avg # of iterations = 10.0
negative rho (up, down): 0.823E-05 0.000E+00
total cpu time spent up to now is 2.19 secs
total energy = -31.29441645 Ry
Harris-Foulkes estimate = -31.29442348 Ry
estimated scf accuracy < 0.00028144 Ry
iteration # 2 ecut= 25.00 Ry beta=0.25
Davidson diagonalization with overlap
ethr = 4.69E-06, avg # of iterations = 1.0
negative rho (up, down): 0.115E-03 0.000E+00
total cpu time spent up to now is 2.55 secs
total energy = -31.29443476 Ry
Harris-Foulkes estimate = -31.29442165 Ry
estimated scf accuracy < 0.00012382 Ry
iteration # 3 ecut= 25.00 Ry beta=0.25
Davidson diagonalization with overlap
ethr = 2.06E-06, avg # of iterations = 2.0
negative rho (up, down): 0.212E-03 0.000E+00
total cpu time spent up to now is 2.92 secs
total energy = -31.29444852 Ry
Harris-Foulkes estimate = -31.29444503 Ry
estimated scf accuracy < 0.00001258 Ry
iteration # 4 ecut= 25.00 Ry beta=0.25
Davidson diagonalization with overlap
ethr = 2.10E-07, avg # of iterations = 1.0
negative rho (up, down): 0.704E-05 0.000E+00
total cpu time spent up to now is 3.24 secs
End of self-consistent calculation
k = 0.0000 0.0000 0.0000 ( 1052 PWs) bands (ev):
-23.0787 -8.4554 -8.4554 -8.4554 -0.4300 4.4874
highest occupied, lowest unoccupied level (ev): -8.4554 -0.4300
! total energy = -31.29445489 Ry
Harris-Foulkes estimate = -31.29444957 Ry
estimated scf accuracy < 0.00000012 Ry
The total energy is the sum of the following terms:
one-electron contribution = -31.95365500 Ry
hartree contribution = 17.14669333 Ry
xc contribution = -6.27322222 Ry
ewald contribution = -10.21427100 Ry
convergence has been achieved in 4 iterations
Writing output data file pwscf.save
PWSCF : 3.33s CPU time, 3.62s wall time
init_run : 1.46s CPU
electrons : 1.68s CPU
Called by init_run:
wfcinit : 0.02s CPU
potinit : 0.14s CPU
Called by electrons:
c_bands : 0.22s CPU ( 5 calls, 0.043 s avg)
sum_band : 0.68s CPU ( 5 calls, 0.137 s avg)
v_of_rho : 0.21s CPU ( 5 calls, 0.042 s avg)
newd : 0.37s CPU ( 5 calls, 0.074 s avg)
mix_rho : 0.11s CPU ( 5 calls, 0.022 s avg)
Called by c_bands:
init_us_2 : 0.02s CPU ( 11 calls, 0.001 s avg)
regterg : 0.21s CPU ( 5 calls, 0.042 s avg)
Called by *egterg:
h_psi : 0.18s CPU ( 26 calls, 0.007 s avg)
s_psi : 0.00s CPU ( 26 calls, 0.000 s avg)
g_psi : 0.01s CPU ( 20 calls, 0.000 s avg)
rdiaghg : 0.02s CPU ( 24 calls, 0.001 s avg)
Called by h_psi:
add_vuspsi : 0.00s CPU ( 26 calls, 0.000 s avg)
General routines
calbec : 0.01s CPU ( 31 calls, 0.000 s avg)
cft3s : 0.70s CPU ( 160 calls, 0.004 s avg)
interpolate : 0.20s CPU ( 10 calls, 0.020 s avg)
davcio : 0.00s CPU ( 4 calls, 0.000 s avg)
Parallel routines