Merge branch 'develop' of gitlab.com:QEF/q-e into myqe

Paolo Giannozzi 2019-08-09 08:39:08 +02:00
commit 009d1ee32e
9 changed files with 383 additions and 306 deletions

View File

@@ -7,48 +7,39 @@
!
!
SUBROUTINE set_vdw_corr ( vdw_corr, llondon, ldftd3, ts_vdw, lxdm )
USE io_global, ONLY: stdout
!
IMPLICIT NONE
CHARACTER(LEN=*), INTENT(in) :: vdw_corr
LOGICAL, INTENT(out) :: llondon, ldftd3, ts_vdw, lxdm
!
llondon= .FALSE.
ldftd3 = .FALSE.
ts_vdw = .FALSE.
lxdm = .FALSE.
SELECT CASE( TRIM( vdw_corr ) )
!
CASE( 'grimme-d2', 'Grimme-D2', 'DFT-D', 'dft-d' )
!
llondon= .TRUE.
ldftd3 = .FALSE.
ts_vdw = .FALSE.
lxdm = .FALSE.
!
CASE( 'grimme-d3', 'Grimme-D3', 'DFT-D3', 'dft-d3' )
!
ldftd3 = .TRUE.
llondon= .FALSE.
ts_vdw = .FALSE.
lxdm = .FALSE.
!
CASE( 'TS', 'ts', 'ts-vdw', 'ts-vdW', 'tkatchenko-scheffler' )
!
llondon= .FALSE.
ldftd3 = .FALSE.
ts_vdw = .TRUE.
lxdm = .FALSE.
!
CASE( 'XDM', 'xdm' )
!
llondon= .FALSE.
ldftd3 = .FALSE.
ts_vdw = .FALSE.
lxdm = .TRUE.
!
CASE('none','')
CASE DEFAULT
!
llondon= .FALSE.
ldftd3 = .FALSE.
ts_vdw = .FALSE.
lxdm = .FALSE.
!
WRITE (stdout,*)
CALL infomsg('set_vdw_corr','WARNING: unknown vdw correction (vdw_corr): '//TRIM(vdw_corr)//'. No vdw correction used.')
WRITE (stdout,*)
END SELECT
END SUBROUTINE set_vdw_corr
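
Editorial note (not part of the commit): set_vdw_corr maps the vdw_corr string onto exactly one of the four logical flags and falls back to all .FALSE., with a warning, for unrecognized strings. A minimal, hypothetical caller sketch follows; it assumes compilation and linking against the QE Modules library that provides set_vdw_corr (and its io_global/infomsg dependencies).

PROGRAM vdw_corr_demo
   ! Hypothetical driver for illustration only; set_vdw_corr comes from the QE Modules library.
   IMPLICIT NONE
   LOGICAL :: llondon, ldftd3, ts_vdw, lxdm
   CALL set_vdw_corr('grimme-d2', llondon, ldftd3, ts_vdw, lxdm)
   PRINT '(A,4L2)', 'grimme-d2 ->', llondon, ldftd3, ts_vdw, lxdm   ! expected: T F F F
   CALL set_vdw_corr('xdm', llondon, ldftd3, ts_vdw, lxdm)
   PRINT '(A,4L2)', 'xdm       ->', llondon, ldftd3, ts_vdw, lxdm   ! expected: F F F T
END PROGRAM vdw_corr_demo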

View File

@@ -44,7 +44,7 @@ compute_weight.o \
deallocate_part.o \
deallocate_phq.o \
d2ionq.o \
d2ionq_mm.o \
d2ionq_disp.o \
d2nsq_bare.o \
dnsq_bare.o \
dnsq_orth.o \

PHonon/PH/d2ionq_disp.f90 (new file, 288 lines)
View File

@@ -0,0 +1,288 @@
!
! Copyright (C) 2016 Quantum ESPRESSO Foundation
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
! This routine calculates the XDM contribution to the dynamical matrix.
! It uses the XDM dispersion coefficients and van der Waals radii in
! the xdm.dat file (in the prefix.save directory), which is written by pw.x.
! This code is based on the d2ionq_mm.f90 file by Fabrizio Masullo and
! Paolo Giannozzi.
!
SUBROUTINE d2ionq_disp(alat,nat,ityp,at,bg,tau,q,der2disp)
USE london_module, ONLY: init_london, dealloca_london, mxr, dist2, r_cut, r
USE kinds, ONLY: DP
USE io_global, ONLY: ionode, ionode_id, stdout
USE io_files, ONLY: seqopn, postfix
USE control_flags, ONLY: llondon, lxdm
USE constants, ONLY: tpi, eps8
USE mp_images, ONLY: me_image , nproc_image , intra_image_comm
USE mp, ONLY: mp_sum, mp_bcast
USE save_ph, ONLY: tmp_dir_save
IMPLICIT NONE
INTEGER, INTENT(IN) :: nat ! number of atoms in the unit cell
REAL(DP), INTENT(IN) :: alat ! cell parameter (celldm(1))
INTEGER, INTENT(IN) :: ityp(nat) ! atomic types for atoms in the unit cell
REAL(DP), INTENT(IN) :: at(3,3) ! at(:,i) is lattice vector i in alat units
REAL(DP), INTENT(IN) :: bg(3,3) ! bg(:,i) is reciprocal lattice vector i in 2pi/alat units
REAL(DP), INTENT(IN) :: tau(3,nat) ! atomic positions in alat units
REAL(DP), INTENT(IN) :: q(3) ! wavevector in 2pi/alat units
COMPLEX(DP), INTENT(OUT) :: der2disp(3,nat,3,nat) ! dispersion contribution to the (massless) dynamical matrix
INTEGER :: ii, jj, kk ! some indices
INTEGER :: k, l ! cell atom indices (1 -> nat)
INTEGER :: aa, bb ! coordinate indices (1 -> 3)
INTEGER :: nr ! lattice vector index (1 -> nvec)
INTEGER :: first, last, resto, divid ! for parallelization over atoms
REAL(DP) :: dd, d2 ! atom-atom distance and square distance
REAL(DP) :: dtau(3) ! cell atom-cell atom vector
REAL(DP) :: rr(3) ! cell atom-env atom vector
COMPLEX(DP) :: eiqr ! phase factor for the Fourier transform of the dyn. mat.
! accumulation auxiliary variables
REAL(DP) :: auxr
REAL(DP) :: aux(3,3,nat)
COMPLEX(DP) :: aux2(3,3,nat)
REAL(DP) :: g ! pairwise energy contribution g(d), Exdm = -1/2 sum_ij g_ij(d_ij)
REAL(DP) :: gp ! derivative of g(d) wrt distance
REAL(DP) :: h ! gp(d) / d
REAL(DP) :: hp ! derivative of h(d) wrt distance
! for reading the xdm.dat file
INTEGER :: iunxdm, ierr, iver
LOGICAL :: lexist
! environment info from the XDM module (via .xdm file)
INTEGER :: nvec ! number of lattice vectors in the environment
INTEGER :: lmax(3) ! max. lattice vector in the environment for each crystallographic axis
INTEGER, ALLOCATABLE :: lvec(:,:) ! lvec(:,i) is the ith environment lattice vector (cryst. coords.)
REAL(DP), ALLOCATABLE :: cx(:,:,:) ! cx(i,j,n), n=2:4, is the nth dispersion coefficient between cell atoms i and j (2=c6, 3=c8, 4=c10)
REAL(DP), ALLOCATABLE :: rvdw(:,:) ! rvdw(i,j) is the sum of vdw radii of cell atoms i and j
REAL(DP) :: rmax2 ! max. distance for energy sum - to be consistent with pw.x
REAL(DP) :: ene ! total energy (for debug only)
CHARACTER*10 :: namestr ! name of the dispersion correction
INTEGER, EXTERNAL :: find_free_unit
! initialization
IF (llondon) THEN
! D2 does not require any saved info; just init the module
CALL init_london()
namestr = "D2"
ELSE IF (lxdm) THEN
! read the XDM environment, coefficients, and Rvdw
ALLOCATE(cx(nat,nat,2:4),rvdw(nat,nat))
IF (ionode) THEN
iunxdm = find_free_unit ()
CALL seqopn(iunxdm,postfix(2:6)//'xdm.dat','UNFORMATTED',lexist,tmp_dir_save)
IF (.NOT.lexist) CALL errore('d2ionq_disp','could not open xdm data file',1)
READ (iunxdm,iostat=ierr) iver
IF (ierr /= 0) CALL errore('d2ionq_disp','reading xdm.dat 1',1)
READ (iunxdm,iostat=ierr) lmax, rmax2
IF (ierr /= 0) CALL errore('d2ionq_disp','reading xdm.dat 2',1)
READ (iunxdm,iostat=ierr) cx, rvdw
IF (ierr /= 0) CALL errore('d2ionq_disp','reading xdm.dat 3',1)
CLOSE (UNIT=iunxdm, STATUS='KEEP')
END IF
CALL mp_bcast(iver, ionode_id, intra_image_comm)
CALL mp_bcast(lmax, ionode_id, intra_image_comm)
CALL mp_bcast(rmax2, ionode_id, intra_image_comm)
CALL mp_bcast(cx, ionode_id, intra_image_comm)
CALL mp_bcast(rvdw, ionode_id, intra_image_comm)
namestr = "XDM"
! pre-calculate the list of lattice vectors
ALLOCATE(lvec(3,PRODUCT(2*lmax + 1)))
nvec = 0
DO ii = -lmax(1), lmax(1)
DO jj = -lmax(2), lmax(2)
DO kk = -lmax(3), lmax(3)
nvec = nvec + 1
lvec(:,nvec) = (/ii,jj,kk/)
END DO
END DO
END DO
ELSE
CALL errore('d2ionq_disp','Dispersion correction not one of D2 or XDM',1)
ENDIF
IF (ionode) THEN
WRITE (stdout,'(/,5X,"Calculating the ",A," contribution to the dynamical matrix.")') TRIM(namestr)
END IF
ene = 0._dp
der2disp = 0._dp
! parallelize atoms over processors in this image
#if defined __MPI
resto = MOD(nat,nproc_image)
divid = nat/nproc_image
IF (me_image+1 <= resto) THEN
first = (divid+1) * me_image + 1
last = (divid+1) * (me_image+1)
ELSE
first = ((divid+1) * resto) + divid * (me_image-resto) + 1
last = (divid+1) * resto + divid * (me_image-resto+1)
ENDIF
#else
first = 1
last = nat
#endif
DO k = first, last
aux = 0._dp
aux2 = 0._dp
DO l = 1, nat
dtau = tau(:,k) - tau(:,l)
IF (llondon) THEN
CALL rgen ( dtau, r_cut, mxr, at, bg, r, dist2, nvec )
END IF
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1600)
!$omp parallel do private(rr,d2,dd,g,gp,h,hp,eiqr,auxr) default(shared), reduction(+:aux), reduction(+:aux2), reduction(+:ene)
#endif
DO nr = 1, nvec
IF (llondon) THEN
rr = r(:,nr)
d2 = dist2(nr) * alat * alat
ELSE
rr = lvec(1,nr) * at(:,1) + lvec(2,nr) * at(:,2) + lvec(3,nr) * at(:,3) - dtau
d2 = (rr(1)*rr(1) + rr(2)*rr(2) + rr(3)*rr(3)) * alat * alat
END IF
dd = SQRT(d2)
IF (dd <= eps8 .OR. (lxdm .AND. d2 > rmax2)) CYCLE
IF (lxdm) THEN
CALL calcgh_xdm(k,l,dd,g,gp,h,hp)
ELSE
CALL calcgh_d2(k,l,dd,g,gp,h,hp)
END IF
ene = ene - 0.5_dp * g
eiqr = EXP(tpi * (0_dp,1_dp) * (q(1)*(rr(1)+dtau(1))+q(2)*(rr(2)+dtau(2))+q(3)*(rr(3)+dtau(3))))
DO aa = 1 , 3
DO bb = 1 , 3
IF (aa /= bb) THEN
auxr = hp * rr(aa) * alat * rr(bb) * alat / dd
ELSE
auxr = hp * rr(aa) * alat * rr(bb) * alat / dd + h
ENDIF
aux(aa,bb,l) = aux(aa,bb,l) + auxr
aux2(aa,bb,l) = aux2(aa,bb,l) + auxr*eiqr
ENDDO ! bb
ENDDO ! aa
ENDDO ! nr
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1600)
!$omp end parallel do
#endif
DO aa =1, 3
DO bb = 1, 3
der2disp(aa,k,bb,l) = aux2(aa,bb,l)
ENDDO ! bb
ENDDO ! aa
ENDDO ! l
DO l = 1, nat
DO aa = 1, 3
DO bb = 1, 3
der2disp(aa,k,bb,k) = der2disp(aa,k,bb,k) - aux(aa,bb,l)
ENDDO ! bb
ENDDO ! aa
ENDDO ! l
ENDDO ! k
CALL mp_sum(ene, intra_image_comm)
CALL mp_sum(der2disp, intra_image_comm)
IF (ionode) THEN
WRITE (stdout,'(5X,A," energy = ",F17.8," Ry")') TRIM(namestr), ene
WRITE (stdout,'(5X,"Done."/)')
END IF
! cleanup
IF (llondon) THEN
CALL dealloca_london ()
ENDIF
CONTAINS
SUBROUTINE calcgh_xdm(i,j,d,g,gp,h,hp)
IMPLICIT NONE
INTEGER, INTENT(IN) :: i, j
REAL(DP), INTENT(IN) :: d
REAL(DP), INTENT(OUT) :: g, gp, h, hp
REAL(DP) :: c6, c8, c10, rr
REAL(DP) :: d2, d4, d6, d8, d10, dpr6, dpr8, dpr10, r2, r6, r8, r10
REAL(DP) :: d5, d7, d9, dpr6sq, dpr8sq, dpr10sq, d17, d13, d3, dpr6cub
REAL(DP) :: dpr8cub, dpr10cub
c6 = cx(i,j,2)
c8 = cx(i,j,3)
c10 = cx(i,j,4)
rr = rvdw(i,j)
r2 = rr * rr
r6 = r2 * r2 * r2
r8 = r6 * r2
r10 = r8 * r2
d2 = d * d
d3 = d2 * d
d4 = d2 * d2
d5 = d4 * d
d6 = d4 * d2
d7 = d6 * d
d8 = d6 * d2
d9 = d8 * d
d10 = d8 * d2
d13 = d6 * d7
d17 = d10 * d7
dpr6 = r6 + d6
dpr8 = r8 + d8
dpr10 = r10 + d10
dpr6sq = dpr6 * dpr6
dpr8sq = dpr8 * dpr8
dpr10sq = dpr10 * dpr10
dpr6cub = dpr6sq * dpr6
dpr8cub = dpr8sq * dpr8
dpr10cub = dpr10sq * dpr10
g = c6 / dpr6 + c8 / dpr8 + c10 / dpr10
gp = -10._dp * c10 * d9 / dpr10sq - 8._dp * c8 * d7 / dpr8sq - 6._dp * c6 * d5 / dpr6sq
h = gp / d
hp = -80._dp * c10 * d7 / dpr10sq + 200._dp * c10 * d17 / dpr10cub - 48._dp * c8 * d5 / dpr8sq &
+ 128._dp * c8 * d13 / dpr8cub - 24 * c6 * d3 / dpr6sq + 72._dp * c6 * d9 / dpr6cub
END SUBROUTINE calcgh_xdm
SUBROUTINE calcgh_d2(ii,jj,d,g,gp,h,hp)
USE london_module, ONLY: beta, R_sum, C6_ij, scal6
IMPLICIT NONE
INTEGER, INTENT(IN) :: ii, jj
REAL(DP), INTENT(IN) :: d
REAL(DP), INTENT(OUT) :: g, gp, h, hp
INTEGER :: i, j
REAL(DP) :: ed, fij, d6, d7, d2
i = ityp(ii)
j = ityp(jj)
d2 = d * d
d6 = d**6
d7 = d6 * d
ed = EXP(-beta * (d / R_sum(i,j) - 1._dp))
fij = 1._dp / (1._dp + ed)
g = C6_ij(i,j) * scal6 / d6 * fij
gp = C6_ij(i,j) * scal6 / d6 / (1._dp + ed) * (beta * ed / R_sum(i,j) / (1._dp + ed) - 6._dp / d)
h = gp / d
hp = C6_ij(i,j) * scal6 / d7 / (1._dp + ed) * (48._dp / d2 - &
13._dp * beta * ed / R_sum(i,j) / d / (1._dp + ed) - &
beta**2 * ed / R_sum(i,j)**2 / (1._dp + ed)**2 * (1._dp - ed))
END SUBROUTINE calcgh_d2
END SUBROUTINE d2ionq_disp
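
Editorial note (not part of the commit): calcgh_xdm and calcgh_d2 return the pairwise energy g(d) together with the combinations needed for its second derivatives, and the accumulation loop adds h'(d) R_a R_b / d + delta_ab h(d) to the real-space force-constant block (aux), with the extra phase e^{i q.(r+dtau)} for the Fourier-transformed block (aux2). For the XDM branch the algebra, written in LaTeX, is

  g(d)  = \frac{C_6}{R^6+d^6} + \frac{C_8}{R^8+d^8} + \frac{C_{10}}{R^{10}+d^{10}},  with R = rvdw(i,j),
  g'(d) = -\frac{6C_6 d^5}{(R^6+d^6)^2} - \frac{8C_8 d^7}{(R^8+d^8)^2} - \frac{10C_{10} d^9}{(R^{10}+d^{10})^2},
  h(d)  = \frac{g'(d)}{d},  \qquad  h'(d) = \frac{d\,g''(d) - g'(d)}{d^2},
  \frac{\partial^2 g}{\partial R_a\,\partial R_b} = h'(d)\,\frac{R_a R_b}{d} + \delta_{ab}\,h(d),

which is exactly the auxr term built per lattice vector (the alat factors convert rr to Bohr). As a worked check of the MPI block distribution above: with nat = 10 and nproc_image = 4, resto = 2 and divid = 2, so ranks 0..3 handle atoms 1-3, 4-6, 7-8 and 9-10 (the first resto ranks get one extra atom).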

View File

@@ -1,187 +0,0 @@
!
! Copyright (C) 2016 Quantum ESPRESSO Foundation
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
! or http://www.gnu.org/copyleft/gpl.txt .
!
! Calculation of Grimme D2 contribution to the dynamical matrix
! See module "london_module" in Modules/mm_dispersion.f90
! Written by Fabrizio Masullo for his M.Sc. in Mathematics at UniUD
! under the supervision of Paolo Giannozzi
!------------------------------------------------------------------------------
!
!
SUBROUTINE d2ionq_mm ( alat , nat , ityp , at , bg , tau, q, deriv2_london )
!
USE london_module
USE constants, ONLY : tpi, eps8
USE mp_images, ONLY : me_image , nproc_image , intra_image_comm
USE mp, ONLY : mp_sum
!
IMPLICIT NONE
!
INTEGER , INTENT ( in ) :: nat , ityp ( nat )
! input:
! nat : number of atoms
! ityp : type of each atom
!
REAL ( DP ) , INTENT ( in ) :: alat, tau(3, nat), at(3 , 3), bg(3 , 3), q(3)
! input:
! alat : the cell parameter
! tau : atomic positions in alat units
! at : direct lattice vectors
! bg : reciprocal lattice vectors
! q : wave-vector (in 2pi/alat units)
!
COMPLEX ( DP ), INTENT(OUT) :: deriv2_london ( 3, nat, 3, nat )
!
INTEGER :: ata , atb , nrm , nr , ipol, jpol
! locals :
! ata , atb : atom counters
! nrm : actual number of vectors computed by rgen
! nr : counter on neighbours shells
! ipol, jpol: counters on coords
!
INTEGER :: first , last , resto, divid
! locals :
! first : lower bound on processor
! last : upper
!
REAL ( DP ) :: dist , f_damp , dtau ( 3 ) , &
exparg , expval, par , par2, fac , facF, add, addF, auxr
COMPLEX ( DP ) :: eiqr
! locals :
! dist : distance R_ij between the current pair of atoms
! f_damp : damping function
! dtau : \vec R_ij
! ... and many other temporary variables, plus some buffers:
!
REAL ( DP ) :: aux (3, 3, nat)
COMPLEX ( DP ) :: aux2(3, 3, nat)
!
!
deriv2_london ( : , : , : , :) = 0.d0
!
#if defined __MPI
!
! parallelization: divide atoms across processors of this image
! (different images have different atomic positions)
!
resto = mod ( nat , nproc_image )
divid = nat / nproc_image
!
IF ( me_image + 1 <= resto ) THEN
!
first = ( divid + 1 ) * me_image + 1
last = ( divid + 1 ) * ( me_image + 1 )
!
ELSE
!
first = ( ( divid + 1 ) * resto ) + ( divid ) * ( me_image-resto ) + 1
last = ( divid + 1 ) * resto + ( divid ) * ( me_image - resto + 1 )
!
ENDIF
!
#else
!
first = 1
last = nat
#endif
!
DO ata = first , last
!
aux(:,:,:) = 0.d0
aux2(:,:,:) = 0.d0
!
DO atb = 1 , nat
!
dtau ( : ) = tau ( : , ata ) - tau ( : , atb )
!
! generate neighbours shells
CALL rgen ( dtau, r_cut, mxr, at, bg, r, dist2, nrm )
!
! compute forces
!
par = beta / ( R_sum ( ityp ( atb ) , ityp ( ata ) ) )
!
par2 = par**2
!
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1600)
!$omp parallel do private(nr,dist,exparg,expval,fac,add,eiqr,facF,addF,auxr,ipol,jpol) default(shared), reduction(+:aux), reduction(+:aux2)
#endif
DO nr = 1 , nrm
!
dist = alat * sqrt ( dist2 ( nr ) )
IF ( dist > eps8 ) THEN
!
exparg = - beta * ( dist / ( R_sum ( ityp(atb) , ityp(ata) ) ) - 1.0_dp )
expval = exp ( exparg )
!
fac = C6_ij ( ityp ( atb ) , ityp ( ata ) ) / dist**8
add = 48.d0 / dist**2
!
eiqr = exp ((0_dp,1_dp)*(q(1)*(r(1,nr)+dtau(1))+&
q(2)*(r(2,nr)+dtau(2))+&
q(3)*(r(3,nr)+dtau(3)) ) * tpi )
!
facF = C6_ij ( ityp ( atb ) , ityp ( ata ) ) / dist**6
addF = 6.d0 / dist
!
DO ipol = 1 , 3
DO jpol = 1 , 3
IF (ipol /= jpol) THEN
auxr = ( scal6 / ( 1.d0 + expval ) * fac * &
( add - par*(13.d0*expval/((1.d0+expval)*dist))-&
par2 *expval*(1.0_dp - expval)/ &
( (1.d0 + expval) * (1.d0 + expval) ) ) * &
r ( ipol, nr ) * alat * r(jpol,nr) * alat )
ELSE
auxr = ( scal6 / ( 1.d0 + expval ) * fac * &
( add - par*(13.d0*expval/((1.d0+expval)*dist))-&
par2 *expval*(1.0_dp - expval)/ &
( (1.d0 + expval) * (1.d0 + expval) ) ) * &
r ( ipol, nr ) * alat * r(jpol,nr) * alat )- &
( scal6 / ( 1.0_dp + expval ) * facF * &
( - par * expval / ( 1.d0 + expval ) + addF ) * &
1.d0/ dist )
ENDIF
!
aux (ipol,jpol,atb) = aux (ipol,jpol,atb) + auxr
aux2(ipol,jpol,atb) = aux2(ipol,jpol,atb) + auxr*eiqr
!
ENDDO
!
ENDDO
!
ENDIF
!
ENDDO
#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1600)
!$omp end parallel do
#endif
DO ipol =1,3
DO jpol = 1,3
deriv2_london (ipol, ata, jpol, atb) = aux2(ipol,jpol,atb)
ENDDO
ENDDO
ENDDO
!
DO atb = 1, nat
DO ipol =1,3
DO jpol = 1,3
deriv2_london (ipol, ata, jpol, ata) = &
deriv2_london (ipol, ata, jpol, ata) - aux(ipol,jpol,atb)
ENDDO
ENDDO
ENDDO
!
ENDDO
CALL mp_sum ( deriv2_london , intra_image_comm )
!
RETURN
!
END SUBROUTINE d2ionq_mm
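
Editorial note (not part of the commit): this deleted routine and the calcgh_d2 helper in the new PHonon/PH/d2ionq_disp.f90 differentiate the same Grimme-D2 pair energy, with beta, R_sum, C6_ij and the scale factor scal6 supplied by london_module; in LaTeX,

  E_{D2} = -\frac{s_6}{2} \sum_{i \neq j} \frac{C_6^{ij}}{d_{ij}^6}\, f(d_{ij}),  \qquad  f(d) = \frac{1}{1 + e^{-\beta\,(d/R_{sum}-1)}}.

The new file refactors the second derivatives of the per-pair term g(d) = s_6 C_6^{ij} f(d)/d^6 into the g, gp, h, hp quantities shared with the XDM path, so the long explicit expressions above are no longer needed.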

View File

@@ -20,7 +20,7 @@ subroutine dynmat0_new
USE cell_base, ONLY : alat, omega, at, bg
USE gvect, ONLY : g, gg, ngm, gcutm
USE symm_base, ONLY : irt, s, invs
USE control_flags, ONLY : modenum, llondon
USE control_flags, ONLY : modenum, llondon, lxdm
USE ph_restart, ONLY : ph_writefile
USE control_ph, ONLY : rec_code_read, current_iq
USE qpoint, ONLY : xq
@@ -53,15 +53,12 @@ subroutine dynmat0_new
!
call d2ionq (nat, ntyp, ityp, zv, tau, alat, omega, xq, at, bg, g, &
gg, ngm, gcutm, nmodes, u, dyn)
!
! Here the Grimme D2 dispersion contribution (if present)
!
IF ( llondon ) THEN
CALL init_london ()
CALL d2ionq_mm (alat, nat, ityp, at, bg, tau, xq, dynwrk )
CALL rotate_pattern_add ( nat, u, dyn, dynwrk )
CALL dealloca_london ()
END IF
! Contribution from the dispersion correction
IF (llondon .OR. lxdm) THEN
CALL d2ionq_disp(alat,nat,ityp,at,bg,tau,xq,dynwrk)
CALL rotate_pattern_add (nat,u,dyn,dynwrk)
ENDIF
!
! Add non-linear core-correction (NLCC) contribution (if any)
!

View File

@@ -708,9 +708,6 @@ SUBROUTINE phq_readin()
IF (ts_vdw) CALL errore('phq_readin',&
'The phonon code with TS-VdW is not yet available',1)
IF (lxdm) CALL errore('phq_readin',&
'The phonon code with XDM is not yet available',1)
IF (ldftd3) CALL errore('phq_readin',&
'The phonon code with Grimme''s DFT-D3 is not yet available',1)

View File

@@ -1190,8 +1190,9 @@ SUBROUTINE iosys()
!
! ... initialize variables for vdW (dispersion) corrections
!
CALL set_vdw_corr ( vdw_corr, llondon, ldftd3, ts_vdw_, lxdm)
!
IF ( london ) THEN
CALL infomsg("iosys","london is obsolete, use ""vdw_corr='grimme-d2'"" instead")
vdw_corr='grimme-d2'
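
Editorial aside (not part of the commit): in a pw.x input the correction is requested through the same vdw_corr strings that set_vdw_corr recognizes ('grimme-d2', 'grimme-d3', 'ts-vdw', 'xdm', ...). A hedged, minimal fragment, assuming the usual &SYSTEM placement of vdw_corr and with all other values as illustrative placeholders:

&SYSTEM
   ibrav = 1, celldm(1) = 10.0,
   nat = 2, ntyp = 1,
   ecutwfc = 40.0,
   vdw_corr = 'grimme-d2'
/

That is, set vdw_corr rather than the obsolete london flag, as the message above advises.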

View File

@@ -24,7 +24,7 @@ SUBROUTINE punch( what )
USE io_global, ONLY : stdout, ionode
USE io_files, ONLY : iunpun, iunwfc, nwordwfc, diropn, &
tmp_dir, prefix, postfix, create_directory
USE control_flags, ONLY : io_level, lscf
USE control_flags, ONLY : io_level, lscf, lxdm
USE klist, ONLY : nks
USE io_files, ONLY : xmlpun_schema, psfile, pseudo_dir
USE wrappers, ONLY : f_copy
@@ -39,6 +39,7 @@ SUBROUTINE punch( what )
USE io_rho_xml, ONLY : write_scf
USE a2F, ONLY : la2F, a2Fsave
USE wavefunctions, ONLY : evc
USE xdm_module, ONLY : write_xdmdat
!
IMPLICIT NONE
!
@@ -102,12 +103,17 @@ SUBROUTINE punch( what )
IF ( TRIM(cp_source) /= TRIM(cp_dest) ) &
cp_status = f_copy(cp_source, cp_dest)
END IF
! write XDM dispersion data (coefficients and vdw radii) to xdm.dat
IF (lxdm) THEN
CALL write_xdmdat()
ENDIF
END IF
!
! ... wavefunctions in "collected" format - also G- and k+G-vectors
!
CALL pw_write_binaries( )
!
! ... if allocated, deallocate variables containing info on ionic steps
!
CALL qexsd_reset_steps()

View File

@@ -14,12 +14,13 @@ module xdm_module
IMPLICIT NONE
PRIVATE
PUBLIC :: a1i, a2i ! the damping function coefficients (real_dp)
PUBLIC :: init_xdm ! initialize XDM: calculate atomic volumes, radial densities,...
PUBLIC :: energy_xdm ! compute the XDM dispersion energy and derivatives
PUBLIC :: force_xdm ! fetch the forces calculated by energy_xdm
PUBLIC :: stress_xdm ! fetch the stresses calculated by energy_xdm
PUBLIC :: cleanup_xdm ! deallocate arrays
PUBLIC :: a1i, a2i ! the damping function coefficients (real_dp)
PUBLIC :: init_xdm ! initialize XDM: calculate atomic volumes, radial densities,...
PUBLIC :: energy_xdm ! compute the XDM dispersion energy and derivatives
PUBLIC :: force_xdm ! fetch the forces calculated by energy_xdm
PUBLIC :: stress_xdm ! fetch the stresses calculated by energy_xdm
PUBLIC :: write_xdmdat ! write the xdm.dat file
PUBLIC :: cleanup_xdm ! deallocate arrays
! is this a PAW calculation?
LOGICAL :: ispaw
@@ -29,11 +30,13 @@ module xdm_module
REAL(DP), ALLOCATABLE :: xenv(:,:)
INTEGER, ALLOCATABLE :: ienv(:), lvec(:,:)
INTEGER :: nvec
INTEGER :: lmax(3)
! moments, polarizabilities, radii, dispersion coefficients
REAL(DP), ALLOCATABLE :: alpha(:), ml(:,:)
REAL(DP), ALLOCATABLE :: cx(:,:,:), rvdw(:,:)
REAL(DP) :: maxc6
REAL(DP) :: rmax2
! energies, forces and stresses
REAL(DP) :: esave = 0._DP
@@ -211,17 +214,18 @@ CONTAINS
REAL(DP) :: x(3), wei, weic, db, ri, atb(3,3), taub(3)
REAL(DP) :: xij(3), ehadd(6:10), eat, ee
INTEGER :: l1, l2, ll, m1, m2
LOGICAL :: docalc
REAL(DP) :: a1, a2, rmax, rmax2, den, den2
LOGICAL :: docalc, lexist
REAL(DP) :: a1, a2, rmax, den, den2
REAL(DP) :: dij2
REAL(DP) :: rvdwx, dijx, dijxm2, fxx, cn0
INTEGER :: i3, nn
REAL(DP) :: for(3,nat), sigma(3,3), sat(3,3)
INTEGER :: resto, divid, first, last, it
INTEGER :: idx, ispin
INTEGER, EXTERNAL :: atomic_number
REAL(DP) :: iix, iiy, iiz
INTEGER, EXTERNAL :: atomic_number
CALL start_clock('energy_xdm')
! initialize
@@ -259,7 +263,7 @@ CONTAINS
! set up the atomic environment for densities
rmax = SQRT(MAXVAL(rmaxg2))
CALL set_environ(rmax)
CALL set_environ(rmax,lmax(1),lmax(2),lmax(3))
! total and core promolecular density
ALLOCATE(rhoat(dfftp%nnr),rhocor(dfftp%nnr),STAT=ialloc)
@@ -366,9 +370,7 @@ CONTAINS
iy0 = dfftp%my_i0r2p ; iz0 = dfftp%my_i0r3p
DO n = 1, dfftp%nr1x*dfftp%my_nr2p*dfftp%my_nr3p
!
! ... three dimensional indexes
!
! three dimensional indexes
idx = n -1
iz = idx / (dfftp%nr1x*dfftp%my_nr2p)
idx = idx - (dfftp%nr1x*dfftp%my_nr2p)*iz
@@ -489,7 +491,7 @@ CONTAINS
! in the international tables for crystallography.
rmax = (maxc6/ecut)**(1._DP/6._DP)
rmax2 = rmax*rmax
CALL set_environ(rmax)
CALL set_environ(rmax,lmax(1),lmax(2),lmax(3))
! parallelize over atoms
#if defined __MPI
@@ -562,10 +564,10 @@ CONTAINS
DO nn = 6, 10
CALL mp_sum(ehadd(nn),intra_image_comm)
ENDDO
!
IF (nspin == 2) CALL rhoz_or_updw( rho, 'r_and_g', '->rhoz' )
!
! Convert to Ry
! convert to Ry
evdw = evdw * 2
for = for * 2
sigma = sigma * 2
@@ -578,6 +580,7 @@ CONTAINS
ssave = sigma
IF (ionode) THEN
! write to output
WRITE (stdout,'(" Evdw(total,Ry) = ",1p,E20.12)') evdw
WRITE (stdout,'(" Evdw(C6,Ry) = ",1p,E20.12)') ehadd(6)
WRITE (stdout,'(" Evdw(C8,Ry) = ",1p,E20.12)') ehadd(8)
@@ -614,9 +617,33 @@ CONTAINS
svdw = ssave
END FUNCTION stress_xdm
SUBROUTINE write_xdmdat()
! save the XDM coefficients and vdw radii to the xdm.dat file for ph.x
USE io_files, ONLY: seqopn, postfix
USE io_global, ONLY: ionode
USE ions_base, ONLY: nat
INTEGER :: iunxdm, ierr
LOGICAL :: lexist
INTEGER, EXTERNAL :: find_free_unit
IF (ionode) THEN
iunxdm = find_free_unit ()
CALL seqopn(iunxdm,postfix(2:6)//'xdm.dat','UNFORMATTED',lexist)
WRITE (iunxdm,iostat=ierr) 1 ! version
IF (ierr /= 0) CALL errore('energy_xdm','writing xdm.dat',1)
WRITE (iunxdm,iostat=ierr) lmax, rmax2
IF (ierr /= 0) CALL errore('energy_xdm','writing xdm.dat',1)
WRITE (iunxdm,iostat=ierr) 2d0 * cx(1:nat,1:nat,2:4), rvdw(1:nat,1:nat)
IF (ierr /= 0) CALL errore('energy_xdm','writing xdm.dat',1)
CLOSE (UNIT=iunxdm, STATUS='KEEP')
ENDIF
END SUBROUTINE write_xdmdat
! --- private ---
SUBROUTINE set_environ (rcut)
SUBROUTINE set_environ(rcut,imax,jmax,kmax)
! Calculate an atomic environment of the entire unit cell up to a distance rcut.
! This environment is saved in the host module arrays ienv, xenv and lvec.
USE cell_base, ONLY: at, bg, alat, omega, tpiba2
@@ -624,33 +651,14 @@ CONTAINS
USE io_global, ONLY: stdout, ionode
REAL(DP), INTENT(IN) :: rcut
INTEGER, INTENT(OUT) :: imax, jmax, kmax
INTEGER :: nadd, imax, jmax, kmax, ialloc
INTEGER :: nadd, ialloc
REAL(DP) :: rmat(3,3), gtensor(3,3), alp, bet, gam, aa, bb, cc, xx(3)
INTEGER :: ii, jj, kk, m, nsize, lsize
INTEGER, ALLOCATABLE :: ienvaux(:), lvecaux(:,:)
REAL(DP), ALLOCATABLE :: xenvaux(:,:)
INTEGER, PARAMETER :: menv = 1000, lenv=100
INTEGER :: ii, jj, kk, m
CALL start_clock('exdm:environ')
! allocate the initial environment
nenv = 0
IF (ALLOCATED(ienv)) DEALLOCATE(ienv)
IF (ALLOCATED(xenv)) DEALLOCATE(xenv)
ALLOCATE(ienv(menv),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("ienv")
ALLOCATE(xenv(3,menv),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("xenv")
nsize = menv
! allocate the array of lattice vectors
nvec = 0
IF (ALLOCATED(lvec)) DEALLOCATE(lvec)
ALLOCATE(lvec(3,lenv),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("lenv")
lsize = lenv
! determine number of cells (adapted from gulp, by J. Gale)
rmat = at * alat
gtensor = MATMUL(TRANSPOSE(rmat),rmat)
@@ -673,60 +681,36 @@ CONTAINS
jmax = NINT(rcut / bb) + nadd
kmax = NINT(rcut / cc) + nadd
! pre-allocate the environment arrays
nvec = (2*imax+1)*(2*jmax+1)*(2*kmax+1)
nenv = (2*imax+1)*(2*jmax+1)*(2*kmax+1) * nat
IF (ALLOCATED(xenv)) DEALLOCATE(xenv)
IF (ALLOCATED(ienv)) DEALLOCATE(ienv)
IF (ALLOCATED(lvec)) DEALLOCATE(lvec)
ALLOCATE(xenv(3,nenv),ienv(nenv),lvec(3,nvec))
! build the environment arrays
nenv = 0
nvec = 0
DO ii = -imax, imax
DO jj = -jmax, jmax
DO kk = -kmax, kmax
! run over the ions in the (i,j,k) cell:
DO m = 1, nat
xx = tau(:,m) + ii*at(:,1) + jj*at(:,2) + kk*at(:,3)
! dynamically increase the array size
nenv = nenv + 1
IF (nenv > nsize) THEN
ALLOCATE(ienvaux(NINT(1.5*nsize)),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("ienvaux")
ALLOCATE(xenvaux(3,NINT(1.5*nsize)),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("xenvaux")
ienvaux(1:nsize) = ienv
xenvaux(:,1:nsize) = xenv
CALL move_alloc(ienvaux,ienv)
CALL move_alloc(xenvaux,xenv)
nsize = NINT(1.5*nsize)
END IF
xx = tau(:,m) + ii*at(:,1) + jj*at(:,2) + kk*at(:,3)
xenv(:,nenv) = xx * alat
ienv(nenv) = m
ENDDO ! m
! one more lattice vector
nvec = nvec + 1
IF (nvec > lsize) THEN
ALLOCATE(lvecaux(3,NINT(1.5*lsize)),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("lvecaux")
lvecaux(:,1:lsize) = lvec
CALL move_alloc(lvecaux,lvec)
lsize = NINT(1.5*lsize)
END IF
lvec(:,nvec) = (/ii,jj,kk/)
END DO ! kk
END DO ! jj
END DO ! ii
! fit memory snugly
ALLOCATE(ienvaux(nsize),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("ienvaux")
ienvaux(1:nsize) = ienv
CALL move_alloc(ienvaux,ienv)
ALLOCATE(xenvaux(3,nsize),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("xenvaux")
xenvaux(:,1:nsize) = xenv
CALL move_alloc(xenvaux,xenv)
ALLOCATE(lvecaux(3,lsize),STAT=ialloc)
IF (ialloc /= 0) CALL alloc_failed("lvecaux")
lvecaux(:,1:lsize) = lvec
CALL move_alloc(lvecaux,lvec)
CALL stop_clock('exdm:environ')
END SUBROUTINE set_environ
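
Editorial note (not part of the commit): the pw.x -> ph.x handoff added by this commit is the unformatted xdm.dat file. write_xdmdat emits three records: the format version (currently 1); the environment extents lmax(3) together with the distance cutoff rmax2; and the dispersion coefficients (written with a factor of 2d0 applied) plus the van der Waals radii. d2ionq_disp reads them back in the same order. A minimal stand-alone reader sketch; the file path and nat below are purely illustrative assumptions:

PROGRAM read_xdm_dat
   ! Hypothetical reader for the three xdm.dat records; path and nat are placeholders.
   IMPLICIT NONE
   INTEGER, PARAMETER :: dp = SELECTED_REAL_KIND(14,200)
   INTEGER, PARAMETER :: nat = 2              ! must match the pw.x calculation
   INTEGER  :: iun, iver, lmax(3)
   REAL(dp) :: rmax2, cx(nat,nat,2:4), rvdw(nat,nat)
   OPEN (NEWUNIT=iun, FILE='pwscf.save/xdm.dat', FORM='unformatted', STATUS='old')
   READ (iun) iver                            ! record 1: format version
   READ (iun) lmax, rmax2                     ! record 2: environment extents and cutoff
   READ (iun) cx, rvdw                        ! record 3: coefficients and vdW radii
   CLOSE (iun)
   PRINT '(A,I0,A,3I4)', 'xdm.dat version ', iver, ', lmax =', lmax
END PROGRAM read_xdm_dat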