Some cleanup, nothing substantial. Removed "local" dimensions of the grid, which seemed redundant.

git-svn-id: http://qeforge.qe-forge.org/svn/q-e/trunk/espresso@7369 c92efa57-630b-4861-b058-cf58834340f0
giannozz 2010-12-29 17:56:40 +00:00
parent a0f7ba8052
commit d8953d8d5f
6 changed files with 59 additions and 82 deletions
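The pattern applied throughout this commit, as a minimal sketch (loop indices and the surrounding routine are illustrative): the per-processor number of z-planes, formerly kept in the module variable nr3l, is read directly from the FFT descriptor, while nr1 and nr2 are not distributed and keep their global values.

      ! before: USE grid_dimensions, ONLY : nr1l, nr2l, nr3l
      ! after:  only the FFT descriptor knows the local z extent
      USE fft_base,        ONLY : dfftp
      USE grid_dimensions, ONLY : nr1, nr2, nr3
      INTEGER :: i, j, k, nr3l
      nr3l = dfftp%npl              ! number of z-planes on this processor
      DO k = 1, nr3l                ! local slab along z
         DO j = 1, nr2              ! y and x are not distributed
            DO i = 1, nr1
               ! ... operate on the local portion of the dense grid ...
            END DO
         END DO
      END DO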

View File

@@ -54,7 +54,7 @@ MODULE cp_restart
nwordwfc, tmp_dir, diropn
USE mp_global, ONLY : intra_image_comm, me_image, nproc_image, intra_bgrp_comm
USE printout_base, ONLY : title
USE grid_dimensions, ONLY : nr1, nr2, nr3, nr1x, nr2x, nr3l
USE grid_dimensions, ONLY : nr1, nr2, nr3, nr1x, nr2x
USE smooth_grid_dimensions, ONLY : nr1s, nr2s, nr3s
USE smallbox_grid_dimensions, ONLY : nr1b, nr2b, nr3b
USE gvect, ONLY : ngm, ngm_g

View File

@@ -134,7 +134,7 @@
USE gvect, ONLY: ngm
USE constants, ONLY: gsmall, pi
USE cell_base, ONLY: tpiba2, s_to_r, alat
use grid_dimensions, only: nr1, nr2, nr3, nr1l, nr2l, nr3l, nrxx
use grid_dimensions, only: nr1, nr2, nr3, nrxx
IMPLICIT NONE
@@ -150,7 +150,7 @@
COMPLEX(DP), ALLOCATABLE :: grg(:)
REAL(DP) :: rc, r(3), s(3), rmod, g2, rc2, arg, fact
INTEGER :: ig, i, j, k, ir
INTEGER :: ir1, ir2, ir3
INTEGER :: ir1, ir2, ir3, nr3l
ir1 = 1
ir2 = 1
@@ -158,6 +158,7 @@
DO k = 1, me_bgrp
ir3 = ir3 + dfftp%npp( k )
END DO
nr3l = dfftp%npl
ALLOCATE( grr( nrxx ) )
ALLOCATE( grg( SIZE( screen_coul ) ) )
@@ -173,9 +174,9 @@
DO k = 1, nr3l
s(3) = DBLE ( (k-1) + (ir3 - 1) ) / nr3 - 0.5d0
DO j = 1, nr2l
DO j = 1, nr2
s(2) = DBLE ( (j-1) + (ir2 - 1) ) / nr2 - 0.5d0
DO i = 1, nr1l
DO i = 1, nr1
s(1) = DBLE ( (i-1) + (ir1 - 1) ) / nr1 - 0.5d0
CALL S_TO_R( S, R, hmat )
rmod = SQRT( r(1)**2 + r(2)**2 + r(3)**2 )
@@ -837,7 +838,7 @@
USE gvect, ONLY: gstart, gg
USE gvect, ONLY: ngm
USE gvecw, ONLY: ngw
use grid_dimensions, only: nr1, nr2, nr3, nr1l, nr2l, nr3l, nrxx
use grid_dimensions, only: nr1, nr2, nr3, nrxx
USE fft_interfaces, ONLY: fwfft, invfft
IMPLICIT NONE
@@ -861,6 +862,7 @@
COMPLEX(DP), ALLOCATABLE :: k_density(:)
COMPLEX(DP) :: vscreen
COMPLEX(DP), ALLOCATABLE :: screen_coul(:)
INTEGER :: nr3l
! ... Subroutine body ...
@@ -870,7 +872,7 @@
CALL cluster_bc( screen_coul, gg, ht%deth, ht%hmat )
END IF
nr3l = dfftp%npl
omega = ht%deth
ALLOCATE( density( nrxx ) )
@@ -906,22 +908,22 @@
!WRITE(6,*) 'ATOM ', ind_localisation( isa_input )
!WRITE(6,*) 'POS ', atoms_m%taus( :, isa_sorted )
work = nr1l
work = nr1
work2 = sic_rloc * work
work = work * R(1) - work2
Xmin = FLOOR(work)
work = work + 2*work2
Xmax = FLOOR(work)
IF ( Xmax > nr1l ) Xmax = nr1l
IF ( Xmax > nr1 ) Xmax = nr1
IF ( Xmin < 1 ) Xmin = 1
work = nr2l
work = nr2
work2 = sic_rloc * work
work = work * R(2) - work2
Ymin = FLOOR(work)
work = work + 2*work2
Ymax = FLOOR(work)
IF ( Ymax > nr2l ) Ymax = nr2l
IF ( Ymax > nr2 ) Ymax = nr2
IF ( Ymin < 1 ) Ymin = 1
work = nr3l

View File

@@ -23,9 +23,6 @@
! may differ from nr1 ,nr2 ,nr3 in order to boost performances
INTEGER :: nr1x = 0, nr2x = 0, nr3x = 0
! dimensions of the "dense" 3D grid (local on each processor)
INTEGER :: nr1l = 0, nr2l = 0, nr3l = 0
! size of the arrays allocated for the FFT, local to each processor:
! in parallel execution may differ from nr1x*nr2x*nr3x
! Not to be confused either with nr1*nr2*nr3
@@ -33,7 +30,6 @@
PRIVATE
PUBLIC :: nr1, nr2,nr3, nr1x,nr2x,nr3x, nrxx
PUBLIC :: nr1l, nr2l,nr3l
!=----------------------------------------------------------------------------=!
END MODULE grid_dimensions
@@ -53,12 +49,10 @@
! parameter description: same as above but for smooth grid
INTEGER :: nr1s = 0, nr2s = 0, nr3s = 0
INTEGER :: nr1sx= 0, nr2sx= 0, nr3sx= 0
INTEGER :: nr1sl= 0, nr2sl= 0, nr3sl= 0
INTEGER :: nrxxs = 0
PRIVATE
PUBLIC :: nr1s, nr2s,nr3s, nr1sx,nr2sx,nr3sx, nrxxs
PUBLIC :: nr1sl, nr2sl,nr3sl
!=----------------------------------------------------------------------------=!
END MODULE smooth_grid_dimensions
@@ -170,8 +164,8 @@
USE io_global, ONLY: ionode, stdout
USE fft_types, ONLY: fft_dlay_descriptor
USE grid_dimensions, ONLY: nr1l, nr2l, nr3l, nrxx
USE smooth_grid_dimensions, ONLY: nr1sl, nr2sl, nr3sl, nrxxs
USE grid_dimensions, ONLY: nrxx
USE smooth_grid_dimensions, ONLY: nrxxs
IMPLICIT NONE
@@ -185,7 +179,7 @@
WRITE( stdout,*)
WRITE( stdout,*) ' Real Mesh'
WRITE( stdout,*) ' ---------'
WRITE( stdout,1000) nr1, nr2, nr3, nr1l, nr2l, nr3l, 1, 1, nproc_
WRITE( stdout,1000) nr1, nr2, nr3, nr1, nr2, dfftp%npl, 1, 1, nproc_
WRITE( stdout,1010) nr1x, nr2x, nr3x
WRITE( stdout,1020) nrxx
WRITE( stdout,*) ' Number of x-y planes for each processors: '
@@ -195,7 +189,7 @@
WRITE( stdout,*)
WRITE( stdout,*) ' Smooth Real Mesh'
WRITE( stdout,*) ' ----------------'
WRITE( stdout,1000) nr1s, nr2s, nr3s, nr1sl, nr2sl, nr3sl, 1, 1, nproc_
WRITE( stdout,1000) nr1s, nr2s, nr3s, nr1s, nr2s, dffts%npl,1,1, nproc_
WRITE( stdout,1010) nr1sx, nr2sx, nr3sx
WRITE( stdout,1020) nrxxs
WRITE( stdout,*) ' Number of x-y planes for each processors: '

View File

@@ -13,7 +13,7 @@
! ... G vectors with |G|^2 < ecutrho, cut-off for charge density
! ... With gamma tricks, G-vectors are divided into two half-spheres,
! ... G> and G<, containing G and -G (G=0 is in G>)
! ... This is referred to as the "hard" or "dense" grid
! ... This is referred to as the "dense" (or "hard", or "thick") grid
USE kinds, ONLY: DP

View File

@@ -762,14 +762,7 @@
CALL fft_dlay_scalar( dffts, ub, lb, nr1s, nr2s, nr3s, nr1sx, nr2sx, nr3sx, stw )
#endif
! set the actual (local) FFT dimensions
nr1l = dfftp % nr1
nr2l = dfftp % nr2
nr3l = dfftp % npl
nr1sl = dffts % nr1
nr2sl = dffts % nr2
nr3sl = dffts % npl
! set the dimensions of the array allocated for the FFT
! set the dimensions of the arrays allocated for the FFT
nrxx = dfftp % nnr
nrxxs = dffts % nnr
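Correspondingly, per-processor FFT array sizes come straight from the descriptors; a minimal sketch (the array names are illustrative, only dfftp%nnr and dffts%nnr are taken from the code above):

      USE kinds,    ONLY : DP
      USE fft_base, ONLY : dfftp, dffts
      REAL(DP), ALLOCATABLE :: rho_of_r(:), psi_of_r(:)
      ALLOCATE( rho_of_r( dfftp%nnr ) )   ! dense grid, local array size (nrxx)
      ALLOCATE( psi_of_r( dffts%nnr ) )   ! smooth grid, local array size (nrxxs)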

View File

@@ -1,5 +1,5 @@
!
! Copyright (C) 2001-2009 Quantum ESPRESSO group
! Copyright (C) 2001-2010 Quantum ESPRESSO group
! This file is distributed under the terms of the
! GNU General Public License. See the file `License'
! in the root directory of the present distribution,
@@ -10,35 +10,63 @@
SUBROUTINE data_structure( gamma_only )
!-----------------------------------------------------------------------
! this routine sets the data structure for the fft arrays
! (both the smooth and the hard mesh)
! (both the smooth and the dense grid)
! In the parallel case, it distributes columns to processes, too
!
USE kinds, ONLY : DP
USE io_global, ONLY : stdout
USE mp, ONLY : mp_sum
USE mp_global, ONLY : intra_pool_comm, use_task_groups
USE mp, ONLY : mp_sum, mp_max
USE mp_global, ONLY : intra_pool_comm, inter_pool_comm, use_task_groups
USE fft_base, ONLY : dfftp, dffts
USE cell_base, ONLY : bg
USE cell_base, ONLY : bg, tpiba
USE klist, ONLY : xk, nks
USE gvect, ONLY : ngm, ngm_g, gcutm
USE gvecs, ONLY : ngms, ngms_g, gcutms
USE stick_base, ONLY : pstickset
USE task_groups,ONLY : task_groups_init
USE wvfct, ONLY : ecutwfc
!
IMPLICIT NONE
LOGICAL, INTENT(in) :: gamma_only
REAL (DP) :: gkcut
INTEGER :: ngw_
INTEGER :: ik, ngw_
!
gkcut = calculate_gkcut ()
CALL pstickset( gamma_only, bg(1,1), bg(1,2), bg(1,3), gcutm, gkcut, gcutms,&
dfftp, dffts, ngw_ , ngm , ngms )
! ... calculate gkcut = max |k+G|^2, in (2pi/a)^2 units
!
! compute the global number of g, i.e. the sum over all processors
! within a pool
IF (nks == 0) THEN
!
! if k-points are automatically generated (which happens later)
! use max(bg)/2 as an estimate of the largest k-point
!
gkcut = 0.5d0 * max ( &
sqrt (sum(bg (1:3, 1)**2) ), &
sqrt (sum(bg (1:3, 2)**2) ), &
sqrt (sum(bg (1:3, 3)**2) ) )
ELSE
gkcut = 0.0d0
DO ik = 1, nks
gkcut = max (gkcut, sqrt ( sum(xk (1:3, ik)**2) ) )
ENDDO
ENDIF
gkcut = (sqrt (ecutwfc) / tpiba + gkcut)**2
!
! ... find maximum value among all the processors
!
CALL mp_max (gkcut, inter_pool_comm )
!
! ... set up fft descriptors, including parallel stuff: sticks, planes, etc.
!
CALL pstickset( gamma_only, bg(1,1), bg(1,2), bg(1,3), &
gcutm, gkcut, gcutms, dfftp, dffts, ngw_ , ngm , ngms )
!
! on output, ngm and ngms contain the local number of G-vectors
! for the two grids. Compute here the global number of G-vectors,
! i.e. the sum over all processors within a pool
!
ngm_g = ngm ; CALL mp_sum( ngm_g , intra_pool_comm )
ngms_g = ngms ; CALL mp_sum( ngms_g, intra_pool_comm )
!
IF( use_task_groups ) THEN
!
! Initialize task groups.
@@ -48,45 +76,5 @@ SUBROUTINE data_structure( gamma_only )
!
ENDIF
CONTAINS
FUNCTION calculate_gkcut()
USE kinds, ONLY : DP
USE cell_base, ONLY : bg, tpiba
USE klist, ONLY : xk, nks
USE mp, ONLY : mp_max
USE mp_global, ONLY : inter_pool_comm
USE wvfct, ONLY : ecutwfc
REAL (DP) :: gkcut, calculate_gkcut
INTEGER :: ik
IF (nks == 0) THEN
!
! if k-points are automatically generated (which happens later)
! use max(bg)/2 as an estimate of the largest k-point
!
gkcut = 0.5d0 * max ( &
sqrt (sum(bg (1:3, 1)**2) ), &
sqrt (sum(bg (1:3, 2)**2) ), &
sqrt (sum(bg (1:3, 3)**2) ) )
ELSE
gkcut = 0.0d0
DO ik = 1, nks
gkcut = max (gkcut, sqrt ( sum(xk (1:3, ik)**2) ) )
ENDDO
ENDIF
gkcut = (sqrt (ecutwfc) / tpiba + gkcut)**2
!
! ... find maximum value among all the processors
!
CALL mp_max (gkcut, inter_pool_comm )
!
calculate_gkcut = gkcut
!
END function calculate_gkcut
END SUBROUTINE data_structure
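For reference, the estimate inlined above bounds the wavefunction sphere in reciprocal space via the triangle inequality |k+G| <= |k| + |G|; with |G|_max = sqrt(ecutwfc)/tpiba (in 2pi/a units) this gives, in units of (2pi/a)^2,

    gkcut = \left( \frac{\sqrt{\mathtt{ecutwfc}}}{\mathtt{tpiba}} + \max_k |\mathbf{k}| \right)^2

which is the expression gkcut = (sqrt (ecutwfc) / tpiba + gkcut)**2 appearing in both the new inlined code and the removed calculate_gkcut function.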