Cleanup of unnecessary #ifdef __MPI preprocessor directives.

git-svn-id: http://qeforge.qe-forge.org/svn/q-e/trunk/espresso@9820 c92efa57-630b-4861-b058-cf58834340f0
commit 1225df21cf
parent 652f460dfa
Author: dalcorso
Date:   2013-01-20 15:01:54 +00:00
20 changed files with 21 additions and 60 deletions

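The change repeated in every hunk below is the removal of a caller-side guard around a collective reduction:

   #ifdef __MPI
      call mp_sum ( rho, intra_bgrp_comm )
   #endif

becomes simply

      call mp_sum ( rho, intra_bgrp_comm )

The guard is redundant because the wrapper routine can handle the serial case itself, so the single __MPI test lives in one place instead of at every call site. A minimal sketch of that idea follows; it is a hypothetical stand-in written for illustration, not the actual QE mp module (USE mp, ONLY: mp_sum) that these routines use.

      module mp_sum_sketch
        implicit none
      contains
        ! Reduce a real array over the processes of communicator comm.
        ! In a serial build the conditional body compiles to nothing,
        ! so calling this routine unconditionally is harmless.
        subroutine sketch_mp_sum( vec, comm )
          real(8), intent(inout) :: vec(:)
          integer, intent(in)    :: comm
#if defined (__MPI)
          include 'mpif.h'
          integer :: ierr
          call MPI_Allreduce( MPI_IN_PLACE, vec, size(vec), &
                              MPI_DOUBLE_PRECISION, MPI_SUM, comm, ierr )
#endif
        end subroutine sketch_mp_sum
      end module mp_sum_sketch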

@ -159,10 +159,8 @@ subroutine cgsolve_all_imfreq (h_psi, cg_psi, e, d0psi, dpsi, h_diag, &
endif
enddo
kter_eff = kter_eff + DBLE (lbnd) / DBLE (nbnd)
#ifdef __MPI
call mp_sum(rho)
!!!call reduce (lbnd, rho )
#endif
do ibnd = nbnd, 1, -1
if (conv(ibnd).eq.0) then
rho(ibnd)=rho(lbnd)
@ -221,12 +219,12 @@ subroutine cgsolve_all_imfreq (h_psi, cg_psi, e, d0psi, dpsi, h_diag, &
c(lbnd) = ZDOTC (ndim, h(1,ibnd), 1, t(1,lbnd), 1)
end if
end do
#ifdef __MPI
call mp_sum(a)
call mp_sum(c)
!!!call reduce (lbnd, a)
!!!call reduce (lbnd, c)
#endif
lbnd=0
do ibnd = 1, nbnd
if (conv (ibnd) .eq.0) then


@ -168,9 +168,7 @@ CONTAINS
npwx, spsi, npwx, (0.d0, 0.d0) , ps, nbnd)
ENDIF
ps (:,:) = ps(:,:) * alpha_pv
#ifdef __MPI
CALL mp_sum ( ps, intra_bgrp_comm )
#endif
hpsi (:,:) = (0.d0, 0.d0)
IF (noncolin) THEN
@ -220,9 +218,7 @@ CONTAINS
CALL DGEMM( 'C', 'N', nbnd, m, n, 2.D0,evc, 2*npwx*npol, spsi, 2*npwx*npol, 0.D0, ps, nbnd )
ENDIF
ps (:,:) = ps(:,:) * alpha_pv
#ifdef __MPI
CALL mp_sum ( ps, intra_bgrp_comm )
#endif
hpsi (:,:) = (0.d0, 0.d0)
IF (noncolin) THEN


@ -319,9 +319,9 @@ subroutine compute_nldyn (wdyn, wgg, becq, alpq)
enddo
enddo
enddo
#ifdef __MPI
call mp_sum ( dynwrk, intra_bgrp_comm )
#endif
do nu_i = 1, 3 * nat
do nu_j = 1, 3 * nat
ps = (0.0d0, 0.0d0)


@ -224,10 +224,8 @@ subroutine d2ionq (nat, ntyp, ityp, zv, tau, alat, omega, q, at, &
enddo
enddo
#ifdef __MPI
100 continue
call mp_sum ( dy3, intra_bgrp_comm )
#endif
!
! The dynamical matrix was computed in cartesian axis and now we put
! it on the basis of the modes


@ -60,10 +60,8 @@ subroutine dielec()
enddo
enddo
enddo
#ifdef __MPI
call mp_sum ( epsilon, intra_bgrp_comm )
call mp_sum ( epsilon, inter_pool_comm )
#endif
!
! symmetrize
!

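Several of these routines (dielec above; polariz, el_opt and raman_mat below) apply the same two-level reduction: each process holds only its slice of G-vectors and bands, and each pool only its subset of k-points, so the partial result is summed first inside the band group and then across pools. A minimal sketch of the pattern, assuming QE's mp and mp_global modules as used in the hunks (the subroutine name and the 3x3 shape are illustrative only):

      ! illustrative sketch of the two-step reduction used in dielec/polariz
      subroutine sum_over_procs_and_pools( eps )
        use mp,        only : mp_sum
        use mp_global, only : intra_bgrp_comm, inter_pool_comm
        implicit none
        real(8), intent(inout) :: eps(3,3)
        ! on entry eps holds this process's partial sum only
        call mp_sum ( eps, intra_bgrp_comm )  ! combine the slices within one pool
        call mp_sum ( eps, inter_pool_comm )  ! combine the k-point sums of all pools
      end subroutine sum_over_procs_and_pools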

@ -156,7 +156,6 @@ subroutine drho
enddo
enddo
#ifdef __MPI
!
! collect contributions from all pools (sum over k-points)
!
@ -166,7 +165,7 @@ subroutine drho
! collect contributions from nodes of a pool (sum over G & R space)
!
call mp_sum ( wdyn, intra_bgrp_comm )
#endif
call zaxpy (3 * nat * 3 * nat, (1.d0, 0.d0), wdyn, 1, dyn00, 1)
!
! force this term to be hermitean
@ -183,7 +182,6 @@ subroutine drho
!
allocate (drhoust(dfftp%nnr, nspin_mag , npertx))
drhoust=(0.d0,0.d0)
#ifdef __MPI
!
! The calculation of dbecsum is distributed across processors (see addusdbec)
! Sum over processors the contributions coming from each slice of bands
@ -193,7 +191,6 @@ subroutine drho
ELSE
call mp_sum ( dbecsum, intra_bgrp_comm )
END IF
#endif
IF (noncolin.and.okvan) CALL set_dbecsum_nc(dbecsum_nc, dbecsum, 3*nat)
@ -220,12 +217,10 @@ subroutine drho
enddo
mode = mode+npe
enddo
#ifdef __MPI
!
! Collect the sum over k points in different pools.
!
IF (okpaw) call mp_sum ( becsumort, inter_pool_comm )
#endif
deallocate (drhoust)
deallocate (dvlocin)


@ -137,12 +137,10 @@ subroutine drhodv (nu_i0, nper, drhoscf)
enddo
enddo
#ifdef __MPI
!
! collect contributions from all pools (sum over k-points)
!
call mp_sum ( wdyn, inter_pool_comm )
#endif
!
! add the contribution of the local part of the perturbation
!


@ -59,12 +59,10 @@ subroutine drhodvloc (nu_i0, npe, drhoscf, wdyn)
enddo
enddo
#ifdef __MPI
!
! collect contributions from nodes of a pool (sum over G & R space)
!
call mp_sum ( dynwrk, intra_bgrp_comm )
#endif
wdyn(:,:) = wdyn(:,:) + dynwrk(:,:)
deallocate(dvloc)


@ -82,13 +82,11 @@ subroutine drhodvus (irr, imode0, dvscfin, npe)
mode0 = mode0 + npert (irr1)
enddo
deallocate (drhous)
#ifdef __MPI
!
! collect contributions from all pools (sum over k-points)
!
call mp_sum ( dyn1, inter_pool_comm )
call mp_sum ( dyn1, intra_bgrp_comm )
#endif
!
! PAW contribution: this part of the dynamical matrix is present only
! with PAW. PAW and US dynamical matrices differ only at this point.


@ -237,12 +237,12 @@ subroutine dvanqq
enddo
endif
enddo
#ifdef __MPI
call mp_sum( int1, intra_bgrp_comm )
call mp_sum( int2, intra_bgrp_comm )
call mp_sum( int4, intra_bgrp_comm )
call mp_sum( int5, intra_bgrp_comm )
#endif
IF (noncolin) THEN
CALL set_int12_nc(0)
int4_nc = (0.d0, 0.d0)


@ -30,6 +30,7 @@ subroutine dvpsi_e2
USE ramanm, ONLY : lrba2, iuba2, lrchf, iuchf, a1j, a2j
USE mp_global, ONLY: my_pool_id, inter_pool_comm, intra_bgrp_comm
USE mp, ONLY: mp_sum
implicit none
integer :: ik, ipa, ipb, ir, ibnd, jbnd, nrec
@ -184,8 +185,8 @@ subroutine dvpsi_e2
enddo
100 continue
call mp_sum ( aux6, inter_pool_comm )
call psyme2 (aux6)
call mp_sum ( aux6, inter_pool_comm )
call psyme2 (aux6)
deallocate (d2muxc)
deallocate (aux3)


@ -96,9 +96,7 @@ subroutine dynmatcc
enddo
enddo
enddo
#ifdef __MPI
call mp_sum (dynwrk,intra_bgrp_comm)
#endif
!
dynwrk = dynwrk * omega
!


@ -25,11 +25,10 @@ subroutine el_opt
USE control_ph, ONLY : nbnd_occ
USE ramanm, ONLY : eloptns, jab, lrchf, iuchf, done_elop
USE io_global, ONLY: ionode_id
#ifdef __MPI
USE mp, ONLY: mp_bcast, mp_sum
USE mp_global, ONLY: my_pool_id, inter_pool_comm, intra_bgrp_comm, &
intra_image_comm
#endif
implicit none
logical wr_all
@ -83,10 +82,8 @@ subroutine el_opt
enddo
enddo
#ifdef __MPI
call mp_sum( elop_ , intra_bgrp_comm)
call mp_sum( elop_ , inter_pool_comm)
#endif
deallocate (chif )
deallocate (depsi )
@ -123,11 +120,10 @@ subroutine el_opt
enddo
enddo
#ifdef __MPI
call mp_sum ( ps3, intra_bgrp_comm )
100 continue
call mp_bcast(ps3, ionode_id, intra_image_comm)
#endif
deallocate (d2muxc )
deallocate (aux3 )


@ -273,9 +273,7 @@ SUBROUTINE phq_init()
END DO
!
END DO
#ifdef __MPI
CALL mp_sum ( eprec, intra_bgrp_comm )
#endif
CALL mp_sum ( eprec, intra_bgrp_comm )
!
DEALLOCATE( aux1 )


@ -95,10 +95,10 @@ SUBROUTINE phqscf
IF (zue) CALL add_zstar_ue (imode0, npe )
IF (zue.AND. okvan) CALL add_zstar_ue_us(imode0, npe )
IF (zue) THEN
#ifdef __MPI
call mp_sum ( zstarue0_rec, intra_bgrp_comm )
call mp_sum ( zstarue0_rec, inter_pool_comm )
#endif
zstarue0(:,:)=zstarue0(:,:)+zstarue0_rec(:,:)
END IF
!


@ -69,10 +69,8 @@ subroutine polariz ( iw )
enddo
enddo
enddo
#ifdef __MPI
call mp_sum ( epsilon, intra_bgrp_comm )
call mp_sum ( epsilon, inter_pool_comm )
#endif
!
! symmetrize
!


@ -132,9 +132,9 @@ subroutine raman_mat
enddo
enddo
enddo
#ifdef __MPI
call mp_sum ( ps, intra_bgrp_comm )
#endif
do ipa = 1, 6
nrec = (ipa - 1) * nksq + ik
call davcio (chif (1, 1, ipa), lrd2w, iud2w, nrec, -1)
@ -218,10 +218,8 @@ subroutine raman_mat
enddo
#ifdef __MPI
call mp_sum( wrk, intra_bgrp_comm )
call mp_sum( wrk, inter_pool_comm )
#endif
do iat = 1, nat
do icr = 1, 3


@ -41,12 +41,11 @@ subroutine set_irr_new (xq, u, npert, nirr, eigen)
USE control_flags, ONLY : iverbosity
USE random_numbers, ONLY : randy
USE rap_point_group, ONLY : name_rap
#ifdef __MPI
use mp, only: mp_bcast
use io_global, only : ionode_id
use mp_global, only : intra_image_comm
#endif
implicit none
!
! first the dummy variables
@ -277,7 +276,6 @@ subroutine set_irr_new (xq, u, npert, nirr, eigen)
! nsymq=1
! minus_q=.false.
#ifdef __MPI
!
! parallel stuff: first node broadcasts everything to all nodes
!
@ -292,6 +290,6 @@ subroutine set_irr_new (xq, u, npert, nirr, eigen)
call mp_bcast (minus_q, ionode_id, intra_image_comm)
call mp_bcast (num_rap_mode, ionode_id, intra_image_comm)
call mp_bcast (name_rap_mode, ionode_id, intra_image_comm)
#endif
return
end subroutine set_irr_new


@ -161,12 +161,10 @@ subroutine set_irr_sym_new ( t, tmq, npertx )
enddo
enddo
#ifdef __MPI
!
! parallel stuff: first node broadcasts everything to all nodes
!
call mp_bcast (t, ionode_id, intra_image_comm)
call mp_bcast (tmq, ionode_id, intra_image_comm)
#endif
return
end subroutine set_irr_sym_new


@ -207,9 +207,8 @@ subroutine solve_e_fpol ( iw )
CALL zgemm( 'C', 'N', nbnd_occ (ik), nbnd_occ (ik), npw, &
(1.d0,0.d0), evc(1,1), npwx, dvpsi(1,1), npwx, (0.d0,0.d0), &
ps(1,1), nbnd )
#ifdef __MPI
call mp_sum ( ps( :, 1:nbnd_occ(ik) ), intra_bgrp_comm )
#endif
! dpsi is used as work space to store S|evc>
!
CALL calbec (npw, vkb, evc, becp, nbnd_occ(ik) )
@ -289,14 +288,12 @@ subroutine solve_e_fpol ( iw )
ik, dbecsum(1,1,current_spin,ipol), dpsi)
enddo ! on polarizations
enddo ! on k points
#ifdef __MPI
!
! The calculation of dbecsum is distributed across processors
! (see addusdbec) - we sum over processors the contributions
! coming from each slice of bands
!
call mp_sum ( dbecsum, intra_bgrp_comm )
#endif
if (doublegrid) then
do is=1,nspin