The complete minimal code is provided at the end.
I have this module.f90, which declares this as a module-level (global) variable:
module Quantum_Ising_1D
implicit none
private
integer :: iparam(11)
This iparam is then used in the subroutine that performs the diagonalization.
That subroutine calls another routine from an external library with iparam as an argument (dsaupd from arpack-ng, in case anyone runs into a similar problem).
I compile this module.f90 with the -i8 flag.
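As a sanity check (a minimal sketch, not part of the module), printing the storage size of a default integer in a given compilation unit shows whether -i8 actually reached it:

program check_default_integer_kind
    implicit none
    integer    :: default_int      ! 8 bytes only if -i8 was applied to this file
    integer(8) :: explicit_int64   ! always 8 bytes
    print *, 'default integer bytes: ', storage_size(default_int)/8
    print *, 'integer(8) bytes:      ', storage_size(explicit_int64)/8
end program check_default_integer_kind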
The library routine takes various other arguments, including other integers. It is defined in dsaupd.f, where iparam is declared as:
integer iparam(11)   ! no intent attribute, it is F77
Right after the declarations I print iparam together with some of the preceding and following integer arguments.
The other integers come through unchanged, so it should not be a problem of mismatched integer precision, because in that case they would all be shifted as well.
Also, when building arpack-ng I made sure the correct libraries were linked; for BLAS and LAPACK the terminal gave me:
-- link: /opt/intel/oneapi/mkl/2024.0/lib/libmkl_intel_ilp64.so
Related: integer errors when building arpack-ng with MKL
iparam printed from my module right before the call to dsaupd:
1
0
1000
0
0
0
1
0
0
0
0
iparam printed inside dsaupd, right after the declarations:
1
1000
0
1
0
0
0
32
1000
0
0
where (the code below uses the following parameters):
tot_Hilbert_elem = 32 ! this is called n in dsaupd.f
ishfts = 1
maxitr = 1000 !max iteration
mode1 = 1
iparam(1) = ishfts
iparam(3) = maxitr
iparam(7) = mode1
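For reference, the compaction seen inside dsaupd is the pattern one gets when the same memory is read with a larger integer kind; here is a minimal, self-contained sketch (little-endian assumed) that uses TRANSFER to mimic an ILP64 callee reinterpreting a 32-bit iparam:

program kind_mismatch_demo
    implicit none
    integer(4) :: iparam32(11)
    integer(8) :: as64(5)
    iparam32 = 0
    iparam32(1) = 1
    iparam32(3) = 1000
    iparam32(7) = 1
    ! view the first ten 4-byte elements as five 8-byte integers
    as64 = transfer(iparam32(1:10), as64)
    print *, as64   ! on little-endian hardware: 1 1000 0 1 0
end program kind_mismatch_demo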
Here is the actual dsaupd.f, truncated right after the added print statements:
subroutine dsaupd
& ( ido, bmat, n, which, nev, tol, resid, ncv, v, ldv, iparam,
& ipntr, workd, workl, lworkl, info )
c
c %----------------------------------------------------%
c | Include files for debugging and timing information |
c %----------------------------------------------------%
c
include 'debug.h'
include 'stat.h'
c
c %------------------%
c | Scalar Arguments |
c %------------------%
c
character bmat*1, which*2
integer ido, info, ldv, lworkl, n, ncv, nev
Double precision
& tol
c
c %-----------------%
c | Array Arguments |
c %-----------------%
c
integer iparam(11), ipntr(11)
Double precision
& resid(n), v(ldv,ncv), workd(3*n), workl(lworkl)
c
c %------------%
c | Parameters |
c %------------%
c
Double precision
& one, zero
parameter (one = 1.0D+0 , zero = 0.0D+0 )
c
c %---------------%
c | Local Scalars |
c %---------------%
c
integer bounds, ierr, ih, iq, ishift, iupd, iw,
& ldh, ldq, msglvl, mxiter, mode, nb,
& nev0, next, np, ritz, j
save bounds, ierr, ih, iq, ishift, iupd, iw,
& ldh, ldq, msglvl, mxiter, mode, nb,
& nev0, next, np, ritz
c
c %----------------------%
c | External Subroutines |
c %----------------------%
c
external dsaup2 , dvout , ivout, arscnd, dstats
c
c %--------------------%
c | External Functions |
c %--------------------%
c
Double precision
& dlamch
external dlamch
c
c %-----------------------%
c | Executable Statements |
c %-----------------------%
c
print *, ldv
print *, iparam(1)
print *, iparam(2)
print *, iparam(3)
print *, iparam(4)
print *, iparam(5)
print *, iparam(6)
print *, iparam(7)
print *, iparam(8)
print *, iparam(9)
print *, iparam(10)
print *, iparam(11)
print *, lworkl
This is the part of my program that prints the correct values:
do while (ido /= 99)
print *, iparam(1)
print *, iparam(2)
print *, iparam(3)
print *, iparam(4)
print *, iparam(5)
print *, iparam(6)
print *, iparam(7)
print *, iparam(8)
print *, iparam(9)
print *, iparam(10)
print *, iparam(11)
call dsaupd ( ido, Bmat, tot_Hilbert_elem, which_eval, nev, tol, resid, ncv, &
lancz_basis_matrix, ldv, iparam, ipntr, workd, workl, length_workl, info )
What am I missing?
Minimal example (I skipped the initialization of the sparse matrix, since it is not the cause of the array reordering; be sure to print iparam inside dsaupd, as I wrote above):
Follow this minimal guide: https://github.com/VinceNeede/Ising-Diag
It says to install MKL and arpack-ng.
Arpack-NG comes from here: https://github.com/opencollab/arpack-ng
For MKL remember to set up the environment. I added those lines directly at the end of my terminal configuration file, so that it is done automatically.
The minimal CMakeLists is provided below. Read the comments too, as you need to compile the mkl_spblas module.
minimal_Quantum_Ising_1D.f90 is a module I wrote, following the examples in arpack-ng.
main_program_fortran.f90 is the program that calls it.
Most of the included libraries and flags come from: https://www.intel.com/content/www/us/en/developer/tools/oneapi/onemkl-link-line-advisor.html
# copy-paste the commands below when you are in bin.
# cmake ../ -B ../build -DCMAKE_Fortran_COMPILER=ifort -DCMAKE_CXX_COMPILER=icpx
# cmake --build ../build
cmake_minimum_required(VERSION 3.24)
project(modulo_02 LANGUAGES CXX Fortran)
message("\n")
get_filename_component (Fortran_COMPILER_NAME ${CMAKE_Fortran_COMPILER} NAME)
message(Fortran_compiler: " ${Fortran_COMPILER_NAME}")
set(executable_and_libraries YOU/NEED/TO/PUT/THE/DIR)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${executable_and_libraries}/bin)
# %---------------%
# | MKL FORTRAN |
# %---------------%
set(MKL_THREADING sequential)
set(MKL_LINK static)
set(MKLROOT /opt/intel/oneapi/mkl/2024.0)
find_package(MKL CONFIG REQUIRED PATHS ${MKLROOT})
set(Cmake_Fortran_FLAGS "-Ofast -i8")
#First compile mkl_spblas.f90 into an object file mkl_spblas.o that you can link against all the other libraries
#I compiled it directly from the terminal, knowing the exact location of MKLROOT:
#N.B: YOU NEED THE MOD FILE TO BE IN THE BUILD DIRECTORY; for this reason I added the -module flag.
#ifort -fpp -I/opt/intel/oneapi/mkl/2024.0/include -w /opt/intel/oneapi/mkl/2024.0/include/mkl_spblas.f90 -c -o ../src_external/mkl_spblas.o -Ofast -i8 -static -module ../build
#Home made Fortran module
add_library(Quantum_Ising_1D STATIC)
target_sources(Quantum_Ising_1D PUBLIC
${executable_and_libraries}/src_homemade/minimal_Quantum_Ising_1D.f90
)
#Main program Fortran
add_executable(main_program_fortran
${executable_and_libraries}/bin/main_program_fortran.f90
)
target_link_libraries(main_program_fortran PUBLIC
Quantum_Ising_1D
${executable_and_libraries}/src_external/arpack-ng/build/libarpackILP64.a
${executable_and_libraries}/src_external/mkl_spblas.o
${MKLROOT}/lib/intel64/libmkl_blas95_ilp64.a
${MKLROOT}/lib/intel64/libmkl_lapack95_ilp64.a
$<LINK_GROUP:RESCAN, ${MKLROOT}/lib/libmkl_intel_ilp64.a, ${MKLROOT}/lib/libmkl_sequential.a, ${MKLROOT}/lib/libmkl_core.a, ${MKLROOT}/lib/libmkl_blacs_intelmpi_ilp64.a >
)
target_include_directories(main_program_fortran PUBLIC
${MKLROOT}/include/mkl/intel64/ilp64 BEFORE
${MKLROOT}/include AFTER
)
set_target_properties(main_program_fortran PROPERTIES
#COMPILE_FLAGS "-fpp" #needed for preprocessing. It's needed just when compiling the actual library which uses it.
LINK_FLAGS "-lpthread -lm -ldl -static" #if the libraries are dynamic, then you need to add link flags just to the executable
)
program main_program_fortran
use Quantum_Ising_1D
implicit none
integer(4) :: number_sites, nev, ncv
real(8) :: trans_magnet_field, long_magnet_field, long_magnetiz, trans_magnetiz
real(8), allocatable :: eval_arr(:), evec_matrix(:,:)
number_sites = 5
nev = 1
ncv = 3
long_magnet_field = 0
trans_magnet_field = 1.1
call Initialize_Quantum_Ising(number_sites, nev, ncv)
! call Initialize_Hamiltonian(long_magnet_field, trans_magnet_field)
print *, tot_Hilbert_elem
allocate(eval_arr(nev))
allocate(evec_matrix(tot_Hilbert_elem, nev))
print *, 'start'
call diag_lanczos('I', 'SA', eval_arr, evec_matrix)
end program main_program_fortran
module Quantum_Ising_1D
use mkl_spblas
implicit none
private
public Initialize_Quantum_Ising, Initialize_Hamiltonian, get_long_magnetiz, get_trans_magnetiz, diag_lanczos, tot_Hilbert_elem
integer, parameter :: number_spin_states = 2
integer, parameter :: dimensions = 1
integer, parameter :: boundary_conditions_flag = 1
integer :: number_sites, total_sites, tot_Hilbert_elem, nev, ncv, ldv
logical, allocatable :: comput_base(:,:)
type(sparse_matrix_T) :: sp_matrix
type(MATRIX_DESCR) :: descr_sp_matrix !matrix properties, e.g. symmetric, Hermitian, etc.
! FOR ALL THE FOLLOWING LOCAL NAMES, THEY ARE LEFT AS IN THE ORIGINAL DOCUMENTATION
! Allocations are done in initialize_comput_base
! %------------------------------------------------------%
! | NCV is the largest number of basis vectors that will |
! | be used in the Implicitly Restarted Arnoldi |
! | Process. Work per major iteration is |
! | proportional to N*NCV*NCV. |
! | |
! |NEV is the number of eigenvalues |
! %------------------------------------------------------%
! %------------------------------------------------------%
! | Note: NEV and NCV must satisfy the following |
! | conditions: |
! | NEV + 1 <= NCV |
! %------------------------------------------------------%
! %--------------%
! | Local Arrays |
! %--------------%
Real(8), allocatable :: lancz_basis_matrix(:,:), workl(:), workd(:), resid(:), ax(:)
logical, allocatable :: select(:)
integer :: iparam(11), ipntr(11)
! %---------------%
! | Local Scalars |
! %---------------%
integer(4) :: previous_file_digits
integer :: ido, length_workl, info, ierr, j, ishfts, maxitr, mode1, nconv, itry
Real(8) :: tol, sigma, rnorm !tol(arpack.ng) == tol(Spectra C++),
!maxitr(arpack.ng) == maxit(Spectra C++)
! %------------%
! | Parameters |
! %------------%
Real(8), parameter :: zero=0.0d+0 !passing zero as tol makes dsaupd use machine precision
! %-----------------------------%
! | BLAS & LAPACK routines used |
! %-----------------------------%
! %------------------------------------------------------------%
! | Routines called: |
! | dsaupd ARPACK reverse communication interface routine. |
! | dseupd ARPACK routine that returns Ritz values |
! | and (optionally) Ritz vectors. |
! | dnrm2 Level 1 BLAS that computes the norm of a vector. |
! | daxpy Level 1 BLAS that computes y <- alpha*x+y. |
! %------------------------------------------------------------%
Real(8) dnrm2
external dnrm2, daxpy, dseupd, dsaupd, dcopy, dgetv0
contains
subroutine Initialize_Quantum_Ising(number_sites_external, nev_external, ncv_external)
implicit none
integer, intent(in) :: number_sites_external, nev_external, ncv_external
integer :: i_row, j_col, j_holder
number_sites = number_sites_external
nev = nev_external
ncv = ncv_external
total_sites = number_sites*dimensions
tot_Hilbert_elem = number_spin_states**total_sites
ldv = tot_Hilbert_elem
!Initializing all independent configurations of the Hilbert space.
allocate(comput_base(total_sites, tot_Hilbert_elem))
!column-major language: consecutive elements of comput_base run over adjacent sites, not Hilbert states
j_holder = 0
do j_col = 1, tot_Hilbert_elem !column major language
j_holder = j_col
do i_row = 1, total_sites
comput_base(i_row, j_col) = MOD(j_holder,2)
j_holder = j_holder/2
end do
end do
! %-------------------------%
! | Local Arrays allocation |
! %-------------------------%
allocate(lancz_basis_matrix(ldv,ncv))
length_workl = ncv*(ncv+8)
allocate(workl(length_workl))
allocate(workd(3*tot_Hilbert_elem))
allocate(resid(tot_Hilbert_elem))
allocate(ax(tot_Hilbert_elem))
allocate(select(ncv))
end subroutine Initialize_Quantum_Ising
subroutine Initialize_Hamiltonian(long_magnet_field, trans_magnet_field)
implicit none
real(8), intent(in) :: long_magnet_field, trans_magnet_field
integer :: i_row, j_col !indices for computational base we are scrolling through.
integer :: new_i_row !index for non diagonal elements of sp_matrix.
real(8) :: val_arr(tot_Hilbert_elem*(total_sites+1))
integer(4) :: col_arr(tot_Hilbert_elem*(total_sites+1)), row_arr(tot_Hilbert_elem*(total_sites+1))
integer :: nnz, index_holder
if (total_sites > 26) then
print *, 'change dimension of col_arr and row_arr at line 125 Quantum_Ising_1D.f90'
print *, 'max total_sites with int32 is 26'
stop
end if
print *, "I'm not the problem"
end subroutine Initialize_Hamiltonian
subroutine diag_lanczos(Bmat, which_eval, eval_array, evec_matrix, v0)
implicit none
! %---------------------------------------%
! | A*x=lambda*Bmat*x |
! | where x is a vector, lambda the |
! | eigenvalue to search for, and B=I. |
! | |
! | 'I' specifies that the problem to |
! | solve is a standard Eigenvalues |
! | problem. |
! | |
! | which_evals |
! | 'SA' specifies that we are searching |
! | for the Smallest Algebraic eigenvalues.|
! | 'LM' for Largest Magnitude. |
! %---------------------------------------%
character,intent(in) :: Bmat(1), which_eval(2)
real(8),intent(out) :: eval_array(nev) !array for eigenvalues
real(8),intent(out),optional :: evec_matrix(tot_Hilbert_elem,nev) !1 array for each eigenvector --> matrix
real(8),intent(in),optional :: v0(tot_Hilbert_elem) !initial vector for lanczos
integer :: stat=0 !final state of computation. If altered an error occurred
logical :: rvec = .false. !Do you want to compute eigenvectors too?
ido = 0 !reverse communication variable
tol = zero !if zero, machine precision is used
if (present(v0)) then
info=1
resid=v0
else
info=0
end if
if (present(evec_matrix)) rvec=.true.
ishfts = 1
maxitr = 1000 !max iteration
mode1 = 1
iparam(1) = ishfts
iparam(3) = maxitr
iparam(7) = mode1
do while (ido /= 99) !reverse-communication loop (operator application omitted in this minimal example)
print *, iparam(1)
print *, iparam(2)
print *, iparam(3)
print *, iparam(4)
print *, iparam(5)
print *, iparam(6)
print *, iparam(7)
print *, iparam(8)
print *, iparam(9)
print *, iparam(10)
print *, iparam(11)
call dsaupd ( ido, Bmat, tot_Hilbert_elem, which_eval, nev, tol, resid, ncv, &
lancz_basis_matrix, ldv, iparam, ipntr, workd, workl, length_workl, info )
print *, 'solved'
end do
end subroutine diag_lanczos
end module Quantum_Ising_1D
So, the problem is this: I have a main program, a module containing some integers, and a library that needs 64-bit integers.
I used:
set(Cmake_Fortran_FLAGS "-Ofast -i8")
in CMakeLists.txt.
Printing the integer kind returned 4 instead of 8, so I simply switched to:
set_target_properties(Quantum_Ising_1D PROPERTIES
COMPILE_FLAGS "-Ofast -i8"
)
It worked, and I do not know why. Is there something wrong with the CMakeLists parameters?
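For completeness, a sketch of the target-scoped variant (reusing the target names from the CMakeLists above), together with a message() that shows what the global variable actually contains:

message(STATUS "CMAKE_Fortran_FLAGS = ${CMAKE_Fortran_FLAGS}")

target_compile_options(Quantum_Ising_1D     PRIVATE -Ofast -i8)
target_compile_options(main_program_fortran PRIVATE -Ofast -i8)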