High-performance sparse solvers for symmetric FEM matrices
Multiple RHS solved simultaneously, sharing one Krylov subspace for 4× fewer iterations
Quasi-inner product ⟨x,y⟩ = Σₖ xₖyₖ (no complex conjugation), natural for A = Aᵀ systems; see the sketch after this list
Built-in ILU, Jacobi, and split preconditioners
Outperforms direct solvers for large systems
Short three-term recurrences, not full Krylov basis
Monotonic quasi-minimal residual reduction
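Why the unconjugated product matters: a minimal NumPy sketch (illustration only, not part of the blocksolver API) showing that under ⟨x,y⟩ = Σₖ xₖyₖ a complex symmetric matrix is self-adjoint, which is the property the short recurrences above rely on.

    import numpy as np

    # Illustration only: the quasi inner product drops the complex conjugate,
    # so a complex *symmetric* (not Hermitian) matrix A = A^T satisfies
    # <A x, y> = <x, A y> under this pairing.
    rng = np.random.default_rng(0)
    A = rng.standard_normal((4, 4)) + 1j * rng.standard_normal((4, 4))
    A = A + A.T                                # complex symmetric test matrix
    x = rng.standard_normal(4) + 1j * rng.standard_normal(4)
    y = rng.standard_normal(4) + 1j * rng.standard_normal(4)

    quasi = lambda u, v: np.sum(u * v)         # no conjugation, unlike np.vdot
    print(np.isclose(quasi(A @ x, y), quasi(x, A @ y)))   # True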
# pip install blocksolver
import numpy as np
from scipy.sparse import diags
from blocksolver import blqmr

# Complex symmetric FEM matrix
A = diags([-1, 4, -1], [-1, 0, 1], shape=(5000, 5000))
A = A + 0.1j * diags([1], [0], shape=(5000, 5000))

# Block solve: 16 RHS at once
B = np.random.randn(5000, 16) + 0j
result = blqmr(A, B, tol=1e-10, precond_type='diag')
print(f"Converged: {result.converged}")
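To sanity-check a block solve, you can compute the relative block residual yourself. This is a hedged follow-up: the attribute name result.x for the solution block is an assumption here (only result.converged appears in the example above).

    # Hypothetical follow-up: `result.x` as the solution block is an assumption;
    # only `result.converged` is shown in the example above.
    X = result.x
    R = B - A @ X                              # block residual for all 16 RHS
    print("relative residual:", np.linalg.norm(R) / np.linalg.norm(B))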
% Complex symmetric FEM matrix
n = 5000;
K = spdiags([-ones(n,1) 4*ones(n,1) -ones(n,1)], -1:1, n, n);
A = K + 0.1i * speye(n);

% Block solve: 16 RHS at once
B = rand(n, 16) + 1i*rand(n, 16);
opt.precond = 'diag';
[X, flag] = blqmr(A, B, 1e-10, 1000, [], [], [], opt);
#include "blit_solvers.h" int main() { const int n = 5000, nnz = 14998, nrhs = 16; std::vector<int> Ap(n+1), Ai(nnz); std::vector<double> Ax(nnz), b(n*nrhs), x(n*nrhs); BlitBLQMR<double> solver(n, nrhs, 1000, 1e-3); solver.Prepare(Ap.data(), Ai.data(), Ax.data(), nnz); solver.Solve(x.data(), b.data(), nrhs); return 0; }
pip install blocksolver
Linux: apt install gfortran libsuitesparse-dev
addpath('/path/to/blit/matlab')