ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pcgesv.f
1       SUBROUTINE PCGESV( N, NRHS, A, IA, JA, DESCA, IPIV, B, IB, JB,
2      $                   DESCB, INFO )
3 *
4 * -- ScaLAPACK routine (version 1.7) --
5 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
6 * and University of California, Berkeley.
7 * Jan 30, 2006
8 *
9 *     .. Scalar Arguments ..
10       INTEGER            IA, IB, INFO, JA, JB, N, NRHS
11 *     ..
12 *     .. Array Arguments ..
13       INTEGER            DESCA( * ), DESCB( * ), IPIV( * )
14       COMPLEX            A( * ), B( * )
15 *     ..
16 *
17 * Purpose
18 * =======
19 *
20 * PCGESV computes the solution to a complex system of linear equations
21 *
22 * sub( A ) * X = sub( B ),
23 *
24 * where sub( A ) = A(IA:IA+N-1,JA:JA+N-1) is an N-by-N distributed
25 * matrix and X and sub( B ) = B(IB:IB+N-1,JB:JB+NRHS-1) are N-by-NRHS
26 * distributed matrices.
27 *
28 * The LU decomposition with partial pivoting and row interchanges is
29 * used to factor sub( A ) as sub( A ) = P * L * U, where P is a permu-
30 * tation matrix, L is unit lower triangular, and U is upper triangular.
31 * L and U are stored in sub( A ). The factored form of sub( A ) is then
32 * used to solve the system of equations sub( A ) * X = sub( B ).
33 *
34 * Notes
35 * =====
36 *
37 * Each global data object is described by an associated description
38 * vector. This vector stores the information required to establish
39 * the mapping between an object element and its corresponding process
40 * and memory location.
41 *
42 * Let A be a generic term for any 2D block cyclically distributed array.
43 * Such a global array has an associated description vector DESCA.
44 * In the following comments, the character _ should be read as
45 * "of the global array".
46 *
47 * NOTATION         STORED IN       EXPLANATION
48 * ---------------- --------------- --------------------------------------
49 * DTYPE_A (global) DESCA( DTYPE_ ) The descriptor type.  In this case,
50 *                                  DTYPE_A = 1.
51 * CTXT_A  (global) DESCA( CTXT_ )  The BLACS context handle, indicating
52 *                                  the BLACS process grid A is distribu-
53 *                                  ted over. The context itself is glo-
54 *                                  bal, but the handle (the integer
55 *                                  value) may vary.
56 * M_A     (global) DESCA( M_ )     The number of rows in the global
57 *                                  array A.
58 * N_A     (global) DESCA( N_ )     The number of columns in the global
59 *                                  array A.
60 * MB_A    (global) DESCA( MB_ )    The blocking factor used to distribute
61 *                                  the rows of the array.
62 * NB_A    (global) DESCA( NB_ )    The blocking factor used to distribute
63 *                                  the columns of the array.
64 * RSRC_A  (global) DESCA( RSRC_ )  The process row over which the first
65 *                                  row of the array A is distributed.
66 * CSRC_A  (global) DESCA( CSRC_ )  The process column over which the
67 *                                  first column of the array A is
68 *                                  distributed.
69 * LLD_A   (local)  DESCA( LLD_ )   The leading dimension of the local
70 *                                  array.  LLD_A >= MAX(1,LOCr(M_A)).
71 *
72 * Let K be the number of rows or columns of a distributed matrix,
73 * and assume that its process grid has dimension p x q.
74 * LOCr( K ) denotes the number of elements of K that a process
75 * would receive if K were distributed over the p processes of its
76 * process column.
77 * Similarly, LOCc( K ) denotes the number of elements of K that a
78 * process would receive if K were distributed over the q processes of
79 * its process row.
80 * The values of LOCr() and LOCc() may be determined via a call to the
81 * ScaLAPACK tool function, NUMROC:
82 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
83 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
84 * An upper bound for these quantities may be computed by:
85 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
86 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
87 *
88 * This routine requires square block decomposition ( MB_A = NB_A ).
89 *
90 * Arguments
91 * =========
92 *
93 * N (global input) INTEGER
94 * The number of rows and columns to be operated on, i.e. the
95 * order of the distributed submatrix sub( A ). N >= 0.
96 *
97 * NRHS (global input) INTEGER
98 * The number of right hand sides, i.e., the number of columns
99 * of the distributed submatrix sub( B ). NRHS >= 0.
100 *
101 * A (local input/local output) COMPLEX pointer into the
102 * local memory to an array of dimension (LLD_A,LOCc(JA+N-1)).
103 * On entry, the local pieces of the N-by-N distributed matrix
104 * sub( A ) to be factored. On exit, this array contains the
105 * local pieces of the factors L and U from the factorization
106 * sub( A ) = P*L*U; the unit diagonal elements of L are not
107 * stored.
108 *
109 * IA (global input) INTEGER
110 * The row index in the global array A indicating the first
111 * row of sub( A ).
112 *
113 * JA (global input) INTEGER
114 * The column index in the global array A indicating the
115 * first column of sub( A ).
116 *
117 * DESCA (global and local input) INTEGER array of dimension DLEN_.
118 * The array descriptor for the distributed matrix A.
119 *
120 * IPIV (local output) INTEGER array, dimension ( LOCr(M_A)+MB_A )
121 * This array contains the pivoting information.
122 * IPIV( i ) is the global row that local row i was swapped with.
123 * This array is tied to the distributed matrix A.
124 *
125 * B (local input/local output) COMPLEX pointer into the
126 * local memory to an array of dimension
127 * (LLD_B,LOCc(JB+NRHS-1)). On entry, the right hand side
128 * distributed matrix sub( B ). On exit, if INFO = 0, sub( B )
129 * is overwritten by the solution distributed matrix X.
130 *
131 * IB (global input) INTEGER
132 * The row index in the global array B indicating the first
133 * row of sub( B ).
134 *
135 * JB (global input) INTEGER
136 * The column index in the global array B indicating the
137 * first column of sub( B ).
138 *
139 * DESCB (global and local input) INTEGER array of dimension DLEN_.
140 * The array descriptor for the distributed matrix B.
141 *
142 * INFO (global output) INTEGER
143 * = 0: successful exit
144 * < 0: If the i-th argument is an array and the j-th entry had
145 * an illegal value, then INFO = -(i*100+j); if the i-th
146 * argument is a scalar and had an illegal value, then
147 * INFO = -i.
148 * > 0: If INFO = K, U(IA+K-1,JA+K-1) is exactly zero.
149 * The factorization has been completed, but the factor U
150 * is exactly singular, so the solution could not be
151 * computed.
152 *
153 * =====================================================================
154 *
155 *     .. Parameters ..
156       INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
157      $                   LLD_, MB_, M_, NB_, N_, RSRC_
158       PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
159      $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
160      $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
161 *     ..
162 *     .. Local Scalars ..
163       INTEGER            IAROW, IBROW, ICOFFA, ICTXT, IROFFA, IROFFB,
164      $                   MYCOL, MYROW, NPCOL, NPROW
165 *     ..
166 *     .. Local Arrays ..
167       INTEGER            IDUM1( 1 ), IDUM2( 1 )
168 *     ..
169 *     .. External Subroutines ..
170       EXTERNAL           BLACS_GRIDINFO, CHK1MAT, PCHK2MAT, PCGETRF,
171      $                   PCGETRS, PXERBLA
172 *     ..
173 *     .. External Functions ..
174       INTEGER            INDXG2P
175       EXTERNAL           INDXG2P
176 *     ..
177 *     .. Intrinsic Functions ..
178       INTRINSIC          MOD
179 *     ..
180 *     .. Executable Statements ..
181 *
182 *     Get grid parameters
183 *
184       ICTXT = DESCA( CTXT_ )
185       CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
186 *
187 *     Test the input parameters
188 *
189       INFO = 0
190       IF( NPROW.EQ.-1 ) THEN
191          INFO = -(600+CTXT_)
192       ELSE
193          CALL CHK1MAT( N, 1, N, 1, IA, JA, DESCA, 6, INFO )
194          CALL CHK1MAT( N, 1, NRHS, 2, IB, JB, DESCB, 11, INFO )
195          IF( INFO.EQ.0 ) THEN
196             IAROW = INDXG2P( IA, DESCA( MB_ ), MYROW, DESCA( RSRC_ ),
197      $                       NPROW )
198             IBROW = INDXG2P( IB, DESCB( MB_ ), MYROW, DESCB( RSRC_ ),
199      $                       NPROW )
200             IROFFA = MOD( IA-1, DESCA( MB_ ) )
201             ICOFFA = MOD( JA-1, DESCA( NB_ ) )
202             IROFFB = MOD( IB-1, DESCB( MB_ ) )
203             IF( IROFFA.NE.0 ) THEN
204                INFO = -4
205             ELSE IF( ICOFFA.NE.0 ) THEN
206                INFO = -5
207             ELSE IF( DESCA( MB_ ).NE.DESCA( NB_ ) ) THEN
208                INFO = -(600+NB_)
209             ELSE IF( IBROW.NE.IAROW .OR. ICOFFA.NE.IROFFB ) THEN
210                INFO = -9
211             ELSE IF( DESCB( MB_ ).NE.DESCA( NB_ ) ) THEN
212                INFO = -(1100+NB_)
213             ELSE IF( ICTXT.NE.DESCB( CTXT_ ) ) THEN
214                INFO = -(1100+CTXT_)
215             END IF
216          END IF
217          CALL PCHK2MAT( N, 1, N, 1, IA, JA, DESCA, 6, N, 1, NRHS, 2,
218      $                  IB, JB, DESCB, 11, 0, IDUM1, IDUM2, INFO )
219       END IF
220 *
221       IF( INFO.NE.0 ) THEN
222          CALL PXERBLA( ICTXT, 'PCGESV', -INFO )
223          RETURN
224       END IF
225 *
226 *     Compute the LU factorization of sub( A ).
227 *
228       CALL PCGETRF( N, N, A, IA, JA, DESCA, IPIV, INFO )
229 *
230       IF( INFO.EQ.0 ) THEN
231 *
232 *        Solve the system sub( A ) * X = sub( B ), overwriting sub( B )
233 *        with X.
234 *
235          CALL PCGETRS( 'No transpose', N, NRHS, A, IA, JA, DESCA, IPIV,
236      $                 B, IB, JB, DESCB, INFO )
237 *
238       END IF
239 *
240       RETURN
241 *
242 *     End of PCGESV
243 *
244       END
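
As a worked illustration of the LOCr/LOCc bounds quoted in the Notes above (the numbers are arbitrary and serve only the arithmetic): a global row dimension M = 1000 distributed with MB_A = 64 over NPROW = 4 process rows gives ceil(1000/64) = 16 row blocks, of which at most ceil(16/4) = 4 land in any one process row, so LOCr(1000) <= 4*64 = 256. The exact counts returned by NUMROC for the four process rows, counted from RSRC_A, are 256, 256, 256 and 232, which sum to 1000. In a calling program these local extents determine LLD_A and the size of the local piece of A; a typical query is sketched below, where MP and NQ are hypothetical local variables and MB_, NB_, RSRC_, CSRC_ are the descriptor-entry indices defined in the Parameters section of the listing:

      MP = NUMROC( N, DESCA( MB_ ), MYROW, DESCA( RSRC_ ), NPROW )
      NQ = NUMROC( N, DESCA( NB_ ), MYCOL, DESCA( CSRC_ ), NPCOL )

Here MP = LOCr( N ) rows and NQ = LOCc( N ) columns of sub( A ) reside on the process at grid coordinates (MYROW, MYCOL), so the caller needs LLD_A >= MAX( 1, MP ).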
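
The INFO encoding described in the Arguments section can be read off directly from the checks in the code: when the context entries of DESCA and DESCB disagree, for example, the routine sets INFO = -(1100+CTXT_) = -1102, which decodes as "argument 11 (DESCB), entry 2 (CTXT_) is illegal"; a scalar error such as a row offset in IA that is not aligned on a block boundary simply gives INFO = -4, since IA is the fourth argument.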
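The Purpose and Arguments sections above describe the calling sequence in the abstract; the following stand-alone driver is a minimal sketch of how PCGESV is typically invoked, and it is not part of the library source. The grid shape (2 x 2), the sizes N = 4, NRHS = 1, NB = 2 and the test values are arbitrary choices made only for illustration; the program relies on the standard ScaLAPACK tool routines DESCINIT, NUMROC and PCELSET, assumes an MPI-based BLACS, and must be run on exactly four processes. Error handling beyond printing INFO is omitted.

      PROGRAM PCGESVEX
*
*     Illustrative sizes only: a 4-by-4 complex system with one right
*     hand side, distributed in 2-by-2 blocks over a 2-by-2 grid.
*
      INTEGER            N, NRHS, NB, NPROW, NPCOL
      PARAMETER          ( N = 4, NRHS = 1, NB = 2, NPROW = 2,
     $                     NPCOL = 2 )
      INTEGER            ICTXT, MYROW, MYCOL, MLOC, NLOC, INFO, I, J
      INTEGER            DESCA( 9 ), DESCB( 9 ), IPIV( N+NB )
      COMPLEX            A( N*N ), B( N*NRHS )
      INTEGER            NUMROC
      EXTERNAL           NUMROC, BLACS_GET, BLACS_GRIDINIT,
     $                   BLACS_GRIDINFO, BLACS_GRIDEXIT, BLACS_EXIT,
     $                   DESCINIT, PCELSET, PCGESV
*
*     Create the NPROW-by-NPCOL BLACS process grid.
*
      CALL BLACS_GET( -1, 0, ICTXT )
      CALL BLACS_GRIDINIT( ICTXT, 'Row-major', NPROW, NPCOL )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Local extents and descriptors for the block-cyclic layout
*     (square blocks MB_A = NB_A = NB, first block on process (0,0)).
*     The local piece of A is MLOC-by-NLOC; the static bound N*N
*     used above is large enough for this small example.
*
      MLOC = NUMROC( N, NB, MYROW, 0, NPROW )
      NLOC = NUMROC( N, NB, MYCOL, 0, NPCOL )
      CALL DESCINIT( DESCA, N, N, NB, NB, 0, 0, ICTXT,
     $               MAX( 1, MLOC ), INFO )
      CALL DESCINIT( DESCB, N, NRHS, NB, NB, 0, 0, ICTXT,
     $               MAX( 1, MLOC ), INFO )
*
*     Fill the distributed matrices element by element with the tool
*     routine PCELSET: a diagonally dominant test matrix and an
*     all-ones right hand side (values chosen arbitrarily).
*
      DO 20 J = 1, N
         DO 10 I = 1, N
            IF( I.EQ.J ) THEN
               CALL PCELSET( A, I, J, DESCA, CMPLX( N, 0 ) )
            ELSE
               CALL PCELSET( A, I, J, DESCA, ( 1.0E+0, 0.0E+0 ) )
            END IF
   10    CONTINUE
         CALL PCELSET( B, J, 1, DESCB, ( 1.0E+0, 0.0E+0 ) )
   20 CONTINUE
*
*     Solve sub( A ) * X = sub( B ); on exit B holds the solution X
*     and A holds the L and U factors of the LU factorization.
*
      CALL PCGESV( N, NRHS, A, 1, 1, DESCA, IPIV, B, 1, 1, DESCB,
     $             INFO )
      IF( MYROW.EQ.0 .AND. MYCOL.EQ.0 )
     $   WRITE( *, * ) 'PCGESV returned INFO = ', INFO
*
      CALL BLACS_GRIDEXIT( ICTXT )
      CALL BLACS_EXIT( 0 )
      END

Because PCGESV leaves the L and U factors of sub( A ) in A and the pivot information in IPIV, any additional right hand sides can be solved later with a direct call to PCGETRS on the same factored data, which is what PCGESV itself does after PCGETRF.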