ScaLAPACK 2.1: Scalable Linear Algebra PACKage
pzposv.f
Go to the documentation of this file.
1  SUBROUTINE pzposv( UPLO, N, NRHS, A, IA, JA, DESCA, B, IB, JB,
2  \$ DESCB, INFO )
3 *
4 * -- ScaLAPACK routine (version 1.7) --
5 * University of Tennessee, Knoxville, Oak Ridge National Laboratory,
6 * and University of California, Berkeley.
7 * May 1, 1997
8 *
9 * .. Scalar Arguments ..
10  CHARACTER UPLO
11  INTEGER IA, IB, INFO, JA, JB, N, NRHS
12 * ..
13 * .. Array Arguments ..
14  INTEGER DESCA( * ), DESCB( * )
15  COMPLEX*16 A( * ), B( * )
16 * ..
17 *
18 * Purpose
19 * =======
20 *
21 * PZPOSV computes the solution to a complex system of linear equations
22 *
23 * sub( A ) * X = sub( B ),
24 *
25 * where sub( A ) denotes A(IA:IA+N-1,JA:JA+N-1) and is an N-by-N
26 * hermitian distributed positive definite matrix and X and sub( B )
27 * denoting B(IB:IB+N-1,JB:JB+NRHS-1) are N-by-NRHS distributed
28 * matrices.
29 *
30 * The Cholesky decomposition is used to factor sub( A ) as
31 *
32 * sub( A ) = U**H * U, if UPLO = 'U', or
33 *
34 * sub( A ) = L * L**H, if UPLO = 'L',
35 *
36 * where U is an upper triangular matrix and L is a lower triangular
37 * matrix. The factored form of sub( A ) is then used to solve the
38 * system of equations.
39 *
40 * Notes
41 * =====
42 *
43 * Each global data object is described by an associated description
44 * vector. This vector stores the information required to establish
45 * the mapping between an object element and its corresponding process
46 * and memory location.
47 *
48 * Let A be a generic term for any 2D block cyclicly distributed array.
49 * Such a global array has an associated description vector DESCA.
50 * In the following comments, the character _ should be read as
51 * "of the global array".
52 *
53 * NOTATION STORED IN EXPLANATION
54 * --------------- -------------- --------------------------------------
55 * DTYPE_A(global) DESCA( DTYPE_ )The descriptor type. In this case,
56 * DTYPE_A = 1.
57 * CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
58 * the BLACS process grid A is distribu-
59 * ted over. The context itself is glo-
60 * bal, but the handle (the integer
61 * value) may vary.
62 * M_A (global) DESCA( M_ ) The number of rows in the global
63 * array A.
64 * N_A (global) DESCA( N_ ) The number of columns in the global
65 * array A.
66 * MB_A (global) DESCA( MB_ ) The blocking factor used to distribute
67 * the rows of the array.
68 * NB_A (global) DESCA( NB_ ) The blocking factor used to distribute
69 * the columns of the array.
70 * RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
71 * row of the array A is distributed.
72 * CSRC_A (global) DESCA( CSRC_ ) The process column over which the
73 * first column of the array A is
74 * distributed.
75 * LLD_A (local) DESCA( LLD_ ) The leading dimension of the local
76 * array. LLD_A >= MAX(1,LOCr(M_A)).
77 *
78 * Let K be the number of rows or columns of a distributed matrix,
79 * and assume that its process grid has dimension p x q.
80 * LOCr( K ) denotes the number of elements of K that a process
81 * would receive if K were distributed over the p processes of its
82 * process column.
83 * Similarly, LOCc( K ) denotes the number of elements of K that a
84 * process would receive if K were distributed over the q processes of
85 * its process row.
86 * The values of LOCr() and LOCc() may be determined via a call to the
87 * ScaLAPACK tool function, NUMROC:
88 * LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
89 * LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
90 * An upper bound for these quantities may be computed by:
91 * LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
92 * LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
93 *
94 * This routine requires square block decomposition ( MB_A = NB_A ).
95 *
96 * Arguments
97 * =========
98 *
99 * UPLO (global input) CHARACTER
100 * = 'U': Upper triangle of sub( A ) is stored;
101 * = 'L': Lower triangle of sub( A ) is stored.
102 *
103 * N (global input) INTEGER
104 * The number of rows and columns to be operated on, i.e. the
105 * order of the distributed submatrix sub( A ). N >= 0.
106 *
107 * NRHS (global input) INTEGER
108 * The number of right hand sides, i.e., the number of columns
109 * of the distributed submatrix sub( B ). NRHS >= 0.
110 *
111 * A (local input/local output) COMPLEX*16 pointer into the
112 * local memory to an array of dimension (LLD_A, LOCc(JA+N-1)).
113 * On entry, this array contains the local pieces of the
114 * N-by-N symmetric distributed matrix sub( A ) to be factored.
115 * If UPLO = 'U', the leading N-by-N upper triangular part of
116 * sub( A ) contains the upper triangular part of the matrix,
117 * and its strictly lower triangular part is not referenced.
118 * If UPLO = 'L', the leading N-by-N lower triangular part of
119 * sub( A ) contains the lower triangular part of the distribu-
120 * ted matrix, and its strictly upper triangular part is not
121 * referenced. On exit, if INFO = 0, this array contains the
122 * local pieces of the factor U or L from the Cholesky factori-
123 * zation sub( A ) = U**H*U or L*L**H.
124 *
125 * IA (global input) INTEGER
126 * The row index in the global array A indicating the first
127 * row of sub( A ).
128 *
129 * JA (global input) INTEGER
130 * The column index in the global array A indicating the
131 * first column of sub( A ).
132 *
133 * DESCA (global and local input) INTEGER array of dimension DLEN_.
134 * The array descriptor for the distributed matrix A.
135 *
136 * B (local input/local output) COMPLEX*16 pointer into the
137 * local memory to an array of dimension (LLD_B,LOC(JB+NRHS-1)).
138 * On entry, the local pieces of the right hand sides distribu-
139 * ted matrix sub( B ). On exit, if INFO = 0, sub( B ) is over-
140 * written with the solution distributed matrix X.
141 *
142 * IB (global input) INTEGER
143 * The row index in the global array B indicating the first
144 * row of sub( B ).
145 *
146 * JB (global input) INTEGER
147 * The column index in the global array B indicating the
148 * first column of sub( B ).
149 *
150 * DESCB (global and local input) INTEGER array of dimension DLEN_.
151 * The array descriptor for the distributed matrix B.
152 *
153 * INFO (global output) INTEGER
154 * = 0: successful exit
155 * < 0: If the i-th argument is an array and the j-entry had
156 * an illegal value, then INFO = -(i*100+j), if the i-th
157 * argument is a scalar and had an illegal value, then
158 * INFO = -i.
159 * > 0: If INFO = K, the leading minor of order K,
160 * A(IA:IA+K-1,JA:JA+K-1) is not positive definite, and
161 * the factorization could not be completed, and the
162 * solution has not been computed.
163 *
164 * =====================================================================
165 *
166 * .. Parameters ..
167  INTEGER BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
168  \$ lld_, mb_, m_, nb_, n_, rsrc_
169  parameter( block_cyclic_2d = 1, dlen_ = 9, dtype_ = 1,
170  \$ ctxt_ = 2, m_ = 3, n_ = 4, mb_ = 5, nb_ = 6,
171  \$ rsrc_ = 7, csrc_ = 8, lld_ = 9 )
172 * ..
173 * .. Local Scalars ..
174  LOGICAL UPPER
175  INTEGER IAROW, IBROW, ICOFFA, ICTXT, IROFFA, IROFFB,
176  \$ mycol, myrow, npcol, nprow
177 * ..
178 * .. Local Arrays ..
179  INTEGER IDUM1( 1 ), IDUM2( 1 )
180 * ..
181 * .. External Subroutines ..
182  EXTERNAL blacs_gridinfo, chk1mat, pchk2mat, pxerbla,
183  \$ pzpotrf, pzpotrs
184 * ..
185 * .. External Functions ..
186  LOGICAL LSAME
187  INTEGER INDXG2P
188  EXTERNAL indxg2p, lsame
189 * ..
190 * .. Intrinsic Functions ..
191  INTRINSIC ichar, mod
192 * ..
193 * .. Executable Statements ..
194 *
195 * Get grid parameters
196 *
197  ictxt = desca( ctxt_ )
198  CALL blacs_gridinfo( ictxt, nprow, npcol, myrow, mycol )
199 *
200 * Test the input parameters
201 *
202  info = 0
203  IF( nprow.EQ.-1 ) THEN
204  info = -(700+ctxt_)
205  ELSE
206  upper = lsame( uplo, 'U' )
207  CALL chk1mat( n, 2, n, 2, ia, ja, desca, 7, info )
208  IF( info.EQ.0 ) THEN
209  iarow = indxg2p( ia, desca( mb_ ), myrow, desca( rsrc_ ),
210  \$ nprow )
211  ibrow = indxg2p( ib, descb( mb_ ), myrow, descb( rsrc_ ),
212  \$ nprow )
213  iroffa = mod( ia-1, desca( mb_ ) )
214  iroffb = mod( ib-1, descb( mb_ ) )
215  icoffa = mod( ja-1, desca( nb_ ) )
216  IF ( .NOT.upper .AND. .NOT.lsame( uplo, 'L' ) ) THEN
217  info = -1
218  ELSE IF( iroffa.NE.0 ) THEN
219  info = -5
220  ELSE IF( icoffa.NE.0 ) THEN
221  info = -6
222  ELSE IF( desca( mb_ ).NE.desca( nb_ ) ) THEN
223  info = -(700+nb_)
224  ELSE IF( iroffb.NE.0 .OR. ibrow.NE.iarow ) THEN
225  info = -9
226  ELSE IF( descb( mb_ ).NE.desca( nb_ ) ) THEN
227  info = -(1000+nb_)
228  END IF
229  END IF
230  IF( upper ) THEN
231  idum1( 1 ) = ichar( 'U' )
232  ELSE
233  idum1( 1 ) = ichar( 'L' )
234  END IF
235  idum2( 1 ) = 1
236  CALL pchk2mat( n, 2, n, 2, ia, ja, desca, 7, n, 2, nrhs,
237  \$ 3, ib, jb, descb, 11, 1, idum1, idum2, info )
238  END IF
239 *
240  IF( info.NE.0 ) THEN
241  CALL pxerbla( ictxt, 'PZPOSV', -info )
242  RETURN
243  END IF
244 *
245 * Compute the Cholesky factorization sub( A ) = U'*U or L*L'.
246 *
247  CALL pzpotrf( uplo, n, a, ia, ja, desca, info )
248 *
249  IF( info.EQ.0 ) THEN
250 *
251 * Solve the system sub( A ) * X = sub( B ) overwriting sub( B )
252 * with X.
253 *
254  CALL pzpotrs( uplo, n, nrhs, a, ia, ja, desca, b, ib, jb,
255  \$ descb, info )
256 *
257  END IF
258 *
259  RETURN
260 *
261 * End of PZPOSV
262 *
263  END
pzposv
subroutine pzposv(UPLO, N, NRHS, A, IA, JA, DESCA, B, IB, JB, DESCB, INFO)
Definition: pzposv.f:3
pchk2mat
subroutine pchk2mat(MA, MAPOS0, NA, NAPOS0, IA, JA, DESCA, DESCAPOS0, MB, MBPOS0, NB, NBPOS0, IB, JB, DESCB, DESCBPOS0, NEXTRA, EX, EXPOS, INFO)
Definition: pchkxmat.f:175
chk1mat
subroutine chk1mat(MA, MAPOS0, NA, NAPOS0, IA, JA, DESCA, DESCAPOS0, INFO)
Definition: chk1mat.f:3
pxerbla
subroutine pxerbla(ICTXT, SRNAME, INFO)
Definition: pxerbla.f:2
pzpotrf
subroutine pzpotrf(UPLO, N, A, IA, JA, DESCA, INFO)
Definition: pzpotrf.f:2
pzpotrs
subroutine pzpotrs(UPLO, N, NRHS, A, IA, JA, DESCA, B, IB, JB, DESCB, INFO)
Definition: pzpotrs.f:3