ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pdgetf2.f
      SUBROUTINE PDGETF2( M, N, A, IA, JA, DESCA, IPIV, INFO )
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     May 1, 1997
*
*     .. Scalar Arguments ..
      INTEGER            IA, INFO, JA, M, N
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * ), IPIV( * )
      DOUBLE PRECISION   A( * )
*     ..
*
*  Purpose
*  =======
*
*  PDGETF2 computes an LU factorization of a general M-by-N
*  distributed matrix sub( A ) = A(IA:IA+M-1,JA:JA+N-1) using
*  partial pivoting with row interchanges.
*
*  The factorization has the form sub( A ) = P * L * U, where P is a
*  permutation matrix, L is lower triangular with unit diagonal
*  elements (lower trapezoidal if M > N), and U is upper triangular
*  (upper trapezoidal if M < N).
*
*  This is the right-looking Parallel Level 2 BLAS version of the
*  algorithm.
*
*  Notes
*  =====
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.  In the following comments, the character _ should be read
*  as "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type.  In this case,
*                                  DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ )  The BLACS context handle, indicating
*                                  the BLACS process grid A is
*                                  distributed over.  The context
*                                  itself is global, but the handle
*                                  (the integer value) may vary.
*  M_A    (global) DESCA( M_ )     The number of rows in the global
*                                  array A.
*  N_A    (global) DESCA( N_ )     The number of columns in the global
*                                  array A.
*  MB_A   (global) DESCA( MB_ )    The blocking factor used to
*                                  distribute the rows of the array.
*  NB_A   (global) DESCA( NB_ )    The blocking factor used to
*                                  distribute the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ )  The process row over which the first
*                                  row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ )  The process column over which the
*                                  first column of the array A is
*                                  distributed.
*  LLD_A  (local)  DESCA( LLD_ )   The leading dimension of the local
*                                  array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
*
*  This routine requires N <= NB_A-MOD(JA-1, NB_A) and square block
*  decomposition ( MB_A = NB_A ).
*
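*  As a small illustration of the notation above (the matrix size,
*  block size and source process below are assumptions for the
*  example, not part of this routine's interface), a caller could
*  obtain its grid coordinates, size the local array and build DESCA
*  with the ScaLAPACK tools BLACS_GRIDINFO, NUMROC and DESCINIT:
*
*     For a 1000-by-64 global A with MB_A = NB_A = 64 and
*     RSRC_A = CSRC_A = 0:
*        CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*        MP  = NUMROC( 1000, 64, MYROW, 0, NPROW )
*        NQ  = NUMROC( 64, 64, MYCOL, 0, NPCOL )
*        LLD = MAX( 1, MP )
*        CALL DESCINIT( DESCA, 1000, 64, 64, 64, 0, 0, ICTXT,
*                       LLD, INFO )
*     The local piece of A then needs at least MP*NQ entries.
*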
*  Arguments
*  =========
*
*  M       (global input) INTEGER
*          The number of rows to be operated on, i.e. the number of
*          rows of the distributed submatrix sub( A ).  M >= 0.
*
*  N       (global input) INTEGER
*          The number of columns to be operated on, i.e. the number of
*          columns of the distributed submatrix sub( A ).
*          NB_A-MOD(JA-1, NB_A) >= N >= 0.
*
*  A       (local input/local output) DOUBLE PRECISION pointer into the
*          local memory to an array of dimension (LLD_A, LOCc(JA+N-1)).
*          On entry, this array contains the local pieces of the M-by-N
*          distributed matrix sub( A ).  On exit, this array contains
*          the local pieces of the factors L and U from the
*          factorization sub( A ) = P*L*U; the unit diagonal elements
*          of L are not stored.
*
*  IA      (global input) INTEGER
*          The row index in the global array A indicating the first
*          row of sub( A ).
*
*  JA      (global input) INTEGER
*          The column index in the global array A indicating the
*          first column of sub( A ).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor for the distributed matrix A.
*
*  IPIV    (local output) INTEGER array, dimension ( LOCr(M_A)+MB_A )
*          This array contains the pivoting information.
*          IPIV(i) -> The global row that local row i was swapped with.
*          This array is tied to the distributed matrix A.
*
*  INFO    (local output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*          > 0:  If INFO = K, U(IA+K-1,JA+K-1) is exactly zero.
*                The factorization has been completed, but the factor
*                U is exactly singular, and division by zero will
*                occur if it is used to solve a system of equations.
*
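*  As a sketch of a typical call on one block column (the matrix and
*  grid sizes are assumptions, chosen to satisfy the requirement
*  N <= NB_A-MOD(JA-1, NB_A); within ScaLAPACK this routine serves as
*  the panel factorization inside the blocked driver PDGETRF):
*
*     CALL PDGETF2( 1000, 64, A, 1, 1, DESCA, IPIV, INFO )
*     IF( INFO.GT.0 )
*    $   WRITE( *, * ) 'U is exactly singular at diagonal ', INFO
*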
*  =====================================================================
*
*     .. Parameters ..
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
      DOUBLE PRECISION   ONE, ZERO
      PARAMETER          ( ONE = 1.0D+0, ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      CHARACTER          ROWBTOP
      INTEGER            I, IACOL, IAROW, ICOFF, ICTXT, IIA, IROFF, J,
     $                   JJA, MN, MYCOL, MYROW, NPCOL, NPROW
      DOUBLE PRECISION   GMAX
*     ..
*     .. External Subroutines ..
      EXTERNAL           BLACS_ABORT, BLACS_GRIDINFO, CHK1MAT, IGEBR2D,
     $                   IGEBS2D, INFOG2L, PDAMAX, PDGER,
     $                   PDSCAL, PDSWAP, PB_TOPGET, PXERBLA
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          MIN, MOD
*     ..
*     .. Executable Statements ..
*
*     Get grid parameters.
*
      ICTXT = DESCA( CTXT_ )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Test the input parameters.
*
      INFO = 0
      IF( NPROW.EQ.-1 ) THEN
         INFO = -(600+CTXT_)
      ELSE
         CALL CHK1MAT( M, 1, N, 2, IA, JA, DESCA, 6, INFO )
         IF( INFO.EQ.0 ) THEN
            IROFF = MOD( IA-1, DESCA( MB_ ) )
            ICOFF = MOD( JA-1, DESCA( NB_ ) )
            IF( N+ICOFF.GT.DESCA( NB_ ) ) THEN
               INFO = -2
            ELSE IF( IROFF.NE.0 ) THEN
               INFO = -4
            ELSE IF( ICOFF.NE.0 ) THEN
               INFO = -5
            ELSE IF( DESCA( MB_ ).NE.DESCA( NB_ ) ) THEN
               INFO = -(600+NB_)
            END IF
         END IF
      END IF
*
      IF( INFO.NE.0 ) THEN
         CALL PXERBLA( ICTXT, 'PDGETF2', -INFO )
         CALL BLACS_ABORT( ICTXT, 1 )
         RETURN
      END IF
*
*     Quick return if possible
*
      IF( M.EQ.0 .OR. N.EQ.0 )
     $   RETURN
*
      MN = MIN( M, N )
      CALL INFOG2L( IA, JA, DESCA, NPROW, NPCOL, MYROW, MYCOL, IIA, JJA,
     $              IAROW, IACOL )
      CALL PB_TOPGET( ICTXT, 'Broadcast', 'Rowwise', ROWBTOP )
*
      IF( MYCOL.EQ.IACOL ) THEN
         DO 10 J = JA, JA+MN-1
            I = IA + J - JA
*
*           Find pivot and test for singularity.
*
            CALL PDAMAX( M-J+JA, GMAX, IPIV( IIA+J-JA ), A, I, J,
     $                   DESCA, 1 )
            IF( GMAX.NE.ZERO ) THEN
*
*              Apply the row interchanges to columns JA:JA+N-1
*
               CALL PDSWAP( N, A, I, JA, DESCA, DESCA( M_ ), A,
     $                      IPIV( IIA+J-JA ), JA, DESCA, DESCA( M_ ) )
*
*              Compute elements I+1:IA+M-1 of J-th column.
*
               IF( J-JA+1.LT.M )
     $            CALL PDSCAL( M-J+JA-1, ONE / GMAX, A, I+1, J,
     $                         DESCA, 1 )
            ELSE IF( INFO.EQ.0 ) THEN
               INFO = J - JA + 1
            END IF
*
*           Update trailing submatrix
*
            IF( J-JA+1.LT.MN ) THEN
               CALL PDGER( M-J+JA-1, N-J+JA-1, -ONE, A, I+1, J, DESCA,
     $                     1, A, I, J+1, DESCA, DESCA( M_ ), A, I+1,
     $                     J+1, DESCA )
            END IF
   10    CONTINUE
*
         CALL IGEBS2D( ICTXT, 'Rowwise', ROWBTOP, MN, 1, IPIV( IIA ),
     $                 MN )
*
      ELSE
*
         CALL IGEBR2D( ICTXT, 'Rowwise', ROWBTOP, MN, 1, IPIV( IIA ),
     $                 MN, MYROW, IACOL )
*
      END IF
*
      RETURN
*
*     End of PDGETF2
*
      END