ScaLAPACK 2.0.2
ScaLAPACK: Scalable Linear Algebra PACKage
pdgetrf.f
      SUBROUTINE PDGETRF( M, N, A, IA, JA, DESCA, IPIV, INFO )
*
*  -- ScaLAPACK routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     May 25, 2001
*
*     .. Scalar Arguments ..
      INTEGER            IA, INFO, JA, M, N
*     ..
*     .. Array Arguments ..
      INTEGER            DESCA( * ), IPIV( * )
      DOUBLE PRECISION   A( * )
*     ..
*
*  Purpose
*  =======
*
*  PDGETRF computes an LU factorization of a general M-by-N distributed
*  matrix sub( A ) = A(IA:IA+M-1,JA:JA+N-1) using partial pivoting with
*  row interchanges.
*
*  The factorization has the form sub( A ) = P * L * U, where P is a
*  permutation matrix, L is lower triangular with unit diagonal
*  elements (lower trapezoidal if M > N), and U is upper triangular
*  (upper trapezoidal if M < N). L and U are stored in sub( A ).
*
*  This is the right-looking Parallel Level 3 BLAS version of the
*  algorithm.
*
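*  In outline, each step factors a block column (panel) of sub( A )
*  and at once applies the resulting transformations to the part of
*  the matrix to its right:
*
*     [ A11 ]     [ L11 ]
*     [     ] = P [     ] * U11            (panel factorization)
*     [ A21 ]     [ L21 ]
*
*     U12 := inv( L11 ) * A12              (triangular solve, PDTRSM)
*     A22 := A22 - L21 * U12               (rank-JB update, PDGEMM)
*
*  so that the bulk of the arithmetic is performed by Level 3 PBLAS.
*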
*  Notes
*  =====
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  DESCA.  In the following comments, the character _ should be read
*  as "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DTYPE_A(global) DESCA( DTYPE_ ) The descriptor type.  In this case,
*                                  DTYPE_A = 1.
*  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
*                                 the BLACS process grid A is
*                                 distributed over. The context itself
*                                 is global, but the handle (the
*                                 integer value) may vary.
*  M_A    (global) DESCA( M_ )    The number of rows in the global
*                                 array A.
*  N_A    (global) DESCA( N_ )    The number of columns in the global
*                                 array A.
*  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
*                                 the rows of the array.
*  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
*                                 the columns of the array.
*  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
*
*  This routine requires square block decomposition ( MB_A = NB_A ).
*
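*  For example, the local storage required for A on each process can
*  be obtained from NUMROC (a sketch; MP, NQ and LLD are local names
*  used here for illustration only):
*
*     MP  = NUMROC( DESCA( M_ ), DESCA( MB_ ), MYROW, DESCA( RSRC_ ),
*                   NPROW )
*     NQ  = NUMROC( DESCA( N_ ), DESCA( NB_ ), MYCOL, DESCA( CSRC_ ),
*                   NPCOL )
*     LLD = MAX( 1, MP )
*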
*  Arguments
*  =========
*
*  M       (global input) INTEGER
*          The number of rows to be operated on, i.e. the number of rows
*          of the distributed submatrix sub( A ). M >= 0.
*
*  N       (global input) INTEGER
*          The number of columns to be operated on, i.e. the number of
*          columns of the distributed submatrix sub( A ). N >= 0.
*
*  A       (local input/local output) DOUBLE PRECISION pointer into the
*          local memory to an array of dimension (LLD_A, LOCc(JA+N-1)).
*          On entry, this array contains the local pieces of the M-by-N
*          distributed matrix sub( A ) to be factored. On exit, this
*          array contains the local pieces of the factors L and U from
*          the factorization sub( A ) = P*L*U; the unit diagonal
*          elements of L are not stored.
*
*  IA      (global input) INTEGER
*          The row index in the global array A indicating the first
*          row of sub( A ).
*
*  JA      (global input) INTEGER
*          The column index in the global array A indicating the
*          first column of sub( A ).
*
*  DESCA   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor for the distributed matrix A.
*
*  IPIV    (local output) INTEGER array, dimension ( LOCr(M_A)+MB_A )
*          This array contains the pivoting information.
*          IPIV(i) is the global row that local row i was swapped
*          with.  This array is tied to the distributed matrix A.
*
*  INFO    (global output) INTEGER
*          = 0:  successful exit
*          < 0:  If the i-th argument is an array and the j-th entry
*                had an illegal value, then INFO = -(i*100+j); if the
*                i-th argument is a scalar and had an illegal value,
*                then INFO = -i.
*          > 0:  If INFO = K, U(IA+K-1,JA+K-1) is exactly zero.
*                The factorization has been completed, but the factor U
*                is exactly singular, and division by zero will occur
*                if it is used to solve a system of equations.
*
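*  A minimal calling sketch; the grid shape, the block size NB, and
*  the zero source offsets are illustrative assumptions, not
*  requirements of this routine (beyond MB_A = NB_A):
*
*     CALL BLACS_GET( -1, 0, ICTXT )
*     CALL BLACS_GRIDINIT( ICTXT, 'Row-major', NPROW, NPCOL )
*     CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*     MP = NUMROC( M, NB, MYROW, 0, NPROW )
*     NQ = NUMROC( N, NB, MYCOL, 0, NPCOL )
*     CALL DESCINIT( DESCA, M, N, NB, NB, 0, 0, ICTXT,
*                    MAX( 1, MP ), INFO )
*     ... fill the local MP-by-NQ piece of A ...
*     CALL PDGETRF( M, N, A, 1, 1, DESCA, IPIV, INFO )
*
*  Here A needs at least MAX( 1, MP )*NQ elements and IPIV at least
*  MP + NB, per the argument descriptions above.
*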
*  =====================================================================
*
*     .. Parameters ..
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
      DOUBLE PRECISION   ONE
      PARAMETER          ( ONE = 1.0D+0 )
*     ..
*     .. Local Scalars ..
      CHARACTER          COLBTOP, COLCTOP, ROWBTOP
      INTEGER            I, ICOFF, ICTXT, IINFO, IN, IROFF, J, JB, JN,
     $                   MN, MYCOL, MYROW, NPCOL, NPROW
*     ..
*     .. Local Arrays ..
      INTEGER            IDUM1( 1 ), IDUM2( 1 )
*     ..
*     .. External Subroutines ..
      EXTERNAL           BLACS_GRIDINFO, CHK1MAT, IGAMN2D, PCHK1MAT,
     $                   PB_TOPGET, PB_TOPSET, PDGEMM, PDGETF2,
     $                   PDLASWP, PDTRSM, PXERBLA
*     ..
*     .. External Functions ..
      INTEGER            ICEIL
      EXTERNAL           ICEIL
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          MIN, MOD
*     ..
*     .. Executable Statements ..
*
*     Get grid parameters
*
      ICTXT = DESCA( CTXT_ )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Test the input parameters
*
      INFO = 0
      IF( NPROW.EQ.-1 ) THEN
         INFO = -(600+CTXT_)
      ELSE
         CALL CHK1MAT( M, 1, N, 2, IA, JA, DESCA, 6, INFO )
         IF( INFO.EQ.0 ) THEN
            IROFF = MOD( IA-1, DESCA( MB_ ) )
            ICOFF = MOD( JA-1, DESCA( NB_ ) )
            IF( IROFF.NE.0 ) THEN
               INFO = -4
            ELSE IF( ICOFF.NE.0 ) THEN
               INFO = -5
            ELSE IF( DESCA( MB_ ).NE.DESCA( NB_ ) ) THEN
               INFO = -(600+NB_)
            END IF
         END IF
         CALL PCHK1MAT( M, 1, N, 2, IA, JA, DESCA, 6, 0, IDUM1,
     $                  IDUM2, INFO )
      END IF
*
      IF( INFO.NE.0 ) THEN
         CALL PXERBLA( ICTXT, 'PDGETRF', -INFO )
         RETURN
      END IF
*
*     Quick return if possible
*
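*     (A global matrix with a single row needs no elimination: the
*     only possible pivot is row 1.)
*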
      IF( DESCA( M_ ).EQ.1 ) THEN
         IPIV( 1 ) = 1
         RETURN
      ELSE IF( M.EQ.0 .OR. N.EQ.0 ) THEN
         RETURN
      END IF
*
*     Split-ring topology for the communication along process rows
*
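*     (The topologies in effect are saved first and restored on exit;
*     the split-ring broadcast lets the panel broadcast be pipelined
*     with the updates of successive iterations.)
*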
      CALL PB_TOPGET( ICTXT, 'Broadcast', 'Rowwise', ROWBTOP )
      CALL PB_TOPGET( ICTXT, 'Broadcast', 'Columnwise', COLBTOP )
      CALL PB_TOPGET( ICTXT, 'Combine', 'Columnwise', COLCTOP )
      CALL PB_TOPSET( ICTXT, 'Broadcast', 'Rowwise', 'S-ring' )
      CALL PB_TOPSET( ICTXT, 'Broadcast', 'Columnwise', ' ' )
      CALL PB_TOPSET( ICTXT, 'Combine', 'Columnwise', ' ' )
*
*     Handle the first block of columns separately
*
      MN = MIN( M, N )
      IN = MIN( ICEIL( IA, DESCA( MB_ ) )*DESCA( MB_ ), IA+M-1 )
      JN = MIN( ICEIL( JA, DESCA( NB_ ) )*DESCA( NB_ ), JA+MN-1 )
      JB = JN - JA + 1
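*
*     (IN and JN are the global indices of the last row and column of
*     the first, possibly partial, block of sub( A ); JB is the width
*     of that first block of columns.)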
*
*     Factor diagonal and subdiagonal blocks and test for exact
*     singularity.
*
      CALL PDGETF2( M, JB, A, IA, JA, DESCA, IPIV, INFO )
*
      IF( JB+1.LE.N ) THEN
*
*        Apply interchanges to columns JN+1:JA+N-1.
*
         CALL PDLASWP( 'Forward', 'Rows', N-JB, A, IA, JN+1, DESCA,
     $                 IA, IN, IPIV )
*
*        Compute block row of U.
*
         CALL PDTRSM( 'Left', 'Lower', 'No transpose', 'Unit', JB,
     $                N-JB, ONE, A, IA, JA, DESCA, A, IA, JN+1, DESCA )
*
         IF( JB+1.LE.M ) THEN
*
*           Update trailing submatrix.
*
            CALL PDGEMM( 'No transpose', 'No transpose', M-JB, N-JB, JB,
     $                   -ONE, A, IN+1, JA, DESCA, A, IA, JN+1, DESCA,
     $                   ONE, A, IN+1, JN+1, DESCA )
*
         END IF
      END IF
*
*     Loop over the remaining blocks of columns.
*
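*     (Each pass factors the JB-wide panel whose top-left corner is
*     at global position ( I, J ), applies the interchanges across
*     the full width of sub( A ), and updates the trailing
*     submatrix.)
*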
      DO 10 J = JN+1, JA+MN-1, DESCA( NB_ )
         JB = MIN( MN-J+JA, DESCA( NB_ ) )
         I = IA + J - JA
*
*        Factor diagonal and subdiagonal blocks and test for exact
*        singularity.
*
         CALL PDGETF2( M-J+JA, JB, A, I, J, DESCA, IPIV, IINFO )
*
         IF( INFO.EQ.0 .AND. IINFO.GT.0 )
     $      INFO = IINFO + J - JA
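*        (IINFO from PDGETF2 is relative to the current panel; adding
*        J-JA converts it to the pivot position within sub( A ).)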
*
*        Apply interchanges to columns JA:J-1.
*
         CALL PDLASWP( 'Forward', 'Rowwise', J-JA, A, IA, JA, DESCA,
     $                 I, I+JB-1, IPIV )
*
         IF( J-JA+JB+1.LE.N ) THEN
*
*           Apply interchanges to columns J+JB:JA+N-1.
*
            CALL PDLASWP( 'Forward', 'Rowwise', N-J-JB+JA, A, IA, J+JB,
     $                    DESCA, I, I+JB-1, IPIV )
*
*           Compute block row of U.
*
            CALL PDTRSM( 'Left', 'Lower', 'No transpose', 'Unit', JB,
     $                   N-J-JB+JA, ONE, A, I, J, DESCA, A, I, J+JB,
     $                   DESCA )
*
            IF( J-JA+JB+1.LE.M ) THEN
*
*              Update trailing submatrix.
*
               CALL PDGEMM( 'No transpose', 'No transpose', M-J-JB+JA,
     $                      N-J-JB+JA, JB, -ONE, A, I+JB, J, DESCA, A,
     $                      I, J+JB, DESCA, ONE, A, I+JB, J+JB, DESCA )
*
            END IF
         END IF
*
   10 CONTINUE
*
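*     All processes agree on the first singular pivot, if any: INFO
*     is mapped to the sentinel MN+1 on success, the global minimum
*     is taken across the process grid, and the sentinel is mapped
*     back to zero.
*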
      IF( INFO.EQ.0 )
     $   INFO = MN + 1
      CALL IGAMN2D( ICTXT, 'Rowwise', ' ', 1, 1, INFO, 1, IDUM1, IDUM2,
     $              -1, -1, MYCOL )
      IF( INFO.EQ.MN+1 )
     $   INFO = 0
*
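*     Restore the communication topologies saved on entry.
*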
      CALL PB_TOPSET( ICTXT, 'Broadcast', 'Rowwise', ROWBTOP )
      CALL PB_TOPSET( ICTXT, 'Broadcast', 'Columnwise', COLBTOP )
      CALL PB_TOPSET( ICTXT, 'Combine', 'Columnwise', COLCTOP )
*
      RETURN
*
*     End of PDGETRF
*
      END