ScaLAPACK  2.0.2
ScaLAPACK: Scalable Linear Algebra PACKage
pddtsv.f
00001       SUBROUTINE PDDTSV( N, NRHS, DL, D, DU, JA, DESCA, B, IB, DESCB,
00002      $                   WORK, LWORK, INFO )
00003 *
00004 *
00005 *
00006 *  -- ScaLAPACK routine (version 1.7) --
00007 *     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
00008 *     and University of California, Berkeley.
00009 *     November 15, 1997
00010 *
00011 *     .. Scalar Arguments ..
00012       INTEGER            IB, INFO, JA, LWORK, N, NRHS
00013 *     ..
00014 *     .. Array Arguments ..
00015       INTEGER            DESCA( * ), DESCB( * )
00016       DOUBLE PRECISION   B( * ), D( * ), DL( * ), DU( * ), WORK( * )
00017 *     ..
00018 *
00019 *
00020 *  Purpose
00021 *  =======
00022 *
00023 *  PDDTSV solves a system of linear equations
00024 *
00025 *                      A(1:N, JA:JA+N-1) * X = B(IB:IB+N-1, 1:NRHS)
00026 *
00027 *  where A(1:N, JA:JA+N-1) is an N-by-N real
00028 *  tridiagonal diagonally dominant-like distributed
00029 *  matrix.
00030 *
00031 *  Gaussian elimination without pivoting
00032 *  is used to factor a reordering
00033 *  of the matrix into L U.
00034 *
00035 *  See PDDTTRF and PDDTTRS for details.
00036 *
00037 *  =====================================================================
00038 *
00039 *  Arguments
00040 *  =========
00041 *
00042 *
00043 *  N       (global input) INTEGER
00044 *          The number of rows and columns to be operated on, i.e. the
00045 *          order of the distributed submatrix A(1:N, JA:JA+N-1). N >= 0.
00046 *
00047 *  NRHS    (global input) INTEGER
00048 *          The number of right hand sides, i.e., the number of columns
00049 *          of the distributed submatrix B(IB:IB+N-1, 1:NRHS).
00050 *          NRHS >= 0.
00051 *
00052 *  DL      (local input/local output) DOUBLE PRECISION pointer to local
00053 *          part of global vector storing the lower diagonal of the
00054 *          matrix. Globally, DL(1) is not referenced, and DL must be
00055 *          aligned with D.
00056 *          Must be of size >= DESCA( NB_ ).
00057 *          On exit, this array contains information on the factors
00058 *            of the matrix.
00059 *
00060 *  D       (local input/local output) DOUBLE PRECISION pointer to local
00061 *          part of global vector storing the main diagonal of the
00062 *          matrix.
00063 *          On exit, this array contains information on the factors
00064 *            of the matrix.
00065 *          Must be of size >= DESCA( NB_ ).
00066 *
00067 *  DU      (local input/local output) DOUBLE PRECISION pointer to local
00068 *          part of global vector storing the upper diagonal of the
00069 *          matrix. Globally, DU(N) is not referenced, and DU must be
00070 *          aligned with D.
00071 *          On exit, this array contains information on the factors
00072 *            of the matrix.
00073 *          Must be of size >= DESCA( NB_ ).
00074 *
00075 *  JA      (global input) INTEGER
00076 *          The index in the global array A that points to the start of
00077 *          the matrix to be operated on (which may be either all of A
00078 *          or a submatrix of A).
00079 *
00080 *  DESCA   (global and local input) INTEGER array of dimension DLEN.
00081 *          If 1D type (DTYPE_A=501 or 502), DLEN >= 7;
00082 *          if 2D type (DTYPE_A=1), DLEN >= 9.
00083 *          The array descriptor for the distributed matrix A.
00084 *          Contains information of mapping of A to memory. Please
00085 *          see NOTES below for full description and options.
00086 *
00087 *  B       (local input/local output) DOUBLE PRECISION pointer into
00088 *          local memory to an array of local lead dimension lld_b>=NB.
00089 *          On entry, this array contains the
00090 *          local pieces of the right hand sides
00091 *          B(IB:IB+N-1, 1:NRHS).
00092 *          On exit, this array contains the local pieces of the
00093 *          distributed solution matrix X.
00094 *
00095 *  IB      (global input) INTEGER
00096 *          The row index in the global array B that points to the first
00097 *          row of the matrix to be operated on (which may be either
00098 *          all of B or a submatrix of B).
00099 *
00100 *  DESCB   (global and local input) INTEGER array of dimension DLEN.
00101 *          If 1D type (DTYPE_B=502), DLEN >= 7;
00102 *          if 2D type (DTYPE_B=1), DLEN >= 9.
00103 *          The array descriptor for the distributed matrix B.
00104 *          Contains information of mapping of B to memory. Please
00105 *          see NOTES below for full description and options.
00106 *
00107 *  WORK    (local workspace/local output)
00108 *          DOUBLE PRECISION temporary workspace. This space may
00109 *          be overwritten in between calls to routines. WORK must be
00110 *          the size given in LWORK.
00111 *          On exit, WORK( 1 ) contains the minimal LWORK.
00112 *
00113 *  LWORK   (local input or global input) INTEGER
00114 *          Size of user-input workspace WORK.
00115 *          If LWORK is too small, the minimal acceptable size will be
00116 *          returned in WORK(1) and an error code is returned. LWORK>=
00117 *          (12*NPCOL+3*NB)
00118 *          +max(10*NPCOL+4*NRHS, 8*NPCOL)
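*          As an illustration only (the figures below are assumed for
*          the example, not required): with NPCOL = 4, NB = 100 and
*          NRHS = 1, the bound gives
*          LWORK >= (12*4 + 3*100) + max(10*4 + 4*1, 8*4)
*                 = 348 + 44 = 392.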
00119 *
00120 *  INFO    (local output) INTEGER
00121 *          = 0:  successful exit
00122 *          < 0:  If the i-th argument is an array and the j-th entry had
00123 *                an illegal value, then INFO = -(i*100+j); if the i-th
00124 *                argument is a scalar and had an illegal value, then
00125 *                INFO = -i.
00126 *          > 0:  If INFO = K<=NPROCS, the submatrix stored on processor
00127 *                INFO and factored locally was not
00128 *                diagonally dominant-like,  and
00129 *                the factorization was not completed.
00130 *                If INFO = K>NPROCS, the submatrix stored on processor
00131 *                INFO-NPROCS representing interactions with other
00132 *                processors was not
00133 *                stably factorable without interchanges,
00134 *                and the factorization was not completed.
00135 *
00136 *  =====================================================================
00137 *
00138 *
00139 *  Restrictions
00140 *  ============
00141 *
00142 *  The following are restrictions on the input parameters. Some of these
00143 *    are temporary and will be removed in future releases, while others
00144 *    may reflect fundamental technical limitations.
00145 *
00146 *    Non-cyclic restriction: VERY IMPORTANT!
00147 *      P*NB>= mod(JA-1,NB)+N.
00148 *      The mapping for matrices must be blocked, reflecting the nature
00149 *      of the divide and conquer algorithm as a task-parallel algorithm.
00150 *      This formula in words is: no processor may have more than one
00151 *      chunk of the matrix.
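*      As an illustration only (numbers assumed): with P = 4 processes,
*      NB = 100 and JA = 1, the restriction reads 4*100 >= mod(0,100)+N,
*      so N may be at most 400; a larger N requires a larger NB or
*      more processes.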
00152 *
00153 *    Blocksize cannot be too small:
00154 *      If the matrix spans more than one processor, the following
00155 *      restriction on NB, the size of each block on each processor,
00156 *      must hold:
00157 *      NB >= 2
00158 *      The bulk of parallel computation is done on the matrix of size
00159 *      O(NB) on each processor. If this is too small, divide and conquer
00160 *      is a poor choice of algorithm.
00161 *
00162 *    Submatrix reference:
00163 *      JA = IB
00164 *      Alignment restriction that prevents unnecessary communication.
00165 *
00166 *
00167 *  =====================================================================
00168 *
00169 *
00170 *  Notes
00171 *  =====
00172 *
00173 *  If the factorization routine and the solve routine are to be called
00174 *    separately (to solve various sets of right-hand sides using the same
00175 *    coefficient matrix), the auxiliary space AF *must not be altered*
00176 *    between calls to the factorization routine and the solve routine.
00177 *
00178 *  The best algorithm for solving banded and tridiagonal linear systems
00179 *    depends on a variety of parameters, especially the bandwidth.
00180 *    Currently, only algorithms designed for the case N/P >> bw are
00181 *    implemented. These go by many names, including Divide and Conquer,
00182 *    Partitioning, domain decomposition-type, etc.
00183 *    For tridiagonal matrices, it is obvious: N/P >> bw(=1), and so D&C
00184 *    algorithms are the appropriate choice.
00185 *
00186 *  Algorithm description: Divide and Conquer
00187 *
00188 *    The Divide and Conquer algorithm assumes the matrix is narrowly
00189 *      banded compared with the number of equations. In this situation,
00190 *      it is best to distribute the input matrix A one-dimensionally,
00191 *      with columns atomic and rows divided amongst the processes.
00192 *      The basic algorithm divides the tridiagonal matrix up into
00193 *      P pieces with one stored on each processor,
00194 *      and then proceeds in 2 phases for the factorization or 3 for the
00195 *      solution of a linear system.
00196 *      1) Local Phase:
00197 *         The individual pieces are factored independently and in
00198 *         parallel. These factors are applied to the matrix creating
00199 *         fill-in, which is stored in a non-inspectable way in auxiliary
00200 *         space AF. Mathematically, this is equivalent to reordering
00201 *         the matrix A as P A P^T and then factoring the principal
00202 *         leading submatrix of size equal to the sum of the sizes of
00203 *         the matrices factored on each processor. The factors of
00204 *         these submatrices overwrite the corresponding parts of A
00205 *         in memory.
00206 *      2) Reduced System Phase:
00207 *         A small (P-1) system is formed representing
00208 *         interaction of the larger blocks, and is stored (as are its
00209 *         factors) in the space AF. A parallel Block Cyclic Reduction
00210 *         algorithm is used. For a linear system, a parallel front solve
00211 *         followed by an analogous backsolve, both using the structure
00212 *         of the factored matrix, are performed.
00213 *      3) Backsubstitution Phase:
00214 *         For a linear system, a local backsubstitution is performed on
00215 *         each processor in parallel.
00216 *
00217 *
00218 *  Descriptors
00219 *  ===========
00220 *
00221 *  Descriptors now have *types* and differ from ScaLAPACK 1.0.
00222 *
00223 *  Note: tridiagonal codes can use either the old two dimensional
00224 *    or new one-dimensional descriptors, though the processor grid in
00225 *    both cases *must be one-dimensional*. We describe both types below.
00226 *
00227 *  Each global data object is described by an associated description
00228 *  vector.  This vector stores the information required to establish
00229 *  the mapping between an object element and its corresponding process
00230 *  and memory location.
00231 *
00232 *  Let A be a generic term for any 2D block cyclically distributed array.
00233 *  Such a global array has an associated description vector DESCA.
00234 *  In the following comments, the character _ should be read as
00235 *  "of the global array".
00236 *
00237 *  NOTATION        STORED IN      EXPLANATION
00238 *  --------------- -------------- --------------------------------------
00239 *  DTYPE_A(global) DESCA( DTYPE_ )The descriptor type.  In this case,
00240 *                                 DTYPE_A = 1.
00241 *  CTXT_A (global) DESCA( CTXT_ ) The BLACS context handle, indicating
00242 *                                 the BLACS process grid A is distribu-
00243 *                                 ted over. The context itself is glo-
00244 *                                 bal, but the handle (the integer
00245 *                                 value) may vary.
00246 *  M_A    (global) DESCA( M_ )    The number of rows in the global
00247 *                                 array A.
00248 *  N_A    (global) DESCA( N_ )    The number of columns in the global
00249 *                                 array A.
00250 *  MB_A   (global) DESCA( MB_ )   The blocking factor used to distribute
00251 *                                 the rows of the array.
00252 *  NB_A   (global) DESCA( NB_ )   The blocking factor used to distribute
00253 *                                 the columns of the array.
00254 *  RSRC_A (global) DESCA( RSRC_ ) The process row over which the first
00255 *                                 row of the array A is distributed.
00256 *  CSRC_A (global) DESCA( CSRC_ ) The process column over which the
00257 *                                 first column of the array A is
00258 *                                 distributed.
00259 *  LLD_A  (local)  DESCA( LLD_ )  The leading dimension of the local
00260 *                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
00261 *
00262 *  Let K be the number of rows or columns of a distributed matrix,
00263 *  and assume that its process grid has dimension p x q.
00264 *  LOCr( K ) denotes the number of elements of K that a process
00265 *  would receive if K were distributed over the p processes of its
00266 *  process column.
00267 *  Similarly, LOCc( K ) denotes the number of elements of K that a
00268 *  process would receive if K were distributed over the q processes of
00269 *  its process row.
00270 *  The values of LOCr() and LOCc() may be determined via a call to the
00271 *  ScaLAPACK tool function, NUMROC:
00272 *          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
00273 *          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
00274 *  An upper bound for these quantities may be computed by:
00275 *          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
00276 *          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
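*  As an illustration only (sizes assumed): for M = 10, MB_A = 3,
*  RSRC_A = 0 and NPROW = 2, process row 0 owns rows 1:3 and 7:9
*  (LOCr = 6) and process row 1 owns rows 4:6 and 10 (LOCr = 4);
*  the bound above gives ceil( ceil(10/3)/2 )*3 = 6 for both.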
00277 *
00278 *
00279 *  One-dimensional descriptors:
00280 *
00281 *  One-dimensional descriptors are a new addition to ScaLAPACK since
00282 *    version 1.0. They simplify and shorten the descriptor for 1D
00283 *    arrays.
00284 *
00285 *  Since ScaLAPACK supports two-dimensional arrays as the fundamental
00286 *    object, we allow 1D arrays to be distributed either over the
00287 *    first dimension of the array (as if the grid were P-by-1) or the
00288 *    2nd dimension (as if the grid were 1-by-P). This choice is
00289 *    indicated by the descriptor type (501 or 502)
00290 *    as described below.
00291 *    However, for tridiagonal matrices, since the objects being
00292 *    distributed are the individual vectors storing the diagonals, we
00293 *    have adopted the convention that both the P-by-1 descriptor and
00294 *    the 1-by-P descriptor are allowed and are equivalent for
00295 *    tridiagonal matrices. Thus, for tridiagonal matrices,
00296 *    DTYPE_A = 501 or 502 can be used interchangeably
00297 *    without any other change.
00298 *  We require that the distributed vectors storing the diagonals of a
00299 *    tridiagonal matrix be aligned with each other. Because of this, a
00300 *    single descriptor, DESCA, serves to describe the distribution
00301 *    of all diagonals simultaneously.
00302 *
00303 *    IMPORTANT NOTE: the actual BLACS grid represented by the
00304 *    CTXT entry in the descriptor may be *either*  P-by-1 or 1-by-P
00305 *    irrespective of which one-dimensional descriptor type
00306 *    (501 or 502) is input.
00307 *    This routine will interpret the grid properly either way.
00308 *    ScaLAPACK routines *do not support intercontext operations* so that
00309 *    the grid passed to a single ScaLAPACK routine *must be the same*
00310 *    for all array descriptors passed to that routine.
00311 *
00312 *    NOTE: In all cases where 1D descriptors are used, 2D descriptors
00313 *    may also be used, since a one-dimensional array is a special case
00314 *    of a two-dimensional array with one dimension of size unity.
00315 *    The two-dimensional array used in this case *must* be of the
00316 *    proper orientation:
00317 *      If the appropriate one-dimensional descriptor is DTYPE_A=501
00318 *      (1 by P type), then the two dimensional descriptor must
00319 *      have a CTXT value that refers to a 1 by P BLACS grid;
00320 *      If the appropriate one-dimensional descriptor is DTYPE_A=502
00321 *      (P by 1 type), then the two dimensional descriptor must
00322 *      have a CTXT value that refers to a P by 1 BLACS grid.
00323 *
00324 *
00325 *  Summary of allowed descriptors, types, and BLACS grids:
00326 *  DTYPE           501         502         1         1
00327 *  BLACS grid      1xP or Px1  1xP or Px1  1xP       Px1
00328 *  -----------------------------------------------------
00329 *  A               OK          OK          OK        NO
00330 *  B               NO          OK          NO        OK
00331 *
00332 *  Note that a consequence of this chart is that it is not possible
00333 *    for *both* DTYPE_A and DTYPE_B to be 2D_type(1), as these lead
00334 *    to opposite requirements for the orientation of the BLACS grid,
00335 *    and as noted before, the *same* BLACS context must be used in
00336 *    all descriptors in a single ScaLAPACK subroutine call.
00337 *
00338 *  Let A be a generic term for any 1D block cyclically distributed array.
00339 *  Such a global array has an associated description vector DESCA.
00340 *  In the following comments, the character _ should be read as
00341 *  "of the global array".
00342 *
00343 *  NOTATION        STORED IN  EXPLANATION
00344 *  --------------- ---------- ------------------------------------------
00345 *  DTYPE_A(global) DESCA( 1 ) The descriptor type. For 1D grids,
00346 *                                DTYPE_A = 501: 1-by-P grid.
00347 *                                DTYPE_A = 502: P-by-1 grid.
00348 *  CTXT_A (global) DESCA( 2 ) The BLACS context handle, indicating
00349 *                                the BLACS process grid A is distribu-
00350 *                                ted over. The context itself is glo-
00351 *                                bal, but the handle (the integer
00352 *                                value) may vary.
00353 *  N_A    (global) DESCA( 3 ) The size of the array dimension being
00354 *                                distributed.
00355 *  NB_A   (global) DESCA( 4 ) The blocking factor used to distribute
00356 *                                the distributed dimension of the array.
00357 *  SRC_A  (global) DESCA( 5 ) The process row or column over which the
00358 *                                first row or column of the array
00359 *                                is distributed.
00360 *  Ignored         DESCA( 6 ) Ignored for tridiagonal matrices.
00361 *  Reserved        DESCA( 7 ) Reserved for future use.
00362 *
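*  As an illustration only (the context handle ICTXT, the order N and
*  the block size NB below are assumed to have been set already), a
*  type-501 descriptor for the tridiagonal matrix could be filled as
*  follows; type 502 is equivalent for tridiagonal matrices:
*
*       DESCA( 1 ) = 501     ! descriptor type
*       DESCA( 2 ) = ICTXT   ! BLACS context handle
*       DESCA( 3 ) = N       ! size of the distributed dimension
*       DESCA( 4 ) = NB      ! blocking factor
*       DESCA( 5 ) = 0       ! process owning the first block
*       DESCA( 6 ) = 0       ! ignored for tridiagonal matrices
*       DESCA( 7 ) = 0       ! reserved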
00363 *
00364 *
00365 *  =====================================================================
00366 *
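*  Example
*  =======
*
*  The following sketch of a calling sequence is illustrative only and
*  is not part of the interface specification; the grid shape, block
*  size NB, right hand side count NRHS and the literal descriptor
*  values are assumptions chosen for the example (DESCB( 6 ) is taken
*  to be the local leading dimension of B, which must be at least NB):
*
*       CALL BLACS_GET( -1, 0, ICTXT )
*       CALL BLACS_GRIDINIT( ICTXT, 'R', 1, NP )
*
*       DESCA( 1 ) = 501
*       DESCA( 2 ) = ICTXT
*       DESCA( 3 ) = N
*       DESCA( 4 ) = NB
*       DESCA( 5 ) = 0
*       DESCA( 6 ) = 0
*       DESCA( 7 ) = 0
*
*       DESCB( 1 ) = 502
*       DESCB( 2 ) = ICTXT
*       DESCB( 3 ) = N
*       DESCB( 4 ) = NB
*       DESCB( 5 ) = 0
*       DESCB( 6 ) = NB
*       DESCB( 7 ) = 0
*
*       CALL PDDTSV( N, NRHS, DL, D, DU, 1, DESCA, B, 1, DESCB,
*      $             WORK, LWORK, INFO )
*
*  Here JA = IB = 1, satisfying the alignment restriction above, and
*  LWORK is at least (12*NPCOL+3*NB) + max(10*NPCOL+4*NRHS, 8*NPCOL).
*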
00367 *  Code Developer: Andrew J. Cleary, University of Tennessee.
00368 *    Current address: Lawrence Livermore National Labs.
00369 *  This version released: August, 2001.
00370 *
00371 *  =====================================================================
00372 *
00373 *     ..
00374 *     .. Parameters ..
00375       DOUBLE PRECISION   ONE, ZERO
00376       PARAMETER          ( ONE = 1.0D+0 )
00377       PARAMETER          ( ZERO = 0.0D+0 )
00378       INTEGER            INT_ONE
00379       PARAMETER          ( INT_ONE = 1 )
00380       INTEGER            DESCMULT, BIGNUM
00381       PARAMETER          (DESCMULT = 100, BIGNUM = DESCMULT * DESCMULT)
00382       INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
00383      $                   LLD_, MB_, M_, NB_, N_, RSRC_
00384       PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
00385      $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
00386      $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
00387 *     ..
00388 *     .. Local Scalars ..
00389       INTEGER            ICTXT, MYCOL, MYROW, NB, NPCOL, NPROW,
00390      $                   WS_FACTOR
00391 *     ..
00392 *     .. External Subroutines ..
00393       EXTERNAL           PDDTTRF, PDDTTRS, PXERBLA
00394 *     ..
00395 *     .. Executable Statements ..
00396 *
00397 *     Note: to avoid duplication, most error checking is not performed
00398 *           in this routine and is left to routines
00399 *           PDDTTRF and PDDTTRS.
00400 *
00401 *     Begin main code
00402 *
00403       INFO = 0
00404 *
00405 *     Get block size to calculate workspace requirements
00406 *
00407       IF( DESCA( DTYPE_ ) .EQ. BLOCK_CYCLIC_2D ) THEN
00408          NB = DESCA( NB_ )
00409          ICTXT = DESCA( CTXT_ )
00410       ELSEIF( DESCA( DTYPE_ ) .EQ. 501 ) THEN
00411          NB = DESCA( 4 )
00412          ICTXT = DESCA( 2 )
00413       ELSEIF( DESCA( DTYPE_ ) .EQ. 502 ) THEN
00414          NB = DESCA( 4 )
00415          ICTXT = DESCA( 2 )
00416       ELSE
00417          INFO = -( 7*100 + DTYPE_ )
00418          CALL PXERBLA( ICTXT,
00419      $      'PDDTSV',
00420      $      -INFO )
00421          RETURN
00422       ENDIF
00423 *
00424       CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
00425 *
00426 *
00427 *     Size needed for AF in factorization
00428 *
00429       WS_FACTOR = (12*NPCOL+3*NB)
00430 *
00431 *     Factor the matrix
00432 *
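*     ( In the calls below, WORK( 1:WS_FACTOR ) is passed to
*       PDDTTRF/PDDTTRS as the auxiliary fill-in space AF, and
*       WORK( WS_FACTOR+1:LWORK ) is used as scratch workspace. )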
00433       CALL PDDTTRF( N, DL, D, DU, JA, DESCA, WORK,
00434      $              MIN( LWORK, WS_FACTOR ), WORK( 1+WS_FACTOR ),
00435      $              LWORK-WS_FACTOR, INFO )
00436 *
00437 *     Check info for error conditions
00438 *
00439       IF( INFO.NE.0 ) THEN
00440          IF( INFO .LT. 0 ) THEN
00441             CALL PXERBLA( ICTXT, 'PDDTSV', -INFO )
00442          ENDIF
00443          RETURN
00444       END IF
00445 *
00446 *     Solve the system using the factorization
00447 *
00448       CALL PDDTTRS( 'N', N, NRHS, DL, D, DU, JA, DESCA, B, IB, DESCB,
00449      $              WORK, MIN( LWORK, WS_FACTOR ), WORK( 1+WS_FACTOR),
00450      $              LWORK-WS_FACTOR, INFO )
00451 *
00452 *     Check info for error conditions
00453 *
00454       IF( INFO.NE.0 ) THEN
00455          CALL PXERBLA( ICTXT, 'PDDTSV', -INFO )
00456          RETURN
00457       END IF
00458 *
00459       RETURN
00460 *
00461 *     End of PDDTSV
00462 *
00463       END