ScaLAPACK 2.1
ScaLAPACK: Scalable Linear Algebra PACKage
pzdrscl.f
      SUBROUTINE PZDRSCL( N, SA, SX, IX, JX, DESCX, INCX )
*
*  -- ScaLAPACK auxiliary routine (version 1.7) --
*     University of Tennessee, Knoxville, Oak Ridge National Laboratory,
*     and University of California, Berkeley.
*     May 1, 1997
*
*     .. Scalar Arguments ..
      INTEGER            IX, INCX, JX, N
      DOUBLE PRECISION   SA
*     ..
*     .. Array Arguments ..
      INTEGER            DESCX( * )
      COMPLEX*16         SX( * )
*     ..
*
*  Purpose
*  =======
*
*  PZDRSCL multiplies an N-element complex distributed vector
*  sub( X ) by the real scalar 1/a.  This is done without overflow or
*  underflow as long as the final sub( X )/a does not overflow or
*  underflow.
*
*  where sub( X ) denotes X(IX:IX+N-1,JX:JX), if INCX = 1,
*                         X(IX:IX,JX:JX+N-1), if INCX = M_X.
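*
*  For example, assuming ALPHA is a nonzero DOUBLE PRECISION scalar and
*  X holds the local pieces of a distributed matrix with descriptor
*  DESCX (names used here for illustration only), the following call
*  would scale entries IX to IX+N-1 of column JX of X by 1/ALPHA:
*
*     CALL PZDRSCL( N, ALPHA, X, IX, JX, DESCX, 1 )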
*
*  Notes
*  =====
*
*  Each global data object is described by an associated description
*  vector.  This vector stores the information required to establish
*  the mapping between an object element and its corresponding process
*  and memory location.
*
*  Let A be a generic term for any 2D block cyclically distributed
*  array.  Such a global array has an associated description vector
*  descA.  In the following comments, the character _ should be read
*  as "of the global array".
*
*  NOTATION        STORED IN      EXPLANATION
*  --------------- -------------- --------------------------------------
*  DT_A   (global) descA[ DT_ ]   The descriptor type.  In this case,
*                                 DT_A = 1.
*  CTXT_A (global) descA[ CTXT_ ] The BLACS context handle, indicating
*                                 the BLACS process grid A is distribu-
*                                 ted over. The context itself is glo-
*                                 bal, but the handle (the integer
*                                 value) may vary.
*  M_A    (global) descA[ M_ ]    The number of rows in the global
*                                 array A.
*  N_A    (global) descA[ N_ ]    The number of columns in the global
*                                 array A.
*  MB_A   (global) descA[ MB_ ]   The blocking factor used to distribu-
*                                 te the rows of the array.
*  NB_A   (global) descA[ NB_ ]   The blocking factor used to distribu-
*                                 te the columns of the array.
*  RSRC_A (global) descA[ RSRC_ ] The process row over which the first
*                                 row of the array A is distributed.
*  CSRC_A (global) descA[ CSRC_ ] The process column over which the
*                                 first column of the array A is
*                                 distributed.
*  LLD_A  (local)  descA[ LLD_ ]  The leading dimension of the local
*                                 array.  LLD_A >= MAX(1,LOCr(M_A)).
*
*  Let K be the number of rows or columns of a distributed matrix,
*  and assume that its process grid has dimension p x q.
*  LOCr( K ) denotes the number of elements of K that a process
*  would receive if K were distributed over the p processes of its
*  process column.
*  Similarly, LOCc( K ) denotes the number of elements of K that a
*  process would receive if K were distributed over the q processes of
*  its process row.
*  The values of LOCr() and LOCc() may be determined via a call to the
*  ScaLAPACK tool function, NUMROC:
*          LOCr( M ) = NUMROC( M, MB_A, MYROW, RSRC_A, NPROW ),
*          LOCc( N ) = NUMROC( N, NB_A, MYCOL, CSRC_A, NPCOL ).
*  An upper bound for these quantities may be computed by:
*          LOCr( M ) <= ceil( ceil(M/MB_A)/NPROW )*MB_A
*          LOCc( N ) <= ceil( ceil(N/NB_A)/NPCOL )*NB_A
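*
*  As an illustration, the local row and column counts of X held by
*  the calling process could be obtained from its descriptor as
*  follows, assuming MYROW, MYCOL, NPROW and NPCOL were returned by
*  BLACS_GRIDINFO, LOCRX and LOCCX are INTEGER variables of the
*  caller, and M_, MB_, RSRC_, N_, NB_, CSRC_ index the descriptor
*  entries as in the table above:
*
*     LOCRX = NUMROC( DESCX( M_ ), DESCX( MB_ ), MYROW,
*    $                DESCX( RSRC_ ), NPROW )
*     LOCCX = NUMROC( DESCX( N_ ), DESCX( NB_ ), MYCOL,
*    $                DESCX( CSRC_ ), NPCOL )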
*
*  Because vectors may be seen as particular matrices, a distributed
*  vector is considered to be a distributed matrix.
*
*  Arguments
*  =========
*
*  N       (global input) INTEGER
*          The number of components of the distributed vector sub( X ).
*          N >= 0.
*
*  SA      (global input) DOUBLE PRECISION
*          The scalar a which is used to divide each component of
*          sub( X ).  SA must be nonzero, or the subroutine will
*          divide by zero.
*
*  SX      (local input/local output) COMPLEX*16 array
*          containing the local pieces of a distributed matrix of
*          dimension at least
*              ( (JX-1)*M_X + IX + ( N - 1 )*abs( INCX ) )
*          This array contains the entries of the distributed vector
*          sub( X ).
*
*  IX      (global input) INTEGER
*          The global row index of the submatrix of the distributed
*          matrix X to operate on.
*
*  JX      (global input) INTEGER
*          The global column index of the submatrix of the distributed
*          matrix X to operate on.
*
*  DESCX   (global and local input) INTEGER array of dimension DLEN_.
*          The array descriptor of the distributed matrix X.
*
*  INCX    (global input) INTEGER
*          The global increment for the elements of X.  Only two values
*          of INCX are supported in this version, namely 1 and M_X.
*
*  =====================================================================
*
*     .. Parameters ..
      INTEGER            BLOCK_CYCLIC_2D, CSRC_, CTXT_, DLEN_, DTYPE_,
     $                   LLD_, MB_, M_, NB_, N_, RSRC_
      PARAMETER          ( BLOCK_CYCLIC_2D = 1, DLEN_ = 9, DTYPE_ = 1,
     $                     CTXT_ = 2, M_ = 3, N_ = 4, MB_ = 5, NB_ = 6,
     $                     RSRC_ = 7, CSRC_ = 8, LLD_ = 9 )
      DOUBLE PRECISION   ONE, ZERO
      PARAMETER          ( ONE = 1.0D+0, ZERO = 0.0D+0 )
*     ..
*     .. Local Scalars ..
      LOGICAL            DONE
      INTEGER            ICTXT, MYCOL, MYROW, NPCOL, NPROW
      DOUBLE PRECISION   BIGNUM, CDEN, CDEN1, CNUM, CNUM1, MUL, SMLNUM
*     ..
*     .. External Subroutines ..
      EXTERNAL           BLACS_GRIDINFO, PDLABAD, PZDSCAL
*     ..
*     .. External Functions ..
      DOUBLE PRECISION   PDLAMCH
      EXTERNAL           PDLAMCH
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS
*     ..
*     .. Executable Statements ..
*
*     Get grid parameters
*
      ICTXT = DESCX( CTXT_ )
      CALL BLACS_GRIDINFO( ICTXT, NPROW, NPCOL, MYROW, MYCOL )
*
*     Quick return if possible
*
      IF( N.LE.0 )
     $   RETURN
*
*     Get machine parameters
*
      SMLNUM = PDLAMCH( ICTXT, 'S' )
      BIGNUM = ONE / SMLNUM
      CALL PDLABAD( ICTXT, SMLNUM, BIGNUM )
*
*     Initialize the denominator to SA and the numerator to 1.
*
      CDEN = SA
      CNUM = ONE
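*
*     The loop below repeatedly rescales sub( X ) by SMLNUM or BIGNUM
*     until CNUM / CDEN can be formed without overflow or underflow;
*     the product of all factors MUL applied to sub( X ) equals 1 / SA.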
*
   10 CONTINUE
      CDEN1 = CDEN*SMLNUM
      CNUM1 = CNUM / BIGNUM
      IF( ABS( CDEN1 ).GT.ABS( CNUM ) .AND. CNUM.NE.ZERO ) THEN
*
*        Pre-multiply sub( X ) by SMLNUM if CDEN is large compared to
*        CNUM.
*
         MUL = SMLNUM
         DONE = .FALSE.
         CDEN = CDEN1
      ELSE IF( ABS( CNUM1 ).GT.ABS( CDEN ) ) THEN
*
*        Pre-multiply sub( X ) by BIGNUM if CDEN is small compared to
*        CNUM.
*
         MUL = BIGNUM
         DONE = .FALSE.
         CNUM = CNUM1
      ELSE
*
*        Multiply sub( X ) by CNUM / CDEN and return.
*
         MUL = CNUM / CDEN
         DONE = .TRUE.
      END IF
*
*     Scale the vector sub( X ) by MUL
*
      CALL PZDSCAL( N, MUL, SX, IX, JX, DESCX, INCX )
*
      IF( .NOT.DONE )
     $   GO TO 10
*
      RETURN
*
*     End of PZDRSCL
*
      END