      SUBROUTINE ZLATDF( IJOB, N, Z, LDZ, RHS, RDSUM, RDSCAL, IPIV,
     $                   JPIV )
*
*  -- LAPACK auxiliary routine (version 3.2) --
*  -- LAPACK is a software package provided by Univ. of Tennessee,    --
*  -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. --
*     November 2006
*
*     .. Scalar Arguments ..
      INTEGER            IJOB, LDZ, N
      DOUBLE PRECISION   RDSCAL, RDSUM
*     ..
*     .. Array Arguments ..
      INTEGER            IPIV( * ), JPIV( * )
      COMPLEX*16         RHS( * ), Z( LDZ, * )
*     ..
*
*  Purpose
*  =======
*
*  ZLATDF computes the contribution to the reciprocal Dif-estimate
*  by solving for x in Z * x = b, where b is chosen such that the norm
*  of x is as large as possible. It is assumed that an LU
*  decomposition of Z has been computed by ZGETC2. On entry RHS = f
*  holds the contribution from earlier solved sub-systems, and on
*  return RHS = x.
*
*  The factorization of Z returned by ZGETC2 has the form
*  Z = P * L * U * Q, where P and Q are permutation matrices. L is lower
*  triangular with unit diagonal elements and U is upper triangular.
*
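*  A typical calling sequence (sketch only; set-up of the arguments is
*  left to the caller) is
*
*     CALL ZGETC2( N, Z, LDZ, IPIV, JPIV, INFO )
*     CALL ZLATDF( IJOB, N, Z, LDZ, RHS, RDSUM, RDSCAL, IPIV, JPIV )
*
*  as done, for example, by ZTGSY2 when estimating Dif for ZTGSYL.
*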
*  Arguments
*  =========
*
*  IJOB    (input) INTEGER
*          IJOB = 2: First compute an approximate null vector e
*              of Z using ZGECON, normalize e, and solve for
*              Zx = +-e - f with the sign giving the greater value of
*              2-norm(x).  About 5 times as expensive as the default.
*          IJOB .ne. 2: Local look-ahead strategy, where all entries
*              of the r.h.s. b are chosen as either +1 or -1 (the
*              default).
*
*  N       (input) INTEGER
*          The number of columns of the matrix Z.
*
*  Z       (input) COMPLEX*16 array, dimension (LDZ, N)
*          On entry, the LU part of the factorization of the n-by-n
*          matrix Z computed by ZGETC2:  Z = P * L * U * Q
*
*  LDZ     (input) INTEGER
*          The leading dimension of the array Z.  LDZ >= max(1, N).
*
*  RHS     (input/output) COMPLEX*16 array, dimension (N).
*          On entry, RHS contains contributions from other subsystems.
*          On exit, RHS contains the solution of the subsystem with
*          entries according to the value of IJOB (see above).
*
*  RDSUM   (input/output) DOUBLE PRECISION
*          On entry, the sum of squares of computed contributions to
*          the Dif-estimate under computation by ZTGSYL, where the
*          scaling factor RDSCAL (see below) has been factored out.
*          On exit, the corresponding sum of squares updated with the
*          contributions from the current sub-system.
*          If TRANS = 'T' in the calling routine ZTGSY2, RDSUM is not
*          touched.
*          NOTE: RDSUM only makes sense when ZTGSY2 is called by ZTGSYL.
*
*  RDSCAL  (input/output) DOUBLE PRECISION
*          On entry, scaling factor used to prevent overflow in RDSUM.
*          On exit, RDSCAL is updated w.r.t. the current contributions
*          in RDSUM.
*          If TRANS = 'T' in the calling routine ZTGSY2, RDSCAL is not
*          touched.
*          NOTE: RDSCAL only makes sense when ZTGSY2 is called by
*          ZTGSYL.
*
*  IPIV    (input) INTEGER array, dimension (N).
*          The pivot indices; for 1 <= i <= N, row i of the
*          matrix has been interchanged with row IPIV(i).
*
*  JPIV    (input) INTEGER array, dimension (N).
*          The pivot indices; for 1 <= j <= N, column j of the
*          matrix has been interchanged with column JPIV(j).
*
*  Further Details
*  ===============
*
*  Based on contributions by
*     Bo Kagstrom and Peter Poromaa, Department of Computing Science,
*     Umea University, S-901 87 Umea, Sweden.
*
*  This routine is a further development of the algorithm BSOLVE in
*  [1], using complete pivoting in the LU factorization.
*
*   [1]   Bo Kagstrom and Lars Westin,
*         Generalized Schur Methods with Condition Estimators for
*         Solving the Generalized Sylvester Equation, IEEE Transactions
*         on Automatic Control, Vol. 34, No. 7, July 1989, pp 745-751.
*
*   [2]   Peter Poromaa,
*         On Efficient and Robust Estimators for the Separation
*         between two Regular Matrix Pairs with Applications in
*         Condition Estimation. Report UMINF-95.05, Department of
*         Computing Science, Umea University, S-901 87 Umea, Sweden,
*         1995.
*
*  =====================================================================
*
*     .. Parameters ..
      INTEGER            MAXDIM
      PARAMETER          ( MAXDIM = 2 )
      DOUBLE PRECISION   ZERO, ONE
      PARAMETER          ( ZERO = 0.0D+0, ONE = 1.0D+0 )
      COMPLEX*16         CONE
      PARAMETER          ( CONE = ( 1.0D+0, 0.0D+0 ) )
*     ..
*     .. Local Scalars ..
      INTEGER            I, INFO, J, K
      DOUBLE PRECISION   RTEMP, SCALE, SMINU, SPLUS
      COMPLEX*16         BM, BP, PMONE, TEMP
*     ..
*     .. Local Arrays ..
      DOUBLE PRECISION   RWORK( 2*MAXDIM )
      COMPLEX*16         WORK( 4*MAXDIM ), XM( MAXDIM ), XP( MAXDIM )
*     ..
*     .. External Subroutines ..
      EXTERNAL           ZAXPY, ZCOPY, ZGECON, ZGESC2, ZLASSQ, ZLASWP,
     $                   ZSCAL
*     ..
*     .. External Functions ..
      DOUBLE PRECISION   DZASUM
      COMPLEX*16         ZDOTC
      EXTERNAL           DZASUM, ZDOTC
*     ..
*     .. Intrinsic Functions ..
      INTRINSIC          ABS, DBLE, SQRT
*     ..
*     .. Executable Statements ..
*
      IF( IJOB.NE.2 ) THEN
*
*        Apply permutations IPIV to RHS
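*        (RHS is treated as an N-by-1 matrix; the row interchanges
*        IPIV(1:N-1) are applied in forward order.)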
*
         CALL ZLASWP( 1, RHS, LDZ, 1, N-1, IPIV, 1 )
*
*        Solve for the L-part, adding +1 or -1 to each of RHS(1:N-1).
*
         PMONE = -CONE
         DO 10 J = 1, N - 1
            BP = RHS( J ) + CONE
            BM = RHS( J ) - CONE
            SPLUS = ONE
*           Look-ahead for the L-part: RHS(1:N-1) = +-1.
*           SPLUS and SMINU are computed more efficiently than in
*           BSOLVE [1].
*
            SPLUS = SPLUS + DBLE( ZDOTC( N-J, Z( J+1, J ), 1, Z( J+1,
     $              J ), 1 ) )
            SMINU = DBLE( ZDOTC( N-J, Z( J+1, J ), 1, RHS( J+1 ), 1 ) )
            SPLUS = SPLUS*DBLE( RHS( J ) )
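*
*           Choosing RHS(J) = BP when SPLUS.GT.SMINU maximizes
*           ABS(RHS(J))**2 plus the squared 2-norm of
*           RHS(J+1:N) - RHS(J)*Z(J+1:N,J) over the two candidates
*           BP and BM; terms common to both choices cancel out in
*           the comparison.
*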
            IF( SPLUS.GT.SMINU ) THEN
               RHS( J ) = BP
            ELSE IF( SMINU.GT.SPLUS ) THEN
               RHS( J ) = BM
            ELSE
*
*              In this case the updating sums are equal, and we can
*              add either +1 or -1 to RHS(J). The first time this
*              happens we choose -1, thereafter +1. This is a simple
*              way to get good estimates for matrices like Byers'
*              well-known example (see [1]). (Not done in BSOLVE.)
*
               RHS( J ) = RHS( J ) + PMONE
               PMONE = CONE
            END IF
*
*           Compute the remaining r.h.s.
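*           RHS(J+1:N) := RHS(J+1:N) - RHS(J)*Z(J+1:N,J)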
*
            TEMP = -RHS( J )
            CALL ZAXPY( N-J, TEMP, Z( J+1, J ), 1, RHS( J+1 ), 1 )
   10    CONTINUE
*
*        Solve for the U-part, with look-ahead for RHS(N) = +-1. This
*        is not done in BSOLVE and should give a better estimate since
*        any ill-conditioning of the original matrix is transferred to
*        U and not to L. U(N, N) is an approximation to sigma_min(LU).
*
         CALL ZCOPY( N-1, RHS, 1, WORK, 1 )
         WORK( N ) = RHS( N ) + CONE
         RHS( N ) = RHS( N ) - CONE
         SPLUS = ZERO
         SMINU = ZERO
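*
*        Back substitution: WORK holds the candidate with RHS(N)+1 and
*        RHS the candidate with RHS(N)-1; SPLUS and SMINU accumulate
*        their 1-norms, and the larger solution is kept below.
*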
         DO 30 I = N, 1, -1
            TEMP = CONE / Z( I, I )
            WORK( I ) = WORK( I )*TEMP
            RHS( I ) = RHS( I )*TEMP
            DO 20 K = I + 1, N
               WORK( I ) = WORK( I ) - WORK( K )*( Z( I, K )*TEMP )
               RHS( I ) = RHS( I ) - RHS( K )*( Z( I, K )*TEMP )
   20       CONTINUE
            SPLUS = SPLUS + ABS( WORK( I ) )
            SMINU = SMINU + ABS( RHS( I ) )
   30    CONTINUE
         IF( SPLUS.GT.SMINU )
     $      CALL ZCOPY( N, WORK, 1, RHS, 1 )
*
*        Apply the permutations JPIV to the computed solution (RHS)
*
         CALL ZLASWP( 1, RHS, LDZ, 1, N-1, JPIV, -1 )
*
*        Compute the sum of squares
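*        (ZLASSQ updates RDSCAL and RDSUM so that RDSCAL**2 * RDSUM
*        equals the accumulated sum of squares.)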
*
         CALL ZLASSQ( N, RHS, 1, RDSCAL, RDSUM )
         RETURN
      END IF
*
*     ENTRY IJOB = 2
*
*     Compute an approximate null vector XM of Z
*
      CALL ZGECON( 'I', N, Z, LDZ, ONE, RTEMP, WORK, RWORK, INFO )
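*
*     (The second half of WORK, as filled by the condition estimator
*     inside ZGECON, is used as the approximate null vector; this
*     relies on ZGECON's internal workspace layout.)
*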
      CALL ZCOPY( N, WORK( N+1 ), 1, XM, 1 )
*
*     Compute RHS
*
      CALL ZLASWP( 1, XM, LDZ, 1, N-1, IPIV, -1 )
      TEMP = CONE / SQRT( ZDOTC( N, XM, 1, XM, 1 ) )
      CALL ZSCAL( N, TEMP, XM, 1 )
      CALL ZCOPY( N, XM, 1, XP, 1 )
      CALL ZAXPY( N, CONE, RHS, 1, XP, 1 )
      CALL ZAXPY( N, -CONE, XM, 1, RHS, 1 )
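*
*     Solve Z * x = f + e (in XP) and Z * x = f - e (in RHS), keeping
*     the solution of larger 1-norm; only the norm of x enters the
*     Dif-estimate, so the overall sign is immaterial.
*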
      CALL ZGESC2( N, Z, LDZ, RHS, IPIV, JPIV, SCALE )
      CALL ZGESC2( N, Z, LDZ, XP, IPIV, JPIV, SCALE )
      IF( DZASUM( N, XP, 1 ).GT.DZASUM( N, RHS, 1 ) )
     $   CALL ZCOPY( N, XP, 1, RHS, 1 )
*
*     Compute the sum of squares
*
      CALL ZLASSQ( N, RHS, 1, RDSCAL, RDSUM )
      RETURN
*
*     End of ZLATDF
*
      END