      SUBROUTINE DLAED7( ICOMPQ, N, QSIZ, TLVLS, CURLVL, CURPBM, D, Q,
     $                   LDQ, INDXQ, RHO, CUTPNT, QSTORE, QPTR, PRMPTR,
     $                   PERM, GIVPTR, GIVCOL, GIVNUM, WORK, IWORK,
     $                   INFO )
      INTEGER            CURLVL, CURPBM, CUTPNT, ICOMPQ, INFO, LDQ, N,
     $                   QSIZ, TLVLS
      DOUBLE PRECISION   RHO
      INTEGER            GIVCOL( 2, * ), GIVPTR( * ), INDXQ( * ),
     $                   IWORK( * ), PERM( * ), PRMPTR( * ), QPTR( * )
      DOUBLE PRECISION   D( * ), GIVNUM( 2, * ), Q( LDQ, * ),
     $                   QSTORE( * ), WORK( * )
      DOUBLE PRECISION   ONE, ZERO
      PARAMETER          ( ONE = 1.0D0, ZERO = 0.0D0 )
      INTEGER            COLTYP, CURR, I, IDLMDA, INDX, INDXC, INDXP,
     $                   IQ2, IS, IW, IZ, K, LDQ2, N1, N2, PTR
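*     .. External Subroutines ..
      EXTERNAL           DGEMM, DLAED8, DLAED9, DLAEDA, DLAMRG, XERBLA
*     .. Intrinsic Functions ..
      INTRINSIC          MAX, MIN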
*
*     Test the input parameters.
*
      INFO = 0
      IF( ICOMPQ.LT.0 .OR. ICOMPQ.GT.1 ) THEN
         INFO = -1
      ELSE IF( N.LT.0 ) THEN
         INFO = -2
      ELSE IF( ICOMPQ.EQ.1 .AND. QSIZ.LT.N ) THEN
         INFO = -3
      ELSE IF( LDQ.LT.MAX( 1, N ) ) THEN
         INFO = -9
      ELSE IF( MIN( 1, N ).GT.CUTPNT .OR. N.LT.CUTPNT ) THEN
         INFO = -12
      END IF
      IF( INFO.NE.0 ) THEN
         CALL XERBLA( 'DLAED7', -INFO )
         RETURN
      END IF
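*
*     The following values are for bookkeeping purposes only.  They
*     are integer pointers which indicate the portion of the
*     workspace used by a particular array in DLAED8 and DLAED9.
*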
      IF( ICOMPQ.EQ.1 ) THEN
         LDQ2 = QSIZ
      ELSE
         LDQ2 = N
      END IF
*
      IZ = 1
      IDLMDA = IZ + N
      IW = IDLMDA + N
      IQ2 = IW + N
      IS = IQ2 + N*LDQ2
*
      INDX = 1
      INDXC = INDX + N
      COLTYP = INDXC + N
      INDXP = COLTYP + N
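*
*     Form the z-vector which consists of the last row of Q_1 and the
*     first row of Q_2.
*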
      PTR = 1 + 2**TLVLS
      DO 10 I = 1, CURLVL - 1
         PTR = PTR + 2**( TLVLS-I )
   10 CONTINUE
      CURR = PTR + CURPBM
      CALL DLAEDA( N, TLVLS, CURLVL, CURPBM, PRMPTR, PERM, GIVPTR,
     $             GIVCOL, GIVNUM, QSTORE, QPTR, WORK( IZ ),
     $             WORK( IZ+N ), INFO )
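*
*     When solving the final problem, we no longer need the stored
*     data, so we will overwrite the data from this level onto the
*     previously used storage space.
*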
      IF( CURLVL.EQ.TLVLS ) THEN
         QPTR( CURR ) = 1
         PRMPTR( CURR ) = 1
         GIVPTR( CURR ) = 1
      END IF
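*
*     Sort and Deflate eigenvalues.
*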
      CALL DLAED8( ICOMPQ, K, N, QSIZ, D, Q, LDQ, INDXQ, RHO, CUTPNT,
     $             WORK( IZ ), WORK( IDLMDA ), WORK( IQ2 ), LDQ2,
     $             WORK( IW ), PERM( PRMPTR( CURR ) ), GIVPTR( CURR+1 ),
     $             GIVCOL( 1, GIVPTR( CURR ) ),
     $             GIVNUM( 1, GIVPTR( CURR ) ), IWORK( INDXP ),
     $             IWORK( INDX ), INFO )
      PRMPTR( CURR+1 ) = PRMPTR( CURR ) + N
      GIVPTR( CURR+1 ) = GIVPTR( CURR+1 ) + GIVPTR( CURR )
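*
*     Solve Secular Equation.
*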
      IF( K.NE.0 ) THEN
         CALL DLAED9( K, 1, K, N, D, WORK( IS ), K, RHO, WORK( IDLMDA ),
     $                WORK( IW ), QSTORE( QPTR( CURR ) ), K, INFO )
         IF( INFO.NE.0 )
     $      GO TO 30
         IF( ICOMPQ.EQ.1 ) THEN
            CALL DGEMM( 'N', 'N', QSIZ, K, K, ONE, WORK( IQ2 ), LDQ2,
     $                  QSTORE( QPTR( CURR ) ), K, ZERO, Q, LDQ )
         END IF
         QPTR( CURR+1 ) = QPTR( CURR ) + K**2
*
*     Prepare the INDXQ sorting permutation.
*
         N1 = K
         N2 = N - K
         CALL DLAMRG( N1, N2, D, 1, -1, INDXQ )
      ELSE
         QPTR( CURR+1 ) = QPTR( CURR )
         DO 20 I = 1, N
            INDXQ( I ) = I
   20    CONTINUE
      END IF
*
   30 CONTINUE
      RETURN
*
*     End of DLAED7
*
      END