kernel panic when configure wireless

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi

When I configure the wireless interface (the system rmmods the wireless
driver and then reloads it after configuration), a kernel panic occurs at
random, and it is not easy to reproduce.
The platform is linux_2.4.17_mvl21/MIPS; the attached file is timer.c.

The Oops messages are as follows (two different faulting addresses):
===========================================================================
Unable to handle kernel paging request at virtual address 00000004,
epc == 9403fc1c, ra == 9403fa8c
Oops in fault.c:do_page_fault, line 229:
$0 : 00000000 941dc674 00000000 941dc468 000180d1 943ecf70 941dc294 00000000
$8 : 03ffffff 000fffff 941dc260 94172408 00018000 00000002 00018000 00000001
$16: 00000001 941c9620 fffffffe ffffffff 1000fc00 941c8860 941c00c0 9457e4e0
$24: 00000000 2ab86410                   94026000 94027e08 01000000 9403fa8c
Hi : 0000596a
Lo : e147fc52
epc  : 9403fc1c    Not tainted
Status: 1000fc02
Cause : 3080000c
Process swapper (pid: 0, stackpage=94026000)
Stack: 00000000 941c8860 00000001 941c0080 941c95e8 ffffffff 9403fe84 9403b890
       941ed918 a8611800 c0061da8 ffffffef 00000001 941c00c0 9403b25c ffffffef
       94026000 00000001 00000000 9403f918 7ffffffe 941c7f94 941c8864 94027eb8
       00000000 9400b478 0000000b 9403b544 00000000 9400b478 0000000b 9403b544
       9416d530 9000e310 941c8864 94003f30 9416298c 94162958 00808000 94003f30
       00000001 ...
Call Trace: [<9403fe84>] [<9403b890>] [<c0061da8>] [<9403b25c>]
[<94026000>] [<9403f918>]
 [<94027eb8>] [<9403b544>] [<9403b544>] [<9416d530>] [<9416298c>] [<94162958>]
 [<9416d0c0>] [<9416d0d8>] [<94027fe0>] [<94027fe0>] [<94026000>] [<94027f68>]
 [<94028578>] [<94028560>] [<9402802c>] [<9416dc18>] [<94027f84>] [<9416eff4>]
 [<940204b0>]

Code: 3508ffff  8ca70000  8ca20004 <ace20004> ac470000  8ca40008
008c1823  2c620100  14400020
Kernel panic: Aiee, killing interrupt handler!
------------------------------------------------------------------------------------------------------------------------------------
Unable to handle kernel paging request at virtual address 00000004,
epc == 9403f020, ra == 94130b60
Oops in fault.c:do_page_fault, line 229:
$0 : 00000000 941dc264 94630e70 00000000 94c77cc4 0000464f 000188d7 1000fc01
$8 : 00001407 0101a8c0 6901a8c0 00001407 00000001 00000001 00000001 00000000
$16: 94c77cc4 94c77c60 00004650 941d5a08 94451c4c 00000000 00000003 94451d88
$24: 94451c28 00000714                   94450000 94451bc8 9411b604 94130b60
Hi : 0000007e
Lo : 013ef214
epc  : 9403f020    Not tainted
Status: 1000fc02
Cause : 3080000c
Process wpa_authenticat (pid: 1161, stackpage=94450000)
Stack: 00000001 6901a8c0 941c8868 94125b00 94130b60 00000002 00000000 942f28e8
       6901a8c0 00000000 94c77c60 94018e20 94451d10 941329c0 00000003 94450000
       94bea2e0 94c640a0 94c77c60 9412fb70 00014287 940dd7f0 0080005d 9405e300
       0101a8c0 00001407 6901a8c0 00001407 940dda34 940d0011 940890a0 c0100d80
       00000000 00000000 94451ce0 94451d10 941d1ca0 940fd5a0 94fa600c 9412dd7c
       00000000 ...
Call Trace: [<94125b00>] [<94130b60>] [<941329c0>] [<9412fb70>]
[<940dd7f0>] [<9405e300>]
 [<940dda34>] [<940d0011>] [<940890a0>] [<c0100d80>] [<940fd5a0>] [<9412dd7c>]
 [<9411d5d0>] [<940fd5a0>] [<940e2224>] [<940d78ac>] [<940fd5a0>] [<940fd5a0>]
 [<940e2874>] [<940fd5a0>] [<9411b63c>] [<940fca88>] [<940fd5a0>] [<9411bb78>]
 [<9404a6f8>] [<941236e8>] [<940d1694>] [<9404769c>] [<940d2660>] [<940543cc>]
 [<9402e9a8>] [<9402e950>] [<9406d3d0>] [<94092bb0>] [<94092b74>] [<94071280>]
 [<9406d88c>] [<9406d754>] [<9402d428>] [<940302f8>]

Code: 00221021  8c420004  8c430000 <ac640004> ac830000  ac820004
ac440000  40016000  30e70001
Kernel panic: Aiee, killing interrupt handler!
===========================================================================


The relevant output of objdump on vmlinux is as follows:
===========================================================================
...(skip)...

ffffffff9403ef14 <add_timer>:
ffffffff9403ef14:	27bdffe8 	addiu	$sp,$sp,-24
ffffffff9403ef18:	afbf0010 	sw	$ra,16($sp)
ffffffff9403ef1c:	40076000 	mfc0	$a3,$12
ffffffff9403ef20:	00000000 	nop
ffffffff9403ef24:	34e10001 	ori	$at,$a3,0x1
ffffffff9403ef28:	38210001 	xori	$at,$at,0x1
ffffffff9403ef2c:	40816000 	mtc0	$at,$12
ffffffff9403ef30:	00000040 	sll	$zero,$zero,0x1
ffffffff9403ef34:	00000040 	sll	$zero,$zero,0x1
ffffffff9403ef38:	00000040 	sll	$zero,$zero,0x1
ffffffff9403ef3c:	8c820000 	lw	$v0,0($a0)
ffffffff9403ef40:	14400046 	bnez	$v0,ffffffff9403f05c <add_timer+0x148>
ffffffff9403ef44:	00000000 	nop
ffffffff9403ef48:	8c860008 	lw	$a2,8($a0)
ffffffff9403ef4c:	3c02941e 	lui	$v0,0x941e
ffffffff9403ef50:	8c42ce74 	lw	$v0,-12684($v0)
ffffffff9403ef54:	00c22823 	subu	$a1,$a2,$v0
ffffffff9403ef58:	2ca30100 	sltiu	$v1,$a1,256
ffffffff9403ef5c:	14600024 	bnez	$v1,ffffffff9403eff0 <add_timer+0xdc>
ffffffff9403ef60:	30c200ff 	andi	$v0,$a2,0xff
ffffffff9403ef64:	2ca24000 	sltiu	$v0,$a1,16384
ffffffff9403ef68:	10400007 	beqz	$v0,ffffffff9403ef88 <add_timer+0x74>
ffffffff9403ef6c:	00061142 	srl	$v0,$a2,0x5
ffffffff9403ef70:	304201f8 	andi	$v0,$v0,0x1f8
ffffffff9403ef74:	3c01941e 	lui	$at,0x941e
ffffffff9403ef78:	2421c46c 	addiu	$at,$at,-15252
ffffffff9403ef7c:	00221021 	addu	$v0,$at,$v0
ffffffff9403ef80:	0900fc07 	j	ffffffff9403f01c <add_timer+0x108>
ffffffff9403ef84:	8c420004 	lw	$v0,4($v0)
ffffffff9403ef88:	3c02000f 	lui	$v0,0xf
ffffffff9403ef8c:	3442ffff 	ori	$v0,$v0,0xffff
ffffffff9403ef90:	0045102b 	sltu	$v0,$v0,$a1
ffffffff9403ef94:	14400008 	bnez	$v0,ffffffff9403efb8 <add_timer+0xa4>
ffffffff9403ef98:	3c0203ff 	lui	$v0,0x3ff
ffffffff9403ef9c:	000612c2 	srl	$v0,$a2,0xb
ffffffff9403efa0:	304201f8 	andi	$v0,$v0,0x1f8
ffffffff9403efa4:	3c01941e 	lui	$at,0x941e
ffffffff9403efa8:	2421c264 	addiu	$at,$at,-15772
ffffffff9403efac:	00221021 	addu	$v0,$at,$v0
ffffffff9403efb0:	0900fc07 	j	ffffffff9403f01c <add_timer+0x108>
ffffffff9403efb4:	8c420004 	lw	$v0,4($v0)
ffffffff9403efb8:	3442ffff 	ori	$v0,$v0,0xffff
ffffffff9403efbc:	0045102b 	sltu	$v0,$v0,$a1
ffffffff9403efc0:	14400007 	bnez	$v0,ffffffff9403efe0 <add_timer+0xcc>
ffffffff9403efc4:	00061442 	srl	$v0,$a2,0x11
ffffffff9403efc8:	304201f8 	andi	$v0,$v0,0x1f8
ffffffff9403efcc:	3c01941e 	lui	$at,0x941e
ffffffff9403efd0:	2421c05c 	addiu	$at,$at,-16292
ffffffff9403efd4:	00221021 	addu	$v0,$at,$v0
ffffffff9403efd8:	0900fc07 	j	ffffffff9403f01c <add_timer+0x108>
ffffffff9403efdc:	8c420004 	lw	$v0,4($v0)
ffffffff9403efe0:	04a10009 	bgez	$a1,ffffffff9403f008 <add_timer+0xf4>
ffffffff9403efe4:	00061682 	srl	$v0,$a2,0x1a
ffffffff9403efe8:	3c02941e 	lui	$v0,0x941e
ffffffff9403efec:	8c42c670 	lw	$v0,-14736($v0)
ffffffff9403eff0:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403eff4:	3c01941e 	lui	$at,0x941e
ffffffff9403eff8:	2421c674 	addiu	$at,$at,-14732
ffffffff9403effc:	00221021 	addu	$v0,$at,$v0
ffffffff9403f000:	0900fc07 	j	ffffffff9403f01c <add_timer+0x108>
ffffffff9403f004:	8c420004 	lw	$v0,4($v0)
ffffffff9403f008:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403f00c:	3c01941e 	lui	$at,0x941e
ffffffff9403f010:	2421be54 	addiu	$at,$at,-16812
ffffffff9403f014:	00221021 	addu	$v0,$at,$v0
ffffffff9403f018:	8c420004 	lw	$v0,4($v0)
ffffffff9403f01c:	8c430000 	lw	$v1,0($v0)
ffffffff9403f020:	ac640004 	sw	$a0,4($v1)
ffffffff9403f024:	ac830000 	sw	$v1,0($a0)
ffffffff9403f028:	ac820004 	sw	$v0,4($a0)
ffffffff9403f02c:	ac440000 	sw	$a0,0($v0)
ffffffff9403f030:	40016000 	mfc0	$at,$12
ffffffff9403f034:	30e70001 	andi	$a3,$a3,0x1
ffffffff9403f038:	34210001 	ori	$at,$at,0x1
ffffffff9403f03c:	38210001 	xori	$at,$at,0x1
ffffffff9403f040:	00e13825 	or	$a3,$a3,$at
ffffffff9403f044:	40876000 	mtc0	$a3,$12
	...
(skip)
...
ffffffff9403fa2c <timer_bh>:
ffffffff9403fa2c:	27bdffe0 	addiu	$sp,$sp,-32
ffffffff9403fa30:	afbf0018 	sw	$ra,24($sp)
ffffffff9403fa34:	afb10014 	sw	$s1,20($sp)
ffffffff9403fa38:	afb00010 	sw	$s0,16($sp)
ffffffff9403fa3c:	40016000 	mfc0	$at,$12
ffffffff9403fa40:	00000000 	nop
ffffffff9403fa44:	34210001 	ori	$at,$at,0x1
ffffffff9403fa48:	38210001 	xori	$at,$at,0x1
ffffffff9403fa4c:	40816000 	mtc0	$at,$12
	...
ffffffff9403fa5c:	3c11941d 	lui	$s1,0x941d
ffffffff9403fa60:	26319620 	addiu	$s1,$s1,-27104
ffffffff9403fa64:	8e220000 	lw	$v0,0($s1)
ffffffff9403fa68:	3c03941d 	lui	$v1,0x941d
ffffffff9403fa6c:	8c639678 	lw	$v1,-27016($v1)
ffffffff9403fa70:	00438023 	subu	$s0,$v0,$v1
ffffffff9403fa74:	12000005 	beqz	$s0,ffffffff9403fa8c <timer_bh+0x60>
ffffffff9403fa78:	00701021 	addu	$v0,$v1,$s0
ffffffff9403fa7c:	3c01941d 	lui	$at,0x941d
ffffffff9403fa80:	ac229678 	sw	$v0,-27016($at)
ffffffff9403fa84:	0d00fdcd 	jal	ffffffff9403f734 <update_wall_time>
ffffffff9403fa88:	02002021 	move	$a0,$s0
ffffffff9403fa8c:	40016000 	mfc0	$at,$12
ffffffff9403fa90:	00000000 	nop
ffffffff9403fa94:	3421001f 	ori	$at,$at,0x1f
ffffffff9403fa98:	3821001e 	xori	$at,$at,0x1e
ffffffff9403fa9c:	40816000 	mtc0	$at,$12
ffffffff9403faa0:	3c02941d 	lui	$v0,0x941d
ffffffff9403faa4:	8c429630 	lw	$v0,-27088($v0)
ffffffff9403faa8:	00501023 	subu	$v0,$v0,$s0
ffffffff9403faac:	3c01941d 	lui	$at,0x941d
ffffffff9403fab0:	ac229630 	sw	$v0,-27088($at)
ffffffff9403fab4:	04410032 	bgez	$v0,ffffffff9403fb80 <timer_bh+0x154>
ffffffff9403fab8:	244201f4 	addiu	$v0,$v0,500
ffffffff9403fabc:	3c01941d 	lui	$at,0x941d
ffffffff9403fac0:	0d00fe77 	jal	ffffffff9403f9dc <count_active_tasks>
ffffffff9403fac4:	ac229630 	sw	$v0,-27088($at)
ffffffff9403fac8:	00021880 	sll	$v1,$v0,0x2
ffffffff9403facc:	00621821 	addu	$v1,$v1,$v0
ffffffff9403fad0:	000318c0 	sll	$v1,$v1,0x3
ffffffff9403fad4:	3c05941d 	lui	$a1,0x941d
ffffffff9403fad8:	8ca5964c 	lw	$a1,-27060($a1)
ffffffff9403fadc:	00621821 	addu	$v1,$v1,$v0
ffffffff9403fae0:	00052100 	sll	$a0,$a1,0x4
ffffffff9403fae4:	00852023 	subu	$a0,$a0,$a1
ffffffff9403fae8:	00042080 	sll	$a0,$a0,0x2
ffffffff9403faec:	00852023 	subu	$a0,$a0,$a1
ffffffff9403faf0:	000420c0 	sll	$a0,$a0,0x3
ffffffff9403faf4:	00852023 	subu	$a0,$a0,$a1
ffffffff9403faf8:	00641821 	addu	$v1,$v1,$a0
ffffffff9403fafc:	00031880 	sll	$v1,$v1,0x2
ffffffff9403fb00:	00031ac2 	srl	$v1,$v1,0xb
ffffffff9403fb04:	3c01941d 	lui	$at,0x941d
ffffffff9403fb08:	ac23964c 	sw	$v1,-27060($at)
ffffffff9403fb0c:	00021900 	sll	$v1,$v0,0x4
ffffffff9403fb10:	3c05941d 	lui	$a1,0x941d
ffffffff9403fb14:	8ca59650 	lw	$a1,-27056($a1)
ffffffff9403fb18:	00621821 	addu	$v1,$v1,$v0
ffffffff9403fb1c:	00052180 	sll	$a0,$a1,0x6
ffffffff9403fb20:	00852023 	subu	$a0,$a0,$a1
ffffffff9403fb24:	00042100 	sll	$a0,$a0,0x4
ffffffff9403fb28:	00852023 	subu	$a0,$a0,$a1
ffffffff9403fb2c:	00641821 	addu	$v1,$v1,$a0
ffffffff9403fb30:	00031840 	sll	$v1,$v1,0x1
ffffffff9403fb34:	00031ac2 	srl	$v1,$v1,0xb
ffffffff9403fb38:	3c01941d 	lui	$at,0x941d
ffffffff9403fb3c:	ac239650 	sw	$v1,-27056($at)
ffffffff9403fb40:	00021840 	sll	$v1,$v0,0x1
ffffffff9403fb44:	00621821 	addu	$v1,$v1,$v0
ffffffff9403fb48:	00031880 	sll	$v1,$v1,0x2
ffffffff9403fb4c:	3c05941d 	lui	$a1,0x941d
ffffffff9403fb50:	8ca59654 	lw	$a1,-27052($a1)
ffffffff9403fb54:	00621823 	subu	$v1,$v1,$v0
ffffffff9403fb58:	000521c0 	sll	$a0,$a1,0x7
ffffffff9403fb5c:	00852023 	subu	$a0,$a0,$a1
ffffffff9403fb60:	00042080 	sll	$a0,$a0,0x2
ffffffff9403fb64:	00852021 	addu	$a0,$a0,$a1
ffffffff9403fb68:	00042080 	sll	$a0,$a0,0x2
ffffffff9403fb6c:	00852021 	addu	$a0,$a0,$a1
ffffffff9403fb70:	00832021 	addu	$a0,$a0,$v1
ffffffff9403fb74:	000422c2 	srl	$a0,$a0,0xb
ffffffff9403fb78:	3c01941d 	lui	$at,0x941d
ffffffff9403fb7c:	ac249654 	sw	$a0,-27052($at)
ffffffff9403fb80:	40016000 	mfc0	$at,$12
ffffffff9403fb84:	00000000 	nop
ffffffff9403fb88:	34210001 	ori	$at,$at,0x1
ffffffff9403fb8c:	38210001 	xori	$at,$at,0x1
ffffffff9403fb90:	40816000 	mtc0	$at,$12
	...
ffffffff9403fba0:	8e220000 	lw	$v0,0($s1)
ffffffff9403fba4:	3c03941e 	lui	$v1,0x941e
ffffffff9403fba8:	8c63ce74 	lw	$v1,-12684($v1)
ffffffff9403fbac:	00431023 	subu	$v0,$v0,$v1
ffffffff9403fbb0:	04400098 	bltz	$v0,ffffffff9403fe14 <timer_bh+0x3e8>
ffffffff9403fbb4:	00000000 	nop
ffffffff9403fbb8:	3c02941e 	lui	$v0,0x941e
ffffffff9403fbbc:	8c42c670 	lw	$v0,-14736($v0)
ffffffff9403fbc0:	14400060 	bnez	$v0,ffffffff9403fd44 <timer_bh+0x318>
ffffffff9403fbc4:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403fbc8:	240d0001 	li	$t5,1
ffffffff9403fbcc:	240f0001 	li	$t7,1
ffffffff9403fbd0:	3c0e941e 	lui	$t6,0x941e
ffffffff9403fbd4:	8dcece74 	lw	$t6,-12684($t6)
ffffffff9403fbd8:	3c0b9417 	lui	$t3,0x9417
ffffffff9403fbdc:	256b2404 	addiu	$t3,$t3,9220
ffffffff9403fbe0:	8d6a0000 	lw	$t2,0($t3)
ffffffff9403fbe4:	8d420000 	lw	$v0,0($t2)
ffffffff9403fbe8:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403fbec:	24420004 	addiu	$v0,$v0,4
ffffffff9403fbf0:	01423021 	addu	$a2,$t2,$v0
ffffffff9403fbf4:	8cc50000 	lw	$a1,0($a2)
ffffffff9403fbf8:	50a60042 	0x50a60042
ffffffff9403fbfc:	acc60000 	sw	$a2,0($a2)
ffffffff9403fc00:	01c06021 	move	$t4,$t6
ffffffff9403fc04:	3c09000f 	lui	$t1,0xf
ffffffff9403fc08:	3529ffff 	ori	$t1,$t1,0xffff
ffffffff9403fc0c:	3c0803ff 	lui	$t0,0x3ff
ffffffff9403fc10:	3508ffff 	ori	$t0,$t0,0xffff
ffffffff9403fc14:	8ca70000 	lw	$a3,0($a1)
ffffffff9403fc18:	8ca20004 	lw	$v0,4($a1)
ffffffff9403fc1c:	ace20004 	sw	$v0,4($a3)
ffffffff9403fc20:	ac470000 	sw	$a3,0($v0)
ffffffff9403fc24:	8ca40008 	lw	$a0,8($a1)
ffffffff9403fc28:	008c1823 	subu	$v1,$a0,$t4
ffffffff9403fc2c:	2c620100 	sltiu	$v0,$v1,256
ffffffff9403fc30:	14400020 	bnez	$v0,ffffffff9403fcb4 <timer_bh+0x288>
ffffffff9403fc34:	308200ff 	andi	$v0,$a0,0xff
ffffffff9403fc38:	2c624000 	sltiu	$v0,$v1,16384
ffffffff9403fc3c:	10400007 	beqz	$v0,ffffffff9403fc5c <timer_bh+0x230>
ffffffff9403fc40:	00041142 	srl	$v0,$a0,0x5
ffffffff9403fc44:	304201f8 	andi	$v0,$v0,0x1f8
ffffffff9403fc48:	3c01941e 	lui	$at,0x941e
ffffffff9403fc4c:	2421c46c 	addiu	$at,$at,-15252
ffffffff9403fc50:	00221021 	addu	$v0,$at,$v0
ffffffff9403fc54:	0900ff38 	j	ffffffff9403fce0 <timer_bh+0x2b4>
ffffffff9403fc58:	8c430004 	lw	$v1,4($v0)
ffffffff9403fc5c:	0123102b 	sltu	$v0,$t1,$v1
ffffffff9403fc60:	14400008 	bnez	$v0,ffffffff9403fc84 <timer_bh+0x258>
ffffffff9403fc64:	0103102b 	sltu	$v0,$t0,$v1
ffffffff9403fc68:	000412c2 	srl	$v0,$a0,0xb
ffffffff9403fc6c:	304201f8 	andi	$v0,$v0,0x1f8
ffffffff9403fc70:	3c01941e 	lui	$at,0x941e
ffffffff9403fc74:	2421c264 	addiu	$at,$at,-15772
ffffffff9403fc78:	00221021 	addu	$v0,$at,$v0
ffffffff9403fc7c:	0900ff38 	j	ffffffff9403fce0 <timer_bh+0x2b4>
ffffffff9403fc80:	8c430004 	lw	$v1,4($v0)
ffffffff9403fc84:	14400007 	bnez	$v0,ffffffff9403fca4 <timer_bh+0x278>
ffffffff9403fc88:	00041442 	srl	$v0,$a0,0x11
ffffffff9403fc8c:	304201f8 	andi	$v0,$v0,0x1f8
ffffffff9403fc90:	3c01941e 	lui	$at,0x941e
ffffffff9403fc94:	2421c05c 	addiu	$at,$at,-16292
ffffffff9403fc98:	00221021 	addu	$v0,$at,$v0
ffffffff9403fc9c:	0900ff38 	j	ffffffff9403fce0 <timer_bh+0x2b4>
ffffffff9403fca0:	8c430004 	lw	$v1,4($v0)
ffffffff9403fca4:	04610009 	bgez	$v1,ffffffff9403fccc <timer_bh+0x2a0>
ffffffff9403fca8:	00041682 	srl	$v0,$a0,0x1a
ffffffff9403fcac:	3c02941e 	lui	$v0,0x941e
ffffffff9403fcb0:	8c42c670 	lw	$v0,-14736($v0)
ffffffff9403fcb4:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403fcb8:	3c01941e 	lui	$at,0x941e
ffffffff9403fcbc:	2421c674 	addiu	$at,$at,-14732
ffffffff9403fcc0:	00221021 	addu	$v0,$at,$v0
ffffffff9403fcc4:	0900ff38 	j	ffffffff9403fce0 <timer_bh+0x2b4>
ffffffff9403fcc8:	8c430004 	lw	$v1,4($v0)
ffffffff9403fccc:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403fcd0:	3c01941e 	lui	$at,0x941e
ffffffff9403fcd4:	2421be54 	addiu	$at,$at,-16812
ffffffff9403fcd8:	00221021 	addu	$v0,$at,$v0
ffffffff9403fcdc:	8c430004 	lw	$v1,4($v0)
ffffffff9403fce0:	8c620000 	lw	$v0,0($v1)
ffffffff9403fce4:	ac450004 	sw	$a1,4($v0)
ffffffff9403fce8:	aca20000 	sw	$v0,0($a1)
ffffffff9403fcec:	aca30004 	sw	$v1,4($a1)
ffffffff9403fcf0:	ac650000 	sw	$a1,0($v1)
ffffffff9403fcf4:	00e02821 	move	$a1,$a3
ffffffff9403fcf8:	54a6ffc7 	0x54a6ffc7
ffffffff9403fcfc:	8ca70000 	lw	$a3,0($a1)
ffffffff9403fd00:	acc60000 	sw	$a2,0($a2)
ffffffff9403fd04:	acc60004 	sw	$a2,4($a2)
ffffffff9403fd08:	8d420000 	lw	$v0,0($t2)
ffffffff9403fd0c:	24420001 	addiu	$v0,$v0,1
ffffffff9403fd10:	3042003f 	andi	$v0,$v0,0x3f
ffffffff9403fd14:	ad420000 	sw	$v0,0($t2)
ffffffff9403fd18:	8d630000 	lw	$v1,0($t3)
ffffffff9403fd1c:	8c620000 	lw	$v0,0($v1)
ffffffff9403fd20:	144f0005 	bne	$v0,$t7,ffffffff9403fd38 <timer_bh+0x30c>
ffffffff9403fd24:	00000000 	nop
ffffffff9403fd28:	25ad0001 	addiu	$t5,$t5,1
ffffffff9403fd2c:	2da20005 	sltiu	$v0,$t5,5
ffffffff9403fd30:	1440ffab 	bnez	$v0,ffffffff9403fbe0 <timer_bh+0x1b4>
ffffffff9403fd34:	256b0004 	addiu	$t3,$t3,4
ffffffff9403fd38:	3c02941e 	lui	$v0,0x941e
ffffffff9403fd3c:	8c42c670 	lw	$v0,-14736($v0)
ffffffff9403fd40:	000210c0 	sll	$v0,$v0,0x3
ffffffff9403fd44:	3c10941e 	lui	$s0,0x941e
ffffffff9403fd48:	2610c674 	addiu	$s0,$s0,-14732
ffffffff9403fd4c:	02028021 	addu	$s0,$s0,$v0
ffffffff9403fd50:	8e050000 	lw	$a1,0($s0)
ffffffff9403fd54:	10b00020 	beq	$a1,$s0,ffffffff9403fdd8 <timer_bh+0x3ac>
ffffffff9403fd58:	00000000 	nop
ffffffff9403fd5c:	8e220000 	lw	$v0,0($s1)
ffffffff9403fd60:	8ca30008 	lw	$v1,8($a1)
ffffffff9403fd64:	00431023 	subu	$v0,$v0,$v1
ffffffff9403fd68:	0440001b 	bltz	$v0,ffffffff9403fdd8 <timer_bh+0x3ac>
ffffffff9403fd6c:	00000000 	nop
ffffffff9403fd70:	8ca60010 	lw	$a2,16($a1)
ffffffff9403fd74:	8ca30000 	lw	$v1,0($a1)
ffffffff9403fd78:	10600004 	beqz	$v1,ffffffff9403fd8c <timer_bh+0x360>
ffffffff9403fd7c:	8ca4000c 	lw	$a0,12($a1)
ffffffff9403fd80:	8ca20004 	lw	$v0,4($a1)
ffffffff9403fd84:	ac620004 	sw	$v0,4($v1)
ffffffff9403fd88:	ac430000 	sw	$v1,0($v0)
ffffffff9403fd8c:	aca00004 	sw	$zero,4($a1)
ffffffff9403fd90:	aca00000 	sw	$zero,0($a1)
ffffffff9403fd94:	40016000 	mfc0	$at,$12
ffffffff9403fd98:	00000000 	nop
ffffffff9403fd9c:	3421001f 	ori	$at,$at,0x1f
ffffffff9403fda0:	3821001e 	xori	$at,$at,0x1e
ffffffff9403fda4:	40816000 	mtc0	$at,$12
ffffffff9403fda8:	00c0f809 	jalr	$a2
ffffffff9403fdac:	00000000 	nop
ffffffff9403fdb0:	40016000 	mfc0	$at,$12
ffffffff9403fdb4:	00000000 	nop
ffffffff9403fdb8:	34210001 	ori	$at,$at,0x1
ffffffff9403fdbc:	38210001 	xori	$at,$at,0x1
ffffffff9403fdc0:	40816000 	mtc0	$at,$12
	...
(skip)
...
===========================================================================

Thanks.

-- 
Best Regards,

Lee
/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/timex.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/hrtime.h>
#include <linux/compiler.h>
#include <asm/signal.h>

#include <linux/trace.h>
#include <asm/uaccess.h>
#ifdef CONFIG_TIMER_STATS
#include <asm/timepeg.h>
#else
#define TP_START(a)
#define TP_STOP(a)
#define DTIMEPEG(a,b)
#endif

/*
 * Timekeeping variables
 */

long tick = (1000000 + HZ/2) / HZ;	/* timer interrupt period */

/* The current time */
struct timeval xtime __attribute__ ((aligned (16)));
/*
 * This spinlock protect us from races in SMP while playing with xtime. -arca
 */
rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
/*
 * This atomic prevents re-entry of the run_timer_list and has the side
 * effect of shifting conflict runs to the "owning" cpu.
 */

static atomic_t timer_tasklet_lock = ATOMIC_INIT(-1);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */

DECLARE_TASK_QUEUE(tq_timer);
DECLARE_TASK_QUEUE(tq_immediate);

/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
long time_phase;			/* phase offset (scaled us)	*/
long time_freq = ((1000000 + HZ/2) % HZ - HZ/2) << SHIFT_USEC;
					/* frequency offset (scaled ppm)*/
long time_adj;				/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/

/* presumably the outstanding adjtime() correction and its per-tick step --
 * they are only declared here; consumers are outside this chunk */
long time_adjust;
long time_adjust_step;

/* NOTE(review): 'event' is not referenced anywhere in this chunk --
 * purpose not evident from here */
unsigned long event;

extern int do_setitimer(int, struct itimerval *, struct itimerval *);

/* 64-bit jiffies counter; volatile because it is updated asynchronously
 * (the updater is outside this chunk) */
volatile u64 jiffies_64=0;

/* kernel profiling state; set up by profiling code outside this chunk */
unsigned int * prof_buffer;
unsigned long prof_len;
unsigned long prof_shift;

/*
 * Event timer code
 */
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * ifdef eliminator macro...
 */
#define IF_HIGH_RES(a) a
#ifndef CONFIG_NEW_TIMERLIST
#define CONFIG_NEW_TIMERLIST
#endif
#else
#define IF_HIGH_RES(a)
#endif
#if ! defined(CONFIG_CASCADING_TIMERS) && ! defined(CONFIG_NEW_TIMERLIST)
#define CONFIG_CASCADING_TIMERS
#endif /* CONFIG_HIGH_RES_TIMERS */
#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#ifdef CONFIG_NEW_TIMERLIST
#define IF_NEW_TIMER_LIST(a) a
#ifndef CONFIG_NEW_TIMER_LISTSIZE
#define CONFIG_NEW_TIMER_LISTSIZE 512
#endif /* CONFIG_NEW_TIMERLIST */
#define NEW_TVEC_SIZE CONFIG_NEW_TIMER_LISTSIZE
#define NEW_TVEC_MASK (NEW_TVEC_SIZE - 1)
static struct list_head new_tvec[NEW_TVEC_SIZE];
#else  /* CONFIG_NEW_TIMERLIST */
#define IF_NEW_TIMER_LIST(a)
#endif /* CONFIG_NEW_TIMERLIST */
#ifdef CONFIG_CASCADING_TIMERS
/* One inner level of the cascading timer wheel: TVN_SIZE (64) buckets
 * plus the index of the bucket to be cascaded next. */
struct timer_vec {
	int index;
	struct list_head vec[TVN_SIZE];
};

/* The root level of the wheel: TVR_SIZE (256) buckets, serviced directly
 * by run_timer_list. */
struct timer_vec_root {
	int index;
	struct list_head vec[TVR_SIZE];
};

/* The five wheel levels, finest (tv1) to coarsest (tv5). */
static struct timer_vec tv5;
static struct timer_vec tv4;
static struct timer_vec tv3;
static struct timer_vec tv2;
static struct timer_vec_root tv1;

/* Uniform view of all levels so the cascade can walk them by index;
 * tv1 is cast since timer_vec_root differs only in bucket count. */
static struct timer_vec * const tvecs[] = {
	(struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
};

#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
#endif /* CONFIG_CASCADING_TIMERS */

/*
 * Reset every bucket of the timer data structure to an empty list head.
 * Must run once, before any timer is queued.
 */
void init_timervecs (void)
{
	int idx;

#ifdef CONFIG_CASCADING_TIMERS
	for (idx = 0; idx < TVN_SIZE; idx++) {
		INIT_LIST_HEAD(&tv2.vec[idx]);
		INIT_LIST_HEAD(&tv3.vec[idx]);
		INIT_LIST_HEAD(&tv4.vec[idx]);
		INIT_LIST_HEAD(&tv5.vec[idx]);
	}
	for (idx = 0; idx < TVR_SIZE; idx++)
		INIT_LIST_HEAD(&tv1.vec[idx]);
#endif /* CONFIG_CASCADING_TIMERS */
#ifdef CONFIG_NEW_TIMERLIST
	for (idx = 0; idx < NEW_TVEC_SIZE; idx++)
		INIT_LIST_HEAD(&new_tvec[idx]);
#endif /* CONFIG_NEW_TIMERLIST */
}

/* The timer subsystem's own clock: the jiffies value up to which pending
 * timers have been dispatched (compared against 'expires' when queueing). */
static unsigned long timer_jiffies;

/* Initialize both explicitly - let's try to have them in the same cache line */
spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;

#if defined( CONFIG_SMP) ||  defined(CONFIG_HIGH_RES_TIMERS)
/* Timer whose handler is currently executing, NULL when none;
 * set/cleared via timer_enter()/timer_exit() below. */
volatile struct timer_list * volatile running_timer;
#define timer_enter(t) do { running_timer = t; mb(); } while (0)
#define timer_exit() do { running_timer = NULL; } while (0)
#define timer_is_inactive() (running_timer == NULL)
#else
/* UP without high-res timers: no handler-tracking needed. */
#define timer_enter(t)		do { } while (0)
#define timer_exit()		do { } while (0)
#define timer_is_inactive()     1
#endif

#ifdef CONFIG_SMP
/* Spin until 'timer''s handler is no longer running on another CPU. */
#define timer_is_running(t) (running_timer == t)
#define timer_synchronize(t) while (timer_is_running(t)) barrier()
#endif



/*
 * internal_add_timer - link a pending timer into the active timer
 * data structure.  Caller must hold timerlist_lock with interrupts
 * disabled (see comment below).  The two #ifdef sections are
 * alternative implementations: the sorted per-bucket list
 * (CONFIG_NEW_TIMERLIST) and the classic cascading wheel
 * (CONFIG_CASCADING_TIMERS); enabling both at once would redeclare
 * 'expires' and fail to compile.
 */
static inline void internal_add_timer(struct timer_list *timer)
{
	/*
	 * must be cli-ed when calling this
	 */
#ifdef CONFIG_NEW_TIMERLIST
                unsigned long expires = timer->expires;
                IF_HIGH_RES(int sub_expires = timer->sub_expires;)
                int indx;
                struct list_head *pos,*list_start;

                if ( time_before(expires, timer_jiffies) ){
                        /*
                         * already expired, schedule for next tick 
                         * would like to do better here
                         * Actually this now works just fine with the
                         * back up of timer_jiffies in "run_timer_list".
                         * Note that this puts the timer on a list other
                         * than the one it indexes to.  We don't want to
                         * change the expires value in the timer as it is
                         * used by the repeat code in setitimer and the
                         * POSIX timers code.
                         */
                        expires = timer_jiffies;
                        IF_HIGH_RES(sub_expires = 0);
                }
                        
                indx =  expires & NEW_TVEC_MASK;
                /*
                 * NOTE(review): when expires - timer_jiffies ==
                 * NEW_TVEC_SIZE the target bucket is the one currently
                 * being serviced -- confirm this boundary ('<=' rather
                 * than '<') is intended.
                 */
                if ((expires - timer_jiffies) <= NEW_TVEC_SIZE) {
#ifdef CONFIG_HIGH_RES_TIMERS
                        unsigned long jiffies_f;
                        /*
                         * The high diff bits are the same, goes to the head of 
                         * the list, sort on sub_expire.
                         */
                        for (pos = (list_start = &new_tvec[indx])->next; 
                             pos != list_start; 
                             pos = pos->next){
                                struct timer_list *tmr = 
                                        list_entry(pos,
                                                   struct timer_list,
                                                   list);
                                if ((tmr->sub_expires >= sub_expires) ||
                                    (tmr->expires != expires)){
                                        break;
                                }
                        }
                        list_add_tail(&timer->list, pos);
                        /*
                         * Notes to me.  Use jiffies here instead of 
                         * timer_jiffies to prevent adding unneeded interrupts.
                         * Timer_is_inactive() is false if we are currently 
                         * actively dispatching timers.  Since we are under
                         * the same spin lock, it being false means that 
                         * it has dropped the spinlock to call the timer
                         * function, which could well be who called us.
                         * In any case, we don't need a new interrupt as
                         * the timer dispatch code (run_timer_list) will
                         * pick this up when the function it is calling 
                         * returns.
                         */
                        if ( expires == (jiffies_f = jiffies) && 
                             list_start->next == &timer->list &&
                             timer_is_inactive()) {
                             schedule_next_int(jiffies_f, sub_expires,1);
                        }
#else
                        /* No sub-jiffy ordering: new timer goes to the
                         * head of its bucket. */
                        pos = (&new_tvec[indx])->next;
                        list_add_tail(&timer->list, pos);
#endif /* CONFIG_HIGH_RES_TIMERS */
                }else{
                        /*
                         * The high diff bits differ, search from the tail
                         * The for will pick up an empty list.
                         */
                         for (pos = (list_start = &new_tvec[indx])->prev; 
                             pos != list_start; 
                             pos = pos->prev){
                                struct timer_list *tmr = 
                                        list_entry(pos,
                                                   struct timer_list,
                                                   list);
                                if (time_after(tmr->expires, expires)){
                                        continue;
                                }
                                IF_HIGH_RES(
                                            if ((tmr->expires != expires) ||
                                                (tmr->sub_expires < sub_expires)) {
                                                    break;
                                            }
                                            )
                        }
                        list_add(&timer->list, pos);
                }
                                
#endif /* CONFIG_NEW_TIMERLIST */
#ifdef CONFIG_CASCADING_TIMERS
	/* Pick the wheel level by how far in the future the timer is;
	 * nearer expiries land in finer-grained vectors. */
	unsigned long expires = timer->expires;
	unsigned long idx = expires - timer_jiffies;
	struct list_head * vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec =  tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/* can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = tv1.vec + tv1.index;
	} else if (idx <= 0xffffffffUL) {
		int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = tv5.vec + i;
	} else {
		/* Can only get here on architectures with 64-bit jiffies */
		INIT_LIST_HEAD(&timer->list);
		return;
	}
	/*
	 * Timers are FIFO!
	 */
	list_add(&timer->list, vec->prev);
#endif /* CONFIG_CASCADING_TIMERS */
}

/*
 * add_timer - queue a timer that is not currently pending.
 * Adding a timer that is already queued is a caller bug: the timer is
 * left untouched and a diagnostic is printed (after the lock is dropped).
 */
void add_timer(struct timer_list *timer)
{
	unsigned long flags;
	int already_pending;

	spin_lock_irqsave(&timerlist_lock, flags);
	already_pending = timer_pending(timer);
	if (!already_pending)
		internal_add_timer(timer);
	spin_unlock_irqrestore(&timerlist_lock, flags);
	if (already_pending)
		printk("bug: kernel timer added twice at %p.\n",
				__builtin_return_address(0));
}

/*
 * detach_timer - unlink a timer from its list if it is queued.
 * Returns 1 if it was pending and has been removed, 0 otherwise.
 * Caller must hold timerlist_lock.
 */
static inline int detach_timer (struct timer_list *timer)
{
	if (timer_pending(timer)) {
		list_del(&timer->list);
		return 1;
	}
	return 0;
}

/*
 * mod_timer - update a timer's expiry and (re)queue it atomically.
 * Returns nonzero if the timer was pending before the call.
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	unsigned long flags;
	int was_pending;

	spin_lock_irqsave(&timerlist_lock, flags);
	timer->expires = expires;
	was_pending = detach_timer(timer);
	internal_add_timer(timer);
	spin_unlock_irqrestore(&timerlist_lock, flags);
	return was_pending;
}
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * mod_timer_hr - high-resolution counterpart of mod_timer().
 * Atomically updates both the jiffies expiry and the sub-jiffy expiry,
 * then (re)queues the timer.  Returns nonzero if it was pending.
 */
int mod_timer_hr(struct timer_list *timer,
                 unsigned long expires,
                 long sub_expires)
{
	unsigned long flags;
	int was_pending;

	spin_lock_irqsave(&timerlist_lock, flags);
	timer->expires = expires;
	timer->sub_expires = sub_expires;
	was_pending = detach_timer(timer);
	internal_add_timer(timer);
	spin_unlock_irqrestore(&timerlist_lock, flags);
	return was_pending;
}
#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * del_timer - remove a timer from the pending list (if queued) and
 * poison its list pointers.  Returns nonzero if it was pending.
 * Note: on SMP the handler may still be running; see del_timer_sync().
 */
int del_timer(struct timer_list * timer)
{
	unsigned long flags;
	int was_pending;

	spin_lock_irqsave(&timerlist_lock, flags);
	was_pending = detach_timer(timer);
	timer->list.next = NULL;
	timer->list.prev = NULL;
	spin_unlock_irqrestore(&timerlist_lock, flags);
	return was_pending;
}

#ifdef CONFIG_SMP
/* Spin until global_bh_lock is unlocked -- presumably serializing
 * against bottom-half execution on other CPUs; verify against the
 * lock's users elsewhere in the kernel. */
void sync_timers(void)
{
	spin_unlock_wait(&global_bh_lock);
}

/*
 * SMP specific function to delete periodic timer.
 * Caller must disable by some means restarting the timer
 * for new. Upon exit the timer is not queued and handler is not running
 * on any CPU. It returns number of times, which timer was deleted
 * (for reference counting).
 */

int del_timer_sync(struct timer_list * timer)
{
	int ret = 0;

	for (;;) {
		unsigned long flags;
		int running;

		spin_lock_irqsave(&timerlist_lock, flags);
		ret += detach_timer(timer);
		timer->list.next = timer->list.prev = 0;
		running = timer_is_running(timer);
		spin_unlock_irqrestore(&timerlist_lock, flags);

		if (!running)
			break;

		timer_synchronize(timer);
	}

	return ret;
}
#endif

#ifdef CONFIG_CASCADING_TIMERS

/*
 * Move every timer on the current slot of vector "tv" down one level
 * by re-inserting it with internal_add_timer(), then advance the
 * vector's index.  Caller must hold timerlist_lock.
 */
static inline void cascade_timers(struct timer_vec *tv)
{
	/* cascade all the timers from tv up one level */
	struct list_head *head, *curr, *next;

	head = tv->vec + tv->index;
	curr = head->next;
	/*
	 * We are removing _all_ timers from the list, so we don't  have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (curr != head) {
		struct timer_list *tmp;

		tmp = list_entry(curr, struct timer_list, list);
		/* Save the successor first: internal_add_timer() rewrites
		 * tmp's link fields. */
		next = curr->next;
		list_del(curr); // not needed
		internal_add_timer(tmp);
		curr = next;
	}
	INIT_LIST_HEAD(head);
	tv->index = (tv->index + 1) & TVN_MASK;
}
#endif /* CONFIG_CASCADING_TIMERS */

/*
 * Run every timer that has expired, advancing timer_jiffies up to the
 * current time.  Called from timer_bh() with irqs enabled; takes
 * timerlist_lock (irqs off) and drops it around each handler call, so
 * handlers may add or modify timers.
 */
static inline void run_timer_list(void)
{
        IF_HIGH_RES( unsigned long jiffies_f;
                     long sub_jiff = -1;
                     long sub_jiffie_f);
	spin_lock_irq(&timerlist_lock);
#ifdef CONFIG_HIGH_RES_TIMERS
        /* Snapshot jiffies and the sub-jiffie counter under xtime_lock,
         * then normalize so sub_jiffie_f < cycles_per_jiffies. */
        read_lock(&xtime_lock);
        jiffies_f = jiffies;
        sub_jiffie_f = sub_jiffie() + quick_get_cpuctr();
        read_unlock(&xtime_lock);
        while ( unlikely(sub_jiffie_f >= cycles_per_jiffies)){
                sub_jiffie_f -= cycles_per_jiffies;
                jiffies_f++;
        }
	while ((long)(jiffies_f - timer_jiffies) >= 0) {
#else
	while ((long)(jiffies - timer_jiffies) >= 0) {
#endif /* CONFIG_HIGH_RES_TIMERS */
		struct list_head *head, *curr;
#ifdef CONFIG_CASCADING_TIMERS
		/* tv1 wrapped: pull the next batch of timers down from the
		 * higher-level vectors, cascading further while each level
		 * wraps in turn. */
		if (!tv1.index) {
			int n = 1;
			do {
				cascade_timers(tvecs[n]);
			} while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
		}

		head = tv1.vec + tv1.index;
#endif /* CONFIG_CASCADING_TIMERS */
                IF_NEW_TIMER_LIST(
                                  head = new_tvec + 
                                  (timer_jiffies  & NEW_TVEC_MASK);
                                  )
                /*
                 * Note that we never move "head" but continue to
                 * pick the first entry from it.  This allows new
                 * entries to be inserted while we unlock for the
                 * function call below.
                 */
repeat:
		curr = head->next;
		if (curr != head) {
			struct timer_list *timer;
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(curr, struct timer_list, list);
#ifdef CONFIG_HIGH_RES_TIMERS
                        /*
                         * This would be simpler if we never got behind
                         * i.e. if timer_jiffies == jiffies, we could
                         * drop one of the tests.  Since we may get 
                         * behind, (in fact we don't up date until
                         * we are behind to allow sub_jiffie entries)
                         * we need a way to negate the sub_jiffie
                         * test in that case...
                         */
                        if (time_before(timer->expires, jiffies_f)||
                            ((timer->expires == jiffies_f) &&
                             timer->sub_expires <= sub_jiffie_f))
#else
                        if (time_before_eq(timer->expires, jiffies))
#endif /*  CONFIG_HIGH_RES_TIMERS */
                                {fn = timer->function;
 			data= timer->data;

			/* fn/data are copied first: once detached and the
			 * lock is dropped, the handler may free or reuse
			 * the timer structure. */
			detach_timer(timer);
			timer->list.next = timer->list.prev = NULL;
			timer_enter(timer);
			spin_unlock_irq(&timerlist_lock);
			fn(data);
			spin_lock_irq(&timerlist_lock);
			timer_exit();
			goto repeat;
		}
#ifdef CONFIG_HIGH_RES_TIMERS
                        else{
                                /*
                                 * The new timer list is not always emptied
                                 * here as it contains:
                                 * a.) entries (list size)^N*jiffies out and
                                 * b.) entries that match in jiffies, but have
                                 *     sub_expire times further out than now.
                                 */
                                 if (timer->expires == jiffies_f ){
                                        sub_jiff = timer->sub_expires;
                                }
                        }
#endif /* CONFIG_HIGH_RES_TIMERS */
                }
		++timer_jiffies; 
#ifdef CONFIG_CASCADING_TIMERS
		tv1.index = (tv1.index + 1) & TVR_MASK;
#endif
	}
        /*
         * It is faster to back out the last bump, than to prevent it.
         * This allows zero time inserts as well as sub_jiffie values in
         * the current jiffie.  Will not work for the cascade as tv1.index
         * also needs adjusting. 
         */
        IF_NEW_TIMER_LIST(--timer_jiffies);

        IF_HIGH_RES(if (schedule_next_int( jiffies_f, sub_jiff, 0)){
                /*
                 * If schedule_next_int says the time has passed
                 * bump the tasklet lock so we go round again
                 */
                atomic_inc(&timer_tasklet_lock);
                });

	spin_unlock_irq(&timerlist_lock);
}

/* NOTE(review): appears to guard the task queues run below (tq_timer,
 * tq_immediate) — confirm against queue_task() users. */
spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;

/* Bottom half: run everything queued on tq_timer. */
void tqueue_bh(void)
{
	run_task_queue(&tq_timer);
}

/* Bottom half: run everything queued on tq_immediate. */
void immediate_bh(void)
{
	run_task_queue(&tq_immediate);
}

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills ([email protected]) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 */
/*
 * Called once per wall-clock second (from update_wall_time) to run the
 * NTP state machine: error bound growth, leap-second handling, and the
 * phase/frequency adjustment (time_adj) applied to subsequent ticks.
 */
static void second_overflow(void)
{
    long ltemp;

    /* Bump the maxerror field */
    time_maxerror += time_tolerance >> SHIFT_USEC;
    if ( time_maxerror > NTP_PHASE_LIMIT ) {
	time_maxerror = NTP_PHASE_LIMIT;
	time_status |= STA_UNSYNC;
    }

    /*
     * Leap second processing. If in leap-insert state at
     * the end of the day, the system clock is set back one
     * second; if in leap-delete state, the system clock is
     * set ahead one second. The microtime() routine or
     * external clock driver will insure that reported time
     * is always monotonic. The ugly divides should be
     * replaced.
     */
    switch (time_state) {

    case TIME_OK:
	if (time_status & STA_INS)
	    time_state = TIME_INS;
	else if (time_status & STA_DEL)
	    time_state = TIME_DEL;
	break;

    case TIME_INS:
	/* 86400 = seconds per day: fires at 00:00 UTC */
	if (xtime.tv_sec % 86400 == 0) {
	    xtime.tv_sec--;
	    time_state = TIME_OOP;
	    printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
	}
	break;

    case TIME_DEL:
	if ((xtime.tv_sec + 1) % 86400 == 0) {
	    xtime.tv_sec++;
	    time_state = TIME_WAIT;
	    printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
	}
	break;

    case TIME_OOP:
	time_state = TIME_WAIT;
	break;

    case TIME_WAIT:
	if (!(time_status & (STA_INS | STA_DEL)))
	    time_state = TIME_OK;
    }

    /*
     * Compute the phase adjustment for the next second. In
     * PLL mode, the offset is reduced by a fixed factor
     * times the time constant. In FLL mode the offset is
     * used directly. In either mode, the maximum phase
     * adjustment for each second is clamped so as to spread
     * the adjustment over not more than the number of
     * seconds between updates.
     */
    if (time_offset < 0) {
	ltemp = -time_offset;
	if (!(time_status & STA_FLL))
	    ltemp >>= SHIFT_KG + time_constant;
	if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
	    ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
	time_offset += ltemp;
	time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
    } else {
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
	    ltemp >>= SHIFT_KG + time_constant;
	if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
	    ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
    }

    /*
     * Compute the frequency estimate and additional phase
     * adjustment due to frequency error for the next
     * second. When the PPS signal is engaged, gnaw on the
     * watchdog counter and update the frequency computed by
     * the pll and the PPS signal.
     */
    pps_valid++;
    if (pps_valid == PPS_VALID) {	/* PPS signal lost */
	pps_jitter = MAXTIME;
	pps_stabil = MAXFREQ;
	time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			 STA_PPSWANDER | STA_PPSERROR);
    }
    ltemp = time_freq + pps_freq;
    if (ltemp < 0)
	time_adj -= -ltemp >>
	    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
    else
	time_adj += ltemp >>
	    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
    /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
     * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
     */
    if (time_adj < 0)
	time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
    else
	time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
}

/* in the NTP reference this is called "hardclock()" */
/* in the NTP reference this is called "hardclock()" */
/*
 * Advance xtime by one tick, applying (a) any adjtime() slew, limited
 * to +/- tickadj microseconds per tick, and (b) the NTP phase
 * adjustment accumulated in time_adj (fixed-point; whole microseconds
 * are transferred once |time_phase| reaches FINEUSEC).
 */
static void update_wall_time_one_tick(void)
{
	if ( (time_adjust_step = time_adjust) != 0 ) {
	    /* We are doing an adjtime thing. 
	     *
	     * Prepare time_adjust_step to be within bounds.
	     * Note that a positive time_adjust means we want the clock
	     * to run faster.
	     *
	     * Limit the amount of the step to be in the range
	     * -tickadj .. +tickadj
	     */
	     if (time_adjust > tickadj)
		time_adjust_step = tickadj;
	     else if (time_adjust < -tickadj)
		time_adjust_step = -tickadj;
	     
	    /* Reduce by this step the amount of time left  */
	    time_adjust -= time_adjust_step;
	}
	xtime.tv_usec += tick + time_adjust_step;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		long ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		xtime.tv_usec -= ltemp;
	}
	else if (time_phase >= FINEUSEC) {
		long ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		xtime.tv_usec += ltemp;
	}
}

/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing this this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks
 */
/*
 * Advance the wall clock by "ticks" jiffies.
 *
 * Using a loop looks inefficient, but "ticks" is usually just one (we
 * shouldn't be losing ticks, we're doing this this way mainly for
 * interrupt latency reasons).
 *
 * Fixes over the original:
 *  - a while-loop instead of do/while guards against ticks == 0
 *    (the old do { ticks--; ... } while (ticks) would wrap the
 *    unsigned counter and spin ~2^32 times);
 *  - tv_usec is normalized with a loop, so more than one accumulated
 *    second (possible after many lost ticks) is folded correctly
 *    instead of leaving tv_usec >= 1000000.
 */
static void update_wall_time(unsigned long ticks)
{
	while (ticks) {
		ticks--;
		update_wall_time_one_tick();
	}

	while (xtime.tv_usec >= 1000000) {
		xtime.tv_usec -= 1000000;
		xtime.tv_sec++;
		second_overflow();	/* run the per-second NTP machinery */
	}
}

/*
 * Charge "user" and "system" ticks to p's accumulated process times
 * and enforce the RLIMIT_CPU soft/hard limits (SIGXCPU once per second
 * past the soft limit, SIGKILL past the hard limit).
 */
static inline void do_process_times(struct task_struct *p,
	unsigned long user, unsigned long system)
{
	unsigned long total;

	p->times.tms_utime += user;
	p->times.tms_stime += system;
	total = p->times.tms_utime + p->times.tms_stime;
	if (total / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
		/* Send SIGXCPU every second.. */
		if (!(total % HZ))
			send_sig(SIGXCPU, p, 1);
		/* and SIGKILL when we go over max.. */
		if (total / HZ > p->rlim[RLIMIT_CPU].rlim_max)
			send_sig(SIGKILL, p, 1);
	}
}

/*
 * Charge "ticks" of user time against p's ITIMER_VIRTUAL timer and
 * deliver SIGVTALRM when it expires, reloading from it_virt_incr.
 *
 * Fix: the original computed "it_virt -= ticks" and fired only when
 * the result was exactly zero.  If ticks overshot the remaining value
 * (lost ticks), the unsigned subtraction wrapped to a huge count and
 * the signal was never delivered.  Fire whenever ticks reaches or
 * passes the remaining time instead.
 */
static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
{
	unsigned long it_virt = p->it_virt_value;

	if (it_virt) {
		if (it_virt <= ticks) {
			/* expired (possibly overshot): reload and signal */
			it_virt = p->it_virt_incr;
			send_sig(SIGVTALRM, p, 1);
		} else {
			it_virt -= ticks;
		}
		p->it_virt_value = it_virt;
	}
}

/*
 * Charge one tick against p's ITIMER_PROF timer; on expiry deliver
 * SIGPROF and reload the countdown from it_prof_incr.
 */
static inline void do_it_prof(struct task_struct *p)
{
	unsigned long remaining = p->it_prof_value;

	if (!remaining)
		return;		/* profiling timer not armed */

	remaining--;
	if (remaining == 0) {
		remaining = p->it_prof_incr;
		send_sig(SIGPROF, p, 1);
	}
	p->it_prof_value = remaining;
}

/*
 * Account one timer tick's worth of user/system time to task p on the
 * given cpu: per-CPU counters, process times + RLIMIT_CPU checks, and
 * the ITIMER_VIRTUAL / ITIMER_PROF interval timers.
 */
void update_one_process(struct task_struct *p, unsigned long user,
			unsigned long system, int cpu)
{
	p->per_cpu_utime[cpu] += user;
	p->per_cpu_stime[cpu] += system;
	do_process_times(p, user, system);
	do_it_virt(p, user);	/* virtual timer counts user time only */
	do_it_prof(p);
}	

/*
 * Called from the timer interrupt handler to charge one tick to the current 
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	/* exactly one of user_tick/system is 1 for this tick */
	int cpu = smp_processor_id(), system = user_tick ^ 1;

	update_one_process(p, user_tick, system, cpu);
	if (p->pid) {
#ifdef CONFIG_RTSCHED
                /* SCHED_FIFO and the idle(s) have counters set to -100, 
                 * so we won't count them, seems like a good idea for 
                 * both schedulers, but, being pure...
                 */
		if (p->counter >= 0 && --p->counter <= 0) {
#else
		if (--p->counter <= 0) {
#endif
			/* timeslice used up: request a reschedule */
			p->counter = 0;
			p->need_resched = 1;
		}
		if (p->nice > 0)
			kstat.per_cpu_nice[cpu] += user_tick;
		else
			kstat.per_cpu_user[cpu] += user_tick;
		kstat.per_cpu_system[cpu] += system;
	} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
		/* pid 0 (idle): only charge system time spent in irq/bh */
		kstat.per_cpu_system[cpu] += system;
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
/*
 * Count runnable + uninterruptible tasks, scaled by FIXED_1 so the
 * result is in the fixed-point format used by the load average.
 */
static unsigned long count_active_tasks(void)
{
	unsigned long total = 0;
	struct task_struct *task;

	read_lock(&tasklist_lock);
	for_each_task(task) {
		if (task->state == TASK_RUNNING ||
		    (task->state & TASK_UNINTERRUPTIBLE))
			total += FIXED_1;
	}
	read_unlock(&tasklist_lock);

	return total;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seems to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 */
unsigned long avenrun[3];	/* 1-, 5- and 15-minute load averages, FIXED_1 fixed-point */

/*
 * Recompute the 1/5/15-minute load averages once every LOAD_FREQ
 * ticks; "ticks" is how many jiffies have elapsed since the last call.
 */
static inline void calc_load(unsigned long ticks)
{
	static int count = LOAD_FREQ;	/* ticks left until next sample */
	unsigned long active;		/* fixed-point runnable count */

	count -= ticks;
	if (count >= 0)
		return;

	count += LOAD_FREQ;
	active = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active);
	CALC_LOAD(avenrun[1], EXP_5, active);
	CALC_LOAD(avenrun[2], EXP_15, active);
}

/* jiffies at the most recent update of wall time (written under xtime_lock) */
unsigned long wall_jiffies;


/*
 * Fold all jiffies not yet accounted for into the wall clock and
 * return how many there were (the caller feeds that to calc_load).
 *
 * Runs from the timer bottom half, where local irqs are known to be
 * enabled, so write_lock_irq (no flags save/restore) is enough. -arca
 */
static inline unsigned long update_times(void)
{
	unsigned long pending;

	write_lock_irq(&xtime_lock);
	pending = jiffies - wall_jiffies;
	if (pending) {
		wall_jiffies += pending;
		update_wall_time(pending);
	}
	write_unlock_irq(&xtime_lock);

	return pending; /* This is dum.  change calc_load to a timer */
}
#ifdef CONFIG_HIGH_RES_TIMERS
/*
 * Fold outstanding jiffies into the wall clock for the high-res code.
 *
 * To get the time of day really right we need everyone on the same
 * jiffie (because of adj_time etc.).  Must be called under
 * write(xtime_lock); keeping the locking external lets the caller
 * include sub-jiffie updates in the same critical section.
 */
void update_real_wall_time(void)
{
	unsigned long pending = jiffies - wall_jiffies;

	if (pending) {
		wall_jiffies += pending;
		update_wall_time(pending);
	}
}
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
 * Timer bottom half: bring wall time up to date, refresh the load
 * average, then run expired timers.
 *
 * Fix: removed the leftover debug printk("haha-1/2/3") calls — they
 * ran on every timer bottom half, flooding the console/log and adding
 * latency to the timer path.
 */
void timer_bh(void)
{
	unsigned long ticks;

	TRACE_EVENT(TRACE_EV_KERNEL_TIMER, NULL);
	ticks = update_times();
	calc_load(ticks);
	run_timer_list();
}

/*
 * timer_tasklet_lock starts at -1.  0, then means it is cool to continue.
 * If another cpu bumps it while the first is still in timer_bh, it will
 * be detected on exit, and we will run it again.  But multiple entries
 * are not needed, just once for all the "hits" while we are in timer_bh.
 */
void timer_softirq(struct softirq_action* a)
{

         /* -1 -> 0: we are the first in; any other value means another
          * CPU is already inside timer_bh and will notice our bump. */
         if ( ! atomic_inc_and_test(&timer_tasklet_lock)){
                 //kgdb_ts(atomic_read(&timer_tasklet_lock),0);
                 return;
         }
         do {
                 //kgdb_ts(atomic_read(&timer_tasklet_lock),1);
                 /* reset to 0 so concurrent raisers are detected below */
                 atomic_set(&timer_tasklet_lock,0);
                 timer_bh();
	//	 mark_bh(TIMER_BH);
         /* if someone bumped the lock while we ran, go round again;
          * the -1 restores the idle state when nobody did */
         }while ( ! atomic_add_negative(-1,&timer_tasklet_lock));
 
         //kgdb_ts(atomic_read(&timer_tasklet_lock),2);
 }

/*
 * Hardware timer interrupt entry: advance jiffies, charge the tick to
 * the current task (UP only), and kick the timer softirq / task queue
 * bottom halves.
 */
void do_timer(struct pt_regs *regs)
{
	update_jiffies();
#ifndef CONFIG_SMP
	/* SMP process accounting uses the local APIC timer */
	/* With HIGH_RES timers, IF_HIGH_RES emits the "if" so time is
	 * charged only when a whole new jiffie started; without them
	 * the call below is unconditional. */
	IF_HIGH_RES( if (new_jiffie()))
	                 update_process_times(user_mode(regs));

#endif
	/*mark_bh(TIMER_BH);*/
        raise_softirq( RUN_TIMER_LIST );
	if (TQ_ACTIVE(tq_timer))
		mark_bh(TQUEUE_BH);
}

#if !defined(__alpha__) && !defined(__ia64__)

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if (it_old.it_value.tv_usec)
		oldalarm++;
	return oldalarm;
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */
 
asmlinkage long sys_getpid(void)
{
	/* getpid() returns the thread group id (the POSIX process id),
	 * not current->pid.  SMP safe: current->tgid doesn't change. */
	return current->tgid;
}

/*
 * This is not strictly SMP safe: p_opptr could change
 * from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer: we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * The "mb()" macro is a memory barrier - a synchronizing
 * event. It also makes sure that gcc doesn't optimize
 * away the necessary memory references.. The barrier doesn't
 * have to have all that strong semantics: on x86 we don't
 * really require a synchronizing instruction, for example.
 * The barrier is more important for code generation than
 * for any real memory ordering semantics (even if there is
 * a small window for a race, using the old pointer is
 * harmless for a while).
 */
/*
 * getppid(2): lock-free optimistic read of the parent's pid (see the
 * long comment above for why re-reading p_opptr after a barrier is
 * sufficient).
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct * me = current;
	struct task_struct * parent;

	parent = me->p_opptr;
	for (;;) {
		pid = parent->pid;
#if CONFIG_SMP
{
		/* re-read the parent pointer; retry if it changed under us */
		struct task_struct *old = parent;
		mb();
		parent = me->p_opptr;
		if (old != parent)
			continue;
}
#endif
		break;
	}
	return pid;
}

/* Real user id of the current task. */
asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

/* Effective user id of the current task. */
asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

/* Real group id of the current task. */
asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

/* Effective group id of the current task. */
asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return  current->egid;
}

#endif

/* Thread ID - the internal kernel "pid" */
/* Thread ID - the internal kernel "pid" (contrast sys_getpid -> tgid) */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}

#if 0  
// This #if 0 is to keep the pretty printer/ formatter happy so the indents will
// correct below.  
// The NANOSLEEP_ENTRY macro is defined in  asm/signal.h and
// is structured to allow code as well as entry definitions, so that when
// we get control back here the entry parameters will be available as expected.
// Some systems may find these paramerts in other ways than as entry parms, 
// for example, struct pt_regs *regs is defined in i386 as the address of the
// first parameter, where as other archs pass it as one of the paramerters.
asmlinkage long sys_nanosleep(void)
{
#endif
        NANOSLEEP_ENTRY(	struct timespec t;
                                unsigned long expire;)


                // The following code expects rqtp, rmtp to be available as a result of
                // the above macro.  Also any regs needed for the _do_signal() macro 
                // shoule be set up here.
	if(copy_from_user(&t, rqtp, sizeof(struct timespec)))
		return -EFAULT;

	/* reject nanoseconds outside [0, 1e9) and negative seconds */
	if (t.tv_nsec >= 1000000000L || t.tv_nsec < 0 || t.tv_sec < 0)
		return -EINVAL;


	if (t.tv_sec == 0 && t.tv_nsec <= 2000000L &&
	    current->policy != SCHED_OTHER)
	{
		/*
		 * Short delay requests up to 2 ms will be handled with
		 * high precision by a busy wait for all real-time processes.
		 *
		 * Its important on SMP not to do this holding locks.
		 */
		udelay((t.tv_nsec + 999) / 1000);
		return 0;
	}

	/* +1 jiffie (when t is nonzero) guarantees we sleep at least as
	 * long as requested */
	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
	do {
	current->state = TASK_INTERRUPTIBLE;
        } while((expire = schedule_timeout(expire)) && !_do_signal());

	if (expire) {
		/* interrupted by a signal: report the unslept remainder */
		if (rmtp) {
			jiffies_to_timespec(expire, &t);
			if (copy_to_user(rmtp, &t, sizeof(struct timespec)))
				return -EFAULT;
		}
		return -EINTR;
	}
	return 0;
}


[Index of Archives]     [Kernel Newbies]     [Netfilter]     [Bugtraq]     [Photo]     [Stuff]     [Gimp]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Video 4 Linux]     [Linux for the blind]     [Linux Resources]
  Powered by Linux