This is the mail archive of the systemtap@sourceware.org mailing list for the systemtap project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

Re: loc2-test and stap disagree with each other


Roland McGrath wrote:
Here's the location list info

[43c0e6] variable
name "prev"
decl_file 1
decl_line 2867
type [42eaaf]
location location list [ 80ad5]

[ 80ad5] 0xc00000000036c540..0xc00000000036c540 [ 0] reg31
0xc00000000036c540..0xc00000000036c70c [ 0] reg27
0xc00000000036c790..0xc00000000036c7ec [ 0] reg31
0xc00000000036c9bc..0xc00000000036cbb0 [ 0] reg31
0xc00000000036cd54..0xc00000000036cdc8 [ 0] reg31
0xc00000000036cdcc..0xc00000000036ce70 [ 0] reg31
0xc00000000036d180..0xc00000000036d230 [ 0] reg31
0xc00000000036d248..0xc00000000036d278 [ 0] reg31
0xc00000000036d288..0xc00000000036d2c4 [ 0] reg31

Here's the assembly listing for schedule; the probe is inserted at the offset

The location list uses absolute addresses. You gave a disassembly listing that only gives addresses relative to the start of the function. Please post something with consistent use of offsets so it's possible to match up the addresses.


Thanks,
Roland
Here's the assembly listing for schedule() with absolute addresses. The probe point should be at

0xc00000000036d048


c00000000036c490 <.schedule>: c00000000036c490: 7c 08 02 a6 mflr r0 c00000000036c494: fb c1 ff f0 std r30,-16(r1) c00000000036c498: fb e1 ff f8 std r31,-8(r1) c00000000036c49c: 7f e8 02 a6 mflr r31 c00000000036c4a0: fa 01 ff 80 std r16,-128(r1) c00000000036c4a4: fa 21 ff 88 std r17,-120(r1) c00000000036c4a8: fa 41 ff 90 std r18,-112(r1) c00000000036c4ac: fa 61 ff 98 std r19,-104(r1) c00000000036c4b0: fa 81 ff a0 std r20,-96(r1) c00000000036c4b4: fa a1 ff a8 std r21,-88(r1) c00000000036c4b8: fa c1 ff b0 std r22,-80(r1) c00000000036c4bc: fa e1 ff b8 std r23,-72(r1) c00000000036c4c0: fb 01 ff c0 std r24,-64(r1) c00000000036c4c4: fb 21 ff c8 std r25,-56(r1) c00000000036c4c8: fb 41 ff d0 std r26,-48(r1) c00000000036c4cc: fb 61 ff d8 std r27,-40(r1) c00000000036c4d0: fb 81 ff e0 std r28,-32(r1) c00000000036c4d4: fb a1 ff e8 std r29,-24(r1) c00000000036c4d8: f8 01 00 10 std r0,16(r1) c00000000036c4dc: f8 21 fe f1 stdu r1,-272(r1) c00000000036c4e0: eb c2 c1 b8 ld r30,-15944(r2) c00000000036c4e4: 60 00 00 00 nop c00000000036c4e8: e8 8d 01 98 ld r4,408(r13) c00000000036c4ec: e8 04 01 10 ld r0,272(r4) c00000000036c4f0: 2f a0 00 00 cmpdi cr7,r0,0 c00000000036c4f4: 40 9e 00 38 bne- cr7,c00000000036c52c <.schedule+0x9c> register unsigned long sp asm("r1");

	/* gcc4, at least, is smart enough to turn this into a single
	 * rlwinm for ppc32 and clrrdi for ppc64 */
	return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
c00000000036c4f8:	78 29 04 64 	rldicr  r9,r1,0,49
c00000000036c4fc:	e8 a9 00 16 	lwa     r5,20(r9)
c00000000036c500:	54 a0 18 7e 	rlwinm  r0,r5,3,1,31
c00000000036c504:	54 00 e8 3e 	rotlwi  r0,r0,29
c00000000036c508:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036c50c:	41 be 00 20 	beq+    cr7,c00000000036c52c <.schedule+0x9c>
c00000000036c510:	e8 c4 01 36 	lwa     r6,308(r4)
c00000000036c514:	e8 7e 81 18 	ld      r3,-32488(r30)
c00000000036c518:	38 84 03 04 	addi    r4,r4,772
c00000000036c51c:	4b cf 28 39 	bl      c00000000005ed54 <.printk>
c00000000036c520:	60 00 00 00 	nop
c00000000036c524:	4b ca 26 09 	bl      c00000000000eb2c <.dump_stack>
c00000000036c528:	60 00 00 00 	nop
c00000000036c52c:	7f e4 fb 78 	mr      r4,r31
c00000000036c530:	38 60 00 02 	li      r3,2
	register unsigned long sp asm("r1");

	/* gcc4, at least, is smart enough to turn this into a single
	 * rlwinm for ppc32 and clrrdi for ppc64 */
	return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
c00000000036c534:	78 30 04 64 	rldicr  r16,r1,0,49
c00000000036c538:	4b cf 30 11 	bl      c00000000005f548 <.profile_hit>
c00000000036c53c:	60 00 00 00 	nop
c00000000036c540:	eb 6d 01 98 	ld      r27,408(r13)
c00000000036c544:	e8 1b 00 2a 	lwa     r0,40(r27)
c00000000036c548:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036c54c:	41 bc 00 0c 	blt+    cr7,c00000000036c558 <.schedule+0xc8>
c00000000036c550:	48 00 34 f9 	bl      c00000000036fa48 <.__release_kernel_lock>
c00000000036c554:	60 00 00 00 	nop
c00000000036c558:	e9 2d 00 30 	ld      r9,48(r13)
c00000000036c55c:	e8 1e 80 00 	ld      r0,-32768(r30)
c00000000036c560:	7f 40 4a 14 	add     r26,r0,r9
c00000000036c564:	e8 1a 00 50 	ld      r0,80(r26)
c00000000036c568:	7f bb 00 00 	cmpd    cr7,r27,r0
c00000000036c56c:	40 be 00 24 	bne+    cr7,c00000000036c590 <.schedule+0x100>
c00000000036c570:	e8 1b 00 00 	ld      r0,0(r27)
c00000000036c574:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036c578:	41 9e 00 18 	beq-    cr7,c00000000036c590 <.schedule+0x100>
c00000000036c57c:	e8 7e 81 20 	ld      r3,-32480(r30)
c00000000036c580:	4b cf 27 d5 	bl      c00000000005ed54 <.printk>
c00000000036c584:	60 00 00 00 	nop
c00000000036c588:	4b ca 25 a5 	bl      c00000000000eb2c <.dump_stack>
c00000000036c58c:	60 00 00 00 	nop
c00000000036c590:	4b cb 53 89 	bl      c000000000021918 <.sched_clock>
c00000000036c594:	60 00 00 00 	nop
c00000000036c598:	e9 3b 00 60 	ld      r9,96(r27)
c00000000036c59c:	3c 00 3b 9a 	lis     r0,15258
c00000000036c5a0:	3e 20 3b 9a 	lis     r17,15258
c00000000036c5a4:	7c 75 1b 78 	mr      r21,r3
c00000000036c5a8:	60 00 c9 ff 	ori     r0,r0,51711
c00000000036c5ac:	62 31 ca 00 	ori     r17,r17,51712
c00000000036c5b0:	7d 29 18 50 	subf    r9,r9,r3
c00000000036c5b4:	7f a9 00 00 	cmpd    cr7,r9,r0
c00000000036c5b8:	41 9d 00 14 	bgt-    cr7,c00000000036c5cc <.schedule+0x13c>
c00000000036c5bc:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036c5c0:	3a 20 00 00 	li      r17,0
c00000000036c5c4:	41 9c 00 08 	blt-    cr7,c00000000036c5cc <.schedule+0x13c>
c00000000036c5c8:	7d 31 4b 78 	mr      r17,r9
c00000000036c5cc:	e9 3b 00 58 	ld      r9,88(r27)
c00000000036c5d0:	3c 00 00 98 	lis     r0,152
c00000000036c5d4:	60 00 96 80 	ori     r0,r0,38528
c00000000036c5d8:	7d 29 03 92 	divdu   r9,r9,r0
c00000000036c5dc:	38 00 00 64 	li      r0,100
c00000000036c5e0:	1d 29 00 0a 	mulli   r9,r9,10
c00000000036c5e4:	7e 89 03 92 	divdu   r20,r9,r0
c00000000036c5e8:	2f b4 00 00 	cmpdi   cr7,r20,0
c00000000036c5ec:	40 9e 00 08 	bne-    cr7,c00000000036c5f4 <.schedule+0x164>
c00000000036c5f0:	3a 80 00 01 	li      r20,1
c00000000036c5f4:	7f 43 d3 78 	mr      r3,r26
c00000000036c5f8:	48 00 32 c9 	bl      c00000000036f8c0 <._spin_lock_irq>
c00000000036c5fc:	60 00 00 00 	nop
c00000000036c600:	e8 1b 00 18 	ld      r0,24(r27)
c00000000036c604:	78 08 ef e3 	rldicl. r8,r0,61,63
c00000000036c608:	41 a2 00 0c 	beq+    c00000000036c614 <.schedule+0x184>
c00000000036c60c:	38 00 00 20 	li      r0,32
c00000000036c610:	f8 1b 00 00 	std     r0,0(r27)
c00000000036c614:	e8 1b 00 00 	ld      r0,0(r27)
c00000000036c618:	3a 5b 02 50 	addi    r18,r27,592
c00000000036c61c:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036c620:	41 9e 00 60 	beq-    cr7,c00000000036c680 <.schedule+0x1f0>
c00000000036c624:	e8 10 00 16 	lwa     r0,20(r16)
c00000000036c628:	78 09 27 e3 	rldicl. r9,r0,36,63
c00000000036c62c:	40 82 00 54 	bne-    c00000000036c680 <.schedule+0x1f0>
c00000000036c630:	e8 1b 00 00 	ld      r0,0(r27)
c00000000036c634:	3a 5b 02 48 	addi    r18,r27,584
c00000000036c638:	78 0a 07 e1 	clrldi. r10,r0,63
c00000000036c63c:	41 82 00 20 	beq-    c00000000036c65c <.schedule+0x1cc>
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
c00000000036c640:	e9 3b 00 08 	ld      r9,8(r27)
/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
			       __const__ volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
c00000000036c644:	e8 09 00 80 	ld      r0,128(r9)
c00000000036c648:	78 0b f7 e3 	rldicl. r11,r0,62,63
c00000000036c64c:	41 a2 00 10 	beq+    c00000000036c65c <.schedule+0x1cc>
c00000000036c650:	38 00 00 00 	li      r0,0
c00000000036c654:	f8 1b 00 00 	std     r0,0(r27)
c00000000036c658:	48 00 00 28 	b       c00000000036c680 <.schedule+0x1f0>
c00000000036c65c:	e8 1b 00 00 	ld      r0,0(r27)
c00000000036c660:	2f a0 00 02 	cmpdi   cr7,r0,2
c00000000036c664:	40 9e 00 10 	bne-    cr7,c00000000036c674 <.schedule+0x1e4>
c00000000036c668:	e9 3a 00 30 	ld      r9,48(r26)
c00000000036c66c:	39 29 00 01 	addi    r9,r9,1
c00000000036c670:	f9 3a 00 30 	std     r9,48(r26)
c00000000036c674:	7f 63 db 78 	mr      r3,r27
c00000000036c678:	7f 44 d3 78 	mr      r4,r26
c00000000036c67c:	4b ce 66 09 	bl      c000000000052c84 <.deactivate_task>
c00000000036c680:	e8 1a 00 08 	ld      r0,8(r26)
c00000000036c684:	a2 ed 00 0a 	lhz     r23,10(r13)
c00000000036c688:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036c68c:	40 be 03 30 	bne+    cr7,c00000000036c9bc <.schedule+0x52c>
c00000000036c690:	e8 1e 80 08 	ld      r0,-32760(r30)
c00000000036c694:	7a eb 4d a4 	rldicr  r11,r23,9,54
c00000000036c698:	e9 3e 80 00 	ld      r9,-32768(r30)
c00000000036c69c:	7d 6b 02 14 	add     r11,r11,r0
c00000000036c6a0:	e8 0b 00 30 	ld      r0,48(r11)
c00000000036c6a4:	7d 29 02 14 	add     r9,r9,r0
c00000000036c6a8:	eb a9 12 38 	ld      r29,4664(r9)
c00000000036c6ac:	48 00 01 08 	b       c00000000036c7b4 <.schedule+0x324>
c00000000036c6b0:	e8 1d 00 5e 	lwa     r0,92(r29)
c00000000036c6b4:	78 08 ff e3 	rldicl. r8,r0,63,63
c00000000036c6b8:	41 82 00 f8 	beq-    c00000000036c7b0 <.schedule+0x320>
c00000000036c6bc:	38 00 00 00 	li      r0,0
c00000000036c6c0:	90 01 00 70 	stw     r0,112(r1)
c00000000036c6c4:	60 00 00 00 	nop
c00000000036c6c8:	60 00 00 00 	nop
c00000000036c6cc:	e8 1d 00 5e 	lwa     r0,92(r29)
c00000000036c6d0:	78 09 cf e3 	rldicl. r9,r0,57,63
c00000000036c6d4:	41 82 00 0c 	beq-    c00000000036c6e0 <.schedule+0x250>
c00000000036c6d8:	38 00 00 01 	li      r0,1
c00000000036c6dc:	90 01 00 70 	stw     r0,112(r1)
c00000000036c6e0:	7f a3 eb 78 	mr      r3,r29
c00000000036c6e4:	7e e4 bb 78 	mr      r4,r23
c00000000036c6e8:	38 a1 00 78 	addi    r5,r1,120
c00000000036c6ec:	38 c0 00 02 	li      r6,2
c00000000036c6f0:	38 e1 00 70 	addi    r7,r1,112
c00000000036c6f4:	4b ce 74 15 	bl      c000000000053b08 <.find_busiest_group>
c00000000036c6f8:	2f a3 00 00 	cmpdi   cr7,r3,0
c00000000036c6fc:	41 9e 00 94 	beq-    cr7,c00000000036c790 <.schedule+0x300>
c00000000036c700:	38 80 00 02 	li      r4,2
c00000000036c704:	4b ce 73 2d 	bl      c000000000053a30 <.find_busiest_queue>
c00000000036c708:	7c 7f 1b 79 	mr.     r31,r3
c00000000036c70c:	41 82 00 84 	beq-    c00000000036c790 <.schedule+0x300>
c00000000036c710:	7f e0 d2 78 	xor     r0,r31,r26
c00000000036c714:	7c 00 00 74 	cntlzd  r0,r0
c00000000036c718:	78 00 d1 82 	rldicl  r0,r0,58,6
c00000000036c71c:	0b 00 00 00 	tdnei   r0,0
c00000000036c720:	e8 1f 00 08 	ld      r0,8(r31)
c00000000036c724:	2b a0 00 01 	cmpldi  cr7,r0,1
c00000000036c728:	40 9d 00 44 	ble-    cr7,c00000000036c76c <.schedule+0x2dc>
c00000000036c72c:	7f 43 d3 78 	mr      r3,r26
c00000000036c730:	7f e4 fb 78 	mr      r4,r31
c00000000036c734:	4b ce 92 ad 	bl      c0000000000559e0 <.double_lock_balance>
c00000000036c738:	e8 c1 00 78 	ld      r6,120(r1)
c00000000036c73c:	7f 43 d3 78 	mr      r3,r26
c00000000036c740:	7e e4 bb 78 	mr      r4,r23
c00000000036c744:	7f e5 fb 78 	mr      r5,r31
c00000000036c748:	7f a7 eb 78 	mr      r7,r29
c00000000036c74c:	39 00 00 02 	li      r8,2
c00000000036c750:	39 20 00 00 	li      r9,0
c00000000036c754:	4b ce 77 45 	bl      c000000000053e98 <.move_tasks>
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036c758:	7c 20 04 ac 	lwsync
c00000000036c75c:	2f a3 00 00 	cmpdi   cr7,r3,0
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036c760:	38 00 00 00 	li      r0,0
c00000000036c764:	90 1f 00 00 	stw     r0,0(r31)
c00000000036c768:	40 9e 00 20 	bne-    cr7,c00000000036c788 <.schedule+0x2f8>
c00000000036c76c:	80 01 00 70 	lwz     r0,112(r1)
c00000000036c770:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036c774:	40 9e 00 3c 	bne-    cr7,c00000000036c7b0 <.schedule+0x320>
c00000000036c778:	e8 1d 00 5e 	lwa     r0,92(r29)
c00000000036c77c:	78 08 cf e3 	rldicl. r8,r0,57,63
c00000000036c780:	41 82 00 30 	beq-    c00000000036c7b0 <.schedule+0x320>
c00000000036c784:	48 00 00 38 	b       c00000000036c7bc <.schedule+0x32c>
c00000000036c788:	90 1d 00 6c 	stw     r0,108(r29)
c00000000036c78c:	48 00 00 30 	b       c00000000036c7bc <.schedule+0x32c>
c00000000036c790:	80 01 00 70 	lwz     r0,112(r1)
c00000000036c794:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036c798:	40 9e 00 10 	bne-    cr7,c00000000036c7a8 <.schedule+0x318>
c00000000036c79c:	e8 1d 00 5e 	lwa     r0,92(r29)
c00000000036c7a0:	78 09 cf e3 	rldicl. r9,r0,57,63
c00000000036c7a4:	40 82 00 18 	bne-    c00000000036c7bc <.schedule+0x32c>
c00000000036c7a8:	38 00 00 00 	li      r0,0
c00000000036c7ac:	90 1d 00 6c 	stw     r0,108(r29)
c00000000036c7b0:	eb bd 00 00 	ld      r29,0(r29)
c00000000036c7b4:	2f bd 00 00 	cmpdi   cr7,r29,0
c00000000036c7b8:	40 9e fe f8 	bne+    cr7,c00000000036c6b0 <.schedule+0x220>
c00000000036c7bc:	e8 1a 00 08 	ld      r0,8(r26)
c00000000036c7c0:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036c7c4:	40 9e 06 14 	bne-    cr7,c00000000036cdd8 <.schedule+0x948>
c00000000036c7c8:	f8 1a 00 38 	std     r0,56(r26)
c00000000036c7cc:	60 00 00 00 	nop
c00000000036c7d0:	60 00 00 00 	nop
c00000000036c7d4:	60 00 00 00 	nop
c00000000036c7d8:	e8 1e 80 08 	ld      r0,-32760(r30)
c00000000036c7dc:	7a eb 4d a4 	rldicr  r11,r23,9,54
c00000000036c7e0:	39 40 00 00 	li      r10,0
c00000000036c7e4:	e9 3e 80 00 	ld      r9,-32768(r30)
c00000000036c7e8:	eb fa 00 50 	ld      r31,80(r26)
c00000000036c7ec:	7d 6b 02 14 	add     r11,r11,r0
c00000000036c7f0:	e8 0b 00 30 	ld      r0,48(r11)
c00000000036c7f4:	7d 29 02 14 	add     r9,r9,r0
c00000000036c7f8:	e9 29 12 38 	ld      r9,4664(r9)
c00000000036c7fc:	48 00 00 18 	b       c00000000036c814 <.schedule+0x384>
c00000000036c800:	e8 09 00 5e 	lwa     r0,92(r9)
c00000000036c804:	78 0b cf e3 	rldicl. r11,r0,57,63
c00000000036c808:	41 82 00 08 	beq-    c00000000036c810 <.schedule+0x380>
c00000000036c80c:	7d 2a 4b 78 	mr      r10,r9
c00000000036c810:	e9 29 00 00 	ld      r9,0(r9)
c00000000036c814:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036c818:	40 9e ff e8 	bne+    cr7,c00000000036c800 <.schedule+0x370>
c00000000036c81c:	2f aa 00 00 	cmpdi   cr7,r10,0
c00000000036c820:	41 9e 01 8c 	beq-    cr7,c00000000036c9ac <.schedule+0x51c>
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036c824:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036c828:	91 3a 00 00 	stw     r9,0(r26)

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036c82c:	38 61 00 80 	addi    r3,r1,128
c00000000036c830:	38 80 00 80 	li      r4,128
c00000000036c834:	38 a0 00 00 	li      r5,0
c00000000036c838:	e8 0a 00 10 	ld      r0,16(r10)
c00000000036c83c:	e9 2a 00 18 	ld      r9,24(r10)
c00000000036c840:	f8 01 00 80 	std     r0,128(r1)
c00000000036c844:	f9 21 00 88 	std     r9,136(r1)
c00000000036c848:	48 00 00 30 	b       c00000000036c878 <.schedule+0x3e8>
c00000000036c84c:	e9 3e 80 08 	ld      r9,-32760(r30)
c00000000036c850:	e8 7e 80 00 	ld      r3,-32768(r30)
c00000000036c854:	7d 20 4a 14 	add     r9,r0,r9
c00000000036c858:	e8 09 00 30 	ld      r0,48(r9)
c00000000036c85c:	7c 63 02 14 	add     r3,r3,r0
c00000000036c860:	48 00 29 ad 	bl      c00000000036f20c <._spin_lock>
c00000000036c864:	60 00 00 00 	nop

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036c868:	38 bd 00 01 	addi    r5,r29,1
c00000000036c86c:	7f 83 e3 78 	mr      r3,r28
c00000000036c870:	38 80 00 80 	li      r4,128
c00000000036c874:	7c a5 07 b4 	extsw   r5,r5
c00000000036c878:	4b cc 77 a9 	bl      c000000000034020 <.find_next_bit>
c00000000036c87c:	60 00 00 00 	nop
c00000000036c880:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036c884:	40 9d 00 08 	ble-    cr7,c00000000036c88c <.schedule+0x3fc>
c00000000036c888:	38 60 00 80 	li      r3,128
c00000000036c88c:	7c 7d 07 b4 	extsw   r29,r3
c00000000036c890:	3b 81 00 80 	addi    r28,r1,128
c00000000036c894:	2f 9d 00 7f 	cmpwi   cr7,r29,127
c00000000036c898:	7b a0 4d a4 	rldicr  r0,r29,9,54
c00000000036c89c:	40 9d ff b0 	ble+    cr7,c00000000036c84c <.schedule+0x3bc>

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
c00000000036c8a0:	7e e0 36 70 	srawi   r0,r23,6
c00000000036c8a4:	7c 00 01 94 	addze   r0,r0
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
c00000000036c8a8:	7c 09 07 b4 	extsw   r9,r0
c00000000036c8ac:	54 00 30 32 	rlwinm  r0,r0,6,0,25
c00000000036c8b0:	79 29 1f 24 	rldicr  r9,r9,3,60
c00000000036c8b4:	7d 40 b8 50 	subf    r10,r0,r23
c00000000036c8b8:	38 00 00 01 	li      r0,1
c00000000036c8bc:	7d 7c 4a 14 	add     r11,r28,r9
c00000000036c8c0:	7c 00 50 36 	sld     r0,r0,r10

	__asm__ __volatile__(
c00000000036c8c4:	7d 00 58 a8 	ldarx   r8,0,r11
c00000000036c8c8:	7d 08 00 78 	andc    r8,r8,r0
c00000000036c8cc:	7d 00 59 ad 	stdcx.  r8,0,r11
c00000000036c8d0:	40 a2 ff f4 	bne-    c00000000036c8c4 <.schedule+0x434>

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036c8d4:	38 80 00 80 	li      r4,128
c00000000036c8d8:	7f 83 e3 78 	mr      r3,r28
c00000000036c8dc:	38 a0 00 00 	li      r5,0
c00000000036c8e0:	48 00 00 48 	b       c00000000036c928 <.schedule+0x498>
c00000000036c8e4:	e9 3e 80 08 	ld      r9,-32760(r30)
c00000000036c8e8:	e8 1e 80 00 	ld      r0,-32768(r30)
c00000000036c8ec:	7d 2b 4a 14 	add     r9,r11,r9
c00000000036c8f0:	e9 29 00 30 	ld      r9,48(r9)
c00000000036c8f4:	7d 20 4a 14 	add     r9,r0,r9
c00000000036c8f8:	e8 69 00 48 	ld      r3,72(r9)
c00000000036c8fc:	e8 09 00 50 	ld      r0,80(r9)
c00000000036c900:	7f a3 00 00 	cmpd    cr7,r3,r0
c00000000036c904:	40 9e 00 14 	bne-    cr7,c00000000036c918 <.schedule+0x488>
c00000000036c908:	e8 09 00 08 	ld      r0,8(r9)
c00000000036c90c:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036c910:	41 9e 00 08 	beq-    cr7,c00000000036c918 <.schedule+0x488>
c00000000036c914:	4b ce 6c 49 	bl      c00000000005355c <.resched_task>

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036c918:	38 bd 00 01 	addi    r5,r29,1
c00000000036c91c:	38 61 00 80 	addi    r3,r1,128
c00000000036c920:	38 80 00 80 	li      r4,128
c00000000036c924:	7c a5 07 b4 	extsw   r5,r5
c00000000036c928:	4b cc 76 f9 	bl      c000000000034020 <.find_next_bit>
c00000000036c92c:	60 00 00 00 	nop
c00000000036c930:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036c934:	40 9d 00 08 	ble-    cr7,c00000000036c93c <.schedule+0x4ac>
c00000000036c938:	38 60 00 80 	li      r3,128
c00000000036c93c:	7c 7d 07 b4 	extsw   r29,r3
c00000000036c940:	2f 9d 00 7f 	cmpwi   cr7,r29,127
c00000000036c944:	7b ab 4d a4 	rldicr  r11,r29,9,54
c00000000036c948:	40 9d ff 9c 	ble+    cr7,c00000000036c8e4 <.schedule+0x454>

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036c94c:	38 61 00 80 	addi    r3,r1,128
c00000000036c950:	38 80 00 80 	li      r4,128
c00000000036c954:	38 a0 00 00 	li      r5,0
c00000000036c958:	48 00 00 30 	b       c00000000036c988 <.schedule+0x4f8>
c00000000036c95c:	e9 3e 80 08 	ld      r9,-32760(r30)
c00000000036c960:	e9 7e 80 00 	ld      r11,-32768(r30)
c00000000036c964:	7d 20 4a 14 	add     r9,r0,r9
c00000000036c968:	e9 29 00 30 	ld      r9,48(r9)
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036c96c:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036c970:	38 00 00 00 	li      r0,0

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036c974:	38 a3 00 01 	addi    r5,r3,1
c00000000036c978:	38 80 00 80 	li      r4,128
c00000000036c97c:	38 61 00 80 	addi    r3,r1,128
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036c980:	7c 09 59 2e 	stwx    r0,r9,r11

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036c984:	7c a5 07 b4 	extsw   r5,r5
c00000000036c988:	4b cc 76 99 	bl      c000000000034020 <.find_next_bit>
c00000000036c98c:	60 00 00 00 	nop
c00000000036c990:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036c994:	40 9d 00 08 	ble-    cr7,c00000000036c99c <.schedule+0x50c>
c00000000036c998:	38 60 00 80 	li      r3,128
c00000000036c99c:	7c 63 07 b4 	extsw   r3,r3
c00000000036c9a0:	2f 83 00 7f 	cmpwi   cr7,r3,127
c00000000036c9a4:	78 60 4d a4 	rldicr  r0,r3,9,54
c00000000036c9a8:	40 9d ff b4 	ble+    cr7,c00000000036c95c <.schedule+0x4cc>
c00000000036c9ac:	e8 1a 00 08 	ld      r0,8(r26)
c00000000036c9b0:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036c9b4:	41 9e 05 54 	beq-    cr7,c00000000036cf08 <.schedule+0xa78>
c00000000036c9b8:	48 00 04 20 	b       c00000000036cdd8 <.schedule+0x948>
c00000000036c9bc:	e8 1e 80 08 	ld      r0,-32760(r30)
c00000000036c9c0:	7a eb 4d a4 	rldicr  r11,r23,9,54
c00000000036c9c4:	e9 3e 80 00 	ld      r9,-32768(r30)
c00000000036c9c8:	3a c0 00 00 	li      r22,0
c00000000036c9cc:	7d 6b 02 14 	add     r11,r11,r0
c00000000036c9d0:	e8 0b 00 30 	ld      r0,48(r11)
c00000000036c9d4:	7d 29 02 14 	add     r9,r9,r0
c00000000036c9d8:	e9 29 12 38 	ld      r9,4664(r9)
c00000000036c9dc:	48 00 00 18 	b       c00000000036c9f4 <.schedule+0x564>
c00000000036c9e0:	e8 09 00 5e 	lwa     r0,92(r9)
c00000000036c9e4:	78 08 cf e3 	rldicl. r8,r0,57,63
c00000000036c9e8:	41 82 00 08 	beq-    c00000000036c9f0 <.schedule+0x560>
c00000000036c9ec:	7d 36 4b 78 	mr      r22,r9
c00000000036c9f0:	e9 29 00 00 	ld      r9,0(r9)
c00000000036c9f4:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036c9f8:	40 9e ff e8 	bne+    cr7,c00000000036c9e0 <.schedule+0x550>
c00000000036c9fc:	2f b6 00 00 	cmpdi   cr7,r22,0
c00000000036ca00:	41 9e 03 cc 	beq-    cr7,c00000000036cdcc <.schedule+0x93c>
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036ca04:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036ca08:	91 3a 00 00 	stw     r9,0(r26)

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036ca0c:	38 61 00 80 	addi    r3,r1,128
c00000000036ca10:	38 80 00 80 	li      r4,128
c00000000036ca14:	38 a0 00 00 	li      r5,0
c00000000036ca18:	e8 16 00 18 	ld      r0,24(r22)
c00000000036ca1c:	e9 36 00 10 	ld      r9,16(r22)
c00000000036ca20:	f8 01 00 88 	std     r0,136(r1)
c00000000036ca24:	f9 21 00 80 	std     r9,128(r1)
c00000000036ca28:	48 00 00 30 	b       c00000000036ca58 <.schedule+0x5c8>
c00000000036ca2c:	e9 3e 80 08 	ld      r9,-32760(r30)
c00000000036ca30:	e8 7e 80 00 	ld      r3,-32768(r30)
c00000000036ca34:	7d 20 4a 14 	add     r9,r0,r9
c00000000036ca38:	e8 09 00 30 	ld      r0,48(r9)
c00000000036ca3c:	7c 63 02 14 	add     r3,r3,r0
c00000000036ca40:	48 00 27 cd 	bl      c00000000036f20c <._spin_lock>
c00000000036ca44:	60 00 00 00 	nop

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036ca48:	38 bd 00 01 	addi    r5,r29,1
c00000000036ca4c:	7f 83 e3 78 	mr      r3,r28
c00000000036ca50:	38 80 00 80 	li      r4,128
c00000000036ca54:	7c a5 07 b4 	extsw   r5,r5
c00000000036ca58:	4b cc 75 c9 	bl      c000000000034020 <.find_next_bit>
c00000000036ca5c:	60 00 00 00 	nop
c00000000036ca60:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036ca64:	40 9d 00 08 	ble-    cr7,c00000000036ca6c <.schedule+0x5dc>
c00000000036ca68:	38 60 00 80 	li      r3,128
c00000000036ca6c:	7c 7d 07 b4 	extsw   r29,r3
c00000000036ca70:	3b 81 00 80 	addi    r28,r1,128
c00000000036ca74:	2f 9d 00 7f 	cmpwi   cr7,r29,127
c00000000036ca78:	7b a0 4d a4 	rldicr  r0,r29,9,54
c00000000036ca7c:	40 9d ff b0 	ble+    cr7,c00000000036ca2c <.schedule+0x59c>

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
c00000000036ca80:	7e e0 36 70 	srawi   r0,r23,6
c00000000036ca84:	7c 00 01 94 	addze   r0,r0
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
c00000000036ca88:	7c 09 07 b4 	extsw   r9,r0
c00000000036ca8c:	54 00 30 32 	rlwinm  r0,r0,6,0,25
c00000000036ca90:	79 29 1f 24 	rldicr  r9,r9,3,60
c00000000036ca94:	7d 40 b8 50 	subf    r10,r0,r23
c00000000036ca98:	38 00 00 01 	li      r0,1
c00000000036ca9c:	7d 7c 4a 14 	add     r11,r28,r9
c00000000036caa0:	7c 00 50 36 	sld     r0,r0,r10

	__asm__ __volatile__(
c00000000036caa4:	7d 00 58 a8 	ldarx   r8,0,r11
c00000000036caa8:	7d 08 00 78 	andc    r8,r8,r0
c00000000036caac:	7d 00 59 ad 	stdcx.  r8,0,r11
c00000000036cab0:	40 a2 ff f4 	bne-    c00000000036caa4 <.schedule+0x614>
c00000000036cab4:	60 00 00 00 	nop
c00000000036cab8:	60 00 00 00 	nop
c00000000036cabc:	60 00 00 00 	nop
c00000000036cac0:	e8 1a 00 08 	ld      r0,8(r26)
c00000000036cac4:	3a 60 00 00 	li      r19,0
c00000000036cac8:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cacc:	41 9e 02 90 	beq-    cr7,c00000000036cd5c <.schedule+0x8cc>
c00000000036cad0:	e9 7a 00 60 	ld      r11,96(r26)
c00000000036cad4:	80 0b 00 00 	lwz     r0,0(r11)
c00000000036cad8:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cadc:	40 9e 00 08 	bne-    cr7,c00000000036cae4 <.schedule+0x654>
c00000000036cae0:	e9 7a 00 68 	ld      r11,104(r26)
c00000000036cae4:	80 0b 00 00 	lwz     r0,0(r11)
c00000000036cae8:	7c 00 00 74 	cntlzd  r0,r0
c00000000036caec:	78 00 d1 82 	rldicl  r0,r0,58,6
c00000000036caf0:	0b 00 00 00 	tdnei   r0,0
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_PPC64
	if (unlikely(b[0]))
c00000000036caf4:	e9 2b 00 08 	ld      r9,8(r11)
c00000000036caf8:	39 4b 00 08 	addi    r10,r11,8
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_PPC64
	if (unlikely(b[0]))
c00000000036cafc:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036cb00:	41 be 00 18 	beq+    cr7,c00000000036cb18 <.schedule+0x688>
c00000000036cb04:	7c 09 00 d0 	neg     r0,r9
c00000000036cb08:	7d 20 00 38 	and     r0,r9,r0
c00000000036cb0c:	7c 00 00 74 	cntlzd  r0,r0
c00000000036cb10:	20 00 00 3f 	subfic  r0,r0,63
c00000000036cb14:	48 00 00 38 	b       c00000000036cb4c <.schedule+0x6bc>
		return __ffs(b[0]);
	if (unlikely(b[1]))
c00000000036cb18:	e9 2a 00 08 	ld      r9,8(r10)
c00000000036cb1c:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036cb20:	41 be 00 18 	beq+    cr7,c00000000036cb38 <.schedule+0x6a8>
c00000000036cb24:	7c 09 00 d0 	neg     r0,r9
c00000000036cb28:	7d 20 00 38 	and     r0,r9,r0
c00000000036cb2c:	7c 00 00 74 	cntlzd  r0,r0
		return __ffs(b[1]) + 64;
c00000000036cb30:	20 00 00 7f 	subfic  r0,r0,127
c00000000036cb34:	48 00 00 18 	b       c00000000036cb4c <.schedule+0x6bc>
	return __ffs(b[2]) + 128;
c00000000036cb38:	e8 0b 00 18 	ld      r0,24(r11)
c00000000036cb3c:	7d 20 00 d0 	neg     r9,r0
c00000000036cb40:	7c 00 48 38 	and     r0,r0,r9
c00000000036cb44:	7c 00 00 74 	cntlzd  r0,r0
c00000000036cb48:	20 00 00 bf 	subfic  r0,r0,191
c00000000036cb4c:	7c 00 07 b4 	extsw   r0,r0

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036cb50:	38 61 00 80 	addi    r3,r1,128
c00000000036cb54:	38 80 00 80 	li      r4,128
c00000000036cb58:	38 a0 00 00 	li      r5,0
c00000000036cb5c:	78 09 26 e4 	rldicr  r9,r0,4,59
c00000000036cb60:	7d 29 5a 14 	add     r9,r9,r11
c00000000036cb64:	e9 29 00 20 	ld      r9,32(r9)
c00000000036cb68:	3b 29 ff c8 	addi    r25,r9,-56

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036cb6c:	4b cc 74 b5 	bl      c000000000034020 <.find_next_bit>
c00000000036cb70:	60 00 00 00 	nop
c00000000036cb74:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036cb78:	40 9d 00 08 	ble-    cr7,c00000000036cb80 <.schedule+0x6f0>
c00000000036cb7c:	38 60 00 80 	li      r3,128
c00000000036cb80:	7c 78 07 b4 	extsw   r24,r3
c00000000036cb84:	3a 60 00 00 	li      r19,0
c00000000036cb88:	48 00 01 cc 	b       c00000000036cd54 <.schedule+0x8c4>
c00000000036cb8c:	e9 7e 80 08 	ld      r11,-32760(r30)
c00000000036cb90:	7b 09 4d a4 	rldicr  r9,r24,9,54
c00000000036cb94:	e9 59 00 f8 	ld      r10,248(r25)
c00000000036cb98:	e8 1e 80 00 	ld      r0,-32768(r30)
c00000000036cb9c:	7d 29 5a 14 	add     r9,r9,r11
c00000000036cba0:	2f aa 00 00 	cmpdi   cr7,r10,0
c00000000036cba4:	e9 29 00 30 	ld      r9,48(r9)
c00000000036cba8:	7f 80 4a 14 	add     r28,r0,r9
c00000000036cbac:	eb fc 00 48 	ld      r31,72(r28)
c00000000036cbb0:	41 9e 00 a4 	beq-    cr7,c00000000036cc54 <.schedule+0x7c4>
c00000000036cbb4:	e8 1f 00 f8 	ld      r0,248(r31)
c00000000036cbb8:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cbbc:	41 9e 00 98 	beq-    cr7,c00000000036cc54 <.schedule+0x7c4>
c00000000036cbc0:	e9 59 00 2e 	lwa     r10,44(r25)
c00000000036cbc4:	2f 8a 00 63 	cmpwi   cr7,r10,99
c00000000036cbc8:	40 9d 00 8c 	ble-    cr7,c00000000036cc54 <.schedule+0x7c4>
c00000000036cbcc:	e9 7f 00 2e 	lwa     r11,44(r31)
c00000000036cbd0:	2f 8b 00 63 	cmpwi   cr7,r11,99
c00000000036cbd4:	41 bd 00 38 	bgt+    cr7,c00000000036cc0c <.schedule+0x77c>
c00000000036cbd8:	e9 7e 80 68 	ld      r11,-32664(r30)
c00000000036cbdc:	81 36 00 44 	lwz     r9,68(r22)
c00000000036cbe0:	38 00 00 0a 	li      r0,10
c00000000036cbe4:	e9 4b 00 00 	ld      r10,0(r11)
c00000000036cbe8:	1d 29 00 0a 	mulli   r9,r9,10
c00000000036cbec:	39 60 00 64 	li      r11,100
c00000000036cbf0:	7c 0a 03 92 	divdu   r0,r10,r0
c00000000036cbf4:	7d 29 5b 96 	divwu   r9,r9,r11
c00000000036cbf8:	1c 00 00 0a 	mulli   r0,r0,10
c00000000036cbfc:	7d 40 50 50 	subf    r10,r0,r10
c00000000036cc00:	79 29 00 20 	clrldi  r9,r9,32
c00000000036cc04:	7f aa 48 40 	cmpld   cr7,r10,r9
c00000000036cc08:	48 00 00 44 	b       c00000000036cc4c <.schedule+0x7bc>
c00000000036cc0c:	e8 1f 00 32 	lwa     r0,48(r31)
c00000000036cc10:	e9 39 00 32 	lwa     r9,48(r25)
c00000000036cc14:	7f 80 48 00 	cmpw    cr7,r0,r9
c00000000036cc18:	40 9c 00 3c 	bge-    cr7,c00000000036cc54 <.schedule+0x7c4>
c00000000036cc1c:	7f 8a 58 00 	cmpw    cr7,r10,r11
c00000000036cc20:	41 9c 00 34 	blt-    cr7,c00000000036cc54 <.schedule+0x7c4>
c00000000036cc24:	83 b6 00 44 	lwz     r29,68(r22)
c00000000036cc28:	80 1f 00 98 	lwz     r0,152(r31)
c00000000036cc2c:	7f 23 cb 78 	mr      r3,r25
c00000000036cc30:	23 bd 00 64 	subfic  r29,r29,100
c00000000036cc34:	7f bd 01 d6 	mullw   r29,r29,r0
c00000000036cc38:	38 00 00 64 	li      r0,100
c00000000036cc3c:	7f bd 03 96 	divwu   r29,r29,r0
c00000000036cc40:	4b ce 5f 39 	bl      c000000000052b78 <.task_timeslice>
c00000000036cc44:	7b bd 00 20 	clrldi  r29,r29,32
c00000000036cc48:	7f bd 18 40 	cmpld   cr7,r29,r3
c00000000036cc4c:	40 9d 00 08 	ble-    cr7,c00000000036cc54 <.schedule+0x7c4>
c00000000036cc50:	3a 60 00 01 	li      r19,1
c00000000036cc54:	e8 1f 00 f8 	ld      r0,248(r31)
c00000000036cc58:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cc5c:	40 9e 00 10 	bne-    cr7,c00000000036cc6c <.schedule+0x7dc>
c00000000036cc60:	e8 1c 00 50 	ld      r0,80(r28)
c00000000036cc64:	7f bf 00 00 	cmpd    cr7,r31,r0
c00000000036cc68:	40 9e 00 c4 	bne-    cr7,c00000000036cd2c <.schedule+0x89c>
c00000000036cc6c:	e8 1f 00 2e 	lwa     r0,44(r31)
c00000000036cc70:	2f 80 00 63 	cmpwi   cr7,r0,99
c00000000036cc74:	40 9d 00 b8 	ble-    cr7,c00000000036cd2c <.schedule+0x89c>
c00000000036cc78:	e8 19 00 f8 	ld      r0,248(r25)
c00000000036cc7c:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cc80:	41 9e 00 8c 	beq-    cr7,c00000000036cd0c <.schedule+0x87c>
c00000000036cc84:	e9 79 00 2e 	lwa     r11,44(r25)
c00000000036cc88:	2f 8b 00 63 	cmpwi   cr7,r11,99
c00000000036cc8c:	41 bd 00 3c 	bgt+    cr7,c00000000036ccc8 <.schedule+0x838>
c00000000036cc90:	e9 7e 80 68 	ld      r11,-32664(r30)
c00000000036cc94:	81 36 00 44 	lwz     r9,68(r22)
c00000000036cc98:	38 00 00 0a 	li      r0,10
c00000000036cc9c:	e9 4b 00 00 	ld      r10,0(r11)
c00000000036cca0:	1d 29 00 0a 	mulli   r9,r9,10
c00000000036cca4:	39 60 00 64 	li      r11,100
c00000000036cca8:	7c 0a 03 92 	divdu   r0,r10,r0
c00000000036ccac:	7d 29 5b 96 	divwu   r9,r9,r11
c00000000036ccb0:	1c 00 00 0a 	mulli   r0,r0,10
c00000000036ccb4:	7d 40 50 50 	subf    r10,r0,r10
c00000000036ccb8:	79 29 00 20 	clrldi  r9,r9,32
c00000000036ccbc:	7f aa 48 40 	cmpld   cr7,r10,r9
c00000000036ccc0:	40 9d 00 6c 	ble-    cr7,c00000000036cd2c <.schedule+0x89c>
c00000000036ccc4:	48 00 00 40 	b       c00000000036cd04 <.schedule+0x874>
c00000000036ccc8:	e9 3c 00 48 	ld      r9,72(r28)
c00000000036cccc:	e8 09 00 2e 	lwa     r0,44(r9)
c00000000036ccd0:	7f 8b 00 00 	cmpw    cr7,r11,r0
c00000000036ccd4:	40 9c 00 38 	bge-    cr7,c00000000036cd0c <.schedule+0x87c>
c00000000036ccd8:	83 b6 00 44 	lwz     r29,68(r22)
c00000000036ccdc:	80 19 00 98 	lwz     r0,152(r25)
c00000000036cce0:	7f e3 fb 78 	mr      r3,r31
c00000000036cce4:	23 bd 00 64 	subfic  r29,r29,100
c00000000036cce8:	7f bd 01 d6 	mullw   r29,r29,r0
c00000000036ccec:	38 00 00 64 	li      r0,100
c00000000036ccf0:	7f bd 03 96 	divwu   r29,r29,r0
c00000000036ccf4:	4b ce 5e 85 	bl      c000000000052b78 <.task_timeslice>
c00000000036ccf8:	7b bd 00 20 	clrldi  r29,r29,32
c00000000036ccfc:	7f bd 18 40 	cmpld   cr7,r29,r3
c00000000036cd00:	40 9d 00 0c 	ble-    cr7,c00000000036cd0c <.schedule+0x87c>
c00000000036cd04:	7f e3 fb 78 	mr      r3,r31
c00000000036cd08:	48 00 00 20 	b       c00000000036cd28 <.schedule+0x898>
c00000000036cd0c:	e8 7c 00 48 	ld      r3,72(r28)
c00000000036cd10:	e8 1c 00 50 	ld      r0,80(r28)
c00000000036cd14:	7f a3 00 00 	cmpd    cr7,r3,r0
c00000000036cd18:	40 9e 00 14 	bne-    cr7,c00000000036cd2c <.schedule+0x89c>
c00000000036cd1c:	e8 1c 00 08 	ld      r0,8(r28)
c00000000036cd20:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cd24:	41 9e 00 08 	beq-    cr7,c00000000036cd2c <.schedule+0x89c>
c00000000036cd28:	4b ce 68 35 	bl      c00000000005355c <.resched_task>

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036cd2c:	38 b8 00 01 	addi    r5,r24,1
c00000000036cd30:	38 61 00 80 	addi    r3,r1,128
c00000000036cd34:	38 80 00 80 	li      r4,128
c00000000036cd38:	7c a5 07 b4 	extsw   r5,r5
c00000000036cd3c:	4b cc 72 e5 	bl      c000000000034020 <.find_next_bit>
c00000000036cd40:	60 00 00 00 	nop
c00000000036cd44:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036cd48:	40 9d 00 08 	ble-    cr7,c00000000036cd50 <.schedule+0x8c0>
c00000000036cd4c:	38 60 00 80 	li      r3,128
c00000000036cd50:	7c 78 07 b4 	extsw   r24,r3
c00000000036cd54:	2f 98 00 7f 	cmpwi   cr7,r24,127
c00000000036cd58:	40 9d fe 34 	ble+    cr7,c00000000036cb8c <.schedule+0x6fc>

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
c00000000036cd5c:	38 61 00 80 	addi    r3,r1,128
c00000000036cd60:	38 80 00 80 	li      r4,128
c00000000036cd64:	38 a0 00 00 	li      r5,0
c00000000036cd68:	48 00 00 30 	b       c00000000036cd98 <.schedule+0x908>
c00000000036cd6c:	e9 3e 80 08 	ld      r9,-32760(r30)
c00000000036cd70:	e9 7e 80 00 	ld      r11,-32768(r30)
c00000000036cd74:	7d 20 4a 14 	add     r9,r0,r9
c00000000036cd78:	e9 29 00 30 	ld      r9,48(r9)
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036cd7c:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036cd80:	38 00 00 00 	li      r0,0

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036cd84:	38 a3 00 01 	addi    r5,r3,1
c00000000036cd88:	38 80 00 80 	li      r4,128
c00000000036cd8c:	38 61 00 80 	addi    r3,r1,128
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036cd90:	7c 09 59 2e 	stwx    r0,r9,r11

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
c00000000036cd94:	7c a5 07 b4 	extsw   r5,r5
c00000000036cd98:	4b cc 72 89 	bl      c000000000034020 <.find_next_bit>
c00000000036cd9c:	60 00 00 00 	nop
c00000000036cda0:	2f 83 00 80 	cmpwi   cr7,r3,128
c00000000036cda4:	40 9d 00 08 	ble-    cr7,c00000000036cdac <.schedule+0x91c>
c00000000036cda8:	38 60 00 80 	li      r3,128
c00000000036cdac:	7c 63 07 b4 	extsw   r3,r3
c00000000036cdb0:	2f 83 00 7f 	cmpwi   cr7,r3,127
c00000000036cdb4:	78 60 4d a4 	rldicr  r0,r3,9,54
c00000000036cdb8:	40 9d ff b4 	ble+    cr7,c00000000036cd6c <.schedule+0x8dc>
c00000000036cdbc:	2f b3 00 00 	cmpdi   cr7,r19,0
c00000000036cdc0:	41 9e 00 0c 	beq-    cr7,c00000000036cdcc <.schedule+0x93c>
c00000000036cdc4:	eb fa 00 50 	ld      r31,80(r26)
c00000000036cdc8:	48 00 01 40 	b       c00000000036cf08 <.schedule+0xa78>
c00000000036cdcc:	e8 1a 00 08 	ld      r0,8(r26)
c00000000036cdd0:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cdd4:	41 be f8 bc 	beq-    cr7,c00000000036c690 <.schedule+0x200>
c00000000036cdd8:	e9 7a 00 60 	ld      r11,96(r26)
c00000000036cddc:	80 0b 00 00 	lwz     r0,0(r11)
c00000000036cde0:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036cde4:	40 be 00 20 	bne+    cr7,c00000000036ce04 <.schedule+0x974>
c00000000036cde8:	e9 3a 00 68 	ld      r9,104(r26)
c00000000036cdec:	f8 1a 00 38 	std     r0,56(r26)
c00000000036cdf0:	38 00 00 8c 	li      r0,140
c00000000036cdf4:	f9 7a 00 68 	std     r11,104(r26)
c00000000036cdf8:	90 1a 12 30 	stw     r0,4656(r26)
c00000000036cdfc:	f9 3a 00 60 	std     r9,96(r26)
c00000000036ce00:	7d 2b 4b 78 	mr      r11,r9
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_PPC64
	if (unlikely(b[0]))
c00000000036ce04:	e9 2b 00 08 	ld      r9,8(r11)
c00000000036ce08:	39 4b 00 08 	addi    r10,r11,8
*/
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_PPC64
	if (unlikely(b[0]))
c00000000036ce0c:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036ce10:	41 be 00 18 	beq+    cr7,c00000000036ce28 <.schedule+0x998>
c00000000036ce14:	7c 09 00 d0 	neg     r0,r9
c00000000036ce18:	7d 20 00 38 	and     r0,r9,r0
c00000000036ce1c:	7c 00 00 74 	cntlzd  r0,r0
c00000000036ce20:	20 00 00 3f 	subfic  r0,r0,63
c00000000036ce24:	48 00 00 38 	b       c00000000036ce5c <.schedule+0x9cc>
		return __ffs(b[0]);
	if (unlikely(b[1]))
c00000000036ce28:	e9 2a 00 08 	ld      r9,8(r10)
c00000000036ce2c:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036ce30:	41 be 00 18 	beq+    cr7,c00000000036ce48 <.schedule+0x9b8>
c00000000036ce34:	7c 09 00 d0 	neg     r0,r9
c00000000036ce38:	7d 20 00 38 	and     r0,r9,r0
c00000000036ce3c:	7c 00 00 74 	cntlzd  r0,r0
		return __ffs(b[1]) + 64;
c00000000036ce40:	20 00 00 7f 	subfic  r0,r0,127
c00000000036ce44:	48 00 00 18 	b       c00000000036ce5c <.schedule+0x9cc>
	return __ffs(b[2]) + 128;
c00000000036ce48:	e8 0b 00 18 	ld      r0,24(r11)
c00000000036ce4c:	7d 20 00 d0 	neg     r9,r0
c00000000036ce50:	7c 00 48 38 	and     r0,r0,r9
c00000000036ce54:	7c 00 00 74 	cntlzd  r0,r0
c00000000036ce58:	20 00 00 bf 	subfic  r0,r0,191
c00000000036ce5c:	7c 00 07 b4 	extsw   r0,r0
c00000000036ce60:	78 09 26 e4 	rldicr  r9,r0,4,59
c00000000036ce64:	7d 29 5a 14 	add     r9,r9,r11
c00000000036ce68:	e9 29 00 20 	ld      r9,32(r9)
c00000000036ce6c:	3b e9 ff c8 	addi    r31,r9,-56
c00000000036ce70:	e8 1f 00 2e 	lwa     r0,44(r31)
c00000000036ce74:	2f 80 00 63 	cmpwi   cr7,r0,99
c00000000036ce78:	40 9d 00 88 	ble-    cr7,c00000000036cf00 <.schedule+0xa70>
c00000000036ce7c:	e8 1f 00 7a 	lwa     r0,120(r31)
c00000000036ce80:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036ce84:	40 9d 00 7c 	ble-    cr7,c00000000036cf00 <.schedule+0xa70>
c00000000036ce88:	e8 1f 00 60 	ld      r0,96(r31)
c00000000036ce8c:	39 20 00 00 	li      r9,0
c00000000036ce90:	7c 00 a8 51 	subf.   r0,r0,r21
c00000000036ce94:	41 80 00 08 	blt-    c00000000036ce9c <.schedule+0xa0c>
c00000000036ce98:	7c 09 03 78 	mr      r9,r0
c00000000036ce9c:	e8 1f 00 7a 	lwa     r0,120(r31)
c00000000036cea0:	2f 80 00 01 	cmpwi   cr7,r0,1
c00000000036cea4:	40 9e 00 0c 	bne-    cr7,c00000000036ceb0 <.schedule+0xa20>
c00000000036cea8:	1c 09 00 26 	mulli   r0,r9,38
c00000000036ceac:	78 09 c9 c2 	rldicl  r9,r0,57,7
c00000000036ceb0:	e8 9f 00 60 	ld      r4,96(r31)
c00000000036ceb4:	7f e3 fb 78 	mr      r3,r31
c00000000036ceb8:	eb bf 00 48 	ld      r29,72(r31)
c00000000036cebc:	7c 84 4a 14 	add     r4,r4,r9
c00000000036cec0:	4b ce 64 41 	bl      c000000000053300 <.recalc_task_prio>
c00000000036cec4:	e8 1f 00 2e 	lwa     r0,44(r31)
c00000000036cec8:	7c 7c 1b 78 	mr      r28,r3
c00000000036cecc:	7f 80 18 00 	cmpw    cr7,r0,r3
c00000000036ced0:	41 be 00 24 	beq+    cr7,c00000000036cef4 <.schedule+0xa64>
c00000000036ced4:	7f e3 fb 78 	mr      r3,r31
c00000000036ced8:	7f a4 eb 78 	mr      r4,r29
c00000000036cedc:	4b ce 5c e1 	bl      c000000000052bbc <.dequeue_task>
c00000000036cee0:	93 9f 00 2c 	stw     r28,44(r31)
c00000000036cee4:	7f a4 eb 78 	mr      r4,r29
c00000000036cee8:	7f e3 fb 78 	mr      r3,r31
c00000000036ceec:	4b ce 65 59 	bl      c000000000053444 <.enqueue_task>
c00000000036cef0:	48 00 00 10 	b       c00000000036cf00 <.schedule+0xa70>
c00000000036cef4:	7f a4 eb 78 	mr      r4,r29
c00000000036cef8:	7f e3 fb 78 	mr      r3,r31
c00000000036cefc:	4b ce 5d 4d 	bl      c000000000052c48 <.requeue_task>
c00000000036cf00:	38 00 00 00 	li      r0,0
c00000000036cf04:	90 1f 00 78 	stw     r0,120(r31)
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
c00000000036cf08:	2f bf 00 00 	cmpdi   cr7,r31,0
c00000000036cf0c:	41 9e 00 08 	beq-    cr7,c00000000036cf14 <.schedule+0xa84>
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
c00000000036cf10:	7c 00 fa 2c 	dcbt    r0,r31
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
c00000000036cf14:	e9 3b 00 08 	ld      r9,8(r27)
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
c00000000036cf18:	39 60 00 08 	li      r11,8
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag,&ti->flags);
c00000000036cf1c:	38 09 00 80 	addi    r0,r9,128
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);

	__asm__ __volatile__(
c00000000036cf20:	7d 00 00 a8 	ldarx   r8,0,r0
c00000000036cf24:	7d 08 58 78 	andc    r8,r8,r11
c00000000036cf28:	7d 00 01 ad 	stdcx.  r8,0,r0
c00000000036cf2c:	40 a2 ff f4 	bne-    c00000000036cf20 <.schedule+0xa90>
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
c00000000036cf30:	e9 3b 00 08 	ld      r9,8(r27)
* one since the start of the grace period. Thus just a flag.
*/
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
c00000000036cf34:	e9 1e 80 08 	ld      r8,-32760(r30)
c00000000036cf38:	e9 7e 81 28 	ld      r11,-32472(r30)
c00000000036cf3c:	e9 29 00 12 	lwa     r9,16(r9)
c00000000036cf40:	79 29 4d a4 	rldicr  r9,r9,9,54
c00000000036cf44:	7d 29 42 14 	add     r9,r9,r8
c00000000036cf48:	e8 09 00 30 	ld      r0,48(r9)
c00000000036cf4c:	7d 6b 02 14 	add     r11,r11,r0
	rdp->passed_quiesc = 1;
c00000000036cf50:	38 00 00 01 	li      r0,1
c00000000036cf54:	90 0b 00 08 	stw     r0,8(r11)
c00000000036cf58:	e9 3b 00 60 	ld      r9,96(r27)
c00000000036cf5c:	e8 1a 00 40 	ld      r0,64(r26)
c00000000036cf60:	e9 5b 00 70 	ld      r10,112(r27)
c00000000036cf64:	7f a0 48 40 	cmpld   cr7,r0,r9
c00000000036cf68:	40 9c 00 08 	bge-    cr7,c00000000036cf70 <.schedule+0xae0>
c00000000036cf6c:	7d 20 4b 78 	mr      r0,r9
c00000000036cf70:	7d 71 a3 92 	divdu   r11,r17,r20
c00000000036cf74:	e9 3b 00 58 	ld      r9,88(r27)
c00000000036cf78:	7c 00 a8 50 	subf    r0,r0,r21
c00000000036cf7c:	7c 0a 02 14 	add     r0,r10,r0
c00000000036cf80:	f8 1b 00 70 	std     r0,112(r27)
c00000000036cf84:	7d 2b 48 50 	subf    r9,r11,r9
c00000000036cf88:	2f a9 00 00 	cmpdi   cr7,r9,0
c00000000036cf8c:	f9 3b 00 58 	std     r9,88(r27)
c00000000036cf90:	41 9d 00 0c 	bgt-    cr7,c00000000036cf9c <.schedule+0xb0c>
c00000000036cf94:	38 00 00 00 	li      r0,0
c00000000036cf98:	f8 1b 00 58 	std     r0,88(r27)
c00000000036cf9c:	fa bb 00 68 	std     r21,104(r27)
c00000000036cfa0:	fa bb 00 60 	std     r21,96(r27)
c00000000036cfa4:	e9 3e 80 60 	ld      r9,-32672(r30)
c00000000036cfa8:	60 00 00 00 	nop
c00000000036cfac:	80 09 00 00 	lwz     r0,0(r9)
c00000000036cfb0:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036cfb4:	41 be 00 94 	beq+    cr7,c00000000036d048 <.schedule+0xbb8>
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
c00000000036cfb8:	e9 3b 00 08 	ld      r9,8(r27)
c00000000036cfbc:	e9 7e 80 00 	ld      r11,-32768(r30)
c00000000036cfc0:	81 29 00 10 	lwz     r9,16(r9)
c00000000036cfc4:	79 29 4d a4 	rldicr  r9,r9,9,54
c00000000036cfc8:	7d 29 42 14 	add     r9,r9,r8
c00000000036cfcc:	e8 09 00 30 	ld      r0,48(r9)
c00000000036cfd0:	7d 0b 02 14 	add     r8,r11,r0
c00000000036cfd4:	e8 08 00 50 	ld      r0,80(r8)
c00000000036cfd8:	7f bb 00 00 	cmpd    cr7,r27,r0
c00000000036cfdc:	41 9e 00 20 	beq-    cr7,c00000000036cffc <.schedule+0xb6c>
c00000000036cfe0:	e9 7e 80 68 	ld      r11,-32664(r30)
c00000000036cfe4:	e9 5b 00 b8 	ld      r10,184(r27)
c00000000036cfe8:	e9 3b 00 a0 	ld      r9,160(r27)
c00000000036cfec:	e8 0b 00 00 	ld      r0,0(r11)
c00000000036cff0:	7c 0a 00 50 	subf    r0,r10,r0
c00000000036cff4:	7d 29 02 14 	add     r9,r9,r0
c00000000036cff8:	f9 3b 00 a0 	std     r9,160(r27)
c00000000036cffc:	e8 08 00 50 	ld      r0,80(r8)
c00000000036d000:	7f bf 00 00 	cmpd    cr7,r31,r0
c00000000036d004:	41 9e 00 44 	beq-    cr7,c00000000036d048 <.schedule+0xbb8>
c00000000036d008:	e8 1f 00 c0 	ld      r0,192(r31)
c00000000036d00c:	e9 3e 80 68 	ld      r9,-32664(r30)
c00000000036d010:	39 40 00 00 	li      r10,0
c00000000036d014:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036d018:	e9 29 00 00 	ld      r9,0(r9)
c00000000036d01c:	41 9e 00 08 	beq-    cr7,c00000000036d024 <.schedule+0xb94>
c00000000036d020:	7d 40 48 50 	subf    r10,r0,r9
c00000000036d024:	e8 1f 00 a8 	ld      r0,168(r31)
c00000000036d028:	e9 7f 00 b0 	ld      r11,176(r31)
c00000000036d02c:	f9 3f 00 b8 	std     r9,184(r31)
c00000000036d030:	39 20 00 00 	li      r9,0
c00000000036d034:	f9 3f 00 c0 	std     r9,192(r31)
c00000000036d038:	7c 00 52 14 	add     r0,r0,r10
c00000000036d03c:	39 6b 00 01 	addi    r11,r11,1
c00000000036d040:	f8 1f 00 a8 	std     r0,168(r31)
c00000000036d044:	f9 7f 00 b0 	std     r11,176(r31)
c00000000036d048:	7f bb f8 00 	cmpd    cr7,r27,r31
c00000000036d04c:	41 9e 01 e4 	beq-    cr7,c00000000036d230 <.schedule+0xda0>
c00000000036d050:	fa bf 00 60 	std     r21,96(r31)
c00000000036d054:	fb fa 00 48 	std     r31,72(r26)
c00000000036d058:	60 00 00 00 	nop
c00000000036d05c:	60 00 00 00 	nop
c00000000036d060:	e9 3a 00 28 	ld      r9,40(r26)
c00000000036d064:	39 29 00 01 	addi    r9,r9,1
c00000000036d068:	f9 3a 00 28 	std     r9,40(r26)
c00000000036d06c:	60 00 00 00 	nop
c00000000036d070:	e9 32 00 00 	ld      r9,0(r18)
c00000000036d074:	39 29 00 01 	addi    r9,r9,1
c00000000036d078:	f9 32 00 00 	std     r9,0(r18)
c00000000036d07c:	60 00 00 00 	nop
c00000000036d080:	e8 9f 00 f8 	ld      r4,248(r31)
c00000000036d084:	eb bb 01 00 	ld      r29,256(r27)
c00000000036d088:	2f a4 00 00 	cmpdi   cr7,r4,0
c00000000036d08c:	40 be 00 28 	bne+    cr7,c00000000036d0b4 <.schedule+0xc24>
c00000000036d090:	fb bf 01 00 	std     r29,256(r31)
c00000000036d094:	38 1d 00 54 	addi    r0,r29,84
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
c00000000036d098:	60 00 00 00 	nop
c00000000036d09c:	60 00 00 00 	nop
c00000000036d0a0:	7d 20 00 28 	lwarx   r9,0,r0
c00000000036d0a4:	31 29 00 01 	addic   r9,r9,1
c00000000036d0a8:	7d 20 01 2d 	stwcx.  r9,0,r0
c00000000036d0ac:	40 a2 ff f4 	bne-    c00000000036d0a0 <.schedule+0xc10>
c00000000036d0b0:	48 00 00 94 	b       c00000000036d144 <.schedule+0xcb4>
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
c00000000036d0b4:	a1 0d 00 0a 	lhz     r8,10(r13)
/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
			       __const__ volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
c00000000036d0b8:	79 09 d1 82 	rldicl  r9,r8,58,6
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
c00000000036d0bc:	55 0b 06 be 	clrlwi  r11,r8,26
/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
			       __const__ volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
c00000000036d0c0:	79 29 1f 24 	rldicr  r9,r9,3,60
c00000000036d0c4:	7d 29 22 14 	add     r9,r9,r4
c00000000036d0c8:	e8 09 02 b0 	ld      r0,688(r9)
*/
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
c00000000036d0cc:	7c 00 5c 36 	srd     r0,r0,r11
c00000000036d0d0:	78 0a 07 e1 	clrldi. r10,r0,63
c00000000036d0d4:	40 82 00 3c 	bne-    c00000000036d110 <.schedule+0xc80>

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
c00000000036d0d8:	7d 00 36 70 	srawi   r0,r8,6
c00000000036d0dc:	7c 00 01 94 	addze   r0,r0
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
		cpu_set(smp_processor_id(), next->cpu_vm_mask);
c00000000036d0e0:	39 44 02 b0 	addi    r10,r4,688
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long old;
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
c00000000036d0e4:	7c 09 07 b4 	extsw   r9,r0
c00000000036d0e8:	54 00 30 32 	rlwinm  r0,r0,6,0,25
c00000000036d0ec:	79 29 1f 24 	rldicr  r9,r9,3,60
c00000000036d0f0:	7d 00 40 50 	subf    r8,r0,r8
c00000000036d0f4:	38 00 00 01 	li      r0,1
c00000000036d0f8:	7d 6a 4a 14 	add     r11,r10,r9
c00000000036d0fc:	7c 00 40 36 	sld     r0,r0,r8

	__asm__ __volatile__(
c00000000036d100:	7d 00 58 a8 	ldarx   r8,0,r11
c00000000036d104:	7d 08 03 78 	or      r8,r8,r0
c00000000036d108:	7d 00 59 ad 	stdcx.  r8,0,r11
c00000000036d10c:	40 a2 ff f4 	bne-    c00000000036d100 <.schedule+0xc70>
#ifdef CONFIG_PPC_64K_PAGES
	if (prev == next && get_paca()->pgdir == next->pgd)
		return;
#else
	if (prev == next)
c00000000036d110:	7f bd 20 00 	cmpd    cr7,r29,r4
c00000000036d114:	41 9e 00 30 	beq-    cr7,c00000000036d144 <.schedule+0xcb4>
};

static inline int cpu_has_feature(unsigned long feature)
{
	return (CPU_FTRS_ALWAYS & feature) ||
c00000000036d118:	e9 3e 80 c0 	ld      r9,-32576(r30)
c00000000036d11c:	e9 29 00 00 	ld      r9,0(r9)
c00000000036d120:	e8 09 00 10 	ld      r0,16(r9)
c00000000036d124:	78 09 ef e3 	rldicl. r9,r0,61,63
c00000000036d128:	41 82 00 08 	beq-    c00000000036d130 <.schedule+0xca0>
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
c00000000036d12c:	7e 00 06 6c 	dssall
};

static inline int cpu_has_feature(unsigned long feature)
{
	return (CPU_FTRS_ALWAYS & feature) ||
c00000000036d130:	78 0a 07 e3 	rldicl. r10,r0,32,63
c00000000036d134:	41 82 01 44 	beq-    c00000000036d278 <.schedule+0xde8>
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
c00000000036d138:	7f e3 fb 78 	mr      r3,r31
c00000000036d13c:	4b cc 29 f5 	bl      c00000000002fb30 <.switch_slb>
c00000000036d140:	60 00 00 00 	nop
c00000000036d144:	e8 1b 00 f8 	ld      r0,248(r27)
c00000000036d148:	2f a0 00 00 	cmpdi   cr7,r0,0
c00000000036d14c:	40 be 00 20 	bne+    cr7,c00000000036d16c <.schedule+0xcdc>
c00000000036d150:	f8 1b 01 00 	std     r0,256(r27)
c00000000036d154:	60 00 00 00 	nop
c00000000036d158:	60 00 00 00 	nop
c00000000036d15c:	60 00 00 00 	nop
c00000000036d160:	e8 1a 00 58 	ld      r0,88(r26)
c00000000036d164:	0b 00 00 00 	tdnei   r0,0
c00000000036d168:	fb ba 00 58 	std     r29,88(r26)
c00000000036d16c:	7f e4 fb 78 	mr      r4,r31
c00000000036d170:	7f 63 db 78 	mr      r3,r27
c00000000036d174:	4b ca 1e 11 	bl      c00000000000ef84 <.__switch_to>
c00000000036d178:	60 00 00 00 	nop
c00000000036d17c:	7c 7f 1b 78 	mr      r31,r3
c00000000036d180:	e9 4d 00 30 	ld      r10,48(r13)
c00000000036d184:	e9 3e 80 00 	ld      r9,-32768(r30)
c00000000036d188:	38 00 00 00 	li      r0,0
c00000000036d18c:	7d 69 52 14 	add     r11,r9,r10
c00000000036d190:	e8 6b 00 58 	ld      r3,88(r11)
c00000000036d194:	f8 0b 00 58 	std     r0,88(r11)
c00000000036d198:	60 00 00 00 	nop
c00000000036d19c:	60 00 00 00 	nop
c00000000036d1a0:	60 00 00 00 	nop
c00000000036d1a4:	eb bf 00 18 	ld      r29,24(r31)
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036d1a8:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036d1ac:	7c 0a 49 2e 	stwx    r0,r10,r9
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;
	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
c00000000036d1b0:	7c 00 00 a6 	mfmsr   r0
	SET_MSR_EE(msr | MSR_EE);
c00000000036d1b4:	60 00 80 00 	ori     r0,r0,32768
c00000000036d1b8:	7c 01 01 64 	mtmsrd  r0,1
c00000000036d1bc:	2f a3 00 00 	cmpdi   cr7,r3,0
c00000000036d1c0:	41 9e 00 30 	beq-    cr7,c00000000036d1f0 <.schedule+0xd60>
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
c00000000036d1c4:	38 03 00 54 	addi    r0,r3,84
c00000000036d1c8:	7c 20 04 ac 	lwsync
c00000000036d1cc:	7d 00 00 28 	lwarx   r8,0,r0
c00000000036d1d0:	31 08 ff ff 	addic   r8,r8,-1
c00000000036d1d4:	7d 00 01 2d 	stwcx.  r8,0,r0
c00000000036d1d8:	40 a2 ff f4 	bne-    c00000000036d1cc <.schedule+0xd3c>
c00000000036d1dc:	4c 00 01 2c 	isync
/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
c00000000036d1e0:	2f 88 00 00 	cmpwi   cr7,r8,0
c00000000036d1e4:	40 9e 00 0c 	bne-    cr7,c00000000036d1f0 <.schedule+0xd60>
		__mmdrop(mm);
c00000000036d1e8:	4b ce e1 69 	bl      c00000000005b350 <.__mmdrop>
c00000000036d1ec:	60 00 00 00 	nop
c00000000036d1f0:	7b a0 ef e3 	rldicl. r0,r29,61,63
c00000000036d1f4:	41 a2 00 54 	beq+    c00000000036d248 <.schedule+0xdb8>
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
c00000000036d1f8:	38 1f 00 10 	addi    r0,r31,16
c00000000036d1fc:	7c 20 04 ac 	lwsync
c00000000036d200:	7d 00 00 28 	lwarx   r8,0,r0
c00000000036d204:	31 08 ff ff 	addic   r8,r8,-1
c00000000036d208:	7d 00 01 2d 	stwcx.  r8,0,r0
c00000000036d20c:	40 a2 ff f4 	bne-    c00000000036d200 <.schedule+0xd70>
c00000000036d210:	4c 00 01 2c 	isync
extern void __put_task_struct_cb(struct rcu_head *rhp);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
c00000000036d214:	2f 88 00 00 	cmpwi   cr7,r8,0
c00000000036d218:	40 9e 00 30 	bne-    cr7,c00000000036d248 <.schedule+0xdb8>
		call_rcu(&t->rcu, __put_task_struct_cb);
c00000000036d21c:	e8 9e 80 e8 	ld      r4,-32536(r30)
c00000000036d220:	38 7f 07 f0 	addi    r3,r31,2032
c00000000036d224:	4b d0 a0 ad 	bl      c0000000000772d0 <.call_rcu>
c00000000036d228:	60 00 00 00 	nop
c00000000036d22c:	48 00 00 1c 	b       c00000000036d248 <.schedule+0xdb8>
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036d230:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036d234:	38 00 00 00 	li      r0,0
c00000000036d238:	90 1a 00 00 	stw     r0,0(r26)
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;
	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
c00000000036d23c:	7c 00 00 a6 	mfmsr   r0
	SET_MSR_EE(msr | MSR_EE);
c00000000036d240:	60 00 80 00 	ori     r0,r0,32768
c00000000036d244:	7c 01 01 64 	mtmsrd  r0,1
c00000000036d248:	eb 6d 01 98 	ld      r27,408(r13)
#endif

static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
c00000000036d24c:	e8 1b 00 2a 	lwa     r0,40(r27)
c00000000036d250:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036d254:	41 bc 00 14 	blt+    cr7,c00000000036d268 <.schedule+0xdd8>
		return_value_on_smp __reacquire_kernel_lock();
c00000000036d258:	48 00 27 8d 	bl      c00000000036f9e4 <.__reacquire_kernel_lock>
c00000000036d25c:	60 00 00 00 	nop
c00000000036d260:	2f 83 00 00 	cmpwi   cr7,r3,0
c00000000036d264:	41 bc f2 f4 	blt-    cr7,c00000000036c558 <.schedule+0xc8>
/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
			       __const__ volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
c00000000036d268:	e8 10 00 80 	ld      r0,128(r16)
c00000000036d26c:	78 08 ef e3 	rldicl. r8,r0,61,63
c00000000036d270:	41 a2 00 18 	beq+    c00000000036d288 <.schedule+0xdf8>
c00000000036d274:	4b ff f2 cc 	b       c00000000036c540 <.schedule+0xb0>

	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
c00000000036d278:	7f e3 fb 78 	mr      r3,r31
c00000000036d27c:	4b cc 2f 2d 	bl      c0000000000301a8 <.switch_stab>
c00000000036d280:	60 00 00 00 	nop
c00000000036d284:	4b ff fe c0 	b       c00000000036d144 <.schedule+0xcb4>
c00000000036d288:	38 21 01 10 	addi    r1,r1,272
c00000000036d28c:	e8 01 00 10 	ld      r0,16(r1)
c00000000036d290:	ea 01 ff 80 	ld      r16,-128(r1)
c00000000036d294:	ea 21 ff 88 	ld      r17,-120(r1)
c00000000036d298:	ea 41 ff 90 	ld      r18,-112(r1)
c00000000036d29c:	ea 61 ff 98 	ld      r19,-104(r1)
c00000000036d2a0:	ea 81 ff a0 	ld      r20,-96(r1)
c00000000036d2a4:	ea a1 ff a8 	ld      r21,-88(r1)
c00000000036d2a8:	ea c1 ff b0 	ld      r22,-80(r1)
c00000000036d2ac:	7c 08 03 a6 	mtlr    r0
c00000000036d2b0:	ea e1 ff b8 	ld      r23,-72(r1)
c00000000036d2b4:	eb 01 ff c0 	ld      r24,-64(r1)
c00000000036d2b8:	eb 21 ff c8 	ld      r25,-56(r1)
c00000000036d2bc:	eb 41 ff d0 	ld      r26,-48(r1)
c00000000036d2c0:	eb 61 ff d8 	ld      r27,-40(r1)
c00000000036d2c4:	eb 81 ff e0 	ld      r28,-32(r1)
c00000000036d2c8:	eb a1 ff e8 	ld      r29,-24(r1)
c00000000036d2cc:	eb c1 ff f0 	ld      r30,-16(r1)
c00000000036d2d0:	eb e1 ff f8 	ld      r31,-8(r1)
c00000000036d2d4:	4e 80 00 20 	blr

c00000000036d2d8 <.cond_resched>:
c00000000036d2d8: 7c 08 02 a6 mflr r0
c00000000036d2dc: fb a1 ff e8 std r29,-24(r1)
c00000000036d2e0: fb c1 ff f0 std r30,-16(r1)
c00000000036d2e4: fb e1 ff f8 std r31,-8(r1)
c00000000036d2e8: eb c2 c1 b8 ld r30,-15944(r2)
}
static inline int need_resched(void)
{
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
c00000000036d2ec: 38 60 00 00 li r3,0
c00000000036d2f0: f8 01 00 10 std r0,16(r1)
c00000000036d2f4: f8 21 ff 71 stdu r1,-144(r1)
}


static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag,&ti->flags);
c00000000036d2f8:	78 3f 04 64 	rldicr  r31,r1,0,49
/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
			       __const__ volatile unsigned long *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
c00000000036d2fc:	60 00 00 00 	nop
c00000000036d300:	e8 1f 00 80 	ld      r0,128(r31)
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_bit(flag,&ti->flags);
c00000000036d304: 3b bf 00 80 addi r29,r31,128
}
static inline int need_resched(void)
{
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
c00000000036d308: 78 09 ef e3 rldicl. r9,r0,61,63
c00000000036d30c: 41 a2 00 50 beq+ c00000000036d35c <.cond_resched+0x84>
c00000000036d310: 80 1f 00 14 lwz r0,20(r31)
c00000000036d314: 2f 80 00 00 cmpwi cr7,r0,0
c00000000036d318: 40 9e 00 40 bne- cr7,c00000000036d358 <.cond_resched+0x80>
c00000000036d31c: e9 3e 81 30 ld r9,-32464(r30)
c00000000036d320: 80 09 00 00 lwz r0,0(r9)
c00000000036d324: 2f 80 00 01 cmpwi cr7,r0,1
c00000000036d328: 40 9e 00 30 bne- cr7,c00000000036d358 <.cond_resched+0x80>
c00000000036d32c: 81 3f 00 14 lwz r9,20(r31)
c00000000036d330: 3d 29 10 00 addis r9,r9,4096
c00000000036d334: 91 3f 00 14 stw r9,20(r31)
c00000000036d338: 4b ff f1 59 bl c00000000036c490 <.schedule>
c00000000036d33c: 81 3f 00 14 lwz r9,20(r31)
c00000000036d340: 3d 29 f0 00 addis r9,r9,-4096
c00000000036d344: 91 3f 00 14 stw r9,20(r31)
/* Non-atomic versions */
static __inline__ int test_bit(unsigned long nr,
__const__ volatile unsigned long *addr)
{
return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
c00000000036d348: 60 00 00 00 nop
c00000000036d34c: e8 1d 00 00 ld r0,0(r29)
}
static inline int need_resched(void)
{
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
c00000000036d350: 78 09 ef e3 rldicl. r9,r0,61,63
c00000000036d354: 40 a2 ff d8 bne- c00000000036d32c <.cond_resched+0x54>
c00000000036d358: 38 60 00 01 li r3,1
c00000000036d35c: 38 21 00 90 addi r1,r1,144
c00000000036d360: e8 01 00 10 ld r0,16(r1)
c00000000036d364: eb a1 ff e8 ld r29,-24(r1)
c00000000036d368: eb c1 ff f0 ld r30,-16(r1)
c00000000036d36c: eb e1 ff f8 ld r31,-8(r1)
c00000000036d370: 7c 08 03 a6 mtlr r0
c00000000036d374: 4e 80 00 20 blr


c00000000036d378 <.yield>:
c00000000036d378: e9 2d 01 98 ld r9,408(r13)
c00000000036d37c: 38 00 00 00 li r0,0
c00000000036d380: f8 09 00 00 std r0,0(r9)
c00000000036d384: 7c 00 04 ac sync
c00000000036d388: 4b ce 9a 6c b c000000000056df4 <.sys_sched_yield>


c00000000036d38c <.wait_for_completion>:
c00000000036d38c:	7c 08 02 a6 	mflr    r0
c00000000036d390:	fb 81 ff e0 	std     r28,-32(r1)
c00000000036d394:	fb c1 ff f0 	std     r30,-16(r1)
c00000000036d398:	fb e1 ff f8 	std     r31,-8(r1)
c00000000036d39c:	fb a1 ff e8 	std     r29,-24(r1)
c00000000036d3a0:	eb c2 c1 b8 	ld      r30,-15944(r2)
c00000000036d3a4:	f8 01 00 10 	std     r0,16(r1)
c00000000036d3a8:	f8 21 ff 11 	stdu    r1,-240(r1)
c00000000036d3ac:	7c 7f 1b 78 	mr      r31,r3
c00000000036d3b0:	3b 9f 00 08 	addi    r28,r31,8
c00000000036d3b4:	4b ff ff 25 	bl      c00000000036d2d8 <.cond_resched>
c00000000036d3b8:	7f 83 e3 78 	mr      r3,r28
c00000000036d3bc:	48 00 25 05 	bl      c00000000036f8c0 <._spin_lock_irq>
c00000000036d3c0:	60 00 00 00 	nop
c00000000036d3c4:	80 1f 00 00 	lwz     r0,0(r31)
c00000000036d3c8:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036d3cc:	40 9e 00 dc 	bne-    cr7,c00000000036d4a8 <.wait_for_completion+0x11c>
c00000000036d3d0:	3b a1 00 a0 	addi    r29,r1,160
c00000000036d3d4:	38 a0 00 28 	li      r5,40
c00000000036d3d8:	38 80 00 00 	li      r4,0
c00000000036d3dc:	7f a3 eb 78 	mr      r3,r29
c00000000036d3e0:	4b cc 7c 2d 	bl      c00000000003500c <.memset>
c00000000036d3e4:	60 00 00 00 	nop
c00000000036d3e8:	38 81 00 70 	addi    r4,r1,112
c00000000036d3ec:	38 7f 00 10 	addi    r3,r31,16
c00000000036d3f0:	7c bd 04 aa 	lswi    r5,r29,32
c00000000036d3f4:	7c a4 05 aa 	stswi   r5,r4,32
c00000000036d3f8:	e9 5e 81 38 	ld      r10,-32456(r30)
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
c00000000036d3fc:	39 61 00 88 	addi    r11,r1,136
c00000000036d400:	e8 01 00 c0 	ld      r0,192(r1)
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
c00000000036d404:	e9 03 00 08 	ld      r8,8(r3)
c00000000036d408:	f8 01 00 90 	std     r0,144(r1)
c00000000036d40c:	60 00 00 00 	nop
c00000000036d410:	60 00 00 00 	nop
c00000000036d414:	60 00 00 00 	nop
c00000000036d418:	80 01 00 70 	lwz     r0,112(r1)
c00000000036d41c:	60 00 00 01 	ori     r0,r0,1
c00000000036d420:	e9 2d 01 98 	ld      r9,408(r13)
c00000000036d424:	f9 41 00 80 	std     r10,128(r1)
c00000000036d428:	90 01 00 70 	stw     r0,112(r1)
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
c00000000036d42c:	f8 61 00 88 	std     r3,136(r1)
c00000000036d430:	f9 21 00 78 	std     r9,120(r1)
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
c00000000036d434:	f9 63 00 08 	std     r11,8(r3)
	new->next = next;
	new->prev = prev;
	prev->next = new;
c00000000036d438:	f9 68 00 00 	std     r11,0(r8)
c00000000036d43c:	f9 01 00 90 	std     r8,144(r1)
c00000000036d440:	e9 2d 01 98 	ld      r9,408(r13)
c00000000036d444:	38 00 00 02 	li      r0,2
c00000000036d448:	f8 09 00 00 	std     r0,0(r9)
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036d44c:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036d450:	38 00 00 00 	li      r0,0
c00000000036d454:	90 1f 00 08 	stw     r0,8(r31)
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;
	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
c00000000036d458:	7c 00 00 a6 	mfmsr   r0
	SET_MSR_EE(msr | MSR_EE);
c00000000036d45c:	60 00 80 00 	ori     r0,r0,32768
c00000000036d460:	7c 01 01 64 	mtmsrd  r0,1
c00000000036d464:	4b ff f0 2d 	bl      c00000000036c490 <.schedule>
c00000000036d468:	7f 83 e3 78 	mr      r3,r28
c00000000036d46c:	48 00 24 55 	bl      c00000000036f8c0 <._spin_lock_irq>
c00000000036d470:	60 00 00 00 	nop
c00000000036d474:	80 1f 00 00 	lwz     r0,0(r31)
c00000000036d478:	2f 80 00 00 	cmpwi   cr7,r0,0
c00000000036d47c:	41 9e ff c4 	beq+    cr7,c00000000036d440 <.wait_for_completion+0xb4>
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
c00000000036d480:	e9 41 00 90 	ld      r10,144(r1)
c00000000036d484:	e9 61 00 88 	ld      r11,136(r1)
	entry->next = LIST_POISON1;
c00000000036d488:	3c 00 00 10 	lis     r0,16
	entry->prev = LIST_POISON2;
c00000000036d48c:	3d 20 00 20 	lis     r9,32
c00000000036d490:	60 00 01 00 	ori     r0,r0,256
c00000000036d494:	61 29 02 00 	ori     r9,r9,512
c00000000036d498:	f9 6a 00 00 	std     r11,0(r10)
c00000000036d49c:	f9 4b 00 08 	std     r10,8(r11)
c00000000036d4a0:	f8 01 00 88 	std     r0,136(r1)
c00000000036d4a4:	f9 21 00 90 	std     r9,144(r1)
c00000000036d4a8:	81 3f 00 00 	lwz     r9,0(r31)
c00000000036d4ac:	39 29 ff ff 	addi    r9,r9,-1
c00000000036d4b0:	91 3f 00 00 	stw     r9,0(r31)
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
c00000000036d4b4:	7c 20 04 ac 	lwsync
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
c00000000036d4b8:	38 00 00 00 	li      r0,0
c00000000036d4bc:	90 1f 00 08 	stw     r0,8(r31)
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;
	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
c00000000036d4c0:	7c 00 00 a6 	mfmsr   r0
	SET_MSR_EE(msr | MSR_EE);
c00000000036d4c4:	60 00 80 00 	ori     r0,r0,32768
c00000000036d4c8:	7c 01 01 64 	mtmsrd  r0,1
c00000000036d4cc:	38 21 00 f0 	addi    r1,r1,240
c00000000036d4d0:	e8 01 00 10 	ld      r0,16(r1)
c00000000036d4d4:	eb 81 ff e0 	ld      r28,-32(r1)
c00000000036d4d8:	eb a1 ff e8 	ld      r29,-24(r1)
c00000000036d4dc:	eb c1 ff f0 	ld      r30,-16(r1)
c00000000036d4e0:	eb e1 ff f8 	ld      r31,-8(r1)
c00000000036d4e4:	7c 08 03 a6 	mtlr    r0
c00000000036d4e8:	4e 80 00 20 	blr




Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]