/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 1999, 2000, 2001 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.  To keep the exception fixup routine simple, all
 * memory accesses in __copy_user to src and dst respectively are strictly
 * incremental.  The fixup routine depends on $at not being changed.
 */
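
/*
 * Calling convention sketch (illustrative, inferred from the code below
 * rather than documented anywhere in this file):
 *
 *	memcpy: a0 = dst, a1 = src, a2 = len; returns dst in v0.
 *
 * __copy_user shares memcpy's body.  On success it returns with a2 == 0.
 * If a load from src faults, l_fixup zero-fills the part of dst that was
 * never written (via __bzero) and leaves the residual byte count in a2;
 * if a store to dst faults, s_fixup simply returns.
 */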
#include <asm/asm.h>
#include <asm/offset.h>
#include <asm/regdef.h>

/*
 * The fixup routine for copy_to_user depends on copying strictly in
 * increasing order.  Gas expands the ulw/usw macros in the wrong order for
 * little endian machines, so we cannot depend on them.
 */
#ifdef __MIPSEB__
#define uswL	swl
#define uswU	swr
#define ulwL	lwl
#define ulwU	lwr
#define usdL	sdl
#define usdU	sdr
#define uldL	ldl
#define uldU	ldr
#endif
#ifdef __MIPSEL__
#define uswL	swr
#define uswU	swl
#define ulwL	lwr
#define ulwU	lwl
#define usdL	sdr
#define usdU	sdl
#define uldL	ldr
#define uldU	ldl
#endif
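
/*
 * How the paired partial accesses work (big-endian sketch; little endian
 * mirrors it): for an unaligned 32-bit load from p, lwl fetches the bytes
 * from p to the end of the aligned word containing p into the high end of
 * the register, and lwr fetches the bytes from the start of the word
 * containing p+3 up to p+3 into the low end, so the pair
 *
 *	ulwL	reg, 0(p)	# lwl on big-endian
 *	ulwU	reg, 3(p)	# lwr on big-endian
 *
 * leaves the complete unaligned word in reg.  The uld/usd pairs do the
 * same over 8 bytes with ldl/ldr and sdl/sdr.
 */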

#define EX(insn,reg,addr,handler)			\
9:	insn	reg, addr;				\
	.section __ex_table,"a"; 			\
	PTR	9b, handler; 				\
	.previous

#define UEX(insn,reg,addr,handler)			\
9:	insn ## L reg, addr;				\
10:	insn ## U reg, 3 + addr;			\
	.section __ex_table,"a"; 			\
	PTR	9b, handler; 				\
	PTR	10b, handler; 				\
	.previous

#define UEXD(insn,reg,addr,handler)			\
9:	insn ## L reg, addr;				\
10:	insn ## U reg, 7 + addr;			\
	.section __ex_table,"a"; 			\
	PTR	9b, handler; 				\
	PTR	10b, handler; 				\
	.previous
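
/*
 * EX wraps a single possibly-faulting access: the instruction gets a
 * local label (9:) and a PTR entry in the __ex_table section mapping that
 * address to a fixup handler, so the page fault code can resume there
 * instead of treating the fault as fatal.  UEX/UEXD do the same for both
 * halves of an unaligned access; e.g. on big-endian,
 * UEX(usw, t0, 0(a0), s_fixup) expands to roughly:
 *
 *	9:	swl	t0, 0(a0)
 *	10:	swr	t0, 3 + 0(a0)
 *		.section __ex_table,"a"
 *		PTR	9b, s_fixup
 *		PTR	10b, s_fixup
 *		.previous
 */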

/* ascending order, destination aligned  */
#define MOVE_BIGGERCHUNK(src, dst, offset, t0, t1, t2, t3) \
	EX(ld, t0, (offset + 0x00)(src), l_fixup); \
	EX(ld, t1, (offset + 0x08)(src), l_fixup); \
	EX(ld, t2, (offset + 0x10)(src), l_fixup); \
	EX(ld, t3, (offset + 0x18)(src), l_fixup); \
	EX(sd, t0, (offset + 0x00)(dst), s_fixup); \
	EX(sd, t1, (offset + 0x08)(dst), s_fixup); \
	EX(sd, t2, (offset + 0x10)(dst), s_fixup); \
	EX(sd, t3, (offset + 0x18)(dst), s_fixup); \
	EX(ld, t0, (offset + 0x20)(src), l_fixup); \
	EX(ld, t1, (offset + 0x28)(src), l_fixup); \
	EX(ld, t2, (offset + 0x30)(src), l_fixup); \
	EX(ld, t3, (offset + 0x38)(src), l_fixup); \
	EX(sd, t0, (offset + 0x20)(dst), s_fixup); \
	EX(sd, t1, (offset + 0x28)(dst), s_fixup); \
	EX(sd, t2, (offset + 0x30)(dst), s_fixup); \
	EX(sd, t3, (offset + 0x38)(dst), s_fixup)

/* ascending order, destination aligned  */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	EX(lw, t0, (offset + 0x00)(src), l_fixup); \
	EX(lw, t1, (offset + 0x04)(src), l_fixup); \
	EX(lw, t2, (offset + 0x08)(src), l_fixup); \
	EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
	EX(sw, t0, (offset + 0x00)(dst), s_fixup); \
	EX(sw, t1, (offset + 0x04)(dst), s_fixup); \
	EX(sw, t2, (offset + 0x08)(dst), s_fixup); \
	EX(sw, t3, (offset + 0x0c)(dst), s_fixup); \
	EX(lw, t0, (offset + 0x10)(src), l_fixup); \
	EX(lw, t1, (offset + 0x14)(src), l_fixup); \
	EX(lw, t2, (offset + 0x18)(src), l_fixup); \
	EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
	EX(sw, t0, (offset + 0x10)(dst), s_fixup); \
	EX(sw, t1, (offset + 0x14)(dst), s_fixup); \
	EX(sw, t2, (offset + 0x18)(dst), s_fixup); \
	EX(sw, t3, (offset + 0x1c)(dst), s_fixup)

/* ascending order, destination unaligned  */
#define UMOVE_BIGGERCHUNK(src, dst, offset, t0, t1, t2, t3) \
	EX(ld, t0, (offset + 0x00)(src), l_fixup); \
	EX(ld, t1, (offset + 0x08)(src), l_fixup); \
	EX(ld, t2, (offset + 0x10)(src), l_fixup); \
	EX(ld, t3, (offset + 0x18)(src), l_fixup); \
	UEXD(usd, t0, (offset + 0x00)(dst), s_fixup); \
	UEXD(usd, t1, (offset + 0x08)(dst), s_fixup); \
	UEXD(usd, t2, (offset + 0x10)(dst), s_fixup); \
	UEXD(usd, t3, (offset + 0x18)(dst), s_fixup); \
	EX(ld, t0, (offset + 0x20)(src), l_fixup); \
	EX(ld, t1, (offset + 0x28)(src), l_fixup); \
	EX(ld, t2, (offset + 0x30)(src), l_fixup); \
	EX(ld, t3, (offset + 0x38)(src), l_fixup); \
	UEXD(usd, t0, (offset + 0x20)(dst), s_fixup); \
	UEXD(usd, t1, (offset + 0x28)(dst), s_fixup); \
	UEXD(usd, t2, (offset + 0x30)(dst), s_fixup); \
	UEXD(usd, t3, (offset + 0x38)(dst), s_fixup)

/* ascending order, destination unaligned  */
#define UMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	EX(lw, t0, (offset + 0x00)(src), l_fixup); \
	EX(lw, t1, (offset + 0x04)(src), l_fixup); \
	EX(lw, t2, (offset + 0x08)(src), l_fixup); \
	EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
	UEX(usw, t0, (offset + 0x00)(dst), s_fixup); \
	UEX(usw, t1, (offset + 0x04)(dst), s_fixup); \
	UEX(usw, t2, (offset + 0x08)(dst), s_fixup); \
	UEX(usw, t3, (offset + 0x0c)(dst), s_fixup); \
	EX(lw, t0, (offset + 0x10)(src), l_fixup); \
	EX(lw, t1, (offset + 0x14)(src), l_fixup); \
	EX(lw, t2, (offset + 0x18)(src), l_fixup); \
	EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
	UEX(usw, t0, (offset + 0x10)(dst), s_fixup); \
	UEX(usw, t1, (offset + 0x14)(dst), s_fixup); \
	UEX(usw, t2, (offset + 0x18)(dst), s_fixup); \
	UEX(usw, t3, (offset + 0x1c)(dst), s_fixup)
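
/*
 * All four chunk movers batch their loads ahead of the matching stores so
 * the load latencies can overlap instead of stalling each store.  Note
 * the U-variants still use plain aligned loads: by the time they run the
 * source has been aligned, so only the destination needs the unaligned
 * store sequences.
 */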

	.text
	.set	noreorder
	.set	noat

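/*
 * Flow sketch: when src and dst agree modulo 8, the head is copied in
 * byte/halfword/word steps until the source is 32-byte aligned (the
 * destination then shares its doubleword alignment, which is all the sd
 * stores need), the bulk moves in 128-, 64- and 32-byte chunks, and the
 * tail in words and bytes.  When the offsets differ, memcpy_u_src below
 * aligns only the source and writes the destination with the unaligned
 * store macros.
 */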
	.align	5
LEAF(memcpy)					/* a0=dst a1=src a2=len */
	move	v0, a0				/* return value */
__memcpy:
FEXPORT(__copy_user)
	xor	ta0, a0, a1
	andi	ta0, ta0, 0x7
	move	t3, a0
	beqz	ta0, can_align
	 sltiu	t8, a2, 0x8

	b	memcpy_u_src			# bad alignment
	 move	ta2, a2

can_align:
	bnez	t8, small_memcpy		# < 8 bytes to copy
	 move	ta2, a2

	beqz	a2, out
	 andi	t8, a1, 0x1

hword_align:
	beqz	t8, word_align
	 andi	t8, a1, 0x2

	EX(lb, ta0, (a1), l_fixup)
	dsubu	a2, a2, 0x1
	EX(sb, ta0, (a0), s_fixup)
	daddu	a1, a1, 0x1
	daddu	a0, a0, 0x1
	andi	t8, a1, 0x2

word_align:
	beqz	t8, dword_align
	 sltiu	t8, a2, 56

	EX(lh, ta0, (a1), l_fixup)
	dsubu	a2, a2, 0x2
	EX(sh, ta0, (a0), s_fixup)
	sltiu	t8, a2, 56
	daddu	a0, a0, 0x2
	daddu	a1, a1, 0x2

dword_align:
	bnez	t8, do_end_words
	 move	t8, a2

	andi	t8, a1, 0x4
	beqz	t8, qword_align
	 andi	t8, a1, 0x8

	EX(lw, ta0, 0x00(a1), l_fixup)
	dsubu	a2, a2, 0x4
	EX(sw, ta0, 0x00(a0), s_fixup)
	daddu	a1, a1, 0x4
	daddu	a0, a0, 0x4
	andi	t8, a1, 0x8

qword_align:
	beqz	t8, oword_align
	 andi	t8, a1, 0x10

	EX(lw, ta0, 0x00(a1), l_fixup)
	EX(lw, ta1, 0x04(a1), l_fixup)
	dsubu	a2, a2, 0x8
	EX(sw, ta0, 0x00(a0), s_fixup)
	EX(sw, ta1, 0x04(a0), s_fixup)
	daddu	a1, a1, 0x8
	andi	t8, a1, 0x10
	daddu	a0, a0, 0x8

oword_align:
	beqz	t8, begin_movement
	 srl	t8, a2, 0x7

	EX(lw, ta3, 0x00(a1), l_fixup)
	EX(lw, t0, 0x04(a1), l_fixup)
	EX(lw, ta0, 0x08(a1), l_fixup)
	EX(lw, ta1, 0x0c(a1), l_fixup)
	EX(sw, ta3, 0x00(a0), s_fixup)
	EX(sw, t0, 0x04(a0), s_fixup)
	EX(sw, ta0, 0x08(a0), s_fixup)
	EX(sw, ta1, 0x0c(a0), s_fixup)
	dsubu	a2, a2, 0x10
	daddu	a1, a1, 0x10
	srl	t8, a2, 0x7
	daddu	a0, a0, 0x10

begin_movement:
	beqz	t8, 0f
	 andi	ta2, a2, 0x40

move_128bytes:
	PREF	(0, 2*128(a1))			# prefetch src for load
	PREF	(1, 2*128(a0))			# prefetch dst for store
	MOVE_BIGGERCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	MOVE_BIGGERCHUNK(a1, a0, 0x40, ta0, ta1, ta3, t0)
	dsubu	t8, t8, 0x01
	daddu	a1, a1, 0x80
	bnez	t8, move_128bytes
	 daddu	a0, a0, 0x80

0:
	beqz	ta2, 1f
	 andi	ta2, a2, 0x20

move_64bytes:
	MOVE_BIGGERCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	daddu	a1, a1, 0x40
	daddu	a0, a0, 0x40

1:
	beqz	ta2, do_end_words
	 andi	t8, a2, 0x1c

move_32bytes:
	MOVE_BIGCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	andi	t8, a2, 0x1c
	daddu	a1, a1, 0x20
	daddu	a0, a0, 0x20

do_end_words:
	beqz	t8, maybe_end_cruft
	 srl	t8, t8, 0x2

end_words:
	EX(lw, ta0, (a1), l_fixup)
	dsubu	t8, t8, 0x1
	EX(sw, ta0, (a0), s_fixup)
	daddu	a1, a1, 0x4
	bnez	t8, end_words
	 daddu	a0, a0, 0x4

maybe_end_cruft:
	andi	ta2, a2, 0x3

small_memcpy:
	beqz	ta2, out
	 move	a2, ta2

end_bytes:
	EX(lb, ta0, (a1), l_fixup)
	dsubu	a2, a2, 0x1
	EX(sb, ta0, (a0), s_fixup)
	daddu	a1, a1, 0x1
	bnez	a2, end_bytes
	 daddu	a0, a0, 0x1

out:	jr	ra
	 move	a2, zero			# no residual bytes

/* ------------------------------------------------------------------------- */

/* Bad, bad.  At least try to align the source  */

memcpy_u_src:
	bnez	t8, small_memcpy		# < 8 bytes?
	 move	ta2, a2

	daddiu	ta0, a1, 7			# ta0: how much to align
	ori	ta0, 7
	xori	ta0, 7
	dsubu	ta0, a1

	UEXD(uld, ta1, 0(a1), l_fixup)		# dword alignment
	UEXD(usd, ta1, 0(a0), s_fixup)
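
	/*
	 * The daddiu/ori/xori sequence above computes
	 * ((a1 + 7) | 7) ^ 7 == (a1 + 7) & ~7, i.e. a1 rounded up to a
	 * multiple of 8, so ta0 holds the number of head bytes needed to
	 * align the source.  The unaligned uld/usd pair has just copied
	 * the first 8 bytes, which covers those head bytes (some may be
	 * copied again later), so src, dst and len can simply be
	 * adjusted by ta0.
	 */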

	daddu	a1, ta0				# src
	daddu	a0, ta0				# dst
	dsubu	a2, ta0				# len

	sltiu	t8, a2, 56
	bnez	t8, u_do_end_words
	 andi	t8, a2, 0x3c

	andi	t8, a1, 8			# now qword aligned?

u_qword_align:
	beqz	t8, u_oword_align
	 andi	t8, a1, 0x10

	EX(ld, ta0, 0x00(a1), l_fixup)
	dsubu	a2, a2, 0x8
	UEXD(usd, ta0, 0x00(a0), s_fixup)
	daddu	a1, a1, 0x8
	andi	t8, a1, 0x10
	daddu	a0, a0, 0x8

u_oword_align:
	beqz	t8, u_begin_movement
	 srl	t8, a2, 0x7

	EX(lw, ta3, 0x08(a1), l_fixup)
	EX(lw, t0, 0x0c(a1), l_fixup)
	EX(lw, ta0, 0x00(a1), l_fixup)
	EX(lw, ta1, 0x04(a1), l_fixup)
	UEX(usw, ta3, 0x08(a0), s_fixup)
	UEX(usw, t0, 0x0c(a0), s_fixup)
	UEX(usw, ta0, 0x00(a0), s_fixup)
	UEX(usw, ta1, 0x04(a0), s_fixup)
	dsubu	a2, a2, 0x10
	daddu	a1, a1, 0x10
	srl	t8, a2, 0x7
	daddu	a0, a0, 0x10

u_begin_movement:
	beqz	t8, 0f
	 andi	ta2, a2, 0x40

u_move_128bytes:
	UMOVE_BIGGERCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	UMOVE_BIGGERCHUNK(a1, a0, 0x40, ta0, ta1, ta3, t0)
	dsubu	t8, t8, 0x01
	daddu	a1, a1, 0x80
	bnez	t8, u_move_128bytes
	 daddu	a0, a0, 0x80

0:
	beqz	ta2, 1f
	 andi	ta2, a2, 0x20

u_move_64bytes:
	UMOVE_BIGGERCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	daddu	a1, a1, 0x40
	daddu	a0, a0, 0x40

1:
	beqz	ta2, u_do_end_words
	 andi	t8, a2, 0x1c

u_move_32bytes:
	UMOVE_BIGCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	andi	t8, a2, 0x1c
	daddu	a1, a1, 0x20
	daddu	a0, a0, 0x20

u_do_end_words:
	beqz	t8, u_maybe_end_cruft
	 srl	t8, t8, 0x2

u_end_words:
	EX(lw, ta0, 0x00(a1), l_fixup)
	dsubu	t8, t8, 0x1
	UEX(usw, ta0, 0x00(a0), s_fixup)
	daddu	a1, a1, 0x4
	bnez	t8, u_end_words
	 daddu	a0, a0, 0x4

u_maybe_end_cruft:
	andi	ta2, a2, 0x3

u_cannot_optimize:
	beqz	ta2, out
	 move	a2, ta2

u_end_bytes:
	EX(lb, ta0, (a1), l_fixup)
	dsubu	a2, a2, 0x1
	EX(sb, ta0, (a0), s_fixup)
	daddu	a1, a1, 0x1
	bnez	a2, u_end_bytes
	 daddu	a0, a0, 0x1

	jr	ra
	 move	a2, zero
	END(memcpy)

/* descending order, destination aligned  */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	lw	t0, (offset + 0x10)(src); \
	lw	t1, (offset + 0x14)(src); \
	lw	t2, (offset + 0x18)(src); \
	lw	t3, (offset + 0x1c)(src); \
	sw	t0, (offset + 0x10)(dst); \
	sw	t1, (offset + 0x14)(dst); \
	sw	t2, (offset + 0x18)(dst); \
	sw	t3, (offset + 0x1c)(dst); \
	lw	t0, (offset + 0x00)(src); \
	lw	t1, (offset + 0x04)(src); \
	lw	t2, (offset + 0x08)(src); \
	lw	t3, (offset + 0x0c)(src); \
	sw	t0, (offset + 0x00)(dst); \
	sw	t1, (offset + 0x04)(dst); \
	sw	t2, (offset + 0x08)(dst); \
	sw	t3, (offset + 0x0c)(dst)

/* descending order, destination unaligned  */
#define RUMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
	lw	t0, (offset + 0x10)(src); \
	lw	t1, (offset + 0x14)(src); \
	lw	t2, (offset + 0x18)(src); \
	lw	t3, (offset + 0x1c)(src); \
	usw	t0, (offset + 0x10)(dst); \
	usw	t1, (offset + 0x14)(dst); \
	usw	t2, (offset + 0x18)(dst); \
	usw	t3, (offset + 0x1c)(dst); \
	lw	t0, (offset + 0x00)(src); \
	lw	t1, (offset + 0x04)(src); \
	lw	t2, (offset + 0x08)(src); \
	lw	t3, (offset + 0x0c)(src); \
	usw	t0, (offset + 0x00)(dst); \
	usw	t1, (offset + 0x04)(dst); \
	usw	t2, (offset + 0x08)(dst); \
	usw	t3, (offset + 0x0c)(dst)

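/*
 * The two sltu tests below implement, in C terms,
 *
 *	if (!(src < dst + len && dst < src + len))
 *		return memcpy(dst, src, len);
 *
 * i.e. disjoint buffers take the fast forward copy.  Overlapping buffers
 * fall through into __rmemcpy, which copies backwards when dst > src.
 */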
	.align	5
LEAF(memmove)
	daddu	t0, a0, a2
	sltu	t0, a1, t0			# dst + len <= src -> memcpy
	daddu	t1, a1, a2
	sltu	t1, a0, t1			# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, __memcpy
	 move	v0, a0				/* return value */
	beqz	a2, r_out
	END(memmove)

LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
	sltu	t0, a1, a0
	beqz	t0, r_end_bytes_up		# src >= dst
	 nop
	daddu	a0, a2				# dst = dst + len
	daddu	a1, a2				# src = src + len

#if 0 /* Horror fix */
	xor	ta0, a0, a1
	andi	ta0, ta0, 0x3
	move	t3, a0
	beqz	ta0, r_can_align
	 sltiu	t8, a2, 0x8

	b	r_memcpy_u_src			# bad alignment
	 move	ta2, a2

r_can_align:
	bnez	t8, r_small_memcpy		# < 8 bytes to copy
	 move	ta2, a2

	beqz	a2, r_out
	 andi	t8, a1, 0x1

r_hword_align:
	beqz	t8, r_word_align
	 andi	t8, a1, 0x2

	lb	ta0, -1(a1)
	dsubu	a2, a2, 0x1
	sb	ta0, -1(a0)
	dsubu	a1, a1, 0x1
	dsubu	a0, a0, 0x1
	andi	t8, a1, 0x2

r_word_align:
	beqz	t8, r_dword_align
	 sltiu	t8, a2, 56
	
	lh	ta0, -2(a1)
	dsubu	a2, a2, 0x2
	sh	ta0, -2(a0)
	sltiu	t8, a2, 56
	dsubu	a0, a0, 0x2
	dsubu	a1, a1, 0x2

r_dword_align:
	bnez	t8, r_do_end_words
	 move	t8, a2

	andi	t8, a1, 0x4
	beqz	t8, r_qword_align
	 andi	t8, a1, 0x8

	lw	ta0, -4(a1)
	dsubu	a2, a2, 0x4
	sw	ta0, -4(a0)
	dsubu	a1, a1, 0x4
	dsubu	a0, a0, 0x4
	andi	t8, a1, 0x8

r_qword_align:
	beqz	t8, r_oword_align
	 andi	t8, a1, 0x10

	dsubu	a1, a1, 0x8
	lw	ta0, 0x04(a1)
	lw	ta1, 0x00(a1)
	dsubu	a0, a0, 0x8
	sw	ta0, 0x04(a0)
	sw	ta1, 0x00(a0)
	dsubu	a2, a2, 0x8

	andi	t8, a1, 0x10

r_oword_align:
	beqz	t8, r_begin_movement
	 srl	t8, a2, 0x7

	dsubu	a1, a1, 0x10
	lw	ta3, 0x08(a1)			# assumes subblock ordering
	lw	t0, 0x0c(a1)
	lw	ta0, 0x00(a1)
	lw	ta1, 0x04(a1)
	dsubu	a0, a0, 0x10
	sw	ta3, 0x08(a0)
	sw	t0, 0x0c(a0)
	sw	ta0, 0x00(a0)
	sw	ta1, 0x04(a0)
	dsubu	a2, a2, 0x10
	srl	t8, a2, 0x7

r_begin_movement:
	beqz	t8, 0f
	 andi	ta2, a2, 0x40

r_move_128bytes:
	RMOVE_BIGCHUNK(a1, a0, -0x80, ta0, ta1, ta3, t0)
	RMOVE_BIGCHUNK(a1, a0, -0x60, ta0, ta1, ta3, t0)
	RMOVE_BIGCHUNK(a1, a0, -0x40, ta0, ta1, ta3, t0)
	RMOVE_BIGCHUNK(a1, a0, -0x20, ta0, ta1, ta3, t0)
	dsubu	t8, t8, 0x01
	dsubu	a1, a1, 0x80
	bnez	t8, r_move_128bytes
	 dsubu	a0, a0, 0x80

0:
	beqz	ta2, 1f
	 andi	ta2, a2, 0x20

r_move_64bytes:
	dsubu	a1, a1, 0x40
	dsubu	a0, a0, 0x40
	RMOVE_BIGCHUNK(a1, a0, 0x20, ta0, ta1, ta3, t0)
	RMOVE_BIGCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)

1:
	beqz	ta2, r_do_end_words
	 andi	t8, a2, 0x1c

r_move_32bytes:
	dsubu	a1, a1, 0x20
	dsubu	a0, a0, 0x20
	RMOVE_BIGCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	andi	t8, a2, 0x1c

r_do_end_words:
	beqz	t8, r_maybe_end_cruft
	 srl	t8, t8, 0x2

r_end_words:
	lw	ta0, -4(a1)
	dsubu	t8, t8, 0x1
	sw	ta0, -4(a0)
	dsubu	a1, a1, 0x4
	bnez	t8, r_end_words
	 dsubu	a0, a0, 0x4

r_maybe_end_cruft:
	andi	ta2, a2, 0x3

r_small_memcpy:
	beqz	ta2, r_out
	 move	a2, ta2
#endif /* Horror fix */

r_end_bytes:
	lb	ta0, -1(a1)
	dsubu	a2, a2, 0x1
	sb	ta0, -1(a0)
	dsubu	a1, a1, 0x1
	bnez	a2, r_end_bytes
	 dsubu	a0, a0, 0x1

r_out:
	jr      ra
	 move   a2, zero

r_end_bytes_up:
	lb	t0, (a1)
	dsubu	a2, a2, 0x1
	sb	t0, (a0)
	daddu	a1, a1, 0x1
	bnez	a2, r_end_bytes_up
	 daddu	a0, a0, 0x1

	jr	ra
	 move	a2, zero

#if 0 /* Horror fix */
/* ------------------------------------------------------------------------- */

/* Bad, bad.  At least try to align the source  */

r_memcpy_u_src:
	bnez	t8, r_small_memcpy		# < 8 bytes?
	 move	ta2, a2

	andi	ta0, a1, 7			# ta0: how much to align

	ulw	ta1, -8(a1)			# dword alignment
	ulw	ta2, -4(a1)
	usw	ta1, -8(a0)
	usw	ta2, -4(a0)

	dsubu	a1, ta0				# src
	dsubu	a0, ta0				# dst
	dsubu	a2, ta0				# len

	sltiu	t8, a2, 56
	bnez	t8, ru_do_end_words
	 andi	t8, a2, 0x3c

	andi	t8, a1, 8			# now qword aligned?

ru_qword_align:
	beqz	t8, ru_oword_align
	 andi	t8, a1, 0x10

	dsubu	a1, a1, 0x8
	lw	ta0, 0x00(a1)
	lw	ta1, 0x04(a1)
	dsubu	a0, a0, 0x8
	usw	ta0, 0x00(a0)
	usw	ta1, 0x04(a0)
	dsubu	a2, a2, 0x8

	andi	t8, a1, 0x10

ru_oword_align:
	beqz	t8, ru_begin_movement
	 srl	t8, a2, 0x7

	dsubu	a1, a1, 0x10
	lw	ta3, 0x08(a1)			# assumes subblock ordering
	lw	t0, 0x0c(a1)
	lw	ta0, 0x00(a1)
	lw	ta1, 0x04(a1)
	dsubu	a0, a0, 0x10
	usw	ta3, 0x08(a0)
	usw	t0, 0x0c(a0)
	usw	ta0, 0x00(a0)
	usw	ta1, 0x04(a0)
	dsubu	a2, a2, 0x10

	srl	t8, a2, 0x7

ru_begin_movement:
	beqz	t8, 0f
	 andi	ta2, a2, 0x40

ru_move_128bytes:
	RUMOVE_BIGCHUNK(a1, a0, -0x80, ta0, ta1, ta3, t0)
	RUMOVE_BIGCHUNK(a1, a0, -0x60, ta0, ta1, ta3, t0)
	RUMOVE_BIGCHUNK(a1, a0, -0x40, ta0, ta1, ta3, t0)
	RUMOVE_BIGCHUNK(a1, a0, -0x20, ta0, ta1, ta3, t0)
	dsubu	t8, t8, 0x01
	dsubu	a1, a1, 0x80
	bnez	t8, ru_move_128bytes
	 dsubu	a0, a0, 0x80

0:
	beqz	ta2, 1f
	 andi	ta2, a2, 0x20

ru_move_64bytes:
	dsubu	a1, a1, 0x40
	dsubu	a0, a0, 0x40
	RUMOVE_BIGCHUNK(a1, a0, 0x20, ta0, ta1, ta3, t0)
	RUMOVE_BIGCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)

1:
	beqz	ta2, ru_do_end_words
	 andi	t8, a2, 0x1c

ru_move_32bytes:
	dsubu	a1, a1, 0x20
	dsubu	a0, a0, 0x20
	RUMOVE_BIGCHUNK(a1, a0, 0x00, ta0, ta1, ta3, t0)
	andi	t8, a2, 0x1c

ru_do_end_words:
	beqz	t8, ru_maybe_end_cruft
	 srl	t8, t8, 0x2

ru_end_words:
	lw	ta0, -4(a1)
	usw	ta0, -4(a0)
	dsubu	t8, t8, 0x1
	dsubu	a1, a1, 0x4
	bnez	t8, ru_end_words
	 dsubu	a0, a0, 0x4

ru_maybe_end_cruft:
	andi	ta2, a2, 0x3

ru_cannot_optimize:
	beqz	ta2, r_out
	 move	a2, ta2

ru_end_bytes:
	lb	ta0, -1(a1)
	dsubu	a2, a2, 0x1
	sb	ta0, -1(a0)
	dsubu	a1, a1, 0x1
	bnez	a2, ru_end_bytes
	 dsubu	a0, a0, 0x1

	jr	ra
	 move	a2, zero
#endif /* Horror fix */
	END(__rmemcpy)

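/*
 * Fixup handlers: when an EX/UEX-wrapped access faults, the fault handler
 * looks the faulting PC up in __ex_table and resumes at the handler
 * registered there.  THREAD_BUADDR($28) is the saved bad virtual address
 * in the thread structure reached through $28.
 */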
l_fixup:					# clear the rest of the buffer
	ld	ta0, THREAD_BUADDR($28)		# ta0 = faulting address
	 nop
	dsubu	a2, AT, ta0			# a2 bytes to go
	daddu	a0, ta0				# compute start address in a0:
	dsubu	a0, a1				#  dst + (fault addr - src)
	j	__bzero				# zero the unwritten tail of dst
	 move	a1, zero			#  fill byte 0, length in a2

s_fixup:					# store fault: just return
	jr	ra
	 nop