Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
K
kernel
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Container Registry
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
redox-os
kernel
Commits
0a72d1cb
Commit
0a72d1cb
authored
8 years ago
by
Jeremy Soller
Committed by
GitHub
8 years ago
Browse files
Options
Downloads
Plain Diff
Merge pull request #11 from pi-pi3/faster-externs
A faster implementation of the memcpy family
parents
56a533fb
5c1e6190
No related branches found
Branches containing commit
No related tags found
No related merge requests found
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
src/externs.rs
+224
-9
224 additions, 9 deletions
src/externs.rs
with
224 additions
and
9 deletions
src/externs.rs
+
224
−
9
View file @
0a72d1cb
/// Memcpy
///
/// Copy N bytes of memory from one location to another.
///
/// This faster implementation works by copying bytes not one-by-one, but in
/// groups of 8 bytes (or 4 bytes in the case of 32-bit architectures).
#[cfg(target_pointer_width
=
"64"
)]
#[no_mangle]
pub
unsafe
extern
fn
memcpy
(
dest
:
*
mut
u8
,
src
:
*
const
u8
,
n
:
usize
)
->
*
mut
u8
{
let
mut
i
=
0
;
let
n_64
:
usize
=
n
/
8
;
// Number of 64-bit groups
let
mut
i
:
usize
=
0
;
// Copy 8 bytes at a time
while
i
<
n_64
{
*
((
dest
as
usize
+
i
)
as
*
mut
u64
)
=
*
((
src
as
usize
+
i
)
as
*
const
u64
);
i
+=
8
;
}
// Copy 1 byte at a time
while
i
<
n
{
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
i
+=
1
;
}
dest
}
// 32-bit version of the function above
#[cfg(target_pointer_width
=
"32"
)]
#[no_mangle]
pub
unsafe
extern
fn
memcpy
(
dest
:
*
mut
u8
,
src
:
*
const
u8
,
n
:
usize
)
->
*
mut
u8
{
let
n_32
:
usize
=
n
/
8
;
// Number of 32-bit groups
let
mut
i
:
usize
=
0
;
// Copy 4 bytes at a time
while
i
<
n_32
{
*
((
dest
as
usize
+
i
)
as
*
mut
u32
)
=
*
((
src
as
usize
+
i
)
as
*
const
u32
);
i
+=
4
;
}
// Copy 1 byte at a time
while
i
<
n
{
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
i
+=
1
;
...
...
@@ -16,19 +54,93 @@ pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8,
/// Memmove
///
/// Copy N bytes of memory from src to dest. The memory areas may overlap.
///
/// This faster implementation works by copying bytes not one-by-one, but in
/// groups of 8 bytes (or 4 bytes in the case of 32-bit architectures).
#[cfg(target_pointer_width
=
"64"
)]
#[no_mangle]
pub
unsafe
extern
fn
memmove
(
dest
:
*
mut
u8
,
src
:
*
const
u8
,
n
:
usize
)
->
*
mut
u8
{
if
src
<
dest
as
*
const
u8
{
let
mut
i
=
n
;
let
n_64
:
usize
=
n
/
8
;
// Number of 64-bit groups
let
mut
i
:
usize
=
n_64
;
// Copy 8 bytes at a time
while
i
!=
0
{
i
-=
8
;
*
((
dest
as
usize
+
i
)
as
*
mut
u64
)
=
*
((
src
as
usize
+
i
)
as
*
const
u64
);
}
let
mut
i
:
usize
=
n
;
// Copy 1 byte at a time
while
i
!=
n_64
*
8
{
i
-=
1
;
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
}
}
else
{
let
n_64
:
usize
=
n
/
8
;
// Number of 64-bit groups
let
mut
i
:
usize
=
0
;
// Copy 8 bytes at a time
while
i
<
n_64
{
*
((
dest
as
usize
+
i
)
as
*
mut
u64
)
=
*
((
src
as
usize
+
i
)
as
*
const
u64
);
i
+=
8
;
}
// Copy 1 byte at a time
while
i
<
n
{
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
i
+=
1
;
}
}
dest
}
// 32-bit version of the function above
#[cfg(target_pointer_width
=
"32"
)]
#[no_mangle]
pub
unsafe
extern
fn
memmove
(
dest
:
*
mut
u8
,
src
:
*
const
u8
,
n
:
usize
)
->
*
mut
u8
{
if
src
<
dest
as
*
const
u8
{
let
n_32
:
usize
=
n
/
4
;
// Number of 32-bit groups
let
mut
i
:
usize
=
n_32
;
// Copy 4 bytes at a time
while
i
!=
0
{
i
-=
4
;
*
((
dest
as
usize
+
i
)
as
*
mut
u32
)
=
*
((
src
as
usize
+
i
)
as
*
const
u32
);
}
let
mut
i
:
usize
=
n
;
// Copy 1 byte at a time
while
i
!=
n_32
*
4
{
i
-=
1
;
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
}
}
else
{
let
mut
i
=
0
;
let
n_32
:
usize
=
n
/
4
;
// Number of 32-bit groups
let
mut
i
:
usize
=
0
;
// Copy 4 bytes at a time
while
i
<
n_32
{
*
((
dest
as
usize
+
i
)
as
*
mut
u32
)
=
*
((
src
as
usize
+
i
)
as
*
const
u32
);
i
+=
4
;
}
// Copy 1 byte at a time
while
i
<
n
{
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
*
((
src
as
usize
+
i
)
as
*
const
u8
);
i
+=
1
;
}
}
...
...
@@ -39,11 +151,55 @@ pub unsafe extern fn memmove(dest: *mut u8, src: *const u8,
/// Memset
///
/// Fill a block of memory with a specified value.
///
/// This faster implementation works by setting bytes not one-by-one, but in
/// groups of 8 bytes (or 4 bytes in the case of 32-bit architectures).
///
/// Per C semantics, only the low byte of `c` is written. Returns `dest`.
///
/// # Safety
///
/// `dest` must be valid for writes of `n` bytes.
#[cfg(target_pointer_width = "64")]
#[no_mangle]
pub unsafe extern fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8 {
    // C semantics: `c` is converted to unsigned char before filling.
    let byte = c as u8;
    // Broadcast that byte into all eight lanes of a u64.
    // NOTE: the previous version OR-ed shifts of the full `c as u64`, so
    // any bits of `c` above the low byte bled into neighbouring lanes
    // whenever c > 0xFF.
    let pattern = (byte as u64) * 0x0101_0101_0101_0101;

    // Byte offset into the buffer.
    let mut i: usize = 0;

    // Set 8 bytes at a time while a full group remains (the previous
    // version compared `i` against the group count `n / 8` while stepping
    // by 8, so the fast path only covered the first n/8 bytes).
    // NOTE(review): these u64 stores assume the target tolerates unaligned
    // stores (true on x86); confirm for other targets.
    while i + 8 <= n {
        *((dest as usize + i) as *mut u64) = pattern;
        i += 8;
    }

    // Set the remaining tail (n % 8 bytes) one byte at a time.
    while i < n {
        *((dest as usize + i) as *mut u8) = byte;
        i += 1;
    }

    dest
}
// 32-bit version of the function above
#[cfg(target_pointer_width
=
"32"
)]
#[no_mangle]
pub
unsafe
extern
fn
memset
(
dest
:
*
mut
u8
,
c
:
i32
,
n
:
usize
)
->
*
mut
u8
{
let
c
=
c
as
u32
;
let
c
=
(
c
<<
24
)
|
(
c
<<
16
)
|
(
c
<<
8
)
|
c
;
let
n_32
:
usize
=
n
/
4
;
let
mut
i
:
usize
=
0
;
// Set 4 bytes at a time
while
i
<
n_32
{
*
((
dest
as
usize
+
i
)
as
*
mut
u32
)
=
c
;
i
+=
4
;
}
let
c
=
c
as
u8
;
// Set 1 byte at a time
while
i
<
n
{
*
((
dest
as
usize
+
i
)
as
*
mut
u8
)
=
c
;
i
+=
1
;
}
...
...
@@ -53,15 +209,74 @@ pub unsafe extern fn memset(dest: *mut u8, c: i32, n: usize) -> *mut u8 {
/// Memcmp
///
/// Compare two blocks of memory.
///
/// This faster implementation works by comparing bytes not one-by-one, but in
/// groups of 8 bytes (or 4 bytes in the case of 32-bit architectures).
#[cfg(target_pointer_width
=
"64"
)]
#[no_mangle]
pub
unsafe
extern
fn
memcmp
(
s1
:
*
const
u8
,
s2
:
*
const
u8
,
n
:
usize
)
->
i32
{
let
mut
i
=
0
;
let
n_64
:
usize
=
n
/
8
;
let
mut
i
:
usize
=
0
;
while
i
<
n_64
{
let
a
=
*
((
s1
as
usize
+
i
)
as
*
const
u64
);
let
b
=
*
((
s2
as
usize
+
i
)
as
*
const
u64
);
if
a
!=
b
{
let
n
:
usize
=
i
+
8
;
// Find the one byte that is not equal
while
i
<
n
{
let
a
=
*
((
s1
as
usize
+
i
)
as
*
const
u8
);
let
b
=
*
((
s2
as
usize
+
i
)
as
*
const
u8
);
if
a
!=
b
{
return
a
as
i32
-
b
as
i32
;
}
i
+=
1
;
}
}
i
+=
8
;
}
while
i
<
n
{
let
a
=
*
((
s1
as
usize
+
i
)
as
*
const
u8
);
let
b
=
*
((
s2
as
usize
+
i
)
as
*
const
u8
);
if
a
!=
b
{
return
a
as
i32
-
b
as
i32
;
}
i
+=
1
;
}
0
}
#[cfg(target_pointer_width
=
"32"
)]
#[no_mangle]
pub
unsafe
extern
fn
memcmp
(
s1
:
*
const
u8
,
s2
:
*
const
u8
,
n
:
usize
)
->
i32
{
let
n_32
:
usize
=
n
/
4
;
let
mut
i
:
usize
=
0
;
while
i
<
n_32
{
let
a
=
*
((
s1
as
usize
+
i
)
as
*
const
u32
);
let
b
=
*
((
s2
as
usize
+
i
)
as
*
const
u32
);
if
a
!=
b
{
let
n
:
usize
=
i
+
4
;
// Find the one byte that is not equal
while
i
<
n
{
let
a
=
*
((
s1
as
usize
+
i
)
as
*
const
u8
);
let
b
=
*
((
s2
as
usize
+
i
)
as
*
const
u8
);
if
a
!=
b
{
return
a
as
i32
-
b
as
i32
;
}
i
+=
1
;
}
}
i
+=
4
;
}
while
i
<
n
{
let
a
=
*
((
s1
as
usize
+
i
)
as
*
const
u8
);
let
b
=
*
((
s2
as
usize
+
i
)
as
*
const
u8
);
if
a
!=
b
{
return
a
as
i32
-
b
as
i32
return
a
as
i32
-
b
as
i32
;
}
i
+=
1
;
}
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment