Is it normal for Python to be as fast or faster than C?
by jmgibson1981 from LinuxQuestions.org (#6JDA4)
I was tinkering with a random number generator in C.
Code:
#include <stdio.h>
#include <jmgeneral.h>

int main()
{
    int a = 0;
    for (int i = 0; i < 1000000; i++) {
        a = random_gen(100);
    }
}

// these are from the shared library; shown here with the headers they need
#include <stdlib.h>     /* srand, rand */
#include <time.h>       /* clock_gettime, struct timespec */
#ifdef _WIN32
#include <windows.h>    /* QueryPerformanceCounter, LARGE_INTEGER */
#endif

int
ret_nano_secs()
{
    int retval = 0;
#ifdef _WIN32
    LARGE_INTEGER now;
    QueryPerformanceCounter(&now);
    retval = (unsigned int) now.QuadPart;
#else
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    retval = (unsigned int) now.tv_nsec;
#endif
    return(retval);
}

/*
 * seeds srand with the current nanosecond count, then returns a
 * random number within the specified range
 */
int
random_gen(int high)
{
    // seed rand with the current nanosecond count
    srand(ret_nano_secs());
    return((unsigned int)rand() % high);
}

Running this for 1M iterations:
Code:
time ./a.out

real    0m1.237s
user    0m1.237s
sys     0m0.000s

Python, however, with the same number of iterations using the random module:
Code:
#!/usr/bin/python3
import random

for i in range(1000000):
    random.randrange(1, 100)
Code:
time ./test.py

real    0m0.603s
user    0m0.603s
sys     0m0.000s

While I make no claims about the efficiency of my C code, I'm quite confused; I wouldn't remotely have expected this result. The pure C version takes twice as long to run, while Python's random is a library written in roughly 1k lines of interpreted, uncompiled Python. Is this kind of result common in all but the most extreme cases?
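For comparison, here is a minimal sketch of a variant that seeds the PRNG once before the loop instead of on every iteration, so the loop body is just a single rand() % 100 call with no clock_gettime()/srand() work. This is an illustrative sketch only, not code from jmgeneral:

Code:
/*
 * Illustrative sketch, not part of jmgeneral: seed the PRNG exactly once
 * before the loop rather than reseeding it on every iteration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    int a = 0;

    srand((unsigned int) time(NULL));   /* seed exactly once */

    for (int i = 0; i < 1000000; i++) {
        a = (unsigned int) rand() % 100;
    }

    printf("last value: %d\n", a);      /* use 'a' so the loop isn't dead code */
    return 0;
}

The printf at the end just uses the result so the compiler can't discard the loop entirely, and time(NULL) is only one of many ways to pick a seed.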