Comments (5)
Hi @stebos100 yeah a full file that we can run to reproduce would be helpful here (and ideally an enzyme.mit.edu/explorer link).
from enzyme.
And yes Enzyme can deal with intermediate functions which return void, as well as recursive functions -- so seeing the full case and expected results would be helpful.
It would be also helpful to know what version of Enzyme and LLVM you are using.
from enzyme.
Hi @wsmoses , thanks for checking up, I have altered the script slightly to get the point across.
I am using LLVM v16, and Enzyme v0.0.103.
It seems that when I use a recursive function within a void function Enzyme produces the incorrect derivative result. And when we perform the same function body on CPU vs GPU different results are produced. Please see the script below and please feel free to ask any questions if they are needed.
// CUDA runtime headers
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// C/C++ standard library
#include <cassert>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <random>
#include <stdlib.h>
// Enzyme activity markers: the addresses of these globals are passed before
// an argument to tag it as duplicated (primal + shadow), active return,
// constant, or duplicated-with-unneeded-primal respectively.
int __device__ enzyme_dup;
int __device__ enzyme_out;
int __device__ enzyme_const;
int __device__ enzyme_dupnoneed;
// Host-side Enzyme autodiff entry points. These have no definition here;
// the Enzyme compiler pass recognizes the name and synthesizes the call.
void __enzyme_autodiff(...);
template<typename RT, typename ... Args>
RT __enzyme_autodiff(void*, Args ...);
// Path length (number of monthly time steps) used throughout the repro.
#define N 12
inline
cudaError_t cudaCheck(cudaError_t result) {
    // Abort-on-failure wrapper for CUDA runtime calls: prints the
    // human-readable error string, then asserts. Passes the status
    // through so it can wrap calls inline.
    if (result == cudaSuccess) {
        return result;
    }
    fprintf(stderr, "CUDA runtime error: %s \n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
    return result;
}
inline
__host__ __device__ void gbmSimulation(double* short_rate, double* gbm , double* rand, int maturity) {
    // Writes a geometric-Brownian-motion path into gbm[0..maturity):
    // node 0 is the fixed spot value, every later node compounds the
    // previous one by exp(drift*dt + vol*sqrt(dt)*shock), where the drift
    // at step i is short_rate[i] - vol^2/2 and the shock is rand[i].
    const double dt   = 1.0/12.0;  // monthly step
    const double spot = 1.15;
    const double vol  = 0.02;
    gbm[0] = spot;
    for (int step = 1; step < maturity; step++) {
        double drift     = (short_rate[step] - 0.5*vol*vol) * dt;
        double diffusion = vol * sqrt(dt) * rand[step];
        gbm[step] = gbm[step-1] * exp(drift + diffusion);
    }
}
inline
__host__ __device__ double sumtil( double* vec, double* gbms, double* randomNumbers, int size) {
    // Seeds gbms[0], simulates a GBM path driven by vec (short rates) and
    // randomNumbers (shocks), and returns the sum of all path nodes.
    // This is the function being differentiated with respect to vec.
    const double volatility = 0.023;  // only used by the commented repro below
    const double dt = 1.0/12.0;       // only used by the commented repro below
    gbms[0] = 1.15;
    // Issue 1: when the output of this void call feeds the sum below, the
    // Enzyme derivative disagrees with the finite-difference approximation.
    gbmSimulation(vec, gbms, randomNumbers, size);
    double total = 0.0;
    for (int i = 0; i < size; i++) {
        total += gbms[i];
    }
    // Issue 2 (to reproduce: comment out the call and loop above and enable
    // this block): the CPU derivative matches the FDA but the GPU does not.
    //for (int i = 1; i < size; i++) {
    //gbms[i] = gbms[i - 1] * exp((vec[i] - 0.5 * volatility * volatility) * dt + volatility * (0.5 * dt) * randomNumbers[i]);
    //ret += gbms[i];
    //}
    return total;
}
__host__ __device__ double sumtilFda(double* vec, double* gbms, double* randomNumber, int size, int index) {
    // Central finite-difference estimate of d(sumtil)/d(vec[index]).
    // vec[index] is bumped up then down and restored before returning;
    // gbms is clobbered as scratch space by the two sumtil evaluations.
    const double bump = 0.000000085;
    vec[index] += bump;
    const double up = sumtil(vec, gbms, randomNumber, size);
    vec[index] -= 2*bump;
    const double down = sumtil(vec, gbms, randomNumber, size);
    vec[index] += bump;  // restore the original input value
    return (up - down) / (2*bump);
}
// Shape of the primal function being differentiated on the device.
typedef double (*f_ptr)(double*, double*, double*, int);
// Device-side Enzyme entry point; no definition exists -- the Enzyme LLVM
// pass recognizes the name and generates the derivative call.
extern void __device__ __enzyme_autodiffCuda(f_ptr,
int, double*, double*,
int, double*,
int, double*,
int, int
);
// Single-thread kernel (launched <<<1,1>>>): accumulates d(sumtil)/d(d_vec[i])
// into the shadow buffer d_vec_res. The caller zero-fills d_vec_res first
// (see main) -- presumably because Enzyme accumulates into, rather than
// overwrites, dup shadows; confirm against Enzyme docs.
__global__ void computeEnzymeGrad( double* d_vec, double* d_vec_res, double* d_gbms, double* d_randomNumbers, int maturity) {
// enzyme_dup pairs d_vec with its shadow d_vec_res; the remaining pointer
// and scalar arguments are marked constant so no derivative flows through them.
__enzyme_autodiffCuda(sumtil,enzyme_dup, d_vec, d_vec_res, enzyme_const, d_gbms, enzyme_const,d_randomNumbers, enzyme_const, maturity);
}
int main() {
    // Driver: computes d(sumtil)/d(vec[i]) three ways -- GPU Enzyme,
    // central finite differences, and host Enzyme -- and prints all three
    // side by side for comparison.
    // Fixed-seed RNG so runs are reproducible across builds.
    std::random_device rd{};
    std::mt19937 gen(42);
    std::normal_distribution<double> norm(5.15, 2.85);
    std::normal_distribution<double> normTwo(1.15, 0.65);
    std::normal_distribution<double> normThree(5.0, 3.0);
    size_t bytes = N*sizeof(double);
    double *vec = (double*)malloc(bytes);
    double *gbms = (double*)malloc(bytes);
    double *rands = (double*)malloc(bytes);
    double *results_x = (double*)malloc(bytes);
    double *device_vec, *device_gbms, *device_rands, *device_der_vec;
    cudaCheck(cudaMalloc(&device_vec, bytes));
    cudaCheck(cudaMalloc(&device_gbms, bytes));
    cudaCheck(cudaMalloc(&device_rands, bytes));
    cudaCheck(cudaMalloc(&device_der_vec, bytes));
    for (int i = 0; i < N; i++){
        vec[i] = norm(gen);
        gbms[i] = 0.0;
        rands[i] = normThree(gen);
        results_x[i] = 0.0;  // shadow (derivative) buffer starts at zero
    }
    int n = N;
    cudaCheck(cudaMemcpy(device_vec, vec, bytes, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(device_gbms, gbms, bytes, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(device_rands, rands, bytes, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(device_der_vec, results_x, bytes, cudaMemcpyHostToDevice));
    computeEnzymeGrad<<<1,1>>>(device_vec, device_der_vec, device_gbms, device_rands,n);
    // A kernel launch returns no status directly: catch launch-configuration
    // errors here and asynchronous execution errors at the synchronize.
    cudaCheck(cudaGetLastError());
    cudaCheck(cudaDeviceSynchronize());
    cudaCheck(cudaMemcpy(results_x, device_der_vec, bytes, cudaMemcpyDeviceToHost));
    //=================================================== FDA & HOST CHECK ====================================================
    double* fdaResults = (double*)malloc(bytes);
    double* EnzymeHost = (double*)malloc(bytes);
    for (int i = 0; i < N; i++) {
        fdaResults[i] = 0.0;
        // BUG FIX: the host shadow buffer was previously left uninitialized;
        // Enzyme accumulates derivatives into the shadow, so reading it after
        // the call mixed malloc garbage into the reported host gradient.
        EnzymeHost[i] = 0.0;
    }
    for (int i = 0; i < N; i++) {
        fdaResults[i] = sumtilFda(vec, gbms, rands, n, i);
    }
    __enzyme_autodiff((void*)sumtil,enzyme_dup, vec, EnzymeHost, enzyme_const, gbms, enzyme_const, rands, enzyme_const, n);
    for (int i = 0; i < N; i++) {
        printf("\nx[%d]='%f';\n", i, vec[i]);
        printf("FDA for grad_x[%d]='%.18f';\n", i, fdaResults[i]);
        printf("AAD CUDA Enzyme for grad_x[%d]='%.18f';\n", i, results_x[i]);
        printf("AAD Enzyme Host for grad_x[%d]='%.18f';\n", i, EnzymeHost[i]);
    }
    free(vec);
    free(gbms);
    free(rands);
    free(results_x);
    free(fdaResults);
    free(EnzymeHost);  // BUG FIX: was leaked
    cudaFree(device_vec);
    cudaFree(device_gbms);
    cudaFree(device_rands);
    cudaFree(device_der_vec);
    return 0;
}
//=======================================================================================================================
from enzyme.
Ah since you're on GPU my guess is that you may run out of device memory for the caches (and cuda throw an error not being caught).
To reduce unnecessary caching can you add restrict on all pointers arguments of the function you're autodiffing (assuming that they point to different locations in memory)?
If that doesn't resolve it, I'll take a closer look and find a GPU machine to test on.
from enzyme.
Hi @wsmoses, even after adding the restrict qualifier it produces the incorrect derivative result on the GPU when compared to the CPU example.
Both the CPU and GPU results however do not accommodate the first issue (calling the void recursive function), which I believe is also an important issue to address. I have a GPU handy, so if needs be I could always try and run test scripts for you if needed.
Thanks so much again !
from enzyme.
Related Issues (20)
- Many failures when running ninja check-enzyme HOT 2
- Enzyme: Cannot cast __enzyme_autodiff primal argument 16 HOT 7
- enzyme_dupped parameter doesn't return gradient
- New C++ interface with lambda HOT 2
- Can't compile eigensumsqdyn-notmp.cpp with Eigen 3.4.0
- Injected headers for c++ break tooling
- abort cmake when -DLLVM_DIR is an invalid path.
- check-enzyme-integration tests failures HOT 3
- Branch mismatcharg fails to compile HOT 6
- Building Enzyme CMake - Undefined symbol: main HOT 3
- Unnecessary caching for recursive functions
- Bug in Enzyme gsl branch HOT 1
- compilation slowdown associated with PreserveNVVMNewPM HOT 1
- Is this N/3 correct? HOT 10
- incorrect derivative of function that returns struct HOT 2
- C++ interface templates appear to be broken HOT 7
- `std::vector.push_back()` causes segementation fault in Enzyme HOT 2
- EnzymeCreateForwardDiff missing from CApi.h HOT 1
- Clarify usage through linking HOT 4
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from enzyme.