A GPU out-of-memory (OOM) error occurs while executing the `frames_to_features_pipe` step. I already reduced the batch size to 1, but the error still occurs.
As a next step I would like to shrink the input frames (reduce the spatial resolution of the images fed to the model) to lower GPU memory usage — where and how should I change this? Note that the OOM tensor has shape [32, 64, 16, 56, 56] even with `batch_size=1`, which suggests the model is still being fed 32 clips at once internally (e.g. inside `predict_c3d`). My call and the full traceback are below:
frames_to_features_pipe(
DATA_DIR,
MEAN_STD,
ext='.allframes.npy',
target_ext='.proc.c3d-avg.npy',
model=MODEL,
classes=CLASSES,
frames_per_clip=16,
frames_step=8,
batch_size=1,
max_elements=13320)
ResourceExhaustedError: Graph execution error:
Detected at node 'sequential/pool1/MaxPool3D' defined at (most recent call last):
File "C:\Users\user\anaconda3\envs\conda\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\user\anaconda3\envs\conda\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel_launcher.py", line 16, in
app.launch_new_instance()
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\traitlets\config\application.py", line 845, in launch_instance
app.start()
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\kernelapp.py", line 667, in start
self.io_loop.start()
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "C:\Users\user\anaconda3\envs\conda\lib\asyncio\base_events.py", line 570, in run_forever
self._run_once()
File "C:\Users\user\anaconda3\envs\conda\lib\asyncio\base_events.py", line 1859, in _run_once
handle._run()
File "C:\Users\user\anaconda3\envs\conda\lib\asyncio\events.py", line 81, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\kernelbase.py", line 456, in dispatch_queue
await self.process_one()
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\kernelbase.py", line 445, in process_one
await dispatch(*args)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\kernelbase.py", line 352, in dispatch_shell
await result
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\kernelbase.py", line 647, in execute_request
reply_content = await reply_content
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\ipkernel.py", line 345, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\ipykernel\zmqshell.py", line 532, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\IPython\core\interactiveshell.py", line 2898, in run_cell
result = self._run_cell(
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\IPython\core\interactiveshell.py", line 2944, in _run_cell
return runner(coro)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\IPython\core\async_helpers.py", line 68, in pseudo_sync_runner
coro.send(None)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\IPython\core\interactiveshell.py", line 3169, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\IPython\core\interactiveshell.py", line 3361, in run_ast_nodes
if (await self.run_code(code, result, async=asy)):
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\IPython\core\interactiveshell.py", line 3441, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\user\AppData\Local\Temp/ipykernel_36644/2967937518.py", line 1, in
frames_to_features_pipe(
File "C:\Users\user\Sign Language\v1.6\python\mpypl_pipes.py", line 101, in frames_to_features_pipe
(mp.get_datastream(data_dir, classes=classes, ext=ext)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\pipe.py", line 100, in ror
return self.function(other)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\mPyPl\utils\pipeutils.py", line 174, in execute
list(l)
File "C:\Users\user\Sign Language\v1.6\python\mpypl_pipe_func.py", line 17, in cachecomputex
for x in seq:
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\mPyPl\core.py", line 519, in silly_progress
for x in seq:
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\mPyPl\core.py", line 114, in apply_batch
res = func(arg)
File "C:\Users\user\Sign Language\v1.6\python\mpypl_pipes.py", line 120, in
lambda x: predict_c3d(x, model),
File "C:\Users\user\Sign Language\v1.6\python\data_prep.py", line 289, in predict_c3d
pred.append(model.predict(batch))
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\training.py", line 1982, in predict
tmp_batch_outputs = self.predict_function(iterator)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\training.py", line 1801, in predict_function
return step_function(self, iterator)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\training.py", line 1790, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\training.py", line 1783, in run_step
outputs = model.predict_step(data)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\training.py", line 1751, in predict_step
return self(x, training=False)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\base_layer.py", line 1096, in call
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\utils\traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\sequential.py", line 374, in call
return super(Sequential, self).call(inputs, training=training, mask=mask)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\functional.py", line 451, in call
return self._run_internal_graph(
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\functional.py", line 589, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\engine\base_layer.py", line 1096, in call
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\utils\traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "C:\Users\user\anaconda3\envs\conda\lib\site-packages\keras\layers\pooling.py", line 699, in call
outputs = self.pool_function(
Node: 'sequential/pool1/MaxPool3D'
OOM when allocating tensor with shape[32,64,16,56,56] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[{{node sequential/pool1/MaxPool3D}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode.
[Op:__inference_predict_function_476]