
Running the GRU raises the error "Cannot convert a symbolic Tensor (gru_1/strided_slice:0) to a numpy array."

2021/03/11 02:37 AM
《用 Python 打造你的 AI 股票交易引擎》(Build Your AI Stock-Trading Engine with Python), a hands-on course taught by industry experts
鍾河柏
Views: 55
Answers: 2
Bookmarks: 0

MAX_CHAR_LENGTH = 120
data_2d = cs.load_char('2d_char.csv', MAX_CHAR_LENGTH)
cs.save_model(split_date, data_2d, target_days=60, dim=MAX_CHAR_LENGTH, output_file='2d_60D_model', epochs=50)

60 Days GRU Model Training
---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
<ipython-input> in <module>
      1 MAX_CHAR_LENGTH = 120
      2 data_2d = cs.load_char('2d_char.csv', MAX_CHAR_LENGTH)
----> 3 cs.save_model(split_date, data_2d, target_days=60, dim=MAX_CHAR_LENGTH, output_file='2d_60D_model', epochs=50)

~/文件/AI股票交易引擎原始碼/CH4_CStock_v1_0_5/cstock.py in save_model(split_date, data_list, target_days, dim, output_file, batch_size, epochs, param)
    993             else:
    994                 print(f'1 Day GRU Model Training')
--> 995                 gru_output(df, valid_date=valid_date, test_date=test_date, close_60=close_60, model_file=output_file, batch_size=batch_size, epochs=epochs)
    996         else:
    997             train_X, train_Y, train_Y60, valid_X, valid_Y, valid_Y60, test_X, test_Y, test_Y60 = data_split(df, valid_date, test_date, select_code)

~/文件/AI股票交易引擎原始碼/CH4_CStock_v1_0_5/cstock.py in gru_output(df, valid_date, test_date, model_file, dim, batch_size, close_60, epochs)
    965     early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    966     model_checkpoint = ModelCheckpoint(f'{ROOT_PATH}{MODEL_PATH}{model_file}.h5', save_best_only=True, save_weights_only=True)
--> 967     model = build_model()
    968     #hist = model.fit_generator(generator=train_generator, validation_data=valid_generator, epochs=epochs, callbacks=[early_stopping, model_checkpoint])
    969     hist = model.fit(x=train_generator, validation_data=valid_generator, epochs=epochs, callbacks=[early_stopping, model_checkpoint])

~/文件/AI股票交易引擎原始碼/CH4_CStock_v1_0_5/cstock.py in build_model()
    947     window_length = 64
    948     inputs = Input(shape=(dim, 10))#int(dim)
--> 949     x = GRU(window_length, return_sequences=True)(inputs)#int(dim) #input_shape=(dim, 10),
    950     x = Dropout(0.5)(x)
    951     x = GRU(window_length, return_sequences=False)(x)#input_shape=(dim, 10),

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
    652 
    653     if initial_state is None and constants is None:
--> 654       return super(RNN, self).__call__(inputs, **kwargs)
    655 
    656     # If any of `initial_state` or `constants` are specified and are Keras

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
    920           not base_layer_utils.is_in_eager_or_tf_function()):
    921         with auto_control_deps.AutomaticControlDependencies() as acd:
--> 922           outputs = call_fn(cast_inputs, *args, **kwargs)
    923           # Wrap Tensors in `outputs` in `tf.identity` to avoid
    924           # circular dependencies.

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in call(self, inputs, mask, training, initial_state)
   2073   def call(self, inputs, mask=None, training=None, initial_state=None):
   2074     self._maybe_reset_cell_dropout_mask(self.cell)
-> 2075     return super(GRU, self).call(
   2076         inputs, mask=mask, training=training, initial_state=initial_state)
   2077 

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in call(self, inputs, mask, training, initial_state, constants)
    719     self._validate_args_if_ragged(is_ragged_input, mask)
    720 
--> 721     inputs, initial_state, constants = self._process_inputs(
    722         inputs, initial_state, constants)
    723 

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _process_inputs(self, inputs, initial_state, constants)
    846       initial_state = self.states
    847     elif initial_state is None:
--> 848       initial_state = self.get_initial_state(inputs)
    849 
    850     if len(initial_state) != len(self.states):

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs)
    634     dtype = inputs.dtype
    635     if get_initial_state_fn:
--> 636       init_state = get_initial_state_fn(
    637           inputs=None, batch_size=batch_size, dtype=dtype)
    638     else:

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs, batch_size, dtype)
   1908 
   1909   def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
-> 1910     return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
   1911 
   1912 

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype)
   2924     batch_size = array_ops.shape(inputs)[0]
   2925     dtype = inputs.dtype
-> 2926   return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
   2927 
   2928 

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state(batch_size_tensor, state_size, dtype)
   2942     return nest.map_structure(create_zeros, state_size)
   2943   else:
-> 2944     return create_zeros(state_size)
   2945 
   2946 

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in create_zeros(unnested_state_size)
   2937     flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
   2938     init_state_size = [batch_size_tensor] + flat_dims
-> 2939     return array_ops.zeros(init_state_size, dtype=dtype)
   2940 
   2941   if nest.is_sequence(state_size):

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in wrapped(*args, **kwargs)
   2675 
   2676   def wrapped(*args, **kwargs):
-> 2677     tensor = fun(*args, **kwargs)
   2678     tensor._is_zeros_tensor = True
   2679     return tensor

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in zeros(shape, dtype, name)
   2719   # Create a constant if it won't be very big. Otherwise create a fill
   2720   # op to prevent serialized GraphDefs from becoming too large.
-> 2721   output = _constant_if_small(zero, shape, dtype, name)
   2722   if output is not None:
   2723     return output

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in _constant_if_small(value, shape, dtype, name)
   2660 def _constant_if_small(value, shape, dtype, name):
   2661   try:
-> 2662     if np.prod(shape) < 1000:
   2663       return constant(value, shape=shape, dtype=dtype, name=name)
   2664   except TypeError:

<__array_function__ internals> in prod(*args, **kwargs)

~/.local/lib/python3.8/site-packages/numpy/core/fromnumeric.py in prod(a, axis, dtype, out, keepdims, initial, where)
   3028     10
   3029     """
-> 3030     return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
   3031                           keepdims=keepdims, initial=initial, where=where)
   3032 

~/.local/lib/python3.8/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
     85             return reduction(axis=axis, out=out, **passkwargs)
     86 
---> 87     return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
     88 
     89 

~/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in __array__(self)
    746 
    747   def __array__(self):
--> 748     raise NotImplementedError("Cannot convert a symbolic Tensor ({}) to a numpy"
    749                               " array.".format(self.name))
    750 

NotImplementedError: Cannot convert a symbolic Tensor (gru_1/strided_slice:0) to a numpy array.
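For reference, the last frames of the traceback show the failure happening inside Keras itself: np.prod is applied to the GRU layer's symbolic batch dimension while the zero-filled initial state is being built, before any of the CSV data is touched. A minimal sketch, assuming the same TensorFlow/NumPy installation as above, that checks whether the error reproduces with a bare GRU layer and no cstock.py code at all (the shapes mirror build_model(): Input(shape=(dim, 10)) with dim = MAX_CHAR_LENGTH = 120 and GRU(64)):

# Minimal sketch: does a bare GRU layer already fail on this installation?
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, GRU

print("tensorflow", tf.__version__, "/ numpy", np.__version__)

inputs = Input(shape=(120, 10))             # same (dim, 10) input as build_model()
x = GRU(64, return_sequences=True)(inputs)  # same call that raises inside build_model()
print("GRU layer built:", x.shape)

If this snippet raises the same NotImplementedError, the problem is the library combination rather than the data: this error is commonly reported when NumPy 1.20 or newer is installed alongside the TensorFlow 2.3/2.4-era Keras code shown in the traceback, and pinning NumPy below 1.20 (for example, pip install "numpy<1.20") is the workaround usually suggested.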

Answers

  • 2021/03/12 05:23 PM
    CUPOY
    Upvotes: 0
    Downvotes: 0
    Comments: 0

    Hello, thank you for your question. We will get in touch with the expert and handle this issue as soon as possible.

  • 2021/03/13 09:35 PM
    張維元 (WeiYuan)
    Upvotes: 0
    Downvotes: 0
    Comments: 0

    Hi there,
    It looks like the data being pulled back is wrong; you may want to check whether you are retrieving the correct data (a concrete sanity check is sketched after this answer).

    Hi, I'm WeiYuan. I regularly publish articles on #DataScience, #WebDevelopment, and #SoftwareCareer topics across different platforms. If you have any questions about the content, feel free to reach out for further discussion, and you are welcome to follow my fan page ヽ(●´∀`●)ノ
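As a concrete version of the advice above, here is a small sketch for sanity-checking what cs.load_char() returned before calling cs.save_model(). The exact return structure of load_char() is an assumption here (the sketch treats it as something NumPy can coerce into an array whose trailing dimensions are (MAX_CHAR_LENGTH, 10), matching Input(shape=(dim, 10)) in build_model()), and the import alias is illustrative; adjust both to the real code:

# Hedged sketch of the "check your data first" advice; structure of data_2d is assumed.
import numpy as np
import cstock as cs                       # assumption: cstock.py is importable this way

MAX_CHAR_LENGTH = 120
data_2d = cs.load_char('2d_char.csv', MAX_CHAR_LENGTH)

arr = np.asarray(data_2d, dtype=float)    # fails fast if rows are ragged or non-numeric
print("shape:", arr.shape)                # trailing dims should match (MAX_CHAR_LENGTH, 10)
print("contains NaN:", np.isnan(arr).any())
print("all zeros:", not np.any(arr))      # an all-zero array usually means nothing was pulled back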