TypeError: ('Keyword argument not understood:', 'inputs')

The code below is a CNN model for disease detection built with TensorFlow and Keras. It raises a TypeError about the keyword argument 'inputs', and I don't understand why this error occurs. Here is my code:

from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
import os
thisFolder = os.path.dirname(os.path.realpath(__file__))
print(thisFolder)
print(tf.__version__)
infected = os.listdir(thisFolder + '/cell_images/cell_images/Parasitized/')
uninfected = os.listdir(thisFolder +'/cell_images/cell_images/Uninfected/')
data = []
labels = []
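# Parasitized cells: resize each image to 50x50, add 45- and 75-degree rotations and a blurred copy, label 1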
for i in infected:
    try: 
        image = cv2.imread(thisFolder + "/cell_images/cell_images/Parasitized/"+i)
        image_array = Image.fromarray(image , 'RGB')
        resize_img = image_array.resize((50 , 50))
        rotated45 = resize_img.rotate(45)
        rotated75 = resize_img.rotate(75)
        blur = cv2.blur(np.array(resize_img) ,(10, 10))
        data.append(np.array(resize_img)) 
        data.append(np.array(rotated45))
        data.append(np.array(rotated75))
        data.append(np.array(blur))
        labels.append(1)
        labels.append(1)
        labels.append(1)
        labels.append(1)
    except AttributeError:
        print('')
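# Uninfected cells: same resize and rotations (no blurred copy), label 0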
for u in uninfected:
    try:
        image = cv2.imread("../input/cell_images/cell_images/Uninfected/"+u)
        image_array = Image.fromarray(image , 'RGB')
        resize_img = image_array.resize((50 , 50))
        rotated45 = resize_img.rotate(45)
        rotated75 = resize_img.rotate(75)
        data.append(np.array(resize_img))
        data.append(np.array(rotated45))
        data.append(np.array(rotated75))
        labels.append(0)
        labels.append(0)
        labels.append(0)
    except AttributeError:
        print('')
cells = np.array(data)
labels = np.array(labels)
np.save('Cells' , cells)
np.save('Labels' , labels)
print('Cells : {} | labels : {}'.format(cells.shape , labels.shape))
n = 0
for i in range(49):
    n += 1
    r = np.random.randint(0 , cells.shape[0] , 1)
    plt.subplot(7 , 7, n)
    plt.subplots_adjust(hspace = 0.5 , wspace = 0.5)
    plt.imshow(cells[r[0]])
    plt.title('{} : {}'.format('Infected' if labels[r[0]] == 1 else 'Uninfected', labels[r[0]]))
    plt.xticks([]) , plt.yticks([])
plt.figure(1, figsize = (15 , 7))
plt.subplot(1 , 2 , 1)
plt.imshow(cells[0])
plt.title('Infected Cell')
plt.xticks([]) , plt.yticks([])
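# Shuffle the dataset, cast, and scale pixel values to [0, 1]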
n = np.arange(cells.shape[0])
np.random.shuffle(n)
cells = cells[n]
labels = labels[n]
cells = cells.astype(np.float32)
labels = labels.astype(np.int32)
cells = cells/255
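# Split into 80% train, 10% eval, 10% test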
from sklearn.model_selection import train_test_split
train_x , x , train_y , y = train_test_split(cells , labels ,
                                            test_size = 0.2 ,
                                            random_state = 111)
eval_x , test_x , eval_y , test_y = train_test_split(x , y ,
                                                    test_size = 0.5 ,
                                                    random_state = 111)
n = 0
for z , j in zip([train_y , eval_y , test_y] , ['train labels','eval labels','test labels']):
    n += 1
    plt.subplot(1 , 3  , n)
    sns.countplot(x = z )
    plt.title(j)
print('train data shape {} ,eval data shape {} , test data shape {}'.format(train_x.shape,
                                                                           eval_x.shape ,
                                                                           test_x.shape))
from tensorflow.python.framework import ops
ops.reset_default_graph()
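# Estimator model_fn: four conv layers, two max-pooling layers, three dense layers, and a 2-class logits layer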
def cnn_model_fn(features , labels , mode):
    input_layers = tf.reshape(features['x'] , [-1 , 50 , 50 ,3])
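    # NOTE: the TypeError in the traceback below appears to be raised while constructing this layer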
    conv1 = tf.compat.v1.layers.Conv2D(
        inputs = input_layers ,
        filters = 50 ,
        kernel_size = [7 , 7],
        padding = 'same',
        activation = tf.nn.relu
        )
    conv2 = tf.layers.conv2d(
        inputs = conv1,
        filters = 90,
        kernel_size = [3 , 3],
        padding = 'valid',
        activation = tf.nn.relu
        )
    conv3 = tf.layers.conv2d(
        inputs = conv2 ,
        filters = 10,
        kernel_size = [5 , 5],
        padding = 'same',
        activation = tf.nn.relu
        )
    pool1 = tf.layers.max_pooling2d(inputs = conv3 , pool_size = [2 , 2] ,
                                    strides = 2 )
    conv4 = tf.layers.conv2d(
        inputs = pool1 ,
        filters = 5,
        kernel_size = [3 , 3],
        padding = 'same',
        activation = tf.nn.relu
        )
    pool2 = tf.layers.max_pooling2d(inputs = conv4 , pool_size = [2 , 2] ,
                                    strides = 2 , padding = 'same')
    pool2_flatten = tf.layers.flatten(pool2)
    fc1 = tf.layers.dense(
        inputs = pool2_flatten,
        units = 2000,
        activation = tf.nn.relu
        )
    fc2 = tf.layers.dense(
        inputs = fc1,
        units = 1000,
        activation = tf.nn.relu
        )
    fc3 = tf.layers.dense(
        inputs = fc2 ,
        units = 500 ,
        activation = tf.nn.relu
        )
    logits = tf.layers.dense(
        inputs = fc3 ,
        units = 2
        )
    predictions = {
        'classes': tf.argmax(input = logits , axis = 1),
        'probabilities': tf.nn.softmax(logits , name = 'softmax_tensor')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode = mode ,
                                          predictions = predictions)
    loss = tf.losses.sparse_softmax_cross_entropy(labels = labels ,
                                                 logits = logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.001)
        train_op = optimizer.minimize(loss = loss ,
                                      global_step = tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode = mode ,
                                            loss = loss ,
                                            train_op = train_op
                                           )
    eval_metric_op = {'accuracy' : tf.metrics.accuracy(labels = labels ,
                                         predictions =  predictions['classes'])}
    logging_hook = tf.train.LoggingTensorHook(
        tensors = tensors_to_log , every_n_iter = 50
        )
    return tf.estimator.EstimatorSpec(mode = mode ,
                                      loss = loss ,
                                      eval_metric_ops = eval_metric_op)
malaria_detector = tf.estimator.Estimator(model_fn = cnn_model_fn ,
                                         model_dir = '/tmp/modelchkpt')
tensors_to_log = {'probabilities':'softmax_tensor'}
logging_hook = tf.estimator.LoggingTensorHook(
    tensors = tensors_to_log , every_n_iter = 50
    )
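# Train, then evaluate and predict, using in-memory numpy input pipelines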
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x = {'x': train_x},
    y = train_y,
    batch_size = 100 ,
    num_epochs = None ,
    shuffle = True
    )
malaria_detector.train(input_fn = train_input_fn , steps = 1 , hooks = [logging_hook])
malaria_detector.train(input_fn = train_input_fn , steps = 10000)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {'x': eval_x},
    y = eval_y ,
    num_epochs = 1 ,
    shuffle = False
    )
eval_results = malaria_detector.evaluate(input_fn = eval_input_fn)
print(eval_results)
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
    x = {'x' : test_x},
    y = test_y,
    num_epochs = 1,
    shuffle = False
    )
y_pred = malaria_detector.predict(input_fn = pred_input_fn)
classes = [p['classes'] for p in y_pred]
from sklearn.metrics import confusion_matrix , classification_report , accuracy_score
print('{} \n{} \n{}'.format(confusion_matrix(test_y , classes) ,
                           classification_report(test_y , classes) ,
                           accuracy_score(test_y , classes)))
n = 0
for i in range(49):
    n += 1
    r = np.random.randint( 0  , test_x.shape[0] , 1)
    plt.subplot(7 , 7 , n)
    plt.subplots_adjust(hspace = 0.5 , wspace = 0.5)
    plt.imshow(test_x[r[0]])
    plt.title('true {} : pred {}'.format(test_y[r[0]] , classes[r[0]]) )
    plt.xticks([]) , plt.yticks([])
    
plt.show()
print("done")

Here is the error message:

File "CNN.py", line 240, in 
    malaria_detector.train(input_fn = train_input_fn , steps = 1 , hooks = [logging_hook])
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 374, in train
    loss = self._train_model(input_fn, hooks, saving_listeners)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1164, in _train_model
    return self._train_model_default(input_fn, hooks, saving_listeners)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1194, in _train_model_default
    features, labels, ModeKeys.TRAIN, self.config)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_estimator/python/estimator/estimator.py", line 1152, in _call_model_fn
    model_fn_results = self._model_fn(features=features, **kwargs)
  File "CNN.py", line 136, in cnn_model_fn
    activation = tf.nn.relu
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/layers/convolutional.py", line 314, in __init__
    name=name, **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/layers/convolutional.py", line 527, in __init__
    **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/layers/convolutional.py", line 122, in __init__
    **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/layers/base.py", line 213, in __init__
    **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
    result = method(self, *args, **kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 186, in __init__
    generic_utils.validate_kwargs(kwargs, allowed_kwargs)
  File "/usr/local/lib/python2.7/site-packages/tensorflow_core/python/keras/utils/generic_utils.py", line 718, in validate_kwargs
    raise TypeError(error_message, kwarg)
TypeError: ('Keyword argument not understood:', 'inputs')
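
To narrow this down, the same exception seems to be reproducible on its own, outside the Estimator, with just the conv1 constructor. This is a reduced sketch I wrote only for illustration (assuming TensorFlow 2.1; the zeros tensor is just a stand-in for the reshaped features):

import tensorflow as tf

# Stand-in for the reshaped features['x'] tensor from cnn_model_fn
dummy = tf.zeros([1, 50, 50, 3])

# Same arguments as conv1 above; constructing the layer object already raises
# TypeError: ('Keyword argument not understood:', 'inputs')
conv1 = tf.compat.v1.layers.Conv2D(
    inputs = dummy,
    filters = 50,
    kernel_size = [7, 7],
    padding = 'same',
    activation = tf.nn.relu
    )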

How can I fix this TypeError? I have TensorFlow 2.1 installed and have upgraded Keras; I'm not sure whether that is related to this error. It seems to be a syntax error. Thanks! - Satya
