a
    +ca                  
   @   sx  d Z ddlZddlmZ ddlmZ ddl	m
Z G dd dejZedkrtddlZddlZddlZddlmZ ddlmZ dZejd	Zejed
Zdd Ze \\Z Z!\Z"Z#e$e d%dd Z e$e"d%dd Z"e&de j' e&de"j' e Z(ee dZ)ee)eZ)ej*j+ddddZ,e)j-de,dgd e).  e)j/e(j0e e!ddddde"e#fej1j2eddgd  dS )!a  
Mask R-CNN
Multi-GPU Support for Keras.

Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla

Ideas and a small code snippets from these sources:
https://github.com/fchollet/keras/issues/2436
https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
    Nc                       s@   e Zd ZdZ fddZ fddZ fddZdd	 Z  ZS )
ParallelModela$  Subclasses the standard Keras Model and adds multi-GPU support.
    It works by creating a copy of the model on each GPU. Then it slices
    the inputs and sends a slice to each copy of the model, and then
    merges the outputs together and applies the loss on the combined
    outputs.
    c                    s0   || _ || _|  }tt| j| j j|d dS )z~Class constructor.
        keras_model: The Keras model to parallelize
        gpu_count: Number of GPUs. Must be > 1
        )inputsoutputsN)inner_model	gpu_countmake_parallelsuperr   __init__r   )selfZkeras_modelr   Zmerged_outputs	__class__ ^C:/Users/landamar/grass_packager/grass786/addons/i.ann.maskrcnn/etc/maskrcnn/parallel_model.pyr	      s    
zParallelModel.__init__c                    s,   d|v sd|v rt | j|S tt| |S )zdRedirect loading and saving methods to the inner model. That's where
        the weights are stored.loadZsave)getattrr   r   r   __getattribute__)r
   attrnamer   r   r   r   *   s    zParallelModel.__getattribute__c                    s,   t t| j|i | | jj|i | dS )zVOverride summary() to display summaries of both, the wrapper
        and inner models.N)r   r   summaryr   )r
   argskwargsr   r   r   r   1   s    zParallelModel.summaryc           
   
      s  fddt jjjjD jj}g }ttjjD ] |g  q<tj	D ] t
d   t
d  p t jjjj} fdd|D }|ttsgtD ]\}}|| | qW d   n1 s0    Y  W d   qV1 s0    Y  qVt
dv g }t ||D ]V\}td	 d
krltjfdd|d}	ntjd	|d}	||	 q4W d   n1 s0    Y  |S )zCreates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        c                    s    i | ]\}}|t | jqS r   )tfsplitr   ).0namex)r
   r   r   
<dictcomp>=   s   z/ParallelModel.make_parallel.<locals>.<dictcomp>z/gpu:%dztower_%dc                    s2   g | ]*\ }t j fd ddd d|qS )c                    s      S Nr   s)iinput_slicesr   r   r   <lambda>Q       z8ParallelModel.make_parallel.<locals>.<listcomp>.<lambda>c                 S   s   d| dd   S )Nr      r   r   r   r   r   r!   R   r"   )Zoutput_shape)KLLambda)r   Ztensor)r   r    r   r   
<listcomp>O   s   z/ParallelModel.make_parallel.<locals>.<listcomp>Nz/cpu:0r   r   c                    s   t | t  S r   )r   Zadd_nlen)o)r   r   r   r!   i   r"   z-ParallelModel.make_parallel.<locals>.<lambda>r&   )Zaxisr   )zipr   Zinput_namesr   output_namesranger(   r   appendr   r   ZdeviceZ
name_scope
isinstancelist	enumerateKZ	int_shaper$   r%   ZConcatenate)
r
   r+   Zoutputs_allZzipped_inputsr   lr)   Zmergedr   mr   )r   r    r   r
   r   r   7   s>    


P.zParallelModel.make_parallel)	__name__
__module____qualname____doc__r	   r   r   r   __classcell__r   r   r   r   r      s
   r   __main__)mnist)ImageDataGenerator   z../Zlogsc                 C   s   t   tj| jdd  dd}tjdddddd	|}tjd
ddddd	|}tjddd|}tjdd|}tjdddd|}tj|ddd|}t	
||dS )Nr#   Zinput_image)shaper       )   r?   ZreluZsameZconv1)
activationZpaddingr   @   Zconv2)r<   r<   Zpool1)Z	pool_sizer   Zflat1r&      Zdense1)r@   r   ZsoftmaxZdense2Zdigit_classifier_model)r   Zreset_default_graphr$   ZInputr=   ZConv2DZMaxPooling2DZFlattenZDenseKMModel)x_trainZnum_classesr   r   r   r   r   build_model   s    rF   Zfloat32   zx_train shape:zx_test shape:
   g{Gz?g?g      @)ZlrZmomentumZclipnormZsparse_categorical_crossentropyZaccuracy)Zloss	optimizerZmetricsrA   )Z
batch_size2   r#   T)Zlog_dirZwrite_graph)Zsteps_per_epochZepochsverboseZvalidation_data	callbacks)3r7   Z
tensorflowr   Zkeras.backendZbackendr1   Zkeras.layersZlayersr$   Zkeras.modelsZmodelsrC   rD   r   r4   osZnumpyZnpZkeras.optimizersZkerasZkeras.datasetsr:   Zkeras.preprocessing.imager;   Z	GPU_COUNTpathabspathZROOT_DIRjoinZ	MODEL_DIRrF   Z	load_datarE   Zy_trainZx_testZy_testZexpand_dimsZastypeprintr=   ZdatagenZmodelZ
optimizersZSGDrJ   compiler   Zfit_generatorZflowrM   ZTensorBoardr   r   r   r   <module>   sL   ]


