tensorflow: Backport upstream as the bazel build is failing for current
@@ -7,6 +7,10 @@
self: super: pkgs: with pkgs; {

  # tensorflow requires cudatoolkit90
  inherit (callPackages ./tf-cudatoolkit.nix { }) cudatoolkit90;
  inherit (callPackages ./tf-cudnn.nix { }) cudnn_cudatoolkit90;

  # https://github.com/NixOS/nixpkgs/issues/44426
  python27 = super.python27.override { packageOverrides = pythonOverrides; };
  python34 = super.python34.override { packageOverrides = pythonOverrides; };

@@ -1,3 +1,19 @@
pkgs: self: super: with self; {

  # Backport upstream master tensorflow (bazel build is failing)
  astunparse = callPackage ./tf-astunparse.nix { };

  gast = callPackage ./tf-gast.nix { };

  tensorflow-tensorboard = callPackage ./tf-tensorboard.nix {
    protobuf = protobuf3_5;
  };

  tensorflow = callPackage ./tf.nix {
    protobuf = protobuf3_5;
    cudaSupport = pkgs.config.cudaSupport or false;
    cudatoolkit = pkgs.cudatoolkit90;
    cudnn = pkgs.cudnn_cudatoolkit90;
    inherit (pkgs.linuxPackages) nvidia_x11;
  };
}

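The tensorflow override above only switches to the GPU wheel when `pkgs.config.cudaSupport` is set. A minimal sketch of flipping that flag, assuming these overrides are already wired into your nixpkgs configuration (the file location is the usual user config path, not something defined by this commit):

# ~/.config/nixpkgs/config.nix (hypothetical example)
{
  allowUnfree = true;  # cudatoolkit90 and cudnn_cudatoolkit90 are unfree
  cudaSupport = true;  # read by `pkgs.config.cudaSupport or false` above
}
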
17 temporary/tf-astunparse.nix Normal file
@@ -0,0 +1,17 @@
{ stdenv, fetchPypi, buildPythonPackage, six }:

buildPythonPackage rec {
  pname = "astunparse";
  version = "1.5.0";
  src = fetchPypi {
    inherit pname version;
    sha256 = "1kc9lm2jvfcip3z8snj04dar5a9jh857a704m6lvcv4xclm3rpsm";
  };
  propagatedBuildInputs = [ six ];
  doCheck = false; # no tests
  meta = with stdenv.lib; {
    description = "This is a factored out version of unparse found in the Python source distribution";
    license = licenses.bsd3;
    maintainers = with maintainers; [ jyp ];
  };
}

161 temporary/tf-cudatoolkit.nix Normal file
@@ -0,0 +1,161 @@
{ lib, stdenv, makeWrapper, fetchurl, requireFile, patchelf, perl, ncurses, expat, python27, zlib
, gcc6
, xorg, gtk2, glib, fontconfig, freetype, unixODBC, alsaLib, glibc
}:

let

  common =
    args@{ gcc, version, sha256
         , url ? ""
         , name ? ""
         , developerProgram ? false
         , python ? python27
         , runPatches ? []
         }:

    stdenv.mkDerivation rec {
      name = "cudatoolkit-${version}";
      inherit version runPatches;

      dontPatchELF = true;
      dontStrip = true;

      src =
        if developerProgram then
          requireFile {
            message = ''
              This nix expression requires that ${args.name} is already part of the store.
              Register yourself to NVIDIA Accelerated Computing Developer Program, retrieve the CUDA toolkit
              at https://developer.nvidia.com/cuda-toolkit, and run the following command in the download directory:
              nix-prefetch-url file://\$PWD/${args.name}
            '';
            inherit (args) name sha256;
          }
        else
          fetchurl {
            inherit (args) url sha256;
          };

      outputs = [ "out" "lib" "doc" ];

      nativeBuildInputs = [ perl makeWrapper ];

      runtimeDependencies = [
        ncurses expat python zlib glibc
        xorg.libX11 xorg.libXext xorg.libXrender xorg.libXt xorg.libXtst xorg.libXi xorg.libXext
        gtk2 glib fontconfig freetype unixODBC alsaLib
      ];

      rpath = "${stdenv.lib.makeLibraryPath runtimeDependencies}:${stdenv.cc.cc.lib}/lib64";

      phases = [ "unpackPhase" "installPhase" "fixupPhase" ];

      unpackPhase = ''
        sh $src --keep --noexec

        cd pkg/run_files
        sh cuda-linux*.run --keep --noexec
        sh cuda-samples*.run --keep --noexec
        mv pkg ../../$(basename $src)
        cd ../..
        rm -rf pkg

        for patch in $runPatches; do
          sh $patch --keep --noexec
          mv pkg $(basename $patch)
        done
      '';

      installPhase = ''
        mkdir $out
        cd $(basename $src)
        perl ./install-linux.pl --prefix="$out"
        cd ..
        for patch in $runPatches; do
          cd $(basename $patch)
          perl ./install_patch.pl --silent --accept-eula --installdir="$out"
          cd ..
        done

        rm $out/tools/CUDA_Occupancy_Calculator.xls # FIXME: why?

        # let's remove the 32-bit libraries, they confuse the lib64->lib mover
        rm -rf $out/lib

        # Remove some cruft.
        rm $out/bin/uninstall*

        # Fixup path to samples (needed for cuda 6.5 or else nsight will not find them)
        if [ -d "$out"/cuda-samples ]; then
          mv "$out"/cuda-samples "$out"/samples
        fi

        # Change the #error on GCC > 4.9 to a #warning.
        sed -i $out/include/host_config.h -e 's/#error\(.*unsupported GNU version\)/#warning\1/'

        # Fix builds with newer glibc version
        sed -i "1 i#define _BITS_FLOATN_H" "$out/include/host_defines.h"

        # Ensure that cmake can find CUDA.
        mkdir -p $out/nix-support
        echo "cmakeFlags+=' -DCUDA_TOOLKIT_ROOT_DIR=$out'" >> $out/nix-support/setup-hook

        # Move some libraries to the lib output so that programs that
        # depend on them don't pull in this entire monstrosity.
        mkdir -p $lib/lib
        mv -v $out/lib64/libcudart* $lib/lib/

        # Remove OpenCL libraries as they are provided by ocl-icd and driver.
        rm -f $out/lib64/libOpenCL*

        # Set compiler for NVCC.
        wrapProgram $out/bin/nvcc \
          --prefix PATH : ${gcc}/bin
      '' + lib.optionalString (lib.versionOlder version "8.0") ''
        # Hack to fix building against recent Glibc/GCC.
        echo "NIX_CFLAGS_COMPILE+=' -D_FORCE_INLINES'" >> $out/nix-support/setup-hook
      '';

      preFixup = ''
        while IFS= read -r -d ''$'\0' i; do
          if ! isELF "$i"; then continue; fi
          echo "patching $i..."
          if [[ ! $i =~ \.so ]]; then
            patchelf \
              --set-interpreter "''$(cat $NIX_CC/nix-support/dynamic-linker)" $i
          fi
          if [[ $i =~ libcudart ]]; then
            rpath2=
          else
            rpath2=$rpath:$lib/lib:$out/jre/lib/amd64/jli:$out/lib:$out/lib64:$out/nvvm/lib:$out/nvvm/lib64
          fi
          patchelf --set-rpath $rpath2 --force-rpath $i
        done < <(find $out $lib $doc -type f -print0)
      '';

      passthru = {
        cc = gcc;
        majorVersion =
          let versionParts = lib.splitString "." version;
          in "${lib.elemAt versionParts 0}.${lib.elemAt versionParts 1}";
      };

      meta = with stdenv.lib; {
        description = "A compiler for NVIDIA GPUs, math libraries, and tools";
        homepage = "https://developer.nvidia.com/cuda-toolkit";
        platforms = [ "x86_64-linux" ];
        license = licenses.unfree;
      };
    };

in {

  cudatoolkit90 = common {
    version = "9.0.176.1";
    url = "https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run";
    sha256 = "0308rmmychxfa4inb1ird9bpgfppgr9yrfg1qp0val5azqik91ln";
    gcc = gcc6;
  };
}

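The passthru.majorVersion attribute above is what the cuDNN expressions below use to build their tarball name. A minimal sketch of that computation for the 9.0.176.1 toolkit, assuming a standalone evaluation where lib is taken from <nixpkgs> (illustration only, not part of the commit):

# Mirrors passthru.majorVersion in tf-cudatoolkit.nix.
let
  lib = (import <nixpkgs> { }).lib;
  version = "9.0.176.1";
  versionParts = lib.splitString "." version;
in "${lib.elemAt versionParts 0}.${lib.elemAt versionParts 1}"  # evaluates to "9.0"
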
56 temporary/tf-cudnn-generic.nix Normal file
@@ -0,0 +1,56 @@
{ version
, srcName
, sha256
}:

{ stdenv
, lib
, requireFile
, cudatoolkit
}:

stdenv.mkDerivation rec {
  name = "cudatoolkit-${cudatoolkit.majorVersion}-cudnn-${version}";

  inherit version;

  src = requireFile rec {
    name = srcName;
    inherit sha256;
    message = ''
      This nix expression requires that ${name} is already part of the store.
      Register yourself to NVIDIA Accelerated Computing Developer Program, retrieve the cuDNN library
      at https://developer.nvidia.com/cudnn, and run the following command in the download directory:
      nix-prefetch-url file://\$PWD/${name}
    '';
  };

  installPhase = ''
    function fixRunPath {
      p=$(patchelf --print-rpath $1)
      patchelf --set-rpath "$p:${lib.makeLibraryPath [ stdenv.cc.cc ]}" $1
    }
    fixRunPath lib64/libcudnn.so

    mkdir -p $out
    cp -a include $out/include
    cp -a lib64 $out/lib64
  '';

  propagatedBuildInputs = [
    cudatoolkit
  ];

  passthru = {
    inherit cudatoolkit;
    majorVersion = lib.head (lib.splitString "." version);
  };

  meta = with stdenv.lib; {
    description = "NVIDIA CUDA Deep Neural Network library (cuDNN)";
    homepage = "https://developer.nvidia.com/cudnn";
    license = licenses.unfree;
    platforms = [ "x86_64-linux" ];
    maintainers = with maintainers; [ mdaiter ];
  };
}

17 temporary/tf-cudnn.nix Normal file
@@ -0,0 +1,17 @@
{ callPackage, cudatoolkit90 }:

let
  generic = args: callPackage (import ./tf-cudnn-generic.nix (removeAttrs args ["cudatoolkit"])) {
    inherit (args) cudatoolkit;
  };

in

{
  cudnn_cudatoolkit90 = generic rec {
    version = "7.0.5";
    cudatoolkit = cudatoolkit90;
    srcName = "cudnn-${cudatoolkit.majorVersion}-linux-x64-v7.tgz";
    sha256 = "03mbv4m5lhwnc181xz8li067pjzzhxqbxgnrfc68dffm8xj0fghs";
  };
}

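Because cuDNN is fetched with requireFile, the tarball must be added to the store by hand before these expressions evaluate. A sketch of the manual step implied by the message in tf-cudnn-generic.nix, using the file name that srcName expands to for cudatoolkit90 (shell commands are illustrative, not part of the commit):

# Run in the directory holding cudnn-9.0-linux-x64-v7.tgz, downloaded from
# https://developer.nvidia.com/cudnn (NVIDIA developer account required).
nix-prefetch-url file://$PWD/cudnn-9.0-linux-x64-v7.tgz
# The printed hash should match the sha256 pinned in tf-cudnn.nix.
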
16 temporary/tf-gast.nix Normal file
@@ -0,0 +1,16 @@
{ stdenv, fetchPypi, buildPythonPackage, astunparse }:

buildPythonPackage rec {
  pname = "gast";
  version = "0.2.0";
  src = fetchPypi {
    inherit pname version;
    sha256 = "0c296xm1vz9x4w4inmdl0k8mnc0i9arw94si2i7pglpc461r0s3h";
  };
  checkInputs = [ astunparse ];
  meta = with stdenv.lib; {
    description = "GAST provides a compatibility layer between the AST of various Python versions, as produced by ast.parse from the standard ast module.";
    license = licenses.bsd3;
    maintainers = with maintainers; [ jyp ];
  };
}

34 temporary/tf-hashes-1.9.0.nix Normal file
@@ -0,0 +1,34 @@
{
  linux_py_27_cpu = {
    url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp27-none-linux_x86_64.whl";
    sha256 = "1dvmajv5ddgzrazdnxpfhk9dkj0lfiviw4jmvk00d4q5v68z6ihg";
  };
  linux_py_35_cpu = {
    url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp35-cp35m-linux_x86_64.whl";
    sha256 = "07ilrxbhz9p3xwqhl2p8c40y1gsq68x10f34pzayrvcg2i52bvpv";
  };
  linux_py_36_cpu = {
    url = "https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp36-cp36m-linux_x86_64.whl";
    sha256 = "0x2l64ab7i8nr0dzvsryblhn869qyjb85xkhy69nwahqswb68hxl";
  };
  linux_py_27_gpu = {
    url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp27-none-linux_x86_64.whl";
    sha256 = "0q7i82jgy0mzn2mw8i1z0pvv7ssi6m7zqkkjg2i4zxpk8djg8k6z";
  };
  linux_py_35_gpu = {
    url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp35-cp35m-linux_x86_64.whl";
    sha256 = "0nhq2s8fanm095x1sc9h40dvqcv9bc2aj47crv3c2sajbj7dn43g";
  };
  linux_py_36_gpu = {
    url = "https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp36-cp36m-linux_x86_64.whl";
    sha256 = "1m4b9cd8ghghqxaqrlzq7j9499ddidgr78bx0c20pgyrzg38jiz1";
  };
  mac_py_2_cpu = {
    url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py2-none-any.whl";
    sha256 = "1djj0xahzrmxhfc6kyjx1lkyfh7jhq3ix0gz9j3iq4smb3ca9z01";
  };
  mac_py_3_cpu = {
    url = "https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py3-none-any.whl";
    sha256 = "0igffy7r0d8mhkn4pybp19jmc25alfpfl4k4fxh2s3rvgii8gk22";
  };
}

39 temporary/tf-tensorboard.nix Normal file
@@ -0,0 +1,39 @@
{ stdenv, lib, fetchPypi, buildPythonPackage, isPy3k
, numpy
, werkzeug
, protobuf
, grpcio
, markdown
, futures
}:

# tensorflow/tensorboard is built from a downloaded wheel, because
# https://github.com/tensorflow/tensorboard/issues/719 blocks
# buildBazelPackage.

buildPythonPackage rec {
  pname = "tensorflow-tensorboard";
  version = "1.9.0";
  format = "wheel";

  src = fetchPypi ({
    pname = "tensorboard";
    inherit version;
    format = "wheel";
  } // (if isPy3k then {
    python = "py3";
    sha256 = "42a04637a636e16054b065907c81396b83a9702948ecd14218f19dc5cf85de98";
  } else {
    python = "py2";
    sha256 = "97661706fbe857c372405e0f5bd7c3db2197b5e70cec88f6924b726fde65c2c1";
  }));

  propagatedBuildInputs = [ numpy werkzeug protobuf markdown grpcio ] ++ lib.optional (!isPy3k) futures;

  meta = with stdenv.lib; {
    description = "TensorFlow's Visualization Toolkit";
    homepage = http://tensorflow.org;
    license = licenses.asl20;
    maintainers = with maintainers; [ abbradar ];
  };
}

91 temporary/tf.nix Normal file
@@ -0,0 +1,91 @@
{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, isPy36, pythonOlder
, astor
, gast
, numpy
, six
, termcolor
, protobuf
, absl-py
, grpcio
, mock
, backports_weakref
, enum34
, tensorflow-tensorboard
, cudaSupport ? false
, cudatoolkit ? null
, cudnn ? null
, nvidia_x11 ? null
, zlib
, python
, symlinkJoin
}:

# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain

assert cudaSupport -> cudatoolkit != null
                   && cudnn != null
                   && nvidia_x11 != null;

let
  cudatoolkit_joined = symlinkJoin {
    name = "unsplit_cudatoolkit";
    paths = [ cudatoolkit.out
              cudatoolkit.lib ];
  };

in buildPythonPackage rec {
  pname = "tensorflow";
  version = "1.9.0";
  format = "wheel";

  src = let
    pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) "${python.majorVersion}";
    pyver = if stdenv.isDarwin then builtins.substring 0 1 pyVerNoDot else pyVerNoDot;
    platform = if stdenv.isDarwin then "mac" else "linux";
    unit = if cudaSupport then "gpu" else "cpu";
    key = "${platform}_py_${pyver}_${unit}";
    dls = import ./tf-hashes-1.9.0.nix;
  in fetchurl dls.${key};

  propagatedBuildInputs = [ protobuf numpy termcolor grpcio six astor absl-py gast tensorflow-tensorboard ]
    ++ lib.optional (!isPy3k) mock
    ++ lib.optionals (pythonOlder "3.4") [ backports_weakref enum34 ];

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorflow-tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs.
  # https://github.com/tensorflow/tensorflow/blob/v1.7.1/tensorflow/tools/pip_package/setup.py#L79
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  installFlags = "--no-dependencies"; # tensorflow wants setuptools 39, can't allow that.

  # Note that we need to run *after* the fixup phase because the
  # libraries are loaded at runtime. If we run in preFixup then
  # patchelf --shrink-rpath will remove the cuda libraries.
  postFixup = let
    rpath = stdenv.lib.makeLibraryPath
      ([ stdenv.cc.cc.lib zlib ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn nvidia_x11 ]);
  in
    lib.optionalString (stdenv.isLinux) ''
      rrPath="$out/${python.sitePackages}/tensorflow/:${rpath}"
      internalLibPath="$out/${python.sitePackages}/tensorflow/python/_pywrap_tensorflow_internal.so"
      find $out -name '*${stdenv.hostPlatform.extensions.sharedLibrary}' -exec patchelf --set-rpath "$rrPath" {} \;
    '';

  meta = with stdenv.lib; {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = http://tensorflow.org;
    license = licenses.asl20;
    maintainers = with maintainers; [ jyp abbradar ];
    platforms = with platforms; linux ++ lib.optionals (!cudaSupport) darwin;
    # Python 2.7 build uses different string encoding.
    # See https://github.com/NixOS/nixpkgs/pull/37044#issuecomment-373452253
    broken = stdenv.isDarwin && !isPy3k;
  };
}

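The src attribute in tf.nix indexes tf-hashes-1.9.0.nix with a key assembled from platform, Python version, and CUDA support. A small standalone sketch (illustrative values only) of what that key works out to on 64-bit Linux with Python 3.6 and cudaSupport = true:

# Mirrors the key construction in tf.nix's `src`; evaluable with nix-instantiate --eval.
let
  platform = "linux";  # stdenv.isDarwin would give "mac"
  pyver    = "36";     # python.majorVersion ("3.6") with the dot stripped
  unit     = "gpu";    # cudaSupport -> "gpu", otherwise "cpu"
in "${platform}_py_${pyver}_${unit}"  # evaluates to "linux_py_36_gpu"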