summary refs log tree commit diff
path: root/pkgs/development/python-modules/scrapy
diff options
context:
space:
mode:
authorMario Rodas <marsam@users.noreply.github.com>2018-12-22 22:21:54 -0500
committerMario Rodas <marsam@users.noreply.github.com>2018-12-22 22:33:16 -0500
commit706aa7127efe3f47068dfad29b90b9b6d87a5abf (patch)
tree400138489b50b43aab7313eced175bee9a80f534 /pkgs/development/python-modules/scrapy
parent3f9ffdde5a37956f35f5a058cfc9138c071c731a (diff)
downloadnixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.tar
nixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.tar.gz
nixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.tar.bz2
nixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.tar.lz
nixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.tar.xz
nixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.tar.zst
nixpkgs-706aa7127efe3f47068dfad29b90b9b6d87a5abf.zip
pythonPackages.scrapy: fix build on Python 3.7
Diffstat (limited to 'pkgs/development/python-modules/scrapy')
-rw-r--r--pkgs/development/python-modules/scrapy/default.nix34
1 file changed, 24 insertions, 10 deletions
diff --git a/pkgs/development/python-modules/scrapy/default.nix b/pkgs/development/python-modules/scrapy/default.nix
index fcac013b6de..8bb332fa9da 100644
--- a/pkgs/development/python-modules/scrapy/default.nix
+++ b/pkgs/development/python-modules/scrapy/default.nix
@@ -1,6 +1,6 @@
 { stdenv, buildPythonPackage, fetchPypi, glibcLocales, mock, pytest, botocore,
   testfixtures, pillow, six, twisted, w3lib, lxml, queuelib, pyopenssl,
-  service-identity, parsel, pydispatcher, cssselect, lib }:
+  service-identity, parsel, pydispatcher, cssselect, lib, fetchpatch }:
 buildPythonPackage rec {
   version = "1.5.1";
   pname = "Scrapy";
@@ -10,18 +10,26 @@ buildPythonPackage rec {
     six twisted w3lib lxml cssselect queuelib pyopenssl service-identity parsel pydispatcher
   ];
 
-  # Scrapy is usually installed via pip where copying all
-  # permissions makes sense. In Nix the files copied are owned by
-  # root and readonly. As a consequence scrapy can't edit the
-  # project templates.
-  patches = [ ./permissions-fix.patch ];
+  patches = [
+    # Scrapy is usually installed via pip where copying all
+    # permissions makes sense. In Nix the files copied are owned by
+    # root and readonly. As a consequence scrapy can't edit the
+    # project templates.
+    ./permissions-fix.patch
+    # fix python37 issues. Remove with the next release
+    (fetchpatch {
+      url = https://github.com/scrapy/scrapy/commit/f4f39057cbbfa4daf66f82061e57101b88d88d05.patch;
+      sha256 = "1f761qkji362i20i5bzcxz44sihvl29prm02i5l2xyhgl1hp91hv";
+    })
+  ];
 
   LC_ALL="en_US.UTF-8";
 
+  # Ignore proxy tests because requires mitmproxy
+  # Ignore test_retry_dns_error because tries to resolve an invalid dns and weirdly fails with "Reactor was unclean"
+  # Ignore xml encoding test on darwin because lxml can't find encodings https://bugs.launchpad.net/lxml/+bug/707396
   checkPhase = ''
-    py.test --ignore=tests/test_linkextractors_deprecated.py --ignore=tests/test_proxy_connect.py ${lib.optionalString stdenv.isDarwin "--ignore=tests/test_utils_iterators.py"}
-    # The ignored tests require mitmproxy, which depends on protobuf, but it's disabled on Python3
-    # Ignore iteration test, because lxml can't find encodings on darwin https://bugs.launchpad.net/lxml/+bug/707396
+    pytest --ignore=tests/test_linkextractors_deprecated.py --ignore=tests/test_proxy_connect.py --deselect tests/test_crawl.py::CrawlTestCase::test_retry_dns_error ${lib.optionalString stdenv.isDarwin "--deselect tests/test_utils_iterators.py::LxmlXmliterTestCase::test_xmliter_encoding"}
   '';
 
   src = fetchPypi {
@@ -29,11 +37,17 @@ buildPythonPackage rec {
     sha256 = "5a398bf6818f87dcc817c919408a195f19ba46414ae12f259119336cfa862bb6";
   };
 
+  postInstall = ''
+    install -m 644 -D extras/scrapy.1 $out/share/man/man1/scrapy.1
+    install -m 644 -D extras/scrapy_bash_completion $out/share/bash-completion/completions/scrapy
+    install -m 644 -D extras/scrapy_zsh_completion $out/share/zsh/site-functions/_scrapy
+  '';
+
   meta = with lib; {
     description = "A fast high-level web crawling and web scraping framework, used to crawl websites and extract structured data from their pages";
     homepage = https://scrapy.org/;
     license = licenses.bsd3;
-    maintainers = with maintainers; [ drewkett ];
+    maintainers = with maintainers; [ drewkett marsam ];
     platforms = platforms.unix;
   };
 }