#!/usr/bin/env python3
# Written by X41
#
#            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
#                    Version 2, December 2004
#
# Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
# edited by Toonfish
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
#            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
#   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
#  0. You just DO WHAT THE FUCK YOU WANT TO.
### HOW TO USE ###
# Look for your API key at https://derpibooru.org/users/edit
# Enter the tags like you would in Derpibooru (separated with a comma)
# Files will be saved in the folder where you put the script
# Updated by Aroddo 4r0dd0@gmail.com
# Thanks to the original author!
#
# Changes:
# - Streamlined for faster artist gallery downloads - you basically enter artist name and go.
# - Predetermined taglist. I basically excluded all the disgusting shit. Personal taste, though.
#   Change the "tags" string according to your own needs.
# - Creates directory based on artist name in current location.
#   Customize the "newdir" string according to your own needs.
# - Skips already downloaded images. Unless you keep the original name or the source name doesn't change,
#   you can use this script to comfortably keep your files up-to-date.
# - Fixed the "Something went wrong with picture XX" bug. Unless something really fucks up, you shouldn't
#   see that message again.
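#
# Example run (illustrative only; "some artist" is a placeholder, not a real artist tag):
#   artist name:some artist
#   Enter the first page to download: 1
#   Enter the last page to download: 3
#   -> images land in a new "[r34] some artist (mlp)" folder next to the script,
#      and files already present in that folder are skipped.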
import json
import urllib.request
import sys
import os

key = "pVjzJNQVb4XddpR2_fes"
if key == "":
    print("You MUST get your own API-key at https://derpibooru.org/users/edit")
    print("Then you MUST add your API-key to this script, or it won't work!")
    input("Press enter to quit.")
    exit()
artist = input("artist name:")
newdir = "[r34] " + artist + " (mlp)"
if not os.path.exists(newdir) : os.mkdir(newdir)
os.chdir(newdir)

links = []
pic = 0
# default taglist:
# ",-safe,-amputee,-diaper,-guro,-vore,-diaper,-scat,-birth"
# The "-safe" tag ensures you get to download the saucy stuff, since all sexual
content is consider 'unsafe'
# Alternatively, replace "-safe" with "explicit".
# The other tags (",-amputee,-diaper,-guro,-vore,-diaper,-scat,-birth") ensure t
hat no pictures of these
# fetishes land in my download directory. Delete or add according to your own ta
stes.
tags = ("artist:" + artist + ",-safe,-amputee,-diaper,-guro,-vore,-diaper,-scat,
-birth").split(",")
for index in range(0, len(tags)):
aces
if tags[index][0] == ' ':
tags[index] = tags[index][1:]
if tags[index][-1] == ' ':
tags[index] = tags[index][:-1]
tags[index] = tags[index].replace(' ', '+')
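# Illustrative example ("some artist" is a placeholder): entering some artist at the prompt
# yields tags == ['artist:some+artist', '-safe', '-amputee', '-diaper', '-guro', '-vore',
#                 '-diaper', '-scat', '-birth'] after the whitespace cleanup above.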
#scoreMin = int(input("Enter the minimum score: "))
scoreMin = 1
#scoreMax = int(input("Enter the maximum score: "))
scoreMax = 10000
#sortByScore = str(input("Sort by score? (y/n): "))
sortByScore = "n"

print ("The page numbers correspondend directly to the pages you would see in th
e derpibooru-search")
print("if you used the following tags (determined by your taglist):")
print(tags)
print()
page = max(int(input("Enter the first page to download: ")),1)
lastPage = int(input("Enter the last page to download: "))
print()
data = ""
newLink = ""
while page <= lastPage:
    url = "http://derpibooru.org/search.json?key="      # base URL
    url += key + "&q="                                   # add API key
    for tag in tags:                                     # add tags
        url += tag + "%2C+"
    url = url[:-4] + "&min_score=" + str(scoreMin)       # add min score
    url += "&max_score=" + str(scoreMax)                 # add max score
    if sortByScore == "y":                               # sort by score or not
        url += "&sf=score&sd=desc"
    url += "&page=" + str(page)                          # add page
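    # Illustrative only: with the defaults above (scoreMin=1, scoreMax=10000, no sorting)
    # and the placeholder artist "some artist", page 1 would be requested roughly as
    #   http://derpibooru.org/search.json?key=<your key>&q=artist:some+artist%2C+-safe%2C+...%2C+-birth&min_score=1&max_score=10000&page=1
    # (the trailing "%2C+" after the last tag is what the url[:-4] above trims off).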

print("Downloading page " + str(page))


json_data = urllib.request.urlopen(url).read().decode("utf-8") # grab c
ontent of the url's page
if len(str(json_data)) <= 20:
# catch
emtpy page

break
data = str(json.loads(json_data))
while len(data) > 50:
x = (data.find("//derpicdn.net/img/view/"))
tart of an image
y = min((data.find('score', x)-3),(data.find("', '",x)))
he end
newLink = ("http:" + data[x:y])
t link
if (newLink not in links) and (newLink != "http:"):
links.append(newLink)
data = data[y:]
link from data
page += 1

if len(links) == 0:
    print("No pics with that tag found")
    exit()
print()

for link in links:
    pic += 1
    sLink = link.split("'")[0]
    sFile = link.split("/")[8].split("'")[0]
    print("Downloading pic " + str(pic) + " of " + str(len(links)))
    print("Link: %s" % sLink)
    try:
        if os.path.exists(sFile):
            print("File exists! Skipping: %s \n" % sFile)
        else:
            print("Retrieving new file: %s \n" % sFile)
            urllib.request.urlretrieve(sLink, sFile)
    except:
        if pic % 100 != 99:
            print("Something went wrong with picture " + str(pic) + ". Continuing with other pics.")

print("All done. May Celestia be with you.")
input("Press enter to quit.")
exit()
